|
{ |
|
"paper_id": "P10-1012", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T09:20:41.973615Z" |
|
}, |
|
"title": "Automatic Evaluation Method for Machine Translation using Noun-Phrase Chunking", |
|
"authors": [ |
|
{ |
|
"first": "Hiroshi", |
|
"middle": [], |
|
"last": "Echizen-Ya", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Hokkai-Gakuen University", |
|
"location": { |
|
"addrLine": "S 26-Jo, W 11-chome, Chuo-ku", |
|
"postCode": "064-0926", |
|
"settlement": "Sapporo", |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Kenji", |
|
"middle": [], |
|
"last": "Araki", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Hokkaido University", |
|
"location": { |
|
"addrLine": "N 14-Jo, W 9-Chome, Kita-ku", |
|
"postCode": "060-0814", |
|
"settlement": "Sapporo", |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "As described in this paper, we propose a new automatic evaluation method for machine translation using noun-phrase chunking. Our method correctly determines the matching words between two sentences using corresponding noun phrases. Moreover, our method determines the similarity between two sentences in terms of the noun-phrase order of appearance. Evaluation experiments were conducted to calculate the correlation among human judgments, along with the scores produced using automatic evaluation methods for MT outputs obtained from the 12 machine translation systems in NTCIR-7. Experimental results show that our method obtained the highest correlations among the methods in both sentence-level adequacy and fluency.", |
|
"pdf_parse": { |
|
"paper_id": "P10-1012", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "As described in this paper, we propose a new automatic evaluation method for machine translation using noun-phrase chunking. Our method correctly determines the matching words between two sentences using corresponding noun phrases. Moreover, our method determines the similarity between two sentences in terms of the noun-phrase order of appearance. Evaluation experiments were conducted to calculate the correlation among human judgments, along with the scores produced using automatic evaluation methods for MT outputs obtained from the 12 machine translation systems in NTCIR-7. Experimental results show that our method obtained the highest correlations among the methods in both sentence-level adequacy and fluency.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "High-quality automatic evaluation has become increasingly important as various machine translation systems have developed. The scores of some automatic evaluation methods can obtain high correlation with human judgment in document-level automatic evaluation (Coughlin, 2007) . However, sentence-level automatic evaluation is insufficient. A great gap exists between language processing of automatic evaluation and the processing by humans. Therefore, in recent years, various automatic evaluation methods particularly addressing sentence-level automatic evaluations have been proposed. Methods based on word strings (e.g., BLEU (Papineni et al., 2002) , NIST(NIST, 2002) , METEOR (Banerjee and Lavie., 2005) , ROUGE-L (Lin and Och, 2004) , and IMPACT(Echizen-ya and Araki, 2007)) calculate matching scores using only common words between MT outputs and references from bilingual humans. However, these methods cannot determine the correct word correspondences sufficiently because they fail to focus solely on phrase correspondences. Moreover, various methods using syntactic analytical tools (Pozar and Charniak, 2006; Mutton et al., 2007; Mehay and Brew, 2007) are proposed to address the sentence structure. Nevertheless, those methods depend strongly on the quality of the syntactic analytical tools.", |
|
"cite_spans": [ |
|
{ |
|
"start": 258, |
|
"end": 274, |
|
"text": "(Coughlin, 2007)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 628, |
|
"end": 651, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 654, |
|
"end": 670, |
|
"text": "NIST(NIST, 2002)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 680, |
|
"end": 707, |
|
"text": "(Banerjee and Lavie., 2005)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 718, |
|
"end": 737, |
|
"text": "(Lin and Och, 2004)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 1093, |
|
"end": 1119, |
|
"text": "(Pozar and Charniak, 2006;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 1120, |
|
"end": 1140, |
|
"text": "Mutton et al., 2007;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 1141, |
|
"end": 1162, |
|
"text": "Mehay and Brew, 2007)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "As described herein, for use with MT systems, we propose a new automatic evaluation method using noun-phrase chunking to obtain higher sentence-level correlations. Using noun phrases produced by chunking, our method yields the correct word correspondences and determines the similarity between two sentences in terms of the noun phrase order of appearance. Evaluation experiments using MT outputs obtained by 12 machine translation systems in NTCIR-7 (Fujii et al., 2008) demonstrate that the scores obtained using our system yield the highest correlation with the human judgments among the automatic evaluation methods in both sentence-level adequacy and fluency. Moreover, the differences between correlation coefficients obtained using our method and other methods are statistically significant at the 5% or lower significance level for adequacy. Results confirmed that our method using noun-phrase chunking is effective for automatic evaluation for machine translation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 451, |
|
"end": 471, |
|
"text": "(Fujii et al., 2008)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The system based on our method has four processes. First, the system determines the corre-spondences of noun phrases between MT outputs and references using chunking. Secondly, the system calculates word-level scores based on the correct matched words using the determined correspondences of noun phrases. Next, the system calculates phrase-level scores based on the noun-phrase order of appearance. The system calculates the final scores combining word-level scores and phrase-level scores.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automatic Evaluation Method using Noun-Phrase Chunking", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The system obtains the noun phrases from each sentence by chunking. It then determines corresponding noun phrases between MT outputs and references calculating the similarity for two noun phrases by the PER score (Su et al., 1992) . In that case, PER scores of two kinds are calculated. One is the ratio of the number of match words between an MT output and reference for the number of all words of the MT output. The other is the ratio of the number of match words between the MT output and reference for the number of all words of the reference. The similarity is obtained as an F -measure between two PER scores. The high score represents that the similarity between two noun phrases is high. Figure 1 presents an example of the determination of the corresponding noun phrases. (2) Determination of corresponding noun phrases In Fig. 1 , \"the amount\", \"the crowning fall\" and \"the end\" are obtained as noun phrases in MT output by chunking, and \"it\", \"the end part\", \"the amount\" and \"crowning drop\" are obtained in the reference by chunking. Next, the system determines the corresponding noun phrases from these noun phrases between the MT output and reference. The score between \"the end\" and \"the end part\" is the highest among the scores between \"the end\" in the MT output and \"it\", \"the end part\", \"the amount\", and \"crowning drop\" in the reference. Moreover, the score between \"the end part\" and \"the end\" is the highest among the scores between \"the end part\" in reference and \"the amount\", \"the crowning fall\", \"the end\" in the MT output. Consequently, \"the end\" and \"the end part\" are selected as noun phrases with the highest mutual scores: \"the end\" and \"the end part\" are determined as one corresponding noun phrase. In Fig. 1 , \"the amount\" in the MT output and \"the amount\" in reference, and \"the crowning fall\" in the MT output and \"crowning drop\" in the reference also are determined as the respective corresponding noun phrases. The noun phrase for which the score between it and other noun phrases is 0.0 (e.g., \"it\" in reference) has no corresponding noun phrase. The use of the noun phrases is effective because the frequency of the noun phrases is higher than those of other phrases. The verb phrases are not used for this study, but they can also be generated by chunking. It is difficult to determine the corresponding verb phrases correctly because the words in each verb phrase are often fewer than the noun phrases.", |
|
"cite_spans": [ |
|
{ |
|
"start": 213, |
|
"end": 230, |
|
"text": "(Su et al., 1992)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 696, |
|
"end": 704, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 832, |
|
"end": 838, |
|
"text": "Fig. 1", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 1735, |
|
"end": 1741, |
|
"text": "Fig. 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Correspondence of Noun Phrases by Chunking", |
|
"sec_num": "2.1" |
|
}, |
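
{

"text": "To make this procedure concrete, the following is a minimal Python sketch of the correspondence step, assuming noun phrases are given as lists of words. The names per_score, np_similarity, and corresponding_noun_phrases are hypothetical, and the greedy highest-score-first pass is one simple way to realize the mutual-highest-score selection described above, not necessarily the authors' exact procedure.\n\nfrom collections import Counter\n\ndef per_score(candidate, reference):\n    # Ratio of candidate words that also occur in the reference, counting\n    # repeated words at most as often as they appear there.\n    ref_counts = Counter(reference)\n    matches = sum(min(c, ref_counts[w]) for w, c in Counter(candidate).items())\n    return matches / len(candidate) if candidate else 0.0\n\ndef np_similarity(np_mt, np_ref):\n    # F-measure of the two directional PER-based ratios (Sec. 2.1).\n    p = per_score(np_mt, np_ref)\n    r = per_score(np_ref, np_mt)\n    return 2 * p * r / (p + r) if p + r > 0 else 0.0\n\ndef corresponding_noun_phrases(nps_mt, nps_ref):\n    # Pair the highest-scoring noun phrases first; each phrase is used at\n    # most once, and phrases whose best score is 0.0 (e.g. 'it') stay unpaired.\n    cand = sorted(((np_similarity(m, r), i, j)\n                   for i, m in enumerate(nps_mt)\n                   for j, r in enumerate(nps_ref)), reverse=True)\n    used_mt, used_ref, pairs = set(), set(), []\n    for s, i, j in cand:\n        if s > 0.0 and i not in used_mt and j not in used_ref:\n            used_mt.add(i)\n            used_ref.add(j)\n            pairs.append((i, j))\n    return sorted(pairs)\n\n# Fig. 1 example: prints [(0, 2), (1, 3), (2, 1)]\nprint(corresponding_noun_phrases(\n    [['the', 'amount'], ['the', 'crowning', 'fall'], ['the', 'end']],\n    [['it'], ['the', 'end', 'part'], ['the', 'amount'], ['crowning', 'drop']]))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Correspondence of Noun Phrases by Chunking",

"sec_num": "2.1"

},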
|
{ |
|
"text": "The system calculates the word-level scores between MT output and reference using the corresponding noun phrases. First, the system determines the common words based on Longest Common Subsequence (LCS). The system selects only one LCS route when several LCS routes exist. In such cases, the system calculates the Route Score (RS) using the following Eqs. (1) and (2):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word-level Score", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "RS = c\u2208LCS w\u2208c weight(w) \u03b2 (1) weight(w) = \u23a7 \u23aa \u23aa \u23aa \u23a8 \u23aa \u23aa \u23aa \u23a9", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word-level Score", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "words in corresponding 2 noun phrase words in non 1 corresponding noun phrase", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word-level Score", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "(2) In Eq. 1, \u03b2 is a parameter for length weighting of common parts; it is greater than 1.0. Figure 2 portrays an example of determination of the common parts. In the first process of Fig. 2 , LCS is 7. In this example, several LCS routes exist. The system selects the LCS route which has \",\", \"the amount of\", \"crowning\", \"is\", and \".\" as the common parts. The common part is the part for which the common words appear continuously. In contrast, IMPACT selects a different LCS route that includes \", the\", \"amount of\", \"crowning\", \"is\", and \".\" as the common parts. In IMPACT, using no analytical knowledge, the LCS route is determined using the information of the number of words in the common parts and the position of the common parts. The RS for LCS route selected using our method is 32 (= 1 2.0 + (2 + 2 + 1) 2.0 + 2 2.0 + 1 2.0 + 1 2.0 ) when \u03b2 is 2.0. The RS for LCS route selected by IMPACT is 19 (= (1 + 1) 2.0 + (2 + 1) 2.0 + 2 2.0 + 1 2.0 + 1 2.0 ). In the LCS route selected by IMPACT, the weight of \"the\" in the common part \", the\" is 1 because \"the\" in the reference is not included in the corresponding noun phrase. In the LCS route selected using our method, the weight of \"the\" in \"the amount of\" is 2 because \"the\" in MT output and \"the\" in the reference are included in the corresponding noun phrase \"NP1\". Therefore, the system based on our method can select the correct LCS route.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 93, |
|
"end": 101, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 184, |
|
"end": 190, |
|
"text": "Fig. 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Word-level Score", |
|
"sec_num": "2.2" |
|
}, |
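
{

"text": "To see how a route is preferred, the following sketch of Eqs. (1) and (2) computes RS for a candidate segmentation into common parts; route_score is a hypothetical name, and the per-word weights (2 for a word inside a corresponding noun phrase, 1 otherwise) are assumed to be given.\n\ndef route_score(common_parts, beta=2.0):\n    # RS: sum over common parts c of (sum of weight(w) for w in c) ** beta.\n    return sum(sum(weights) ** beta for weights in common_parts)\n\n# Fig. 2 with beta = 2.0: our route scores 32, the IMPACT route scores 19.\nours = [[1], [2, 2, 1], [2], [1], [1]]    # ',' / 'the amount of' / 'crowning' / 'is' / '.'\nimpact = [[1, 1], [2, 1], [2], [1], [1]]  # ', the' / 'amount of' / 'crowning' / 'is' / '.'\nassert route_score(ours) == 32.0 and route_score(impact) == 19.0",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Word-level Score",

"sec_num": "2.2"

},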
|
{ |
|
"text": "Moreover, the word-level score is calculated using the common parts in the selected LCS route as the following Eqs. (3), (4), and (5).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word-level Score", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "R wd = \u239b \u239d RN i=0 \u03b1 i c\u2208LCS length(c) \u03b2 m \u03b2 \u239e \u23a0 1 \u03b2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word-level Score", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "(3) (1) First process for determination of common parts : LCS = 7", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word-level Score", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P wd = \u239b \u239d RN i=0 \u03b1 i c\u2208LCS length(c) \u03b2 n \u03b2 \u239e \u23a0 1 \u03b2", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Word-level Score", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "(2) Second process for determination of common parts : LCS=3 IMPACT 1 2.0 (2+2+1) 2.0 2 2.0 1 2.0 1 2.0 (1+1) 2.0 (2+1) 2.0 2 2.0 1 2.0 1 2.0 Figure 2 : Example of common-part determination.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 142, |
|
"end": 150, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Word-level Score", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "score wd = (1 + \u03b3 2 )R wd P wd R wd + \u03b3 2 P wd", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Word-level Score", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Equation 3represents recall and Eq. (4) represents precision. Therein, m signifies the word number of the reference in Eq. 3, and n stands for the word number of the MT output in Eq. (4). Here, RN denotes the repetition number of the determination process of the LCS route, and i, which has initial value 0, is the counter for RN . In Eqs. 3and 4, \u03b1 is a parameter for the repetition process of the determination of LCS route, and is less than 1.0. Therefore, R wd and P wd becomes small as the appearance order of the common parts between MT output and reference is different. Moreover, length(c) represents the number of words in each common part; \u03b2 is a parameter related to the length weight of common parts, as in Eq. (1). In this case, the weight of each common word in the common part is 1. The system calculates score wd as the wordlevel score in Eq. (5). In Eq. 5, \u03b3 is determined as P wd /R wd . The score wd is between 0.0 and 1.0.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word-level Score", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "In the first process of Fig. 2 , \u03b1 i c\u2208LCS length(c) \u03b2 is 13.0 (=0.5 0 \u00d7 (1 2.0 + 3 2.0 + 1 2.0 + 1 2.0 + 1 2.0 )) when \u03b1 and \u03b2 are 0.5 and 2.0, respectively. In this case, the counter i is 0. Moreover, in the second process of Fig. 2, \u03b1 i c\u2208LCS length(c) \u03b2 is 2.5 (=0.5 1 \u00d7 (1 2.0 +2 2.0 )) using two common parts \"the\" and \"the end\", except the common parts determined using the first process. In Fig. 2 , RN is 1 because the system finishes calculating \u03b1 i c\u2208LCS length(c) \u03b2 when counter i became 1: this means that all common parts were processed until the second process. As a result, R wd is 0.1969 (= (13.0 + 2.5)/20 2.0 = \u221a 0.0388), and P wd is 0.2625 (= (13.0 + 2.5)/15 2.0 = \u221a 0.0689). Consequently, score wd is 0.2164 (= (1+1.3332 2 )\u00d70. 1969\u00d70.2625 0.1969+1.3332 2 \u00d70.2625 ). In this case, \u03b3 becomes 1.3332 (= 0.2625 0.1969 ). The system can determine the matching words correctly using the corresponding noun phrases between the MT output and the reference.", |
|
"cite_spans": [ |
|
{ |
|
"start": 751, |
|
"end": 762, |
|
"text": "1969\u00d70.2625", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 24, |
|
"end": 32, |
|
"text": "Fig. 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 230, |
|
"end": 241, |
|
"text": "Fig. 2, \u03b1 i", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 401, |
|
"end": 407, |
|
"text": "Fig. 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Word-level Score", |
|
"sec_num": "2.2" |
|
}, |
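
{

"text": "The worked example above can be reproduced with a direct transcription of Eqs. (3)-(5); word_level_score is a hypothetical name, and the lengths of the common parts found in each repetition round of the LCS determination are assumed to be given.\n\ndef word_level_score(rounds, m, n, alpha=0.5, beta=2.0):\n    # rounds[i] lists the lengths of the common parts found in round i\n    # of the repeated LCS determination (i = 0 .. RN).\n    s = sum(alpha ** i * sum(l ** beta for l in lengths)\n            for i, lengths in enumerate(rounds))\n    r_wd = (s / m ** beta) ** (1.0 / beta)  # Eq. (3): recall over m reference words\n    p_wd = (s / n ** beta) ** (1.0 / beta)  # Eq. (4): precision over n MT-output words\n    gamma = p_wd / r_wd\n    return (1 + gamma ** 2) * r_wd * p_wd / (r_wd + gamma ** 2 * p_wd)  # Eq. (5)\n\n# Sec. 2.2 example: rounds [[1, 3, 1, 1, 1], [1, 2]], m = 20, n = 15 gives\n# 13.0 + 2.5 = 15.5, so R_wd = 0.1969 and P_wd = 0.2625; prints 0.2163\n# (the paper reports 0.2164 because it rounds the intermediate values).\nprint(round(word_level_score([[1, 3, 1, 1, 1], [1, 2]], 20, 15), 4))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Word-level Score",

"sec_num": "2.2"

},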
|
{ |
|
"text": "The system calculates score wd multi using R wd multi and P wd multi which are, respectively, maximum R wd and P wd when multiple references are used as the following Eqs. (6), (7) and (8). In Eq. (8), \u03b3 is determined as P wd multi /R wd multi . The score wd multi is between 0.0 and 1.0.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word-level Score", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "R wd multi = max u j=1 \u239b \u239c \u239c \u239c \u239c \u239c \u239c \u239d \u239b \u239c \u239c \u239c \u239c \u239c \u239d RN i=0 \u03b1 i c\u2208LCS length(c) \u03b2 j m \u03b2 j \u239e \u239f \u239f \u239f \u239f \u239f \u23a0 1 \u03b2 \u239e \u239f \u239f \u239f \u239f \u239f \u239f \u23a0 (6) P wd multi = max u j=1 \u239b \u239c \u239c \u239c \u239c \u239c \u239c \u239d \u239b \u239c \u239c \u239c \u239c \u239c \u239d RN i=0 \u03b1 i c\u2208LCS length(c) \u03b2 j n \u03b2 j \u239e \u239f \u239f \u239f \u239f \u239f \u23a0 1 \u03b2 \u239e \u239f \u239f \u239f \u239f \u239f \u239f \u23a0 (7) score wd multi = (1 + \u03b3 2 R wd multi )P wd multi R wd multi + \u03b3 2 P wd multi (8)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word-level Score", |
|
"sec_num": "2.2" |
|
}, |
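
{

"text": "A sketch of the multiple-reference variant of Eqs. (6)-(8), under the same assumptions as the single-reference sketch; word_level_score_multi and the (rounds, m, n) triples are hypothetical.\n\ndef word_level_score_multi(refs, alpha=0.5, beta=2.0):\n    # refs: one (rounds, m, n) triple per reference, where rounds[i] lists\n    # the common-part lengths found in LCS round i against that reference.\n    def s(rounds):\n        return sum(alpha ** i * sum(l ** beta for l in ls)\n                   for i, ls in enumerate(rounds))\n    r = max((s(rounds) / m ** beta) ** (1.0 / beta) for rounds, m, n in refs)  # Eq. (6)\n    p = max((s(rounds) / n ** beta) ** (1.0 / beta) for rounds, m, n in refs)  # Eq. (7)\n    gamma = p / r\n    return (1 + gamma ** 2) * r * p / (r + gamma ** 2 * p)  # Eq. (8)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Word-level Score",

"sec_num": "2.2"

},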
|
{ |
|
"text": "The system calculates the phrase-level score using the noun phrases obtained by chunking. First, the system extracts only noun phrases from sentences. Then it generalizes each noun phrase as each word. Figure 3 presents examples of generalization by noun phrases. (1) Corresponding noun phrases", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 202, |
|
"end": 210, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Phrase-level Score", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "(2) Generalization by noun phrases MT output : NP1 NP2 NP3", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Phrase-level Score", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Reference : NP NP3 NP1 NP2 Figure 3 : Example of generalization by noun phrases. Figure 3 presents three corresponding noun phrases between the MT output and the reference. The noun phrase \"it\", which has no corresponding noun phrase, is expressed as \"NP\" in the reference. Consequently, the MT output is generalized as \"NP1 NP2 NP3\"; the reference is generalized as \"NP NP3 NP1 NP2\". Subsequently, the system obtains the phraselevel score between the generalized MT output and reference as the following Eqs. (9), (10), and (11). ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 27, |
|
"end": 35, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 81, |
|
"end": 89, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Phrase-level Score", |
|
"sec_num": "2.3" |
|
}, |
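
{

"text": "The generalization step just described can be sketched directly from the (mt_index, ref_index) pairs produced by the correspondence sketch of Sec. 2.1; generalize is a hypothetical name. The equations for the phrase-level score follow.\n\ndef generalize(num_phrases, pairs, side):\n    # A corresponding pair shares a numbered label (NP1, NP2, ...); an\n    # unmatched noun phrase is labeled plain NP.\n    labels = ['NP'] * num_phrases\n    for k, (i, j) in enumerate(pairs, start=1):\n        labels[i if side == 'mt' else j] = 'NP' + str(k)\n    return labels\n\npairs = [(0, 2), (1, 3), (2, 1)]   # from the Fig. 1 example\nprint(generalize(3, pairs, 'mt'))  # ['NP1', 'NP2', 'NP3']\nprint(generalize(4, pairs, 'ref')) # ['NP', 'NP3', 'NP1', 'NP2']",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Phrase-level Score",

"sec_num": "2.3"

},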
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "R np = \u239b \u239c \u239d RN i=0 \u03b1 i cnpp\u2208LCS length(cnpp) \u03b2 m cnp \u00d7 \u221a m no cnp \u03b2 \u239e \u239f \u23a0 1 \u03b2", |
|
"eq_num": "(9)" |
|
} |
|
], |
|
"section": "Phrase-level Score", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P np = \u239b \u239c \u239d RN i=0 \u03b1 i cnpp\u2208LCS length(cnpp) \u03b2 n cnp \u00d7 \u221a n no cnp \u03b2 \u239e \u239f \u23a0 1 \u03b2", |
|
"eq_num": "(10)" |
|
} |
|
], |
|
"section": "Phrase-level Score", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "score np = (1 + \u03b3 2 )R np P np R np + \u03b3 2 P np", |
|
"eq_num": "(11)" |
|
} |
|
], |
|
"section": "Phrase-level Score", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "In Eqs. (9) and (10), cnpp denotes the common noun phrase parts; m cnp and n cnp respectively signify the quantities of common noun phrases in the reference and MT output. Moreover, m no cnp and n no cnp are the quantities of noun phrases except the common noun phrases in the reference and MT output. The values of m no cnp and n no cnp are processed as 1 when no non-corresponding noun phrases exist. The square root used for m no cnp and n no cnp is to decrease the weight of the noncorresponding noun phrases. In Eq. 11, \u03b3 is determined as P np /R np . In Fig. 3 , R np and P np are 0.7071 (= 1\u00d72 2.0 +0.5\u00d71 2.0 (3\u00d71) 2.0 ) when \u03b1 is 0.5 and \u03b2 is 2.0. Therefore, score np is 0.7071.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 560, |
|
"end": 566, |
|
"text": "Fig. 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Phrase-level Score", |
|
"sec_num": "2.3" |
|
}, |
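
{

"text": "The phrase-level computation of Eqs. (9)-(11) follows the same pattern as the word-level sketch; phrase_level_score is a hypothetical name, and the common noun-phrase part lengths per LCS round over the generalized sequences are assumed to be given.\n\ndef phrase_level_score(rounds, m_cnp, n_cnp, m_no_cnp=1, n_no_cnp=1,\n                       alpha=0.5, beta=2.0):\n    # m_no_cnp and n_no_cnp default to 1, mirroring the convention that they\n    # are processed as 1 when no non-corresponding noun phrases exist.\n    s = sum(alpha ** i * sum(l ** beta for l in ls)\n            for i, ls in enumerate(rounds))\n    r_np = (s / (m_cnp * m_no_cnp ** 0.5) ** beta) ** (1.0 / beta)  # Eq. (9)\n    p_np = (s / (n_cnp * n_no_cnp ** 0.5) ** beta) ** (1.0 / beta)  # Eq. (10)\n    gamma = p_np / r_np\n    return (1 + gamma ** 2) * r_np * p_np / (r_np + gamma ** 2 * p_np)  # Eq. (11)\n\n# Fig. 3: 'NP1 NP2' is a round-0 common part and 'NP3' a round-1 common part;\n# three common noun phrases on each side, one unmatched 'NP' in the reference.\nprint(round(phrase_level_score([[2], [1]], 3, 3), 4))  # 0.7071",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Phrase-level Score",

"sec_num": "2.3"

},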
|
{ |
|
"text": "The system obtains score np multi calculating the average of score np when multiple references are used as the following Eq. (12).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Phrase-level Score", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "score np multi = u j=0 (score np ) j u", |
|
"eq_num": "(12)" |
|
} |
|
], |
|
"section": "Phrase-level Score", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "The system calculates the final score by combining the word-level score and the phraselevel score as shown in the following Eq. (13).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Final Score", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "score = score wd + \u03b4 \u00d7 score np 1 + \u03b4", |
|
"eq_num": "(13)" |
|
} |
|
], |
|
"section": "Final Score", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "Therein, \u03b4 represents a parameter for the weight of score np : it is between 0.0 and 1.0. The ratio of score wd to score np is 1:1 when \u03b4 is 1.0. Moreover, score wd multi and score np multi are used for Eq. (13) in multiple references. In Figs. 2 and 3 , the final score between the MT output and the reference is 0.4185 (= 0.2164+0.7\u00d70.7071", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 239, |
|
"end": 252, |
|
"text": "Figs. 2 and 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Final Score", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": ") when \u03b4 is 0.7. The system can realize high-quality automatic evaluation using both word-level information and phraselevel information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "1+0.7", |
|
"sec_num": null |
|
}, |
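
{

"text": "Combining the two running examples reproduces the final score; final_score is a hypothetical name.\n\ndef final_score(score_wd, score_np, delta=0.7):\n    # Eq. (13): delta in [0.0, 1.0] weights the phrase-level score.\n    return (score_wd + delta * score_np) / (1 + delta)\n\nprint(round(final_score(0.2164, 0.7071), 4))  # 0.4185, as in Sec. 2.4",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Final Score",

"sec_num": "2.4"

},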
|
{ |
|
"text": "We calculated the correlation between the scores obtained using our method and scores produced by human judgment. The system based on our method obtained the evaluation scores for 1,200 English output sentences related to the patent sentences. These English output sentences are sentences that 12 machine translation systems in NTCIR-7 translated from 100 Japanese sentences. Moreover, the number of references to each English sentence in 100 English sentences is four. These references were obtained from four bilingual humans. Table 1 presents types of the 12 machine translation systems.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 529, |
|
"end": 536, |
|
"text": "Table 1", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Procedure", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Moreover, three human judges evaluated 1,200 English output sentences from the perspective of adequacy and fluency on a scale of 1-5. We used the median value in the evaluation results of three human judges as the final scores of 1-5. We calculated Pearson's correlation efficient and Spearman's rank correlation efficient between the scores obtained using our method and the scores by human judgments in terms of sentence-level adequacy and fluency.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Procedure", |
|
"sec_num": "3.1" |
|
}, |
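
{

"text": "The meta-evaluation itself reduces to standard correlation statistics. The sketch below assumes SciPy is available; the score lists are illustrative placeholders, not the experimental data.\n\nfrom scipy.stats import pearsonr, spearmanr\n\nmetric_scores = [0.42, 0.18, 0.77, 0.35]  # hypothetical per-sentence metric scores\nhuman_scores = [3, 2, 5, 3]               # hypothetical median judgments on the 1-5 scale\n\nprint(pearsonr(metric_scores, human_scores)[0])   # Pearson's r\nprint(spearmanr(metric_scores, human_scores)[0])  # Spearman's rho",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experimental Procedure",

"sec_num": "3.1"

},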
|
{ |
|
"text": "Additionally, we calculated the correlations between the scores using seven other methods and the scores by human judgments to compare our method with other automatic evaluation methods. The other seven methods were IMPACT, ROUGE-L, BLEU 1 , NIST, NMG-WN (Ehara, 2007; Echizen-ya et al., 2009) , METEOR 2 , and WER (Leusch et al., 2003) . Using our method, 0.1 was used as the value of the parameter \u03b1 in Eqs. (3)-(10) and 1.1 was used as the value of the parameter \u03b2 in Eqs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 255, |
|
"end": 268, |
|
"text": "(Ehara, 2007;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 269, |
|
"end": 293, |
|
"text": "Echizen-ya et al., 2009)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 315, |
|
"end": 336, |
|
"text": "(Leusch et al., 2003)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Procedure", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "(1)-(10). Moreover, 0.3 was used as the value of the parameter \u03b4 in Eq. (13). These val- (Utiyama and Isahara, 2003) . Moreover, we obtained the noun phrases using a shallow parser (Sha and Pereira, 2003) as the chunking tool. We revised some erroneous results that were obtained using the chunking tool.", |
|
"cite_spans": [ |
|
{ |
|
"start": 89, |
|
"end": 116, |
|
"text": "(Utiyama and Isahara, 2003)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 181, |
|
"end": 204, |
|
"text": "(Sha and Pereira, 2003)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Procedure", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "As described in this paper, we performed comparison experiments using our method and seven other methods. Tables 2 and 3 Underlining in our method signifies that the differences between correlation coefficients obtained using our method and IMPACT are statistically significant at the 5% significance level. Moreover, \"Avg.\" signifies the average of the correlation coefficients obtained by 12 machine translation systems in respective automatic evaluation methods, and \"All\" are the correlation coefficients using the scores of 1,200 output sentences obtained using the 12 machine translation systems.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 106, |
|
"end": 120, |
|
"text": "Tables 2 and 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In Tables 2-5, the \"Avg.\" score of our method is shown to be higher than those of other methods. Especially in terms of the sentence-level adequacy shown in Tables 2 and 4, \"Avg.\" of our method is about 0.03 higher than that of IMPACT. Moreover, in system No. 8 and \"All\" of Tables 2 and 4, the differences between correlation coefficients obtained using our method and IMPACT are statistically significant at the 5% significance level. Moreover, we investigated the correlation of machine translation systems of every type. Table 6 shows \"All\" of Pearson's correlation coefficient and Spearman's rank correlation coefficient in SMT (i.e., system Nos. 1-2, system Nos. 4-8 and system Nos. 10-11) and RBMT (i.e., system Nos. 3 and 12). The scores of 900 output sentences obtained by 9 machine Table 6 because EBMT is only system No. 9. In Table 6 , our method obtained the highest correlation among the eight methods, except in terms of the adequacy of RBMT in Pearson's correlation coefficient. The differences between correlation coefficients obtained using our method and IMPACT are statistically significant at the 5% significance level for adequacy of SMT.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 792, |
|
"end": 799, |
|
"text": "Table 6", |
|
"ref_id": "TABREF9" |
|
}, |
|
{ |
|
"start": 838, |
|
"end": 845, |
|
"text": "Table 6", |
|
"ref_id": "TABREF9" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "To confirm the effectiveness of noun-phrase chunking, we performed the experiment using a system combining BLEU with our method. In this case, BLEU scores were used as score wd in Eq. (13). This experimental result is shown as \"BLEU with our method\" in Tables 2-5. In the results of \"BLEU with our method\" in Tables 2-5, underlining signifies that the differences between correlation coefficients obtained using BLEU with our method and BLEU alone are statistically significant at the 5% significance level. The coefficients of correlation for BLEU with our method are higher than those of BLEU in any machine translation system, \"Avg.\" and \"All\" in Tables 2-5. Moreover, for sentence-level adequacy, BLEU with our method is significantly better than BLEU in almost all machine translation systems and \"All\" in Tables 2 and 4. These results indicate that our method using noun-phrase chunking is effective for some methods and that it is statistically significant in each machine translation system, not only \"All\", which has large sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Subsequently, we investigated the precision of the determination process of the corresponding noun phrases described in section 2.1: in the results of system No. 1, we calculated the precision as the ratio of the number of the correct corresponding noun phrases for the number of all noun-phrase correspondences obtained using the system based on our method. Results show that the precision was 93.4%, demonstrating that our method can determine the corresponding noun phrases correctly.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Moreover, we investigated the relation be- tween the correlation obtained by our method and the quality of chunking. In \"Our method\" shown in Tables 2-5, noun phrases for which some erroneous results obtained using the chunking tool were revised. \"Our method II\" of Tables 2-5 used noun phrases that were given as results obtained using the chunking tool. Underlining in \"Our method II\" of Tables 2-5 signifies that the differences between correlation coefficients obtained using our method II and IMPACT are statistically significant at the 5% significance level. Fundamentally, in both \"Avg.\" and \"All\" of Tables 2-5, the correlation coefficients of our method II without the revised noun phrases are lower than those of our method using the revised noun phrases. However, the difference between our method and our method II in \"Avg.\" and \"All\" of Tables 2-5 is not large. The performance of the chunking tool has no great influence on the results of our method because score wd in Eqs. (3), (4), and (5) do not depend strongly on the performance of the chunking tool. For example, in sentences shown in Fig. 2 , all common parts are the same as the common parts of Fig. 2 when \"the crowning fall\" in the MT output and \"crowning drop\" in the reference are not determined as the noun phrases. Other common parts are determined correctly because the weight of the common part \"the amount of\" is higher than those of other common parts by Eqs. (1) and (2). Consequently, the determination of the common parts except \"the amount of\" is not difficult.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1106, |
|
"end": 1112, |
|
"text": "Fig. 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1168, |
|
"end": 1174, |
|
"text": "Fig. 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In other language sentences, we already performed the experiments using Japanese sentences from Reuters articles (Oyamada et al., 2010) . Results show that the correlation coefficients of IMPACT with our method, for which IMPACT scores were used as score wd in Eq. (13), were highest among some methods. Therefore, our method might not be languagedependent. Nevertheless, experiments using various language data are necessary to elucidate this point.", |
|
"cite_spans": [ |
|
{ |
|
"start": 113, |
|
"end": 135, |
|
"text": "(Oyamada et al., 2010)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "As described herein, we proposed a new automatic evaluation method for machine transla- tion. Our method calculates the scores for MT outputs using noun-phrase chunking. Consequently, the system obtains scores using the correctly matched words and phrase-level information based on the corresponding noun phrases. Experimental results demonstrate that our method yields the highest correlation among eight methods in terms of sentencelevel adequacy and fluency.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Future studies will improve our method, enabling it to achieve high correlation in sentence-level fluency. Future studies will also include experiments using data of various languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "BLEU was improved to perform sentence-level evaluation: the maximum N value between MT output and reference is used(Echizen-ya et al., 2009).2 The matching modules of METEOR are the exact and stemmed matching module, and a WordNet-based synonym-matching module.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work was done as research under the AAMT/JAPIO Special Interest Group on Patent Translation. The Japan Patent Information Organization (JAPIO) and the National Institute of Informatics (NII) provided corpora used in this work. The author gratefully acknowledges JAPIO and NII for their support. Moreover, this work was partially supported by Grants from the High-Tech Research Center of Hokkai-Gakuen University and the Kayamori Foundation of Informational Science Advancement.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "ME-TEOR: An Automatic Metric for MT Evaluation with Improved Correlation with Human Judgments", |
|
"authors": [ |
|
{ |
|
"first": "Satanjeev", |
|
"middle": [], |
|
"last": "Banerjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Lavie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proc. of ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "65--72", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Satanjeev Banerjee and Alon Lavie. 2005. ME- TEOR: An Automatic Metric for MT Eval- uation with Improved Correlation with Hu- man Judgments. In Proc. of ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summariza- tion, 65-72.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Correlating Automated and Human Assessments of Machine Translation Quality", |
|
"authors": [ |
|
{ |
|
"first": "Deborah", |
|
"middle": [], |
|
"last": "Coughlin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proc. of MT Summit IX", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "63--70", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deborah Coughlin. 2003. Correlating Automated and Human Assessments of Machine Translation Quality. In Proc. of MT Summit IX, 63-70.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Automatic Evaluation of Machine Translation based on Recursive Acquisition of an Intuitive Common Parts Continuum", |
|
"authors": [ |
|
{ |
|
"first": "-", |
|
"middle": [], |
|
"last": "Hiroshi Echizen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenji", |
|
"middle": [], |
|
"last": "Araki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of MT Summit XII", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "151--158", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hiroshi Echizen-ya and Kenji Araki. 2007. Auto- matic Evaluation of Machine Translation based on Recursive Acquisition of an Intuitive Com- mon Parts Continuum. In Proc. of MT Summit XII, 151-158.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Meta-Evaluation of Automatic Evaluation Methods for Machine Translation using Patent Translation Data in NTCIR-7", |
|
"authors": [ |
|
{

"first": "Hiroshi",

"middle": [],

"last": "Echizen-ya",

"suffix": ""

},

{

"first": "Terumasa",

"middle": [],

"last": "Ehara",

"suffix": ""

},

{

"first": "Sayori",

"middle": [],

"last": "Shimohata",

"suffix": ""

},

{

"first": "Atsushi",

"middle": [],

"last": "Fujii",

"suffix": ""

},

{

"first": "Masao",

"middle": [],

"last": "Utiyama",

"suffix": ""

},

{

"first": "Mikio",

"middle": [],

"last": "Yamamoto",

"suffix": ""

},

{

"first": "Takehito",

"middle": [],

"last": "Utsuro",

"suffix": ""

},

{

"first": "Noriko",

"middle": [],

"last": "Kando",

"suffix": ""

}
|
], |
|
"year": 2009, |
|
"venue": "Proc. of the 3rd Workshop on Patent Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9--16", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hiroshi Echizen-ya, Terumasa Ehara, Sayori Shi- mohata, Atsushi Fujii, Masao Utiyama, Mikio Yamamoto, Takehito Utsuro and Noriko Kando. 2009. Meta-Evaluation of Automatic Evaluation Methods for Machine Translation using Patent Translation Data in NTCIR-7. In Proc. of the 3rd Workshop on Patent Translation, 9-16.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Rule Based Machine Translation Combined with Statistical Post Editor for Japanese to English Patent Translation", |
|
"authors": [ |
|
{ |
|
"first": "Terumasa", |
|
"middle": [], |
|
"last": "Ehara", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of MT Summit XII Workshop on Patent Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "13--18", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Terumasa Ehara. 2007. Rule Based Machine Translation Combined with Statistical Post Ed- itor for Japanese to English Patent Transla- tion. In Proc. of MT Summit XII Workshop on Patent Translation, 13-18.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Overview of the Patent Translation Task at the NTCIR-7 Workshop", |
|
"authors": [ |
|
{ |
|
"first": "Atsushi", |
|
"middle": [], |
|
"last": "Fujii", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Masao", |
|
"middle": [], |
|
"last": "Utiyama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mikio", |
|
"middle": [], |
|
"last": "Yamamoto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Takehito", |
|
"middle": [], |
|
"last": "Utsuro", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proc. of 7th NTCIR Workshop Meeting on Evaluation of Information Access Technologies: Information Retrieval, Question Answering and Cross-lingual Information Access", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "389--400", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Atsushi Fujii, Masao Utiyama, Mikio Yamamoto and Takehito Utsuro. 2008. Overview of the Patent Translation Task at the NTCIR-7 Work- shop. In Proc. of 7th NTCIR Workshop Meeting on Evaluation of Information Access Technolo- gies: Information Retrieval, Question Answer- ing and Cross-lingual Information Access, 389- 400.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "A Novel String-to-String Distance Measure with Applications to Machine Translation Evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Gregor", |
|
"middle": [], |
|
"last": "Leusch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicola", |
|
"middle": [], |
|
"last": "Ueffing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proc. of MT Summit IX", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "240--247", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gregor Leusch, Nicola Ueffing and Hermann Ney. 2003. A Novel String-to-String Distance Mea- sure with Applications to Machine Translation Evaluation. In Proc. of MT Summit IX, 240- 247.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Automatic Evaluation of Machine Translation Quality Using Longest Common Subsequence and Skip-Bigram Statistics", |
|
"authors": [ |
|
{ |
|
"first": "Chin-Yew", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Franz Josef", |
|
"middle": [], |
|
"last": "Och", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proc. of ACL'04", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "606--613", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chin-Yew Lin and Franz Josef Och. 2004. Auto- matic Evaluation of Machine Translation Qual- ity Using Longest Common Subsequence and Skip-Bigram Statistics. In Proc. of ACL'04, 606-613.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "BLEU\u00c2TRE: Flattening Syntactic Dependencies for MT Evaluation", |
|
"authors": [ |
|
{

"first": "Dennis",

"middle": ["N."],

"last": "Mehay",

"suffix": ""

},

{

"first": "Chris",

"middle": [],

"last": "Brew",

"suffix": ""

}
|
], |
|
"year": 2007, |
|
"venue": "Proc. of MT Summit XII", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "122--131", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dennis N. Mehay and Chris Brew. 2007. BLEU\u00c2TRE: Flattening Syntactic Dependen- cies for MT Evaluation. In Proc. of MT Summit XII, 122-131.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "GLEU: Automatic Evaluation of Sentence-Level Fluency", |
|
"authors": [ |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mutton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Dras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Wan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Dale", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of ACL'07", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "344--351", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew Mutton, Mark Dras, Stephen Wan and Robert Dale. 2007. GLEU: Automatic Eval- uation of Sentence-Level Fluency. In Proc. of ACL'07, 344-351.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Automatic Evaluation of Machine Translation Quality Using N-gram Co-Occurrence Statistics", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nist", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "NIST. 2002. Automatic Evaluation of Machine Translation Quality Us- ing N-gram Co-Occurrence Statistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Automatic Evaluation of Machine Translation Using both Words Information and Comprehensive Phrases Information", |
|
"authors": [ |
|
{ |
|
"first": "Takashi", |
|
"middle": [], |
|
"last": "Oyamada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hiroshi", |
|
"middle": [], |
|
"last": "Echizen-Ya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenji", |
|
"middle": [], |
|
"last": "Araki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "IPSJ SIG Technical Report", |
|
"volume": "2010", |
|
"issue": "3", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Takashi Oyamada, Hiroshi Echizen-ya and Kenji Araki. 2010. Automatic Evaluation of Machine Translation Using both Words Information and Comprehensive Phrases Information. In IPSJ SIG Technical Report, Vol.2010-NL-195, No. 3 (in Japanese).", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "BLEU: a Method for Automatic Evaluation of Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proc. of ACL'02", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward and Wei-Jing Zhu. 2002. BLEU: a Method for Au- tomatic Evaluation of Machine Translation. In Proc. of ACL'02, 311-318.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Bllip: An Improved Evaluation Metric for Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Pozar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eugene", |
|
"middle": [], |
|
"last": "Charniak", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Pozar and Eugene Charniak. 2006. Bllip: An Improved Evaluation Metric for Machine Translation. Brown University Master Thesis.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Shallow Parsing with Conditional Random Fields", |
|
"authors": [ |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Sha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proc. of HLT-NAACL 2003", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "134--141", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fei Sha and Fernando Pereira. 2003. Shallow Pars- ing with Conditional Random Fields. In Proc. of HLT-NAACL 2003, 134-141.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "A New Quantitative Quality Measure for Machine Translation Systems", |
|
"authors": [ |
|
{ |
|
"first": "Keh-Yih", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wen", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing-Shin", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "Proc. of GOL-ING'92", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "433--439", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Keh-Yih Su, Ming-Wen Wu and Jing-Shin Chang. 1992. A New Quantitative Quality Measure for Machine Translation Systems. In Proc. of GOL- ING'92, 433-439.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Reliable Measures for Aligning Japanese-English News Articles and Sentences", |
|
"authors": [ |
|
{ |
|
"first": "Masao", |
|
"middle": [], |
|
"last": "Utiyama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hitoshi", |
|
"middle": [], |
|
"last": "Isahara", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proc. of the ACL'03", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "72--79", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Masao Utiyama and Hitoshi Isahara. 2003. Re- liable Measures for Aligning Japanese-English News Articles and Sentences. In Proc. of the ACL'03, pp.72-79.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF1": { |
|
"text": "Example of determination of corresponding noun phrases.", |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"text": "respectively show Pearson's correlation coefficient for sentence-level adequacy and fluency. Tables 4 and 5 respectively show Spearman's rank correlation coefficient for sentence-level adequacy and fluency. In Tables 2-5, bold typeface signifies the maximum correlation coefficients among eight automatic evaluation methods.", |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "MT output : in general , [ NP1 the amount ] of [ NP2 the crowning fall ] is large like [ NP3 the end ] .", |
|
"content": "<table/>" |
|
}, |
|
"TABREF4": { |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "Machine translation system types. System No. 8 System No. 9 System No. 10 System No. 11 System No. 12", |
|
"content": "<table><tr><td/><td colspan=\"3\">System No. 1 System No. 2 System No. 3</td><td>System No. 4</td><td>System No. 5</td><td>System No. 6</td></tr><tr><td>Type</td><td>SMT</td><td>SMT</td><td>RBMT</td><td>SMT</td><td>SMT</td><td>SMT</td></tr><tr><td colspan=\"2\">System No. 7 Type SMT</td><td>SMT</td><td>EBMT</td><td>SMT</td><td>SMT</td><td>RBMT</td></tr></table>" |
|
}, |
|
"TABREF5": { |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "Pearson's correlation coefficient for sentence-level adequacy.", |
|
"content": "<table><tr><td/><td>No. 1</td><td>No. 2</td><td>No. 3</td><td>No. 4</td><td>No. 5</td><td>No. 6</td><td>No. 7</td></tr><tr><td>Our method</td><td colspan=\"7\">0.7862 0.4989 0.5970 0.5713 0.6581 0.6779 0.7682</td></tr><tr><td>IMPACT</td><td>0.7639</td><td>0.4487</td><td>0.5980</td><td>0.5371</td><td>0.6371</td><td>0.6255</td><td>0.7249</td></tr><tr><td>ROUGE-L</td><td>0.7597</td><td colspan=\"3\">0.4264 0.6111 0.5229</td><td>0.6183</td><td>0.5927</td><td>0.7079</td></tr><tr><td>BLEU</td><td>0.6473</td><td>0.2463</td><td>0.4230</td><td>0.4336</td><td>0.3727</td><td>0.4124</td><td>0.5340</td></tr><tr><td>NIST</td><td>0.5135</td><td>0.2756</td><td>0.4142</td><td>0.3086</td><td>0.2553</td><td>0.2300</td><td>0.3628</td></tr><tr><td>NMG-WN</td><td>0.7010</td><td>0.3432</td><td>0.6067</td><td>0.4719</td><td>0.5441</td><td>0.5885</td><td>0.5906</td></tr><tr><td>METEOR</td><td>0.4509</td><td>0.0892</td><td>0.3907</td><td>0.2781</td><td>0.3120</td><td>0.2744</td><td>0.3937</td></tr><tr><td>WER</td><td>0.7464</td><td>0.4114</td><td>0.5519</td><td>0.5185</td><td>0.5461</td><td>0.5970</td><td>0.6902</td></tr><tr><td>Our method II</td><td>0.7870</td><td>0.5066</td><td>0.5967</td><td>0.5191</td><td>0.6529</td><td>0.6635</td><td>0.7698</td></tr><tr><td>BLEU with our method</td><td>0.7244</td><td>0.3935</td><td>0.5148</td><td>0.5231</td><td>0.4882</td><td>0.5554</td><td>0.6459</td></tr><tr><td/><td>No. 8</td><td>No. 9</td><td colspan=\"3\">No. 10 No. 11 No. 12</td><td>Avg.</td><td>All</td></tr><tr><td>Our method</td><td colspan=\"7\">0.7664 0.7208 0.6355 0.7781 0.5707 0.6691 0.6846</td></tr><tr><td>IMPACT</td><td>0.7007</td><td>0.7125</td><td>0.5981</td><td>0.7621</td><td>0.5345</td><td>0.6369</td><td>0.6574</td></tr><tr><td>ROUGE-L</td><td>0.6834</td><td>0.7042</td><td>0.5691</td><td>0.7480</td><td>0.5293</td><td>0.6228</td><td>0.6529</td></tr><tr><td>BLEU</td><td>0.5188</td><td>0.5884</td><td>0.3697</td><td>0.5459</td><td>0.4357</td><td>0.4607</td><td>0.4722</td></tr><tr><td>NIST</td><td>0.4218</td><td>0.4092</td><td>0.1721</td><td>0.3521</td><td>0.4769</td><td>0.3493</td><td>0.3326</td></tr><tr><td>NMG-WN</td><td>0.6658</td><td>0.6068</td><td>0.6116</td><td colspan=\"3\">0.6770 0.5740 0.5818</td><td>0.5669</td></tr><tr><td>METEOR</td><td>0.3881</td><td>0.4947</td><td>0.3127</td><td>0.2987</td><td>0.4162</td><td>0.3416</td><td>0.2958</td></tr><tr><td>WER</td><td>0.6656</td><td>0.6570</td><td>0.5740</td><td>0.7491</td><td>0.5301</td><td>0.6031</td><td>0.5205</td></tr><tr><td>Our method II</td><td>0.7676</td><td>0.7217</td><td>0.6343</td><td>0.7917</td><td>0.5474</td><td>0.6632</td><td>0.6774</td></tr><tr><td>BLEU with our method</td><td>0.6395</td><td>0.6696</td><td>0.5139</td><td>0.6611</td><td>0.5079</td><td>0.5698</td><td>0.5790</td></tr><tr><td colspan=\"4\">ues of the parameter are determined using En-</td><td/><td/><td/><td/></tr><tr><td colspan=\"3\">glish sentences from Reuters articles</td><td/><td/><td/><td/><td/></tr></table>" |
|
}, |
|
"TABREF6": { |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "Pearson's correlation coefficient for sentence-level fluency.", |
|
"content": "<table><tr><td/><td>No. 1</td><td>No. 2</td><td>No. 3</td><td>No. 4</td><td>No. 5</td><td>No. 6</td><td>No. 7</td></tr><tr><td>Our method</td><td colspan=\"3\">0.5853 0.3782 0.5689</td><td>0.4673</td><td colspan=\"3\">0.5739 0.5344 0.7193</td></tr><tr><td>IMPACT</td><td>0.5581</td><td>0.3407</td><td>0.5821</td><td colspan=\"3\">0.4586 0.5768 0.4852</td><td>0.6896</td></tr><tr><td>ROUGE-L</td><td>0.5551</td><td colspan=\"3\">0.3056 0.5925 0.4391</td><td>0.5666</td><td>0.4475</td><td>0.6756</td></tr><tr><td>BLEU</td><td>0.4793</td><td>0.0963</td><td>0.4488</td><td>0.3033</td><td>0.4690</td><td>0.3602</td><td>0.5272</td></tr><tr><td>NIST</td><td>0.4139</td><td>0.0257</td><td>0.4987</td><td>0.1682</td><td>0.3923</td><td>0.2236</td><td>0.3749</td></tr><tr><td>NMG-WN</td><td>0.5782</td><td>0.3090</td><td colspan=\"3\">0.5434 0.4680 0.5070</td><td>0.5234</td><td>0.5363</td></tr><tr><td>METEOR</td><td>0.4050</td><td>0.1405</td><td>0.4420</td><td>0.1825</td><td>0.4259</td><td>0.2336</td><td>0.4873</td></tr><tr><td>WER</td><td>0.5143</td><td>0.3031</td><td>0.5220</td><td>0.4262</td><td>0.4936</td><td>0.4405</td><td>0.6351</td></tr><tr><td>Our method II</td><td>0.5831</td><td>0.3689</td><td>0.5753</td><td>0.3991</td><td>0.5610</td><td>0.5445</td><td>0.7186</td></tr><tr><td>BLEU with our method</td><td>0.5425</td><td>0.2304</td><td>0.5115</td><td>0.3770</td><td>0.5358</td><td>0.4741</td><td>0.6142</td></tr><tr><td/><td>No. 8</td><td>No. 9</td><td colspan=\"3\">No. 10 No. 11 No. 12</td><td>Avg.</td><td>All</td></tr><tr><td>Our method</td><td colspan=\"3\">0.5796 0.6424 0.3241</td><td>0.5920</td><td colspan=\"3\">0.4321 0.5331 0.5574</td></tr><tr><td>IMPACT</td><td>0.5612</td><td>0.6320</td><td>0.3492</td><td>0.6034</td><td>0.4166</td><td>0.5211</td><td>0.5469</td></tr><tr><td>ROUGE-L</td><td>0.5414</td><td>0.6347</td><td>0.3231</td><td>0.5889</td><td>0.4127</td><td>0.5069</td><td>0.5387</td></tr><tr><td>BLEU</td><td>0.5040</td><td>0.5521</td><td>0.2134</td><td>0.4783</td><td>0.4078</td><td>0.4033</td><td>0.4278</td></tr><tr><td>NIST</td><td>0.3682</td><td>0.3811</td><td>0.1682</td><td colspan=\"3\">0.3116 0.4484 0.3146</td><td>0.3142</td></tr><tr><td>NMG-WN</td><td>0.5526</td><td colspan=\"4\">0.5799 0.4509 0.6308 0.4124</td><td>0.5007</td><td>0.5074</td></tr><tr><td>METEOR</td><td>0.2511</td><td>0.4153</td><td>0.1376</td><td>0.3351</td><td>0.2902</td><td>0.3122</td><td>0.2933</td></tr><tr><td>WER</td><td>0.5492</td><td>0.6421</td><td>0.3962</td><td>0.6228</td><td>0.4063</td><td>0.4960</td><td>0.4478</td></tr><tr><td>Our method II</td><td>0.5774</td><td>0.6486</td><td>0.3428</td><td>0.5975</td><td>0.4197</td><td>0.5280</td><td>0.5519</td></tr><tr><td>BLEU with our method</td><td>0.5660</td><td>0.6247</td><td>0.2536</td><td>0.5495</td><td>0.4550</td><td>0.4770</td><td>0.5014</td></tr><tr><td colspan=\"4\">translation systems in SMT and the scores of</td><td/><td/><td/><td/></tr><tr><td colspan=\"4\">200 output sentences obtained by 2 machine</td><td/><td/><td/><td/></tr><tr><td colspan=\"4\">translation systems in RBMT are used respec-</td><td/><td/><td/><td/></tr><tr><td colspan=\"3\">tively. However, EBMT is not included in</td><td/><td/><td/><td/><td/></tr></table>" |
|
}, |
|
"TABREF7": { |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "Spearman's rank correlation coefficient for sentence-level adequacy.", |
|
"content": "<table><tr><td/><td>No. 1</td><td>No. 2</td><td>No. 3</td><td>No. 4</td><td>No. 5</td><td>No. 6</td><td>No. 7</td></tr><tr><td>Our method</td><td colspan=\"7\">0.7456 0.5049 0.5837 0.5146 0.6514 0.6557 0.6746</td></tr><tr><td>IMPACT</td><td>0.7336</td><td>0.4881</td><td>0.5992</td><td>0.4741</td><td>0.6382</td><td>0.5841</td><td>0.6409</td></tr><tr><td>ROUGE-L</td><td>0.7304</td><td colspan=\"3\">0.4822 0.6092 0.4572</td><td>0.6135</td><td>0.5365</td><td>0.6368</td></tr><tr><td>BLEU</td><td>0.5525</td><td>0.2206</td><td>0.4327</td><td>0.3449</td><td>0.3230</td><td>0.2805</td><td>0.4375</td></tr><tr><td>NIST</td><td>0.5032</td><td>0.2438</td><td>0.4218</td><td>0.2489</td><td>0.2342</td><td>0.1534</td><td>0.3529</td></tr><tr><td>NMG-WN</td><td colspan=\"2\">0.7541 0.3829</td><td>0.5579</td><td>0.4472</td><td>0.5560</td><td>0.5828</td><td>0.6263</td></tr><tr><td>METEOR</td><td>0.4409</td><td>0.1509</td><td>0.4018</td><td>0.2580</td><td>0.3085</td><td>0.1991</td><td>0.4115</td></tr><tr><td>WER</td><td>0.6566</td><td>0.4147</td><td>0.5478</td><td>0.4272</td><td>0.5524</td><td>0.4884</td><td>0.5539</td></tr><tr><td>Our method II</td><td>0.7478</td><td>0.4972</td><td>0.5817</td><td>0.4892</td><td>0.6437</td><td>0.6428</td><td>0.6707</td></tr><tr><td>BLEU with our method</td><td>0.6644</td><td>0.3926</td><td>0.5065</td><td>0.4522</td><td>0.4639</td><td>0.4715</td><td>0.5460</td></tr><tr><td/><td>No. 8</td><td>No. 9</td><td colspan=\"3\">No. 10 No. 11 No. 12</td><td>Avg.</td><td>All</td></tr><tr><td>Our method</td><td colspan=\"7\">0.7298 0.7258 0.5961 0.7633 0.6078 0.6461 0.6763</td></tr><tr><td>IMPACT</td><td>0.6703</td><td>0.7067</td><td>0.5617</td><td>0.7411</td><td>0.5583</td><td>0.6164</td><td>0.6515</td></tr><tr><td>ROUGE-L</td><td>0.6603</td><td>0.6983</td><td>0.5340</td><td>0.7280</td><td>0.5281</td><td>0.6012</td><td>0.6435</td></tr><tr><td>BLEU</td><td>0.4571</td><td>0.5827</td><td>0.3220</td><td>0.4987</td><td>0.4302</td><td>0.4069</td><td>0.4227</td></tr><tr><td>NIST</td><td>0.4255</td><td>0.4424</td><td>0.1313</td><td>0.2950</td><td>0.4785</td><td>0.3276</td><td>0.3062</td></tr><tr><td>NMG-WN</td><td>0.6863</td><td colspan=\"3\">0.6524 0.6412 0.7015</td><td>0.5728</td><td>0.5968</td><td>0.5836</td></tr><tr><td>METEOR</td><td>0.4242</td><td>0.4776</td><td>0.3335</td><td>0.2861</td><td>0.4455</td><td>0.3448</td><td>0.2887</td></tr><tr><td>WER</td><td>0.6234</td><td>0.6480</td><td>0.5463</td><td>0.7131</td><td>0.5684</td><td>0.5617</td><td>0.4797</td></tr><tr><td>Our method II</td><td>0.7287</td><td>0.7255</td><td>0.5936</td><td>0.7761</td><td>0.5798</td><td>0.6397</td><td>0.6699</td></tr><tr><td>BLEU with our method</td><td>0.5850</td><td>0.6757</td><td>0.4596</td><td>0.6272</td><td>0.5452</td><td>0.5325</td><td>0.5474</td></tr></table>" |
|
}, |
|
"TABREF8": { |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "Spearman's rank correlation coefficient for sentence-level fluency.", |
|
"content": "<table><tr><td/><td>No. 1</td><td>No. 2</td><td>No. 3</td><td>No. 4</td><td>No. 5</td><td>No. 6</td><td>No. 7</td></tr><tr><td>Our method</td><td colspan=\"2\">0.5697 0.3299</td><td>0.5446</td><td>0.4199</td><td>0.5733</td><td colspan=\"2\">0.5060 0.6459</td></tr><tr><td>IMPACT</td><td>0.5481</td><td>0.3285</td><td>0.5572</td><td colspan=\"3\">0.3976 0.5960 0.4317</td><td>0.6334</td></tr><tr><td>ROUGE-L</td><td>0.5470</td><td colspan=\"3\">0.3041 0.5646 0.3661</td><td>0.5638</td><td>0.3879</td><td>0.6255</td></tr><tr><td>BLEU</td><td>0.4157</td><td>0.0559</td><td>0.4286</td><td>0.2018</td><td>0.4475</td><td>0.2569</td><td>0.4909</td></tr><tr><td>NIST</td><td>0.4209</td><td>0.0185</td><td>0.4559</td><td>0.1093</td><td>0.3186</td><td>0.1898</td><td>0.3634</td></tr><tr><td>NMG-WN</td><td colspan=\"7\">0.5569 0.3461 0.5381 0.4300 0.5052 0.5264 0.5328</td></tr><tr><td>METEOR</td><td>0.4608</td><td>0.1429</td><td>0.4438</td><td>0.1783</td><td>0.4073</td><td>0.1596</td><td>0.4821</td></tr><tr><td>WER</td><td>0.4469</td><td>0.2395</td><td>0.5087</td><td>0.3292</td><td>0.4995</td><td>0.3482</td><td>0.5637</td></tr><tr><td>Our method II</td><td>0.5659</td><td>0.3216</td><td>0.5484</td><td>0.3773</td><td>0.5638</td><td>0.5211</td><td>0.6343</td></tr><tr><td>BLEU with our method</td><td>0.5188</td><td>0.1534</td><td>0.4793</td><td>0.3005</td><td>0.5255</td><td>0.3942</td><td>0.5676</td></tr><tr><td/><td>No. 8</td><td>No. 9</td><td colspan=\"3\">No. 10 No. 11 No. 12</td><td>Avg.</td><td>All</td></tr><tr><td>Our method</td><td colspan=\"3\">0.5646 0.6617 0.3319</td><td>0.6256</td><td colspan=\"3\">0.4485 0.5185 0.5556</td></tr><tr><td>IMPACT</td><td>0.5471</td><td>0.6454</td><td>0.3222</td><td>0.6319</td><td>0.4358</td><td>0.5062</td><td>0.5489</td></tr><tr><td>ROUGE-L</td><td>0.5246</td><td>0.6428</td><td>0.2949</td><td>0.6159</td><td>0.3928</td><td>0.4858</td><td>0.5359</td></tr><tr><td>BLEU</td><td>0.4882</td><td>0.5419</td><td>0.1407</td><td>0.4740</td><td>0.4176</td><td>0.3633</td><td>0.3971</td></tr><tr><td>NIST</td><td>0.4150</td><td>0.4193</td><td>0.0889</td><td colspan=\"3\">0.3006 0.4752 0.2980</td><td>0.2994</td></tr><tr><td>NMG-WN</td><td colspan=\"5\">0.5684 0.5850 0.4451 0.6502 0.4387</td><td>0.5102</td><td>0.5156</td></tr><tr><td>METEOR</td><td>0.2911</td><td>0.4267</td><td>0.1735</td><td>0.3264</td><td>0.3512</td><td>0.3158</td><td>0.2886</td></tr><tr><td>WER</td><td>0.5320</td><td>0.6505</td><td>0.3828</td><td>0.6501</td><td>0.4003</td><td>0.4626</td><td>0.4193</td></tr><tr><td>Our method II</td><td>0.5609</td><td>0.6687</td><td>0.3629</td><td>0.6223</td><td>0.4384</td><td>0.5155</td><td>0.5531</td></tr><tr><td>BLEU with our method</td><td>0.5470</td><td>0.6213</td><td>0.2184</td><td>0.5808</td><td>0.4870</td><td>0.4495</td><td>0.4825</td></tr></table>" |
|
}, |
|
"TABREF9": { |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "Correlation coefficient for SMT and RBMT.", |
|
"content": "<table><tr><td/><td colspan=\"4\">Pearson's correlation coefficient</td><td colspan=\"4\">Spearman's rank correlation coefficient</td></tr><tr><td/><td colspan=\"2\">Adequacy</td><td colspan=\"2\">Fluency</td><td colspan=\"2\">Adequacy</td><td colspan=\"2\">Fluency</td></tr><tr><td/><td>SMT</td><td>RBMT</td><td>SMT</td><td>RBMT</td><td>SMT</td><td>RBMT</td><td>SMT</td><td>RBMT</td></tr><tr><td colspan=\"8\">Our method 0.7054 0.5840 0.5477 0.5016 0.6710 0.5961 0.5254</td><td>0.5003</td></tr><tr><td>IMPACT</td><td>0.6721</td><td>0.5650</td><td>0.5364</td><td>0.4960</td><td>0.6397</td><td>0.5811</td><td>0.5162</td><td>0.4951</td></tr><tr><td>ROUGE-L</td><td>0.6560</td><td>0.5691</td><td>0.5179</td><td>0.4988</td><td>0.6225</td><td>0.5701</td><td>0.4942</td><td>0.4783</td></tr><tr><td>NMG-WN</td><td colspan=\"3\">0.5958 0.5850 0.5201</td><td>0.4732</td><td>0.6129</td><td>0.5755</td><td>0.5238</td><td>0.4959</td></tr></table>" |
|
} |
|
} |
|
} |
|
} |