|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:57:18.506809Z" |
|
}, |
|
"title": "Synthesizing Parallel Data of User-Generated Texts with Zero-Shot Neural Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Marie", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National Institute of Information and Communications Technology", |
|
"location": { |
|
"addrLine": "3-5 Hikaridai, Seika-cho, Soraku-gun", |
|
"postCode": "619-0289", |
|
"settlement": "Kyoto", |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Atsushi", |
|
"middle": [], |
|
"last": "Fujita", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National Institute of Information and Communications Technology", |
|
"location": { |
|
"addrLine": "3-5 Hikaridai, Seika-cho, Soraku-gun", |
|
"postCode": "619-0289", |
|
"settlement": "Kyoto", |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Neural machine translation (NMT) systems are usually trained on clean parallel data. They can perform very well for translating clean in-domain texts. However, as demonstrated by previous work, the translation quality significantly worsens when translating noisy texts, such as user-generated texts (UGT) from online social media. Given the lack of parallel data of UGT that can be used to train or adapt NMT systems, we synthesize parallel data of UGT, exploiting monolingual data of UGT through crosslingual language model pretraining and zero-shot NMT systems. This paper presents two different but complementary approaches: One alters given clean parallel data into UGT-like parallel data whereas the other generates translations from monolingual data of UGT. On the MTNT translation tasks, we show that our synthesized parallel data can lead to better NMT systems for UGT while making them more robust in translating texts from various domains and styles.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Neural machine translation (NMT) systems are usually trained on clean parallel data. They can perform very well for translating clean in-domain texts. However, as demonstrated by previous work, the translation quality significantly worsens when translating noisy texts, such as user-generated texts (UGT) from online social media. Given the lack of parallel data of UGT that can be used to train or adapt NMT systems, we synthesize parallel data of UGT, exploiting monolingual data of UGT through crosslingual language model pretraining and zero-shot NMT systems. This paper presents two different but complementary approaches: One alters given clean parallel data into UGT-like parallel data whereas the other generates translations from monolingual data of UGT. On the MTNT translation tasks, we show that our synthesized parallel data can lead to better NMT systems for UGT while making them more robust in translating texts from various domains and styles.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Neural machine translation (NMT) requires large parallel data for training. However, even when trained on large clean parallel data, NMT generates translations of very poor quality when translating out-of-domain or noisy texts. For instance, Michel and Neubig (2018) empirically showed that NMT systems trained on clean parallel data from the news and parliamentary debate domains perform reasonably well when translating news articles but poorly perform at translating user-generated texts (UGT) from a social media. UGT can be from various domains and manifest various forms of natural noise. For instance, they can exhibit spelling/typographical errors, words omission/insertion/repetition, grammatical/syntactic errors, or noise markers even more specific to the writing style of social media such as abbreviations, obfuscated profanities, inconsistent capitalization, Internet slang, and emojis. Normalizing and correcting them in a preprocessing step is a solution to facilitate translation (Gerlach et al., 2013; Matos Veliz et al., 2019) , but it impedes the correct transfer of the style of the source text to its translation. In this paper, we posit that the NMT system should preserve the style during the translation. Another trend of work focuses on making NMT more robust in handling noisy tokens, such as tokens with spelling mistakes, which can greatly disturb NMT (Belinkov and Bisk, 2018) . However, it has only a minimal impact in translating UGT (Karpukhin et al., 2019) that contains other types of noise/errors.", |
|
"cite_spans": [ |
|
{ |
|
"start": 242, |
|
"end": 266, |
|
"text": "Michel and Neubig (2018)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 997, |
|
"end": 1019, |
|
"text": "(Gerlach et al., 2013;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 1020, |
|
"end": 1045, |
|
"text": "Matos Veliz et al., 2019)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 1381, |
|
"end": 1406, |
|
"text": "(Belinkov and Bisk, 2018)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 1466, |
|
"end": 1490, |
|
"text": "(Karpukhin et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Whereas domain adaptation methods are helpful in improving NMT for UGT , we do not usually have bilingual parallel data of UGT created by professional translators to train or adapt an NMT system. Consequently, previous work on NMT for UGT merely focused on scenarios for which we have UGT parallel data, such as the MTNT dataset (Michel and Neubig, 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 329, |
|
"end": 354, |
|
"text": "(Michel and Neubig, 2018)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In contrast to previous work, we assume that parallel data of UGT are not available and that we can only rely on the formal and clean texts that are usually used to train NMT systems. In addition, we exploit UGT monolingual data that are publicly available in large quantity on the Internet for many languages. We propose to synthesize parallel data of UGT to train better NMT systems for UGT. For this purpose, we present two complementary approaches that associate a pre-trained crosslingual language model with zero-shot NMT systems. Our contributions are as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 A method for altering clean parallel data into UGT parallel data Table 2 . Vanilla NMT is trained on clean parallel data, whereas ''our work'' refers to the configuration #1+#2 presented in Section 5.4 trained on synthetic parallel data of UGT.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 74, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 A method for synthesizing parallel data of UGT from monolingual data", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 An empirical evaluation, in four translation directions, of our methods that shows consistent improvements in translation quality over previous work for UGT but also on various domains and styles", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The remainder of this paper is organized as follows. In Section 2, we present the research problem and questions that we answer in this work. Then, in Section 3, we present a zero-shot NMT framework that we use to synthesize parallel data of UGT by our two methods presented in Section 4. We evaluate the usefulness of our approaches for better translating UGT in Section 5. In Sections 6 and 7, we evaluate alternative configurations for our zero-shot NMT systems, and in Section 8 we verify whether our NMT systems trained on the synthetic parallel data are more robust to changes of domain and style. We analyze the synthetic sentences and present examples in Section 9 to better understand why our data lead to better NMT systems. Following the presentation of related work in Section 10, we conclude the paper in Section 11.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "UGT contains many different types of noise that can also differ from one type of UGT to another. For instance, posts on Twitter contain many spelling errors intentionally introduced for text compression, whereas this kind of error is rather marginal in the discussions from Reddit (Michel and Neubig, 2018) . Figure 1 shows the impact on MT of two different types of noise: spelling (Ex1) and syntactic (Ex2) errors, compared to the translation of the same but clean sentence (Ex3). Ex1 has an intentional spelling error ''vl\u00e0'' (instead of ''voil\u00e0'') and a UGT-specific symbol, ''#.'' Comparison with Ex3 suggests that they have negative effects on the vanilla NMT system and eventually lead to an incorrect translation largely different from the translation of the clean source of Ex3. In Ex2, a syntactic error ''arrive est'' instead of ''arrive'' has also an impact, but to a lesser extent, by inducing the past tense in English. Vanilla NMT gives the best translation for the clean source sentence (Ex3) only failing in translating ''COVID19.'' For indicative purpose, we present in the row ''our work'' translations generated by our work. These examples highlight the inability of vanilla NMT in translating sentences with various types of noise.", |
|
"cite_spans": [ |
|
{ |
|
"start": 281, |
|
"end": 306, |
|
"text": "(Michel and Neubig, 2018)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 309, |
|
"end": 317, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Motivation", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In conducting the research to better translate UGT, we answer the following research questions: Q1 How can we generate synthetic parallel data for UGT in a specific domain/style without relying on any manually produced parallel data of UGT?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Q2 Do the synthetic parallel data lead to a better NMT system for the targeted UGT and do they make it more robust to the change of domain or style?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "3 Zero-Shot NMT for Synthesizing Parallel Data", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We describe in this section our zero-shot NMT system used to synthesize parallel data of UGT.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Let L1 and L2 be two languages for clean texts and R1 and R2 for the same languages, respectively, but for UGT. The data prerequisites for our NMT system described in Section 3.2 are as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Objective and Prerequisites", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "\u2022 P L1-L2 parallel data of clean and formal texts that are usually used for training NMT,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Objective and Prerequisites", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "\u2022 M L1 and M L2 monolingual data from any domains, and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Objective and Prerequisites", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "\u2022 M R1 and M R2 monolingual data of UGT.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Objective and Prerequisites", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Unlike previous work on NMT for UGT, we do not assume any P R1-R2 parallel data for training or validating NMT systems, except for evaluation. P L1-L2 , M L1 , and M L2 , parallel and monolingual data, are usually used to build state-of-the-art NMT systems. M R1 and M R2 monolingual data are obtained by crawling social media.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Objective and Prerequisites", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Our objective is to synthesize parallel data of UGT, which we henceforth denote as P S R1-R2 . To this end, we propose the following two approaches: #1 Alter a clean parallel data P L1-L2 into P S", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Objective and Prerequisites", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "#2 Synthesize P S R1-R2 parallel data by translating M R2 monolingual data into R1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "R1-R2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "These approaches must regard L1 and R1, and similarly L2 and R2, as two different languages. For #1, we alter the P L1-L2 parallel data by performing L1\u2192R2 and L2\u2192R1 translations. 1 For #2, we generate the data via R2\u2192R1 translation. Note that L1\u2192R2, L2\u2192R1, and R2\u2192R1 are all zero-shot translation tasks, because we do not assume any P L1-R2 , P L2-R1 , P R1-R2 parallel data, nor any parallel data using a pivot language.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "R1-R2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For a given language pair L1-L2, we require only one multilingual and multidirectional NMT system to synthesize parallel data. The compo- Figure 2 . Inspired by previous work in unsupervised NMT (Conneau and Lample, 2019) , we first pretrain a cross-lingual language model to initialize the NMT system. We use the XLM approach (Conneau and Lample, 2019) trained with the combination of the following two different objectives:", |
|
"cite_spans": [ |
|
{ |
|
"start": 195, |
|
"end": 221, |
|
"text": "(Conneau and Lample, 2019)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 327, |
|
"end": 353, |
|
"text": "(Conneau and Lample, 2019)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 138, |
|
"end": 146, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Zero-Shot NMT", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Masked Language Model (MLM): MLM has a similar objective to BERT (Devlin et al., 2019) but uses text streams for training instead of pairs of sentences. We optimize the MLM objective on the M L1 , M L2 , M R1 , and M R2 monolingual data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 86, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Zero-Shot NMT", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Translation Language Model (TLM): TLM is an extension of MLM where parallel data are leveraged so that we can rely on context in two different languages to predict masked words. We optimize the TLM objective on P L1-L2 parallel data, alternatively exploiting both translation directions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Zero-Shot NMT", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The XLM approach alternates between MLM and TLM objectives to train a single model. By sharing a single vocabulary for all of L1, L2, R1, and R2, we expect XLM to implicitly model translation knowledge for our zero-shot translation directions, namely, L1\u2192R2, L2\u2192R1, and R2\u2192R1, thanks to the joint training of MLM and TLM, also maximally exploiting the similarity between L1 and R1, and between L2 and R2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Zero-Shot NMT", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Then, the embeddings from the XLM model are used to initialize the encoder and decoder embeddings of the NMT system instead of the standard random initialization. We exploit unsupervised NMT objectives (Lample et al., 2018) to which we associate a supervised NMT objective as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 202, |
|
"end": 223, |
|
"text": "(Lample et al., 2018)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Zero-Shot NMT", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Auto-encoder (AE) Objectives: Using a noise model that drops and swaps words, the objective is to reconstruct the original sentences. We use AE objectives for L1, L2, R1, and R2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Zero-Shot NMT", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Back-translation (BT) Objectives: For training translation directions for which we do not have parallel data, a round-trip translation is performed during training in which a sentence s from monolingual data is translated, and its translation backtranslated, with the objective of generating s. We use the BT objectives corresponding to our targeted zero-shot translation directions: L1\u2192R2 \u2192L1, R2\u2192L1\u2192R2, L2\u2192R1\u2192L2, R1\u2192L2\u2192R1, R1\u2192R2\u2192R1, and R2\u2192R1\u2192R2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Zero-Shot NMT", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Machine Translation (MT) Objectives: we use this objective for L1\u2192L2 and L2\u2192L1, for which we have parallel data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Zero-Shot NMT", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "AE and BT are unsupervised NMT objectives used to train our zero-shot translation directions. However, using only these objectives would result in very poor performance, especially for distant and difficult language pairs. We thus also use MT objectives for the necessary supervision.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Zero-Shot NMT", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "To alter P L1-L2 into P S R1-R2 by our method #1, we could have trained an NMT system for L1\u2192R1 and L2\u2192R2 with the BT objectives L1\u2192R1\u2192L1 and L2\u2192R2\u2192L2. However, due to the similarity between L1 and R1, the NMT system would often perform a copy of M L1 to M S R1 . Therefore, as done by previous work in paraphrase generation (Bannard and Callison-Burch, 2005; Mallinson et al., 2017) , we instead rely on pivot languages, for instance, by translating the L1 side of P L1-L2 parallel data into R2 as a translation of L2.", |
|
"cite_spans": [ |
|
{ |
|
"start": 325, |
|
"end": 359, |
|
"text": "(Bannard and Callison-Burch, 2005;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 360, |
|
"end": 383, |
|
"text": "Mallinson et al., 2017)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Zero-Shot NMT", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "This section presents our two approaches to synthesize parallel data of UGT mentioned in Section 3.1: #1 alters existing parallel data and #2 generates translations of UGT monolingual data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Synthesizing Parallel Data of UGT", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "There exist several methods to synthesize parallel data of UGT from existing parallel data in various style or domains, but mostly requiring the use of UGT parallel data. Vaibhav et al. (2019) Figure 3: Alteration of P L1-L2 parallel data to synthesize P S R1-R2 parallel data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 171, |
|
"end": 192, |
|
"text": "Vaibhav et al. (2019)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parallel Data Alteration", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "proposed a synthetic noise induction (SNI) that applies manually defined editing operations, such as adding/dropping characters from a word or adding emojis, to introduce noise into existing parallel data. The resulting data were used for adapting an NMT system for translating UGT. They also proposed a tag-based method given a small P R1-R2 parallel data: concatenate P R1-R2 and P L1-L2 parallel data, prepend a tag onto each source sentence to indicate whether the sentence pair is from P R1-R2 or P L1-L2 , and train NMT systems on that data. Then, they used this NMT system to translate the L1 side of another P L1-L2 parallel data prepended with the tag for P R1-R2 so that the system is forced to translate L1 sentences as R1 sentences. The resulting parallel data are noisier than the original data and potentially more suitable to train NMT systems for UGT. The data are used to fine-tune NMT systems trained on P L1-L2 parallel data. In contrast, as illustrated in Figure 3 , our approach uses a zero-shot NMT system that does not require any manually produced P R1-R2 nor relies on manually defined editing operations. Given P L1-L2 , we perform L1\u2192R2 and L2\u2192R1 translation for each of L1 and L2 sentences, respectively, to obtain a synthetic R1-R2 version, that is, P S R1-R2 , of the original P L1-L2 . The resulting P S R1-R2 can be too noisy to be used to train NMT. To filter P S R1-R2 , we evaluate the similarity between original L1 and L2 sentences with their respective R1 and R2 versions using sentence-level BLEU (Lin and Och, 2004) (sBLEU) . Given a sentence pair in P S R1-R2 , if either sBLEU of L1 with respect to R1 or sBLEU of L2 with respect to R2 is below a predetermined threshold T , we filter out the sentence pair, consider that it has been too much altered. 
T can be set empirically: Create several version of P S R1-R2 using different T values, train an NMT system for each version, and choose the value that leads to the NMT system achieving the best BLEU score on some P L1-L2 validation data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1536, |
|
"end": 1563, |
|
"text": "(Lin and Och, 2004) (sBLEU)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 976, |
|
"end": 984, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Parallel Data Alteration", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Finally, after filtering, we exploit the resulting P S R1-R2 by concatenating it to the original P L1-L2 parallel data and train a new NMT system for translating UGT, or by using it for fine-tuning an NMT system trained on P L1-L2 parallel data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parallel Data Alteration", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Previous work also proposed to synthesize parallel data from monolingual data using NMT (Sennrich et al., 2016a) : An L1\u2192L2 NMT system is used to translate M L1 monolingual data into L2, and then the synthesized P S L1-L2 parallel data are concatenated to original parallel data and used to train new L2\u2192L1 (back-translation) or L1\u2192L2 (forward translation) NMT systems. However, to the best of our knowledge, nobody has studied the use of large UGT monolingual data, without any manually produced P R1-R2 parallel data, and its impact on translation quality. 2 In our scenario, translating R1 texts with an L1\u2192L2 would lead to translations of R1, that we can denote R2, of a very poor quality (see Section 2). Consequently, back-translations or forward translations generated this way would be too noisy to train R1\u2194R2 NMT systems. We verify this assumption in Section 5.2.1. Instead, as illustrated in Figure 4 , we use R1\u2192R2 and R2\u2192R1 zero-shot NMT to synthesize parallel data from M R1 and M R2 monolingual data, respectively. Because our NMT system uses a pre-trained language model for R1 and R2, we can expect it to generate better translation than a standard NMT system trained only on P L1-L2 parallel data, (i.e., that never saw UGT during training). As in Section 4.1, the resulting P S", |
|
"cite_spans": [ |
|
{ |
|
"start": 88, |
|
"end": 112, |
|
"text": "(Sennrich et al., 2016a)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 559, |
|
"end": 560, |
|
"text": "2", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 903, |
|
"end": 911, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Translation of Monolingual Data", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "parallel data can be used for fine-tuning or concatenated with the original P L1-L2 parallel data for training. In this work, we only examine the use of P S R1-R2 parallel data with their synthetic part on the source side, as back-translations, because in our preliminary experiments we have consistently observed better results than when P S R1-R2 is used as forward translations. 3 Note also that we do not filter the synthesized data and use all the data generated from the monolingual data, in contrast to another approach presented in Section 4.1. We could potentially obtain better results by filtering synthetic parallel data with some existing methods proposed, for instance, for filtering backtranslations (Imankulova et al., 2019) . We leave the investigation of such filtering techniques for future work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 715, |
|
"end": 740, |
|
"text": "(Imankulova et al., 2019)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "R1-R2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this section, we empirically evaluate the usefulness of the parallel data synthesized by our proposed approaches in training better NMT systems for translating UGT.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We conducted experiments for two language pairs, English-French (en-fr) and English-Japanese (en-ja), with the MTNT translation tasks (Michel and Neubig, 2018) . The test sets were made from posts extracted from an online discussion Web site, Reddit. Translations in the MTNT test sets were produced by professional translators with the instructions of keeping the style. Errors in the source texts were also preserved. In the four test sets, one for each translation direction, the source side contains original texts, that is, our systems will not have to translate translationese.", |
|
"cite_spans": [ |
|
{ |
|
"start": 134, |
|
"end": 159, |
|
"text": "(Michel and Neubig, 2018)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "For parallel data, we did not use any of the Reddit parallel data of the MTNT, since our approach is supposed to be agnostic of manually produced P R1-R2 translations. To make our settings comparable with previous work, we used only the clean parallel data in MTNT as P L1-L2 data for training and validating our NMT systems. For the en-fr pair, P L1-L2 data contain 2.2M sentence pairs consisting of the news-commentary (news commentaries) and Europarl (parliamentary debates) corpora provided by WMT15 (Bojar et al., 2015) . For the en-ja pair, P L1-L2 data consist of the KFTT (Wikipedia articles), TED (transcripts of online conference talks), and JESC (subtitles) corpora, resulting in a total of 3.9 M sentence pairs. All P L1-L2 parallel data can be considered rather clean and/or formal in contrast to Reddit data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 504, |
|
"end": 524, |
|
"text": "(Bojar et al., 2015)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "As monolingual data, M L1 and M L2 , we used the entire News Crawl provided for WMT20 4 for Japanese, 3.4M lines, and a sample of 25M lines for English and French. As M R1 and M R2 , we crawled data using the Reddit API and applied fastText 5 for language identification. 6 As preprocessing steps for English and French, we first normalized the punctuation of all the data, except for the reference translations in the test sets, with the Moses (Koehn et al., 2007) 7 punctuation normalizer, and then tokenized all the data with the Moses tokenizer. Finally, we truecased the data with the Moses truecaser trained on the Reddit monolingual data. As for Japanese, we only tokenized the data with MeCab. 8 We removed all empty lines and lines longer than 120 tokens from the monolingual and parallel data. Because we could crawled plenty of English data (595M lines) on Reddit, we only selected its noisiest part, similarly to Michel and Neubig (2018) when they built the MTNT dataset. We trained a language model on the English News Crawl monolingual data using LMPLZ (Heafield et al., 2013) , scored all lines of English Reddit data with the language model, normalized the score by the number of tokens in each line, and kept only the 25M lines with the lowest score. Because there are significantly less Japanese and French Reddit data, 0.8M and 1.2M sentences, respectively, we did not apply this filtering for these two languages. English Reddit data are thus much larger and can also be considered noisier than French and Japanese Reddit data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 272, |
|
"end": 273, |
|
"text": "6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 445, |
|
"end": 465, |
|
"text": "(Koehn et al., 2007)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 702, |
|
"end": 703, |
|
"text": "8", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 925, |
|
"end": 949, |
|
"text": "Michel and Neubig (2018)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 1067, |
|
"end": 1090, |
|
"text": "(Heafield et al., 2013)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "For validation, we used the P L1-L2 validation data from the MTNT dataset: Newsdiscuss-dev2015 for en-fr and the concatenation of the validation data provided with the KFTT, TED, and JESC corpora.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "For evaluation, we used SacreBLEU (Post, 2018) that includes the MTNT test sets. For en\u2192ja, we report on scores using the character-level metric chrF (Popovi\u0107, 2015) instead of BLEU (Papineni et al., 2002) to avoid any tokenization mismatch with previous/future work. 9 We tested the significance of our results via bootstrap re-sampling and approximate randomization with MultEval (Clark et al., 2011). 10",

"cite_spans": [

{

"start": 34,

"end": 46,

"text": "(Post, 2018)",

"ref_id": "BIBREF31"

},

{

"start": 150,

"end": 165,

"text": "(Popovi\u0107, 2015)",

"ref_id": "BIBREF30"

},

{

"start": 182,

"end": 205,

"text": "(Papineni et al., 2002)",

"ref_id": "BIBREF29"

},

{

"start": 268,

"end": 269,

"text": "9",

"ref_id": null

}
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "To train NMT systems, we first segmented tokens into sub-words using a BPE segmentation (Sennrich et al., 2016b) Tagged back-translation systems (TBT) were trained on back-translations of News Crawl or Reddit monolingual data. ''+'' indicates that the generated data were concatenated to the original P L1-L2 parallel data. ''FT'' denotes the fine-tuning of the vanilla NMT system. ''*'' denotes systems significantly better than the vanilla NMT system with a p-value < 0.05.", |
|
"cite_spans": [ |
|
{ |
|
"start": 88, |
|
"end": 112, |
|
"text": "(Sennrich et al., 2016b)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines Systems", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "jointly learned for each language pair on the Reddit monolingual data. We used the Transformer (Vaswani et al., 2017 ) implementation in Marian (Junczys-Dowmunt et al., 2018) with standard hyper-parameters: 6 encoder and decoder layers, 512 dimensions for the embeddings and hidden states, 8 attention heads, and 2,048 dimensions for the feed-forward filter. During training, we evaluated the model using a mean cross-entropy score computed on the MTNT P L1-L2 validation data after every 5k mini-batch updates and stopped training when it had not been improved for 5 consecutive times. We selected the model that yields the best BLEU, using the BLEU metric implemented in Marian, on the same validation data. We used the same training procedure for our vanilla NMT systems and all the NMT systems trained on synthetic parallel data. Table 1 reports on the results for our vanilla NMT systems and other baseline systems described in Sections 5.2.1 and 5.2.2.", |
|
"cite_spans": [ |
|
{ |
|
"start": 95, |
|
"end": 116, |
|
"text": "(Vaswani et al., 2017", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 834, |
|
"end": 841, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Baselines Systems", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We generated back-translations from Reddit monolingual data, tagged (Caswell et al., 2019) and concatenated them to the original P L1-L2 parallel data, and trained a new NMT system from scratch. Because Reddit data are noisy UGT, the generated back-translations may be of a very poor quality and harm the training of NMT. As contrastive experiments, we also evaluated the use of back-translations of News Crawl for which we can expect the system trained on P L1-L2 to generate better but out-of-domain translations. In all experiments, we used as many monolingual sentences as in the P L1-L2 parallel data, or all of the Reddit data for French and Japanese since we do not have enough Reddit data to match the size of P L1-L2 .",
|
"cite_spans": [ |
|
{ |
|
"start": 68, |
|
"end": 90, |
|
"text": "(Caswell et al., 2019)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tagged Back-translation", |
|
"sec_num": "5.2.1" |
|
}, |
|
{ |
|
"text": "As shown in Table 1 , back-translations of Reddit are mostly useful, with up to 3.8 BLEU points of improvement, but dramatically failed for ja\u2192en potentially due to the very low quality of the back-translations generated by the en\u2192ja vanilla NMT system. Using back-translations of News Crawl is more helpful, especially for fr\u2192en and ja\u2192en. Berard et al. (2019a) showed improvements when using back-translations of UGT. In contrast, we did not consistently observe improvements without using any manually produced P R1-R2 to train the NMT systems for back-translation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 341, |
|
"end": 362, |
|
"text": "Berard et al. (2019a)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 19, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Tagged Back-translation", |
|
"sec_num": "5.2.1" |
|
}, |
|
{ |
|
"text": "As potential baselines, we also evaluated the methods proposed by Vaibhav et al. (2019) for SNI, because it does not require any manually produced P R1-R2 . We applied their method to P L1-L2 using their scripts 11 to create a noisy version of parallel data, namely, P S R1-R2 . We also evaluated a similar approach to the tagged backtranslations proposed by Vaibhav et al. (2019) (see Section 4.1). We used our systems trained on backtranslations of Reddit to decode L1 sentences from P L1-L2 parallel data, to which we added the backtranslation tags to let the NMT system generate translation of L1 similar to UGT. We denote this noise generation from back-translation ''NGBT.'' As in Vaibhav et al. (2019) , we introduced noise only to the source side of the parallel data performing L1\u2192L2\u2192L1 where the resulting L1 sentences comprise a noisy version of the original L1 sentences. We then replace L1 sentences in the P L1-L2 parallel data with their noisy version. In addition to the use of the resulting P S R1-R2 data for fine-tuning as in Vaibhav et al. (2019) , we also evaluated NMT systems trained from scratch on the concatenation of the P S R1-R2 and P L1-L2 . As shown in Table 1 , fine-tuning our vanilla NMT system on SNI actually improves translation quality for all the tasks, except en\u2192ja. These results are not in accordance with the results in Vaibhav et al. (2019) that show a slight drop of the BLEU score for fr\u2192en. 12 We speculate that the difference may come from the use of a different, better, vanilla NMT system for which we used a larger P L1-L2 parallel data than in Vaibhav et al. (2019) . Using the P S R1-R2 synthetic parallel data concatenated to the original P S L1-L2 leads to lower BLEU scores than fine-tuning, except for ja\u2192en.", |
|
"cite_spans": [ |
|
{ |
|
"start": 66, |
|
"end": 87, |
|
"text": "Vaibhav et al. (2019)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 687, |
|
"end": 708, |
|
"text": "Vaibhav et al. (2019)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 1045, |
|
"end": 1066, |
|
"text": "Vaibhav et al. (2019)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 1363, |
|
"end": 1384, |
|
"text": "Vaibhav et al. (2019)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 1438, |
|
"end": 1440, |
|
"text": "12", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1596, |
|
"end": 1617, |
|
"text": "Vaibhav et al. (2019)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1184, |
|
"end": 1191, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Synthetic Noise Generation", |
|
"sec_num": "5.2.2" |
|
}, |
|
{ |
|
"text": "As expected, our adaptation of NGBT performed very poorly, showing that our systems trained on Reddit back-translations are not good enough to generate a useful noisy version of P L1-L2 parallel data. We do not further explore this configuration in this paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Synthetic Noise Generation", |
|
"sec_num": "5.2.2" |
|
}, |
|
{ |
|
"text": "Our NMT systems used for synthesizing P S R1-R2 parallel data are initialized with XLM (Section 3.2). To train XLM, we used the data presented in Section 5.1 on which we applied the same BPE segmentation used by our vanilla NMT systems. For the MLM objectives, we used the News Crawl corpora as M L1 and M L2 and the Reddit corpora as M R1 and M R2 monolingual data. For the TLM objectives, we used the parallel data used to train our vanilla NMT system as P L1-L2 parallel data. We used the publicly available XLM framework 13 with the standard hyperparameters proposed for unsupervised NMT: 6 layers for the encoder and the decoder, 1,024 dimensions for the embeddings, a dropout rate of 0.1, and the GELU activation. We used text streams of 256 tokens and a mini-batch size of 64. The Adam optimizer (Kingma and Ba, 2014) with a linear warm-up (Vaswani et al., 2017) was used. During training, the model was evaluated every 200k sentences on the MTNT validation parallel data for TLM and the monolingual validation data of MTNT for MLM. The training was stopped when the averaged perplexity of MLM and TLM had not been improved for 10 consecutive times.", |
|
"cite_spans": [ |
|
{ |
|
"start": 847, |
|
"end": 869, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Settings for our Approaches", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We initialized our zero-shot NMT with XLM and trained it with the AE, BT, and MT objectives presented in Section 3.2, all having the same 12 Vaibhav et al. (2019) observed improvements only when used in combination with a manually produced P S R1-R2 . 13 We refer the reader to the section III given at this URL to retrieve the complete settings of our training for XLM and unsupervised NMT: https://github.com/facebookresearch/XLM. The only difference is that we used our data in different languages, which is also used to train our own BPE vocabulary. Table 2 : Results for the MTNT test sets using P S R1-R2 synthesized by our approaches. ''zero-shot NMT'' is the NMT system used for synthesizing P S R1-R2 . ''FT on P S R1-R2 '' are configurations for which we sampled 100k sentence pairs from P S R1-R2 to fine-tune the vanilla NMT system. The last row is given for reference: the vanilla NMT system fine-tuned on the official MTNT training parallel data. ''*'' denotes systems significantly better than the FT on SNI system with a p-value < 0.05.",

"cite_spans": [

{

"start": 138,

"end": 140,

"text": "12",

"ref_id": null

},

{

"start": 252,

"end": 254,

"text": "13",

"ref_id": null

}

],

"ref_spans": [

{

"start": 554,

"end": 561,

"text": "Table 2",

"ref_id": null

}
|
], |
|
"eq_spans": [], |
|
"section": "System Settings for our Approaches", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "weights, using the same hyperparameters as XLM. We evaluated the model every 200k sentences on the MTNT validation parallel data and stopped training when the average BLEU of L1\u2192L2 and L2\u2192L1 had not been improved for 10 consecutive times.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Settings for our Approaches", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Finally, we synthesized P S R1-R2 data with our approaches using this system and trained final NMT models on the resulting P S R1-R2 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Settings for our Approaches", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Our results are presented in Table 2 . First, we checked the performance of our zero-shot NMT system. Whereas for fr\u2194en, it was comparable with the vanilla NMT system, for ja\u2194en, it performed much worse than the vanilla NMT model as expected. This is due to the use of unsupervised MT objectives that were shown to be very difficult to optimize for distant and difficult language pairs (Marie et al., 2019) with almost no shared entries in the respective vocabulary of the two languages.", |
|
"cite_spans": [ |
|
{ |
|
"start": 386, |
|
"end": 406, |
|
"text": "(Marie et al., 2019)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 29, |
|
"end": 36, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "With approach #1, we synthesized P S R1-R2 from P L1-L2 and filtered them with T = 0.5 for en-fr and T = 0.25 for en-ja, respectively, resulting in 196,788 and 301,519 sentence pairs. 14 As shown in Table 2 , fine-tuning on P S R1-R2 brings larger improvements than doing so on SNI, except for fr\u2192en. Despite the small size of the P S R1-R2 , concatenating it with P L1-L2 achieves the best BLEU with up to 3.0 BLEU points of improvements. We conclude that our approach successfully alters P L1-L2 into P S",

"cite_spans": [],

"ref_spans": [

{

"start": 199,

"end": 206,

"text": "Table 2",

"ref_id": null

}
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "useful to train NMT for UGT.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "R1-R2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We give an analysis of the altered sentences later in Section 9.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "R1-R2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Our approach #2 to synthesize P S R1-R2 brought even larger improvements. In contrast to the backtranslations of Reddit generated by the vanilla NMT system (see Table 1 ), P S R1-R2 synthesized by our zero-shot NMT systems from M R2 Reddit monolingual data (the same data used to generate ''TBT Reddit'') lead to larger improvements, especially when concatenated to P L1-L2 . For fr\u2192en, for instance, the gain over the vanilla NMT system is 7.7 BLEU points. Note also that further gains may potentially be attainable by exploring upsampling or downsampling strategies to find the optimal ratio between the sizes of P L1-L2 and P S R1-R2 . Finally, concatenating P S R1-R2 parallel data synthesized by #1 and #2 provides slightly better results than, or comparable to, the use of only parallel data synthesized by #2. Table 3 : Results for the MTNT test sets using the configurations #A and #B. ''original'' denotes the system presented in Section 5.4. ''*'' denotes systems significantly worse than the ''original'' configuration with a p-value < 0.05.",
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 161, |
|
"end": 168, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 817, |
|
"end": 824, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "R1-R2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "#B Because we have only a small amount of Reddit monolingual data for French and Japanese, #A is significantly disadvantaged by using much less monolingual data compared with our original system that also used News Crawl. In configuration #B, M L1 and M L2 are the concatenation of News Crawl and Reddit data with French and Japanese Reddit data upsampled to respectively match the size of the French and Japanese News Crawl corpora.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "R1-R2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "With #A and #B, we no longer have zeroshot translation directions for synthesizing P S R1-R2 . Instead, we have an NMT system initialized using a pre-trained crosslingual language model also exploiting Reddit monolingual data. 15 With these configurations, we assume that the presence of a significant amount of Reddit data in the monolingual data may bias the NMT system in synthesizing Reddit-like texts.", |
|
"cite_spans": [ |
|
{ |
|
"start": 227, |
|
"end": 229, |
|
"text": "15", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "R1-R2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The results of NMT systems trained on parallel data synthesized by #A and #B are presented in Table 3 . With both our approaches #1 and #2, both configurations #A and #B perform significantly worse than our proposed NMT systems that exploit P S R1-R2 synthesized by zero-shot NMT systems. These results point out the necessity to set zero-shot NMT systems, differentiating clean texts from UGT, to synthesize useful parallel data of UGT.",
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 101, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "R1-R2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "7 Ablation Study on Zero-Shot NMT's Objective", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "R1-R2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We performed an ablation study of the objectives exploited for training the zero-shot NMT presented in Section 3.2. We compared the following four combinations of objectives:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "R1-R2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "AE+BT+MT: The original combination used to train our zero-shot NMT system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "R1-R2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The AE objective is removed. This excludes any random noise in the source sentences. The system is no longer restricted to perform a simple copy of the source when performing round-trip BT.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BT+MT:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "AE+BT: Typical combination of objectives used for unsupervised NMT (Lample et al., 2018) . Without the supervised MT objective, we expect a drop of the translation quality.", |
|
"cite_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 88, |
|
"text": "(Lample et al., 2018)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BT+MT:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "BT: Without AE and MT objectives, we can expect the system to be able to properly model neither languages nor translations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BT+MT:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Note that we cannot remove the BT objectives as this is the only objective that trained the system to translate, for instance, from L1 to R2 and from R2 to R1. We evaluated the zero-shot NMT itself and NMT systems exploiting the synthetic parallel data generated by the zero-shot NMT system using our approaches #1 without filtering 16 and #2. The results are presented in Table 4 . None of the alternative combinations performs better than AE+BT+MT in our original proposal. Removing AE (i.e., BT+MT) has a minimal impact but it is necessary to obtain the best results. In contrast, removing the MT objective (i.e., AE+BT) led to a significant drop of the translation quality as the zero-shot NMT is not supervised at all. Using only the BT objective led to extremely noisy synthetic data that cannot be used to train NMT.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 373, |
|
"end": 380, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "BT+MT:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Using extra test suites, we evaluated to what extent our NMT systems trained on synthetic parallel data of UGT are robust to domain/style changes or only adapted to better translate Reddit data. Table 4 : BLEU scores for the MTNT test sets with some of the objectives deactivated for training the zero-shot NMT system that synthesizes P S R1-R2 . The configurations using #1 synthetic data were trained exclusively on this data. ''*'' denotes systems significantly worse than using all the objectives with a p-value < 0.05.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 195, |
|
"end": 202, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Impact on the Robustness of NMT", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "containing clean texts of news.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Newstest2014 (en-fr): Translation task of WMT14", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Newsdiscuss2015 (en-fr): Translation task of WMT15 containing UGT of discussions on news.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Newstest2014 (en-fr): Translation task of WMT14", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Foursquare (en-fr): A corpus of restaurant reviews (Berard et al., 2019a) that is another instance of UGT.", |
|
"cite_spans": [ |
|
{ |
|
"start": 51, |
|
"end": 73, |
|
"text": "(Berard et al., 2019a)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Newstest2014 (en-fr): Translation task of WMT14", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "JESC, KFTT, and TED (en-ja): Test sets released with their respective training data in the MTNT dataset (see Section 5.1).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Newstest2014 (en-fr): Translation task of WMT14", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Twitter (en-ja): We collected 1,400 English tweets from the natural disaster domain and hired a translation firm to translate them into Japanese with specific instructions to preserve the style of the source texts. This test set is particularly noisy because it presents many tokens specific to tweets (user identifiers, hash tags, abbreviations, etc.) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 302, |
|
"end": 352, |
|
"text": "(user identifiers, hash tags, abbreviations, etc.)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Newstest2014 (en-fr): Translation task of WMT14", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For all these translation tasks, we experimented only with the original translation direction to avoid translating translationese, except for the cases Table 5 : BLEU ( * \u2192{en,fr}) and chrF (en\u2192ja) scores obtained on the extra test sets. Best scores are in bold. ''*'' denotes systems significantly better than the vanilla NMT system with a p-value < 0.05.",
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 152, |
|
"end": 159, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Newstest2014 (en-fr): Translation task of WMT14", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where the origin of the source texts is unknown or mixed. The results obtained with the same systems presented in Section 5.4 are presented in Table 5 . These results point out that our approaches did not only adapt NMT systems to the domain and style of Reddit but also improved them overall. NMT systems trained on the parallel data synthesized by our approaches perform better than the vanilla NMT systems irrespective of the domain and style of the text to translate. In contrast, exploiting the Reddit monolingual data through tagged back-translation consistently led to lower BLEU scores (except for en\u2192fr Newsdiscuss2015), highlighting the ability of our framework in producing better synthetic parallel data. The configuration ''TBT News,'' which exploits tagged back-translation from News Crawl, is as expected the best system for translating Newstest2014, Newsdiscuss2015, and tweets, since some of the tweets have been posted by news agencies, but performed worse than our system for translating UGT from Foursquare.",
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 143, |
|
"end": 150, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Newstest2014 (en-fr): Translation task of WMT14", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "With these results and the results obtained on the MTNT test sets (see Section 5.4), we conclude that our approaches improve translation quality for UGT in general and did not only adapt the NMT system to translate a specific type of UGT.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Newstest2014 (en-fr): Translation task of WMT14", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "This section takes a closer look at the parallel data synthesized by approach #1 to observe how the clean sentences from P L1-L2 parallel data were altered and to better understand why the use of synthetic data leads to a better NMT system for UGT.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis of Clean Sentences Altered into UGT", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "We first focus on some of the characteristics of the MTNT datasets and compare how well these characteristics are exhibited in P S R1-R2 . For this analysis, we mainly relied on the scripts and resources provided by Michel and Neubig (2018) . 17 We randomly sampled source sentences from P L1-L2 and P S R1-R2 as much as there are in the MTNT test sets, and performed our analysis on them. 18 We counted the occurrences of profanities in the English, French, and Japanese. For English, we also counted the number of word contractions 19 and Internet slang expressions. We also counted words ending by ''-ise'' and ''-ize'' to account for some of the differences between US English and UK English word spellings. Because P L1-L2 is mainly made of Europarl, we can expect that UK English spelling is mainly used, whereas we expect to find a higher ratio of US English spelling in the Reddit data, since Reddit is an American platform. For Japanese, we counted the numbers of formal and informal pronouns, assuming that MTNT datasets contain more informal pronouns than P L1-L2 . Michel and Neubig (2018) also counted spelling and grammar errors, and emojis. We did not count spelling and grammar errors, expecting that they are artificially numerous in our synthetic data, since they had been automatically generated. As for the emojis, both P L1-L2 and P S R1-R2 did not contain any. Table 6 demonstrates that according to all the indicators, P S R1-R2 exhibits more of the characteristics of MTNT datasets than P L1-L2 . For instance, P S R1-R2 is in more US English, contains more Internet slang, and uses significantly more Table 6 : Quantitative analysis of the generated data. ''%'' indicates the number for occurrences per 100 tokens. For English, we compute the statistics on the en-fr data. For the MTNT test sets, the statistics are computed on the source side. R S L1-L2 has been generated by the alteration of P L1-L2 by our approach #1. English contractions. \nThis partly explains the usefulness of P S R1-R2 as NMT training data for the MTNT translation tasks, but most indicators show that P S R1-R2 is still far from perfectly matching with the characteristics of Reddit data, suggesting some room for improvement.",
|
"cite_spans": [ |
|
{ |
|
"start": 216, |
|
"end": 240, |
|
"text": "Michel and Neubig (2018)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 243, |
|
"end": 245, |
|
"text": "17", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 390, |
|
"end": 392, |
|
"text": "18", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 534, |
|
"end": 536, |
|
"text": "19", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1077, |
|
"end": 1101, |
|
"text": "Michel and Neubig (2018)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1383, |
|
"end": 1390, |
|
"text": "Table 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1626, |
|
"end": 1633, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Analysis of Clean Sentences Altered into UGT", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "For a more concrete illustration of our synthetic data, we present in Figure 5 four English and four French example sentences altered by our approach #1. These examples are all instances of a successful alteration of clean texts into UGT. En1 introduces an English contraction ''we're'' that is a characteristic of less formal English. En2, En3, and Fr3 show spelling errors (for Fr3, ''Ca'' should be written ''\u00c7a'') that may guide the system to make itself more robust.",
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 70, |
|
"end": 78, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Analysis of Clean Sentences Altered into UGT", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "En4 introduces an instance of Internet slang with a profanity, as in Fr1 where ''tr\u00e8s chiante,'' a vulgar translation of ''very annoying'' diverges from the original meaning of ''franche'' that can be translated by ''frank.'' Fr2, Fr3, and Fr4 are simplifications that make the sentences less formal: ''en outre'' and ''impliquent'' are usually used in texts that perform a formal demonstration, while ''\u00e7a veux dire'' is a more familiar turn of phrase for ''impliquent'' in this context. We also observed many instances of person names written with Reddit syntax for referring to a Reddit user account by prepending ''/u/,'' e.g., ''Berlusconi'' becomes ''/u/Berlusconi.'' All these examples are evidence that our approach successfully generates UGT in the style of Reddit.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis of Clean Sentences Altered into UGT", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "Several approaches for better translating UGT have been proposed taking advantage of the parallel data of UGT in the MTNT datasets (Michel and Neubig, 2018 ). Because of their relatively small size, they have been mostly used for fine-tuning and designing specific pre-and post-processing rules to improve translation quality (Berard et al., 2019b) . Vaibhav et al. (2019) also proposed to generate synthetic parallel data of UGT through back-translation by exploiting the parallel data in MTNT. Monolingual data of UGT have been exploited to a lesser extent through forward translation (Li and Specia, 2019) or back-translation (Berard et al., 2019a) and always with NMT systems trained on parallel data of UGT. To the best of our knowledge, Vaibhav et al. (2019) proposed the only approach that synthesizes parallel data of UGT without relying on existing parallel data of UGT. Having obtained texts in the target style of UGT, they designed editing operations to make existing parallel data in other styles more similar to the targeted style.", |
|
"cite_spans": [ |
|
{ |
|
"start": 131, |
|
"end": 155, |
|
"text": "(Michel and Neubig, 2018", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 326, |
|
"end": 348, |
|
"text": "(Berard et al., 2019b)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 351, |
|
"end": 372, |
|
"text": "Vaibhav et al. (2019)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 587, |
|
"end": 608, |
|
"text": "(Li and Specia, 2019)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 629, |
|
"end": 651, |
|
"text": "(Berard et al., 2019a)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 743, |
|
"end": 764, |
|
"text": "Vaibhav et al. (2019)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "10" |
|
}, |
|
{ |
|
"text": "Another line of work exploits NMT to perform style transfer across texts, that is, applying some characteristics of one text to another, without exploiting any parallel data of UGT, but has never been applied to NMT for UGT. Prabhumoye et al. (2018) performed style transfer through back-translation to preserve the meaning of the text while reducing its stylistic properties and then exploit adversarial generation algorithms to apply the desired style to the back-translated texts, assuming that meaning and style can be disentangled. Their approach also requires a classifier that can accurately predict the style of a given text. Zhang et al. (2018) proposed a three-step pipeline combining unsupervised statistical and neural MT to generate instances of texts in the targeted style that is then evaluated by a given style classifier as in Prabhumoye et al. (2018).",
|
"cite_spans": [ |
|
{ |
|
"start": 634, |
|
"end": 653, |
|
"text": "Zhang et al. (2018)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "10" |
|
}, |
|
{ |
|
"text": "We described two new methods for synthesizing parallel data to train better NMT systems for UGT. Both methods work through a zero-shot NMT system, initialized with a pre-trained crosslingual language model that exploits monolingual corpora of UGT. Our first method (#1) successfully alters clean parallel data into parallel data that exhibit the characteristics of UGT of the targeted style. Our second method (#2) uses the same zero-shot NMT system to translate monolingual corpora of UGT for synthesizing parallel data useful to train NMT. We showed that both methods, separately or combined, improve translation quality for UGT.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "11" |
|
}, |
|
{ |
|
"text": "For future work, we will study the use of manually produced UGT parallel data to better train our NMT system that synthesizes the parallel data. We will also explore other applications for this framework, such as paraphrase generation. We will also investigate the use of the recently proposed mirror-generative NMT (Zheng et al., 2020) , a semi-supervised architecture that exploits jointly large source and target monolingual corpora, such as those of UGT, during training using source and target language models in the same latent space.", |
|
"cite_spans": [ |
|
{ |
|
"start": 316, |
|
"end": 336, |
|
"text": "(Zheng et al., 2020)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "11" |
|
}, |
|
{ |
|
"text": "We do not consider L1\u2192R1 and L2\u2192R2 (see Section 4.1).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Berard et al. (2019a) showed that a large monolingual corpus of UGT can be successfully back-translated with a system trained on P R1-R2 parallel data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Li and Specia (2019) observed improvements using forward translations but only in combination with manually produced P R1-R2 parallel data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://www.statmt.org/wmt20/translation-task.html.5 https://fasttext.cc/.6 In our preliminary experiments, we observed large improvements in translation quality (beyond 5.0 BLEU points) with our approaches when the crawled M R1 contains the source side of the test sets. We rather chose to experiment without the knowledge of the source side of the test set and carefully removed it from the monolingual data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/moses-smt/mosesdecoder. 8 https://taku910.github.io/mecab/. 9 The sacreBLEU signatures, where xx is among {en,fr,ja} are as follows: BLEU+case.mixed+lang.xx-xx+numrefs.1 +smooth.exp+test.mtnt1.1/test+tok.13a+version.1.4.2; chrF2+ case.mixed+lang.en-ja+numchars.6+numrefs.1 +space.False+ test.mtnt1.1/test+version.1.4.2.10 https://github.com/jhclark/multeval.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/MysteryVaibhav/robust_mtnt.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In terms of BLEU scores, we observed differences, in the range of 2.0 BLEU points, considering all the thresholds tested.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We used the same framework used by our zero-shot NMT systems for #A and #B, also using the AE and BT objectives since removing them did not have a positive impact.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We did not filter the data unlike our original proposal, because our goal is only to evaluate the quality of the data given the different systems used to generate them while saving the computational cost of finding a good threshold for filtering.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/pmichel31415/mtnt.18 For this analysis, the sentences sampled from P S R1-R2 are the synthetic versions of the sentences sampled from P L1-L2 .19 We searched for the tokens: 're, 's,'t, 'd, 'll, and 've.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank the action editor, Philipp Koehn, and reviewers for their useful comments and suggestions. A part of this work was conducted under the program ''Research and Development of Enhanced Multilingual and Multipurpose Speech Translation System'' of the Ministry of Internal Affairs and Communications (MIC), Japan. Benjamin Marie was partly supported by JSPS KAKENHI grant number 20K19879 and the tenure-track researcher start-up fund in NICT. Atsushi Fujita was partly supported by JSPS KAKENHI grant number 19H05660.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Paraphrasing with bilingual parallel corpora", |
|
"authors": [ |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Bannard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the 43rd Annual Meeting on Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "597--604", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1219840.1219914" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Colin Bannard and Chris Callison-Burch. 2005. Paraphrasing with bilingual parallel corpora. In Proceedings of the 43rd Annual Meeting on Association for Computational Linguistics, page 597-604. Ann Arbor, MI, USA. Asso- ciation for Computational Linguistics. DOI: https://doi.org/10.3115/1219840 .1219914", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Synthetic and natural noise both break neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Belinkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Bisk", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 6th International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yonatan Belinkov and Yonatan Bisk. 2018. Syn- thetic and natural noise both break neural ma- chine translation. In Proceedings of the 6th International Conference on Learning Repre- sentations. Vancouver, Canada.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Machine translation of restaurant reviews: New corpus for domain adaptation and robustness", |
|
"authors": [ |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Berard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ioan", |
|
"middle": [], |
|
"last": "Calapodescu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc", |
|
"middle": [], |
|
"last": "Dymetman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claude", |
|
"middle": [], |
|
"last": "Roux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean-Luc", |
|
"middle": [], |
|
"last": "Meunier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vassilina", |
|
"middle": [], |
|
"last": "Nikoulina", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 3rd Workshop on Neural Generation and Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "168--176", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexandre Berard, Ioan Calapodescu, Marc Dymetman, Claude Roux, Jean-Luc Meunier, and Vassilina Nikoulina. 2019a. Machine trans- lation of restaurant reviews: New corpus for domain adaptation and robustness. In Proceedings of the 3rd Workshop on Neural Generation and Translation, pages 168-176.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Association for Computational Linguistics", |
|
"authors": [ |
|
{ |
|
"first": "Hong", |
|
"middle": [], |
|
"last": "Kong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "China", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-5617" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hong Kong, China. Association for Computa- tional Linguistics. DOI: https://doi .org/10.18653/v1/D19-5617", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Naver Labs Europe's systems for the WMT19 machine translation robustness task", |
|
"authors": [ |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Berard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ioan", |
|
"middle": [], |
|
"last": "Calapodescu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claude", |
|
"middle": [], |
|
"last": "Roux", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Fourth Conference on Machine Translation", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "526--532", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-5361" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexandre Berard, Ioan Calapodescu, and Claude Roux. 2019b. Naver Labs Europe's systems for the WMT19 machine translation robustness task. In Proceedings of the Fourth Conference on Machine Translation (Volume 2: Shared Task Papers, Day 1), pages 526-532. Florence, Italy. Association for Computational Linguis- tics. DOI: https://doi.org/10.18653 /v1/W19-5361", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Findings of the 2015 workshop on statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Ond\u0159ej", |
|
"middle": [], |
|
"last": "Bojar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rajen", |
|
"middle": [], |
|
"last": "Chatterjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Federmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthias", |
|
"middle": [], |
|
"last": "Huck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Hokamp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Varvara", |
|
"middle": [], |
|
"last": "Logacheva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christof", |
|
"middle": [], |
|
"last": "Monz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matteo", |
|
"middle": [], |
|
"last": "Negri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Post", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carolina", |
|
"middle": [], |
|
"last": "Scarton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Turchi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the Tenth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--46", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ond\u0159ej Bojar, Rajen Chatterjee, Christian Federmann, Barry Haddow, Matthias Huck, Chris Hokamp, Philipp Koehn, Varvara Logacheva, Christof Monz, Matteo Negri, Matt Post, Carolina Scarton, Lucia Specia, and Marco Turchi. 2015. Findings of the 2015 workshop on statistical machine translation. In Proceedings of the Tenth Workshop on Statistical Machine Translation, pages 1-46.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Association for Computational Linguistics", |
|
"authors": [ |
|
{ |
|
"first": "Portugal", |
|
"middle": [], |
|
"last": "Lisbon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W15-3001" |
|
], |
|
"PMID": [ |
|
"25955892" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lisbon, Portugal. Association for Computatio- nal Linguistics. DOI: https://doi.org /10.18653/v1/W15-3001, PMID: 25955892", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Association for Computational Linguistics", |
|
"authors": [ |
|
{ |
|
"first": "Isaac", |
|
"middle": [], |
|
"last": "Caswell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ciprian", |
|
"middle": [], |
|
"last": "Chelba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Grangier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Fourth Conference on Machine Translation", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "53--63", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-5206" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Isaac Caswell, Ciprian Chelba, and David Grangier. 2019. Tagged back-translation. In Proceedings of the Fourth Conference on Machine Translation (Volume 1: Research Papers), pages 53-63. Florence, Italy. Assoc- iation for Computational Linguistics. DOI: https://doi.org/10.18653/v1/W19 -5206", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Better hypothesis testing for statistical machine translation: Controlling for optimizer instability", |
|
"authors": [ |
|
{ |
|
"first": "Jonathan", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Lavie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "176--181", |
|
"other_ids": { |
|
"DOI": [ |
|
"https://dl.acm.org/doi/10.5555/2002736.2002774" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jonathan H. Clark, Chris Dyer, Alon Lavie, and Noah A. Smith. 2011. Better hypothesis testing for statistical machine translation: Controlling for optimizer instability. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Lan- guage Technologies, pages 176-181. Portland, MN, USA. Association for Computational Linguistics. DOI: https://dl.acm.org /doi/10.5555/2002736.2002774", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Cross-lingual language model pretraining", |
|
"authors": [ |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of Advances in Neural Information Processing Systems", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "7057--7067", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexis Conneau and Guillaume Lample. 2019. Cross-lingual language model pretraining. In Proceedings of Advances in Neural Informa- tion Processing Systems 32, pages 7057-7067. Vancouver, Canada. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186. Minneapolis, MN, USA. Association for Computational Linguistics. DOI: https:// doi.org/10.18653/v1/N19-1423", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Combining pre-editing and post-editing to improve SMT of user-generated content", |
|
"authors": [ |
|
{ |
|
"first": "Johanna", |
|
"middle": [], |
|
"last": "Gerlach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victoria", |
|
"middle": [ |
|
"Porro" |
|
], |
|
"last": "Rodriguez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierrette", |
|
"middle": [], |
|
"last": "Bouillon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sabine", |
|
"middle": [], |
|
"last": "Lehmann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of MT Summit XIV Workshop on Post-editing Technology and Practice", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "45--53", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Johanna Gerlach, Victoria Porro Rodriguez, Pierrette Bouillon, and Sabine Lehmann. 2013. Combining pre-editing and post-editing to improve SMT of user-generated content. In Proceedings of MT Summit XIV Workshop on Post-editing Technology and Practice, pages 45-53. Nice, France.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Scalable modified Kneser-Ney language model estimation", |
|
"authors": [ |
|
{ |
|
"first": "Kenneth", |
|
"middle": [], |
|
"last": "Heafield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Pouzyrevsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "690--696", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kenneth Heafield, Ivan Pouzyrevsky, Jonathan H. Clark, and Philipp Koehn. 2013. Scalable mod- ified Kneser-Ney language model estimation. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguis- tics (Volume 2: Short Papers), pages 690-696.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Filtered pseudoparallel corpus improves low-resource neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Aizhan", |
|
"middle": [], |
|
"last": "Imankulova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Takayuki", |
|
"middle": [], |
|
"last": "Sato", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mamoru", |
|
"middle": [], |
|
"last": "Komachi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ACM Transactions on Asian and Low-Resource Language Information Processing", |
|
"volume": "19", |
|
"issue": "2", |
|
"pages": "24--25", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3341726" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aizhan Imankulova, Takayuki Sato, and Mamoru Komachi. 2019. Filtered pseudo- parallel corpus improves low-resource neural machine translation. ACM Transactions on Asian and Low-Resource Language Informa- tion Processing, 19(2):24:1-16. DOI: https:// doi.org/10.1145/3341726", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Marian: Fast neural machine translation in C++", |
|
"authors": [ |
|
{ |
|
"first": "Marcin", |
|
"middle": [], |
|
"last": "Junczys-Dowmunt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Grundkiewicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomasz", |
|
"middle": [], |
|
"last": "Dwojak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Hoang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenneth", |
|
"middle": [], |
|
"last": "Heafield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Neckermann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [], |
|
"last": "Seide", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ulrich", |
|
"middle": [], |
|
"last": "Germann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of ACL 2018, System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "116--121", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcin Junczys-Dowmunt, Roman Grundkiewicz, Tomasz Dwojak, Hieu Hoang, Kenneth Heafield, Tom Neckermann, Frank Seide, Ulrich Germann, Alham Fikri Aji, Nikolay Bogoychev, Andr\u00e9 F. T. Martins, and Alexandra Birch. 2018. Marian: Fast neural machine translation in C++. In Proceedings of ACL 2018, System Demonstrations, pages 116-121.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Association for Computational Linguistics", |
|
"authors": [ |
|
{ |
|
"first": "Australia", |
|
"middle": [], |
|
"last": "Melbourne", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-4020" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Melbourne, Australia. Association for Com- putational Linguistics. DOI: https://doi .org/10.18653/v1/P18-4020", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Training on synthetic noise improves robustness to natural noise in machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Vladimir", |
|
"middle": [], |
|
"last": "Karpukhin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Eisenstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marjan", |
|
"middle": [], |
|
"last": "Ghazvininejad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 5th Workshop on Noisy User-generated Text (W-NUT 2019)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "42--47", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-5506" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vladimir Karpukhin, Omer Levy, Jacob Eisenstein, and Marjan Ghazvininejad. 2019. Training on synthetic noise improves robust- ness to natural noise in machine translation. In Proceedings of the 5th Workshop on Noisy User-generated Text (W-NUT 2019), pages 42-47. Hong Kong, China. Associa- tion for Computational Linguistics. DOI: https://doi.org/10.18653/v1/D19-5506", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Diederik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P. Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. CoRR, abs/1412.6980.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Moses: Open source toolkit for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Hoang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcello", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicola", |
|
"middle": [], |
|
"last": "Bertoldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brooke", |
|
"middle": [], |
|
"last": "Cowan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wade", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christine", |
|
"middle": [], |
|
"last": "Moran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Zens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ond\u0159ej", |
|
"middle": [], |
|
"last": "Bojar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Constantin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evan", |
|
"middle": [], |
|
"last": "Herbst", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 45th Annual Meeting of the Association for Computational Linguistics Companion Volume Proceedings of the Demo and Poster Sessions", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "177--180", |
|
"other_ids": { |
|
"DOI": [ |
|
"https://dl.acm.org/doi/10.5555/1557769.1557821" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn, Hieu Hoang, Alexandra Birch, Chris Callison-Burch, Marcello Federico, Nicola Bertoldi, Brooke Cowan, Wade Shen, Christine Moran, Richard Zens, Chris Dyer, Ond\u0159ej Bojar, Alexandra Constantin, and Evan Herbst. 2007. Moses: Open source toolkit for statistical machine translation. In Proceedings of the 45th Annual Meet- ing of the Association for Computational Linguistics Companion Volume Proceed- ings of the Demo and Poster Sessions, pages 177-180. Prague, Czech Republic. Asso- ciation for Computational Linguistics. DOI: https://dl.acm.org/doi/10.5555 /1557769.1557821", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Phrase-based & neural unsupervised machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ludovic", |
|
"middle": [], |
|
"last": "Denoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc'aurelio", |
|
"middle": [], |
|
"last": "Ranzato", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5039--5049", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Lample, Myle Ott, Alexis Conneau, Ludovic Denoyer, and Marc'Aurelio Ranzato. 2018. Phrase-based & neural unsupervised machine translation. In Proceedings of the 2018 Conference on Empirical Methods in Na- tural Language Processing, pages 5039-5049.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Association for Computational Linguistics", |
|
"authors": [ |
|
{ |
|
"first": "Belgium", |
|
"middle": [], |
|
"last": "Brussels", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1549" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Brussels, Belgium. Association for Comput- ational Linguistics. DOI: https://doi .org/10.18653/v1/D18-1549", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Findings of the first shared task on machine translation robustness", |
|
"authors": [ |
|
{ |
|
"first": "Xian", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Michel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonios", |
|
"middle": [], |
|
"last": "Anastasopoulos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Belinkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nadir", |
|
"middle": [], |
|
"last": "Durrani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Orhan", |
|
"middle": [], |
|
"last": "Firat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graham", |
|
"middle": [], |
|
"last": "Neubig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juan", |
|
"middle": [], |
|
"last": "Pino", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hassan", |
|
"middle": [], |
|
"last": "Sajjad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Fourth Conference on Machine Translation", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "91--102", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-5303" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xian Li, Paul Michel, Antonios Anastasopoulos, Yonatan Belinkov, Nadir Durrani, Orhan Firat, Philipp Koehn, Graham Neubig, Juan Pino, and Hassan Sajjad. 2019. Findings of the first shared task on machine translation robustness. In Pro- ceedings of the Fourth Conference on Machine Translation (Volume 2: Shared Task Papers, Day 1), pages 91-102. Florence, Italy. Asso- ciation for Computational Linguistics. DOI: https://doi.org/10.18653/v1/W19 -5303", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Improving neural machine translation robustness via data augmentation: Beyond back-translation", |
|
"authors": [ |
|
{ |
|
"first": "Zhenhao", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 5th Workshop on Noisy User-generated Text (W-NUT 2019)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "328--336", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-5543" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhenhao Li and Lucia Specia. 2019. Improv- ing neural machine translation robustness via data augmentation: Beyond back-translation. In Proceedings of the 5th Workshop on Noisy User-generated Text (W-NUT 2019), pages 328-336. Hong Kong, China. Asso- ciation for Computational Linguistics. DOI: https://doi.org/10.18653/v1/D19-5543", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "ORANGE: A method for evaluating automatic evaluation metrics for machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Chin-Yew", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Franz Josef", |
|
"middle": [], |
|
"last": "Och", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the 20th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "501--507", |
|
"other_ids": { |
|
"DOI": [ |
|
"https://dl.acm.org/doi/10.3115/1220355.1220427" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chin-Yew Lin and Franz Josef Och. 2004. ORANGE: A method for evaluating automatic evaluation metrics for machine translation. In Proceedings of the 20th International Conference on Computational Linguistics, pages 501-507. Geneva, Switzerland. DOI: https://dl.acm.org/doi/10.3115 /1220355.1220427", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Paraphrasing revisited with neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Mallinson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "881--893", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/E17-1083" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jonathan Mallinson, Rico Sennrich, and Mirella Lapata. 2017. Paraphrasing revisited with neural machine translation. In Proceedings of the 15th Conference of the European Chapter of the Association for Computa- tional Linguistics: Volume 1, Long Papers, pages 881-893. Valencia, Spain. Associa- tion for Computational Linguistics. DOI: https://doi.org/10.18653/v1/E17 -1083", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "NICT's unsupervised neural and statistical machine translation systems for the WMT19 news translation task", |
|
"authors": [ |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Marie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haipeng", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kehai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Atsushi", |
|
"middle": [], |
|
"last": "Fujita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Masao", |
|
"middle": [], |
|
"last": "Utiyama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eiichiro", |
|
"middle": [], |
|
"last": "Sumita", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Fourth Conference on Machine Translation", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "294--301", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-5330" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Benjamin Marie, Haipeng Sun, Rui Wang, Kehai Chen, Atsushi Fujita, Masao Utiyama, and Eiichiro Sumita. 2019. NICT's unsupervised neural and statistical machine translation sys- tems for the WMT19 news translation task. In Proceedings of the Fourth Conference on Ma- chine Translation (Volume 2: Shared Task Papers, Day 1), pages 294-301. Florence, Italy. Association for Computational Linguis- tics. DOI: https://doi.org/10.18653 /v1/W19-5330", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Benefits of data augmentation for NMT-based text normalization of user-generated content", |
|
"authors": [ |
|
{ |
|
"first": "Claudia", |
|
"middle": [ |
|
"Matos" |
|
], |
|
"last": "Veliz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Orphee", |
|
"middle": [], |
|
"last": "De Clercq", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veronique", |
|
"middle": [], |
|
"last": "Hoste", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 5th Workshop on Noisy User-generated Text (W-NUT 2019)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "275--285", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-5536" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Claudia Matos Veliz, Orphee De Clercq, and Veronique Hoste. 2019. Benefits of data augmentation for NMT-based text normaliza- tion of user-generated content. In Proceedings of the 5th Workshop on Noisy User-generated Text (W-NUT 2019), pages 275-285. Hong Kong, China. Association for Computational Linguistics. DOI: https://doi.org/10 .18653/v1/D19-5536", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "MTNT: A testbed for machine translation of noisy text", |
|
"authors": [ |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Michel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graham", |
|
"middle": [], |
|
"last": "Neubig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "543--553", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1050" |
|
], |
|
"PMID": [ |
|
"29565364" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paul Michel and Graham Neubig. 2018. MTNT: A testbed for machine translation of noisy text. In Proceedings of the 2018 Conference on Empir- ical Methods in Natural Language Processing, pages 543-553. Brussels, Belgium. Associ- ation for Computational Linguistics. DOI: https://doi.org/10.18653/v1/D18 -1050, PMID: 29565364", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Bleu: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1073083.1073135" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pages 311-318. Philadelphia, PA, USA. Asso- ciation for Computational Linguistics. DOI: https://doi.org/10.3115/1073083 .1073135", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "chrF: character n-gram f-score for automatic MT evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Maja", |
|
"middle": [], |
|
"last": "Popovi\u0107", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the Tenth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "392--395", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W15-3049" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maja Popovi\u0107. 2015. chrF: character n-gram f-score for automatic MT evaluation. In Pro- ceedings of the Tenth Workshop on Statistical Machine Translation, pages 392-395. Lisbon, Portugal. Association for Computational Lin- guistics. DOI: https://doi.org/10.18653 /v1/W15-3049", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "A call for clarity in reporting BLEU scores", |
|
"authors": [ |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Post", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Third Conference on Machine Translation: Research Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "186--191", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-6319" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matt Post. 2018. A call for clarity in reporting BLEU scores. In Proceedings of the Third Conference on Machine Translation: Research Papers, pages 186-191. Brussels, Belgium. Association for Computational Linguistics. DOI: https://doi.org/10.18653/v1 /W18-6319", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Style transfer through back-translation", |
|
"authors": [ |
|
{

"first": "Shrimai",

"middle": [],

"last": "Prabhumoye",

"suffix": ""

},

{

"first": "Yulia",

"middle": [],

"last": "Tsvetkov",

"suffix": ""

},

{

"first": "Ruslan",

"middle": [],

"last": "Salakhutdinov",

"suffix": ""

},

{

"first": "Alan",

"middle": [

"W"

],

"last": "Black",

"suffix": ""

}
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "866--876", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shrimai Prabhumoye, Yulia Tsvetkov, Ruslan Salakhutdinov, and Alan W. Black. 2018. Style transfer through back-translation. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguis- tics (Volume 1: Long Papers), pages 866-876.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Association for Computational Linguistics", |
|
"authors": [ |
|
{ |
|
"first": "Australia", |
|
"middle": [], |
|
"last": "Melbourne", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-1080" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Melbourne, Australia. Association for Com- putational Linguistics. DOI: https://doi .org/10.18653/v1/P18-1080", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Improving neural machine translation models with monolingual data", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "86--96",
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P16-1009" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016a. Improving neural machine trans- lation models with monolingual data. In Pro- ceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 86-96. Berlin, Germany. Association for Computational Lin- guistics. DOI: https://doi.org/10.18653 /v1/P16-1009", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Neural machine translation of rare words with subword units", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1715--1725", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P16-1162" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexan- dra Birch. 2016b. Neural machine trans- lation of rare words with subword units. In Proceedings of the 54th Annual Meet- ing of the Association for Computa- tional Linguistics (Volume 1: Long Papers), pages 1715-1725. Berlin, Germany. Asso- ciation for Computational Linguistics. DOI: https://doi.org/10.18653/v1/P16-1162", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Improving robustness of machine translation with synthetic noise", |
|
"authors": [ |
|
{ |
|
"first": "Vaibhav", |
|
"middle": [], |
|
"last": "Vaibhav", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sumeet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Craig", |
|
"middle": [], |
|
"last": "Stewart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graham", |
|
"middle": [], |
|
"last": "Neubig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1916--1920", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vaibhav Vaibhav, Sumeet Singh, Craig Stewart, and Graham Neubig. 2019. Improving robust- ness of machine translation with synthetic noise. In Proceedings of the 2019 Confer- ence of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 1916-1920.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Association for Computational Linguistics", |
|
"authors": [ |
|
{ |
|
"first": "Usa", |
|
"middle": [], |
|
"last": "Minneapolis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1190" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minneapolis, USA. Association for Compu- tational Linguistics. DOI: https://doi .org/10.18653/v1/N19-1190", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of Advances in Neural Information Processing Systems", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Proceedings of Advances in Neural Information Processing Systems 30, pages 5998-6008. Long Beach, USA. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Style transfer as unsupervised machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Zhirui", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuo", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shujie", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianyong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mu", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Enhong", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhirui Zhang, Shuo Ren, Shujie Liu, Jianyong Wang, Peng Chen, Mu Li, Ming Zhou, and Enhong Chen. 2018. Style transfer as unsupervised machine translation. CoRR, abs /1808.07894.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Mirror-generative neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Zaixiang", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shujian", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xin-Yu", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiajun", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 8th International Conference on Learning Representations. Virtual", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zaixiang Zheng, Hao Zhou, Shujian Huang, Lei Li, Xin-Yu Dai, and Jiajun Chen. 2020. Mirror-generative neural machine translation. In Proceedings of the 8th International Confer- ence on Learning Representations. Virtual.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Examples of the impact of noise in NMT. The NMT systems are presented in", |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"text": "Our zero-shot NMT framework. nents of this system are presented in", |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF2": { |
|
"text": "Translation of monolingual data M R1 and M R2 to synthesize P S R1-R2 parallel data.", |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF3": { |
|
"text": "Examples of French and English original sentence from the Europarl and News Commentary corpora (M L1 ) altered by our approach #1 (M R1 ). Bold indicates the alterations that we want to highlight for each example. We have manually masked a profanity in En4 with ''*******''.", |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"TABREF1": { |
|
"text": "Results for the MTNT test sets.", |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null |
|
} |
|
} |
|
} |
|
} |