|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:46:42.028863Z" |
|
}, |
|
"title": "Towards End-to-End In-Image Neural Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Elman", |
|
"middle": [], |
|
"last": "Mansimov", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "New York University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Mitchell", |
|
"middle": [], |
|
"last": "Stern", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Mia", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Orhan", |
|
"middle": [], |
|
"last": "Firat", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Puneet", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "In this paper, we offer a preliminary investigation into the task of in-image machine translation: transforming an image containing text in one language into an image containing the same text in another language. We propose an end-to-end neural model for this task inspired by recent approaches to neural machine translation, and demonstrate promising initial results based purely on pixel-level supervision. We then offer a quantitative and qualitative evaluation of our system outputs and discuss some common failure modes. Finally, we conclude with directions for future work.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "In this paper, we offer a preliminary investigation into the task of in-image machine translation: transforming an image containing text in one language into an image containing the same text in another language. We propose an end-to-end neural model for this task inspired by recent approaches to neural machine translation, and demonstrate promising initial results based purely on pixel-level supervision. We then offer a quantitative and qualitative evaluation of our system outputs and discuss some common failure modes. Finally, we conclude with directions for future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "End-to-end neural models have emerged in recent years as the dominant approach to a wide variety of sequence generation tasks in natural language processing, including speech recognition, machine translation, and dialog generation, among many others. While highly accurate, these models typically operate by outputting tokens from a predetermined symbolic vocabulary, and require integration into larger pipelines for use in user-facing applications such as voice assistants where neither the input nor output modality is text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In the speech domain, neural methods have recently been successfully applied to end-to-end speech translation (Jia et al., 2019; Liu et al., 2019; Inaguma et al., 2019) , in which the goal is to translate directly from speech in one language to speech in another language. We propose to study the analogous problem of in-image machine translation. Specifically, an image containing text in one language is to be transformed into an image containing the same text in another language, removing the dependency of any predetermined symbolic vocabulary or processing.", |
|
"cite_spans": [ |
|
{ |
|
"start": 110, |
|
"end": 128, |
|
"text": "(Jia et al., 2019;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 129, |
|
"end": 146, |
|
"text": "Liu et al., 2019;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 147, |
|
"end": 168, |
|
"text": "Inaguma et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In-image neural machine translation is a com-pelling test-bed for both research and engineering communities for a variety of reasons. Although there are existing commercial products that address this problem such as image translation feature of Google Translate 1 the underlying technical solutions are unknown. By leveraging large amounts of data and compute, end-to-end neural system could potentially improve overall quality of pipelined approaches for image translation. Second, and arguably more importantly, working directly with pixels has the potential to sidestep issues related to vocabularies, segmentation, and tokenization, allowing for the possibility of more universal approaches to neural machine translation, by unifying input and output spaces via pixels.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Why In-Image Neural Machine Translation ?", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Text preprocessing and vocabulary construction has been an active research area leading to work on investigating neural machine translation systems operating on subword units (Sennrich et al., 2016) , characters (Lee et al., 2017) and even bytes and has been highlighted to be one of the major challenges when dealing with many languages simultaneously in multilingual machine translation (Arivazhagan et al., 2019) , and crosslingual natural language understanding (Conneau et al., 2019) . Pixels serve as a straightforward way to share vocabulary among all languages at the expense of being a significantly harder learning task for the underlying models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 175, |
|
"end": 198, |
|
"text": "(Sennrich et al., 2016)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 212, |
|
"end": 230, |
|
"text": "(Lee et al., 2017)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 389, |
|
"end": 415, |
|
"text": "(Arivazhagan et al., 2019)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 466, |
|
"end": 488, |
|
"text": "(Conneau et al., 2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Why In-Image Neural Machine Translation ?", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this work, we propose an end-to-end neural approach to in-image machine translation that combines elements from recent neural approaches to the relevant sub-tasks in an end-to-end differentiable manner. We provide the initial problem definition and demonstrate promising first qualitative results using only pixel-level supervision on the target side. We then analyze some of the errors made by our models, and in the process of doing so uncover a common deficiency that suggests a path forward for future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Why In-Image Neural Machine Translation ?", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To our knowledge, there are no publicly available datasets for the task of in-image machine translation task. Since collecting aligned natural data for in-image translation would be a difficult and costly process, a more practical approach is to bootstrap by generating pairs of rendered images containing sentences from the WMT 2014 German-English parallel corpus. The dataset consists of 4.5M German-English parallel sentence pairs. We use newstest-2013 as a development set. For each sentence pair, we create a minimal web page for the source and target, then render each using Headless Chrome 2 to obtain a pair of images. The text is displayed in a black 16-pixel sans-serif font on a white background inside of a fixed-size 1024x32-pixel frame. For simplicity, all sentences are vertically centered and left-aligned without any line-wrapping. The consistent position and styling of the text in our synthetic dataset represents an ideal scenario for in-image translation, serving as a good test-bed for initial attempts. Later, one could generalize to more realistic settings by varying the location, size, typeface, and perspective of the text and by using non-uniform backgrounds.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Generation", |
|
"sec_num": "2" |
|
}, |
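As an illustration of the rendering setup described above: the paper renders a minimal web page with Headless Chrome, while the short Pillow sketch below only approximates the same output, i.e. black 16-pixel sans-serif text, vertically centered and left-aligned on a white 1024x32 canvas. The font file and the helper name render_sentence are illustrative assumptions, not part of the original pipeline.

```python
from PIL import Image, ImageDraw, ImageFont

# Approximation of the Section 2 rendering: black 16 px sans-serif text,
# vertically centered and left-aligned on a white 1024x32 canvas. The paper
# renders a minimal HTML page with Headless Chrome instead; the font file
# below is an illustrative assumption.
WIDTH, HEIGHT, FONT_SIZE = 1024, 32, 16

def render_sentence(sentence: str, font_path: str = "DejaVuSans.ttf") -> Image.Image:
    font = ImageFont.truetype(font_path, FONT_SIZE)
    img = Image.new("L", (WIDTH, HEIGHT), color=255)   # white background
    draw = ImageDraw.Draw(img)
    # Vertically center the text; keep a small left margin.
    _, top, _, bottom = draw.textbbox((0, 0), sentence, font=font)
    y = (HEIGHT - (bottom - top)) // 2 - top
    draw.text((2, y), sentence, fill=0, font=font)     # black text
    return img

# Example: the source half of one synthetic training pair.
render_sentence("Der Präsident hielt heute eine Rede.").save("src.png")
```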
|
{ |
|
"text": "Our goal is to build a neural model for the inimage translation task that can be trained end-toend on example image pairs (X * , Y * ) of height and width H and W using only pixel-level supervision. We evaluate two approaches for this task: convolutional encoder-decoder model and full model that combines soft versions of the traditional pipeline in order to arrive at a modular yet fully differentiable solution.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Inspired by the success of convolutional encoderdecoder architectures for medical image segmentation (Ronneberger et al., 2015) , we begin with a U-net style convolutional baseline. In this version of the model, the source image X * is first compressed into a single continuous vector h enc using a convolutional encoder h enc = enc(X * ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 101, |
|
"end": 127, |
|
"text": "(Ronneberger et al., 2015)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convolutional Baseline", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Then, the compressed representation is used as the input to a convolutional decoder that aims to predict all target pixels in parallel. Decoder outputs the probabilities of each pixel p(Y ) = H i=1 W j=1 softmax(dec(h enc )). The convolutional encoder consists of four residual blocks with the dimensions shown in Table 1 , and the convolutional decoder uses the same network structure in reverse order, composing a simple encoderdecoder architecture with a representational bottleneck. We threshold the grayscale value of each pixel in the groundtruth output image at 0.5 to obtain a binary black-and-white target, and use a binary cross-entropy loss on the pixels of the model output as our loss function for training. In order to solve the proposed task, this baseline must address the combined challenges of recognizing and rendering text at a pixel level, capturing the meaning of a sentence in a single vector as in early sequence-to-sequence models (Sutskever et al., 2014) , and performing non-autoregressive translation (Gu et al., 2018) . Although the model can sometimes produce the first few words of the output, it is unable to learn much beyond that; see Figure 1 for a representative example.", |
|
"cite_spans": [ |
|
{ |
|
"start": 956, |
|
"end": 980, |
|
"text": "(Sutskever et al., 2014)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1029, |
|
"end": 1046, |
|
"text": "(Gu et al., 2018)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 314, |
|
"end": 321, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 1169, |
|
"end": 1177, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Convolutional Baseline", |
|
"sec_num": "3.1" |
|
}, |
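The following is a minimal PyTorch sketch of this convolutional baseline. The channel sizes are placeholders, since the actual block dimensions follow Table 1 (not reproduced here); the sketch only illustrates the four strided residual blocks, the mirrored decoder, and the per-pixel binary cross-entropy loss on the 0.5-thresholded target, and is not the authors' implementation.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class ResBlock(nn.Module):
    """Strided residual block; the real block dimensions follow Table 1."""
    def __init__(self, c_in, c_out):
        super().__init__()
        self.conv1 = nn.Conv2d(c_in, c_out, 3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(c_out, c_out, 3, padding=1)
        self.skip = nn.Conv2d(c_in, c_out, 1, stride=2)

    def forward(self, x):
        h = F.relu(self.conv1(x))
        return F.relu(self.conv2(h) + self.skip(x))

class ConvBaseline(nn.Module):
    """Bottleneck encoder-decoder baseline; channel sizes are placeholders."""
    def __init__(self, ch=(1, 32, 64, 128, 256)):
        super().__init__()
        self.encoder = nn.Sequential(*[ResBlock(ch[i], ch[i + 1]) for i in range(4)])
        def up(c_in, c_out):
            return nn.Sequential(nn.Upsample(scale_factor=2),
                                 nn.Conv2d(c_in, c_out, 3, padding=1), nn.ReLU())
        self.decoder = nn.Sequential(up(ch[4], ch[3]), up(ch[3], ch[2]), up(ch[2], ch[1]),
                                     nn.Upsample(scale_factor=2),
                                     nn.Conv2d(ch[1], ch[0], 3, padding=1))  # pixel logits

    def forward(self, x):                  # x: (B, 1, 32, 1024) source image
        h_enc = self.encoder(x)            # compressed representation
        return self.decoder(h_enc)         # per-pixel logits for the target image

# Training step: binarize the groundtruth target at 0.5, use per-pixel BCE.
model = ConvBaseline()
src = torch.rand(2, 1, 32, 1024)
tgt = (torch.rand(2, 1, 32, 1024) > 0.5).float()
loss = F.binary_cross_entropy_with_logits(model(src), tgt)
loss.backward()
```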
|
{ |
|
"text": "To better take advantage of the problem structure, we next propose a modular neural model that breaks the problem down into more manageable sub-tasks while still being trainable end-to-end. Intuitively, one would expect a model that can successfully carry out the in-image machine trans- Figure 1 : Example predictions made by the baseline convolutional model from Section 3.1. We show two pairs of groundtruth target images followed by generated target images. Although it successfully predicts one or two words, it quickly devolves into noise thereafter.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 288, |
|
"end": 296, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Full Model", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "lation task to first recognize the text represented in the input image, next perform some computation over its internal representation to obtain a soft translation, and finally generate the output image through a learned rendering process. Moreover, just as modern neural machine translation systems predict the output over the span of multiple time steps in a auto-regressive way rather than all at once, it stands to reason that such a decomposition would be of use here as well.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Full Model", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "To this end, we propose a revised model that receives as input both the source image X * and a partial (or proposal) target image Y * <n , applies separate convolutional encoders to each source and target images in order to recognize the text contained therein. The model then applies a selfattention encoder (Vaswani et al., 2017) to the concatenated output of two convolutional encoders to extend the translation by one step, and runs the result through a convolutional decoder. The convolutional decoder is tasked to obtain a new partial output at every generation step, Y * \u2264n , that is one step closer to the final target image. The model uses the same structure as the baseline for the convolutional encoder and decoder components, and includes a 6-layer self-attention encoder with hidden dimension 512 and feed-forward dimension 2048 in the middle to help carry out translation within the learned continuous representation space. A visualization of the architecture is given in Figure 2 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 309, |
|
"end": 331, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 986, |
|
"end": 994, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Full Model", |
|
"sec_num": "3.2" |
|
}, |
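Below is a sketch of one decoding step of the full model. It assumes that the two convolutional encoders produce feature maps whose channel dimension matches the self-attention width of 512, and that the convolutional decoder accepts a feature map of the same shape; the flattening and un-flattening interface between the convolutional and self-attention components is an assumption, since the paper does not specify it.

```python
import torch
import torch.nn as nn

class FullModelStep(nn.Module):
    """One decoding step of the full model (Section 3.2). A sketch only: the
    conv encoder/decoder modules are assumed to follow the Section 3.1 baseline,
    with their channel dimension equal to d_model."""
    def __init__(self, src_encoder, tgt_encoder, conv_decoder, d_model=512):
        super().__init__()
        self.src_enc = src_encoder            # encodes the source image
        self.tgt_enc = tgt_encoder            # encodes the partial target image
        layer = nn.TransformerEncoderLayer(d_model=d_model, nhead=8,
                                           dim_feedforward=2048, batch_first=True)
        self.attn = nn.TransformerEncoder(layer, num_layers=6)  # 6-layer encoder
        self.dec = conv_decoder               # renders the extended partial image

    def forward(self, src_img, partial_tgt_img):
        f_src = self.src_enc(src_img)          # (B, d_model, H', W')
        f_tgt = self.tgt_enc(partial_tgt_img)  # (B, d_model, H', W')
        # Flatten both feature maps into sequences and concatenate them.
        seq = torch.cat([f.flatten(2).transpose(1, 2) for f in (f_src, f_tgt)], dim=1)
        h = self.attn(seq)                     # joint self-attention over src + tgt
        # Keep the target-side positions and restore the spatial layout
        # expected by the convolutional decoder (an assumed interface).
        n = f_tgt.shape[2] * f_tgt.shape[3]
        h_tgt = h[:, -n:, :].transpose(1, 2).reshape(f_tgt.shape)
        return self.dec(h_tgt)                 # per-pixel logits for the next partial image
```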
|
{ |
|
"text": "With this approach, the problem is decomposed into a sequence of image predictions, each of which conditions on the previously generated output when generating the next candidate output. We use a SentencePiece vocabulary (Kudo and Richardson, 2018) to break the underlying sen-Der Pr\u00e4sident hielt heute eine Rede.", |
|
"cite_spans": [ |
|
{ |
|
"start": 221, |
|
"end": 248, |
|
"text": "(Kudo and Richardson, 2018)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Full Model", |
|
"sec_num": "3.2" |
|
}, |
|
|
{ |
|
"text": "The president gave a speech Output Target Image Figure 2 : One decoding step for our full model on an example German-English in-image translation pair. The model can be viewed as a fully differentiable analog of the more traditional OCR \u2192 translate \u2192 render pipeline.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 48, |
|
"end": 56, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Convolutional Decoder", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "tence into sentence pieces, and decompose each example into one sub-example per sentence piece.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convolutional Decoder", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The nth sub-example has a target-side input image consisting of the first n \u2212 1 sentence pieces, and is trained to predict an output image consisting of the first n sentence pieces from the target sentence. We use the same pixel-level loss as in the baseline. Since the model fully regenerates the output at each step, it must learn to copy the portion that is already present in the target-side input in addition to predicting and rendering the next token. Decoding is done sequentially in a greedy fashion by feeding the model its own predictions (generated partial image) in place of the gold image prefixes. The use of an external symbolic vocabulary is chosen to speed up the prototyping by making use of existing neural machine translation baselines. The sole purpose is to provide pixel spans that do not cut the characters in half, and simplify the stopping policy. A simple character-based splitting could also be used and/or a stopping policy network could be trained in exchange for increased complexity, and training/inference costs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convolutional Decoder", |
|
"sec_num": null |
|
}, |
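Below is a sketch of how the per-sentence-piece sub-examples and the greedy decoding loop could look. The SentencePiece model file, the reuse of render_sentence from the earlier sketch (PIL-to-tensor conversion omitted), and the stop-when-unchanged heuristic are all assumptions; the paper only states that the sentence-piece segmentation simplifies the stopping policy.

```python
import sentencepiece as spm
import torch

sp = spm.SentencePieceProcessor(model_file="en.model")   # assumed target-side model

def make_sub_examples(src_sentence, tgt_sentence):
    """Decompose one sentence pair into per-sentence-piece sub-examples."""
    pieces = sp.encode(tgt_sentence, out_type=str)
    src_img = render_sentence(src_sentence)               # from the earlier sketch
    examples = []
    for n in range(1, len(pieces) + 1):
        prefix_in = sp.decode(pieces[: n - 1])            # first n-1 pieces as input
        prefix_out = sp.decode(pieces[:n])                # first n pieces as target
        examples.append((src_img, render_sentence(prefix_in),
                         render_sentence(prefix_out)))
    return examples

@torch.no_grad()
def greedy_decode(step_model, src_img, max_steps=64):
    """Feed the model its own predicted partial images back as input."""
    partial = torch.ones_like(src_img)                    # blank (all-white) target
    for _ in range(max_steps):
        logits = step_model(src_img, partial)
        new_partial = (torch.sigmoid(logits) > 0.5).float()   # arg max per pixel
        if torch.equal(new_partial, partial):             # assumed stopping heuristic
            break
        partial = new_partial
    return partial
```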
|
{ |
|
"text": "In Figure 3 we share various source images and corresponding predicted target images generated by the full model. Despite the very high dimensionality of the output, the model occasionally succeeds at predicting the full output translation as shown in Figure 3a . Additionally, Figure 3b shows examples where model makes a minor typo in the earlier proposals and is able to correct itself further during generation process. Figure 3c and Figure 3d show two major failure modes of our model where it is either able to generate the first part of the sentence and fails to generate the rest or completely fails at generating the image. To better understand the source of errors made by the full model we visualize the probabilities of pixels in Figure 4 . We can see that model is uncertain between producing word However and Nevertheless which leads to artifacts when taking the arg max value of each pixel during decoding similar to ones displayed in Figure 3d .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 11, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 252, |
|
"end": 261, |
|
"text": "Figure 3a", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 278, |
|
"end": 287, |
|
"text": "Figure 3b", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 424, |
|
"end": 433, |
|
"text": "Figure 3c", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 438, |
|
"end": 447, |
|
"text": "Figure 3d", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 742, |
|
"end": 750, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 950, |
|
"end": 959, |
|
"text": "Figure 3d", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "On Table 2 we provide quantitative results of the both models proposed. The full model achieves significantly lower negative log likelihood score compared to convolutional baseline due to an easier task of predicting parts of the image. Neither of the models overfit on the development set. We further quantitatively measure our models by transcribing the generated images into text with a neu- ral OCR model and measuring the BLEU (Papineni et al., 2002) score. Convolutional baseline fails to produce images that contain transcribed text and is significantly outperformed by our full model in terms of BLEU score.", |
|
"cite_spans": [ |
|
{ |
|
"start": 432, |
|
"end": 455, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4" |
|
}, |
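A sketch of the transcription-based evaluation described above: the paper transcribes generated images with a neural OCR model and scores them with BLEU, but that model is not specified, so the use of pytesseract and sacrebleu below is an illustrative stand-in rather than the paper's actual tooling, and the file names are assumptions.

```python
import pytesseract
import sacrebleu
from PIL import Image

def evaluate_bleu(generated_image_paths, reference_sentences):
    """Transcribe generated target images and score them against references.
    pytesseract stands in for the neural OCR model used in the paper."""
    hypotheses = [pytesseract.image_to_string(Image.open(p)).strip()
                  for p in generated_image_paths]
    return sacrebleu.corpus_bleu(hypotheses, [reference_sentences]).score

# Example usage with assumed file names and reference sentences:
# score = evaluate_bleu(["pred_0001.png", "pred_0002.png"],
#                       ["The president gave a speech.", "..."])
```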
|
{ |
|
"text": "In this paper, we introduce the task of in-image neural machine translation and develop an end-toend model that shows promising results on learning to translate text through purely pixel-level supervision. By doing so, we demonstrate a viable first step towards applying such models in more natural settings, such as translating texts, menus, or street signs within real-world images. Future work should explore models that do not rely on off-the-shelf text tokenizers to decompose the very hard image generation problem into sequence of simpler image predictions. We hypothesize that discrete latent variables (van den Oord et al., 2017) are best suited to implicitly segment the image and capture the sequential nature of this task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 611, |
|
"end": 638, |
|
"text": "(van den Oord et al., 2017)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "blog.google/translate/instant-camera-translation", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "developers.google.com/headless-chrome", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Massively multilingual neural machine translation in the wild: Findings and challenges", |
|
"authors": [ |
|
{ |
|
"first": "Naveen", |
|
"middle": [], |
|
"last": "Arivazhagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Bapna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Orhan", |
|
"middle": [], |
|
"last": "Firat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dmitry", |
|
"middle": [], |
|
"last": "Lepikhin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Melvin", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maxim", |
|
"middle": [], |
|
"last": "Krikun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mia", |
|
"middle": [ |
|
"Xu" |
|
], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuan", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Foster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Cherry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wolfgang", |
|
"middle": [], |
|
"last": "Macherey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhifeng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonghui", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Naveen Arivazhagan, Ankur Bapna, Orhan Firat, Dmitry Lepikhin, Melvin Johnson, Maxim Krikun, Mia Xu Chen, Yuan Cao, George Foster, Colin Cherry, Wolfgang Macherey, Zhifeng Chen, and Yonghui Wu. 2019. Massively multilingual neural machine translation in the wild: Findings and chal- lenges.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Nonautoregressive neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Jiatao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Bradbury", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Victor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiatao Gu, James Bradbury, Caiming Xiong, Vic- tor O.K. Li, and Richard Socher. 2018. Non- autoregressive neural machine translation. In Inter- national Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Multilingual end-to-end speech translation", |
|
"authors": [ |
|
{ |
|
"first": "Hirofumi", |
|
"middle": [], |
|
"last": "Inaguma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Duh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tatsuya", |
|
"middle": [], |
|
"last": "Kawahara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shinji", |
|
"middle": [], |
|
"last": "Watanabe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hirofumi Inaguma, Kevin Duh, Tatsuya Kawahara, and Shinji Watanabe. 2019. Multilingual end-to-end speech translation.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Direct speech-to-speech translation with a sequence-to-sequence model", |
|
"authors": [ |
|
{ |
|
"first": "Ye", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ron", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Weiss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fadi", |
|
"middle": [], |
|
"last": "Biadsy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wolfgang", |
|
"middle": [], |
|
"last": "Macherey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Melvin", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhifeng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonghui", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ye Jia, Ron J. Weiss, Fadi Biadsy, Wolfgang Macherey, Melvin Johnson, Zhifeng Chen, and Yonghui Wu. 2019. Direct speech-to-speech translation with a sequence-to-sequence model.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Sentence-Piece: A simple and language independent subword tokenizer and detokenizer for neural text processing", |
|
"authors": [ |
|
{ |
|
"first": "Taku", |
|
"middle": [], |
|
"last": "Kudo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Richardson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "66--71", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-2012" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Taku Kudo and John Richardson. 2018. Sentence- Piece: A simple and language independent subword tokenizer and detokenizer for neural text processing. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 66-71, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Fully character-level neural machine translation without explicit segmentation", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Hofmann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "TACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason Lee, Kyunghyun Cho, and Thomas Hofmann. 2017. Fully character-level neural machine trans- lation without explicit segmentation. In TACL.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "End-to-end speech translation with knowledge distillation", |
|
"authors": [ |
|
{ |
|
"first": "Yuchen", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhongjun", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiajun", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hua", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haifeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chengqing", |
|
"middle": [], |
|
"last": "Zong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuchen Liu, Hao Xiong, Zhongjun He, Jiajun Zhang, Hua Wu, Haifeng Wang, and Chengqing Zong. 2019. End-to-end speech translation with knowl- edge distillation.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Neural discrete representation learning", |
|
"authors": [ |
|
{ |
|
"first": "A\u00e4ron", |
|
"middle": [], |
|
"last": "Van Den Oord", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Koray", |
|
"middle": [], |
|
"last": "Kavukcuoglu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A\u00e4ron van den Oord, Oriol Vinyals, and Koray Kavukcuoglu. 2017. Neural discrete representation learning. In NIPS.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Bleu: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "U-net: Convolutional networks for biomedical image segmentation", |
|
"authors": [ |
|
{ |
|
"first": "Olaf", |
|
"middle": [], |
|
"last": "Ronneberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Fischer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Brox", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Medical Image Computing and Computer-Assisted Intervention (MICCAI)", |
|
"volume": "9351", |
|
"issue": "", |
|
"pages": "234--241", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1505.04597" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Olaf Ronneberger, Philipp Fischer, and Thomas Brox. 2015. U-net: Convolutional networks for biomedi- cal image segmentation. In Medical Image Comput- ing and Computer-Assisted Intervention (MICCAI), volume 9351 of LNCS, pages 234-241. Springer. (available on arXiv:1505.04597 [cs.CV]).", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Neural machine translation of rare words with subword units", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Sequence to sequence learning with neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc V", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "27", |
|
"issue": "", |
|
"pages": "3104--3112", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V Le. 2014. Sequence to sequence learning with neural net- works. In Z. Ghahramani, M. Welling, C. Cortes, N. D. Lawrence, and K. Q. Weinberger, editors, Ad- vances in Neural Information Processing Systems 27, pages 3104-3112. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141 ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Gar- nett, editors, Advances in Neural Information Pro- cessing Systems 30, pages 5998-6008. Curran As- sociates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Neural machine translation with byte-level subwords", |
|
"authors": [ |
|
{ |
|
"first": "Changhan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiatao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Changhan Wang, Kyunghyun Cho, and Jiatao Gu. 2019. Neural machine translation with byte-level subwords.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "(a) Examples of correct predictions by our model (b) Examples of predictions with minor typos (c) Examples of partially correct predictions by our model (d) Examples of failed predictions by our model Various types of predictions made by our full model. For more qualitative results please see Appendix." |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Analysis of the predictions made by the full model. First row shows the source sentence. Second row shows the groundtruth at the first timestep. Third row shows the probabilities of the pixels predicted by the full model." |
|
}, |
|
"TABREF1": { |
|
"text": "", |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"text": "Per pixel loss function (NLL\u2193) and generation quality (BLEU\u2191) of convolutional baseline and full model on WMT'14 German-English Train/Dev sets.", |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |