|
{ |
|
"paper_id": "S19-1032", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:46:31.830047Z" |
|
}, |
|
"title": "Generating Animations from Screenplays", |
|
"authors": [ |
|
{ |
|
"first": "Yeyao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "ETH Zurich", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Eleftheria", |
|
"middle": [], |
|
"last": "Tsipidi", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Sasha", |
|
"middle": [], |
|
"last": "Schriber", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Mubbasir", |
|
"middle": [], |
|
"last": "Kapadia", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Rutgers University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Markus", |
|
"middle": [], |
|
"last": "Gross", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "ETH Zurich", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Ashutosh", |
|
"middle": [], |
|
"last": "Modi", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Automatically generating animation from natural language text finds application in a number of areas e.g. movie script writing, instructional videos, and public safety. However, translating natural language text into animation is a challenging task. Existing text-toanimation systems can handle only very simple sentences, which limits their applications. In this paper, we develop a text-to-animation system which is capable of handling complex sentences. We achieve this by introducing a text simplification step into the process. Building on an existing animation generation system for screenwriting, we create a robust NLP pipeline to extract information from screenplays and map them to the system's knowledge base. We develop a set of linguistic transformation rules that simplify complex sentences. Information extracted from the simplified sentences is used to generate a rough storyboard and video depicting the text. Our sentence simplification module outperforms existing systems in terms of BLEU and SARI metrics.We further evaluated our system via a user study: 68 % participants believe that our system generates reasonable animation from input screenplays.", |
|
"pdf_parse": { |
|
"paper_id": "S19-1032", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Automatically generating animation from natural language text finds application in a number of areas e.g. movie script writing, instructional videos, and public safety. However, translating natural language text into animation is a challenging task. Existing text-toanimation systems can handle only very simple sentences, which limits their applications. In this paper, we develop a text-to-animation system which is capable of handling complex sentences. We achieve this by introducing a text simplification step into the process. Building on an existing animation generation system for screenwriting, we create a robust NLP pipeline to extract information from screenplays and map them to the system's knowledge base. We develop a set of linguistic transformation rules that simplify complex sentences. Information extracted from the simplified sentences is used to generate a rough storyboard and video depicting the text. Our sentence simplification module outperforms existing systems in terms of BLEU and SARI metrics.We further evaluated our system via a user study: 68 % participants believe that our system generates reasonable animation from input screenplays.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Generating animation from texts can be useful in many contexts e.g. movie script writing (Ma and Kevitt, 2006; Liu and Leung, 2006; Hanser et al., 2010) , instructional videos (Lu and Zhang, 2002) , and public safety (Johansson et al., 2004) . Text-toanimation systems can be particularly valuable for screenwriting by enabling faster iteration, prototyping and proof of concept for content creators.", |
|
"cite_spans": [ |
|
{ |
|
"start": 89, |
|
"end": 110, |
|
"text": "(Ma and Kevitt, 2006;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 111, |
|
"end": 131, |
|
"text": "Liu and Leung, 2006;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 132, |
|
"end": 152, |
|
"text": "Hanser et al., 2010)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 176, |
|
"end": 196, |
|
"text": "(Lu and Zhang, 2002)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 217, |
|
"end": 241, |
|
"text": "(Johansson et al., 2004)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "In this paper, we propose a text-to-animation generation system. Given an input text describing a certain activity, the system generates a rough animation of the text. We are addressing a practical setting, where we do not have any annotated data for training a supervised end-to-end system. The aim is not to generate a polished, final animation, but a pre-visualization of the input text. The purpose of the system is not to replace writers and artists, but to make their work more efficient and less tedious. We are aiming for a system which is robust and could be deployed in a production environment.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "Existing text-to-animation systems for screenwriting ( \u00a72) visualize stories by using a pipeline of Natural Language Processing (NLP) techniques for extracting information from texts and mapping them to appropriate action units in the animation engine. The NLP modules in these systems translate the input text into predefined intermediate action representations and the animation generation engine produces simple animation from these representations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "Although these systems can generate animation from carefully handcrafted simple sentences, translating real screenplays into coherent animation still remains a challenge. This can be attributed to the limitations of the NLP modules used with regard to handling complex sentences. In this paper, we try to address the limitations of the current text-to-animation systems. Main contributions of this paper are:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "We propose a screenplay parsing architecture which generalizes well on different screenplay formats ( \u00a73.1). We develop a rich set of linguistic rules to reduce complex sentences into simpler ones to facilitate information extraction ( \u00a73.2). We develop a new NLP pipeline to generate animation from actual screenplays ( \u00a73). The potential applications of our contributions are not restricted to just animating screenplays. The techniques we develop are fairly general and can be used in other applications as well e.g. in- ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "Translating texts into animation is not a trivial task, given that neither the input sentences nor the output animations have a fixed structure. Prior work addresses this problem from different perspectives (Hassani and Lee, 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 207, |
|
"end": 230, |
|
"text": "(Hassani and Lee, 2016)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "CONFUCIUS (Ma and Kevitt, 2006 ) is a system that converts natural language to animation using the FDG parser (Tapanainen and J\u00e4rvinen, 1997) and WordNet (Miller, 1995) . ScriptViz (Liu and Leung, 2006 ) is another similar system, created for screenwriting. It uses the Apple Pie parser (Sekine, 1998) to parse input text and then recognizes objects via an object-specific reasoner. It is limited to sentences having conjunction between two verbs. SceneMaker (Hanser et al., 2010) adopts the same NLP techniques as proposed in CONFUCIUS (Ma and Kevitt, 2006) followed by a context reasoning module. Similar to previously proposed systems, we also use dependency parsing followed by linguistic reduction ( \u00a73.2).", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 30, |
|
"text": "(Ma and Kevitt, 2006", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 110, |
|
"end": 141, |
|
"text": "(Tapanainen and J\u00e4rvinen, 1997)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 146, |
|
"end": 168, |
|
"text": "WordNet (Miller, 1995)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 181, |
|
"end": 201, |
|
"text": "(Liu and Leung, 2006", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 459, |
|
"end": 480, |
|
"text": "(Hanser et al., 2010)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 537, |
|
"end": 558, |
|
"text": "(Ma and Kevitt, 2006)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "Recent advances in deep learning have pushed the state of the art results on different NLP tasks (Honnibal and Johnson, 2015; Wolf et al., 2018; He et al., 2017) . We use pre-trained models for dependency parsing, coreference resolution and SRL to build a complete NLP pipeline to create intermediate action representations. For the action representation ( \u00a73.4), we use a key-value pair structure inspired by the PAR architecture (Badler et al., 2000) , which is a knowledge base of representations for actions performed by virtual agents.", |
|
"cite_spans": [ |
|
{ |
|
"start": 97, |
|
"end": 125, |
|
"text": "(Honnibal and Johnson, 2015;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 126, |
|
"end": 144, |
|
"text": "Wolf et al., 2018;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 145, |
|
"end": 161, |
|
"text": "He et al., 2017)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 431, |
|
"end": 452, |
|
"text": "(Badler et al., 2000)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "Our work comes close to the work done in the area of Open Information Extraction (IE) (Niklaus et al., 2018) . In particular, to extract information, Clause-Based Open IE systems (Del Corro and Gemulla, 2013; Angeli et al., 2015; Schmidek and Barbosa, 2014 ) reduce a complex sentence into simpler sentences using linguistic patterns. However, the techniques developed for these systems do not generalize well to screenplay texts, as these systems have been developed using well-formed and factual texts like Wikipedia, Penn TreeBank, etc. An initial investigation with the popular Open IE system OLLIE (Open Language Learning for Information Extraction) (Mausam et al., 2012) did not yield good results on our corpus.", |
|
"cite_spans": [ |
|
{ |
|
"start": 86, |
|
"end": 108, |
|
"text": "(Niklaus et al., 2018)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 179, |
|
"end": 208, |
|
"text": "(Del Corro and Gemulla, 2013;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 209, |
|
"end": 229, |
|
"text": "Angeli et al., 2015;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 230, |
|
"end": 256, |
|
"text": "Schmidek and Barbosa, 2014", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 655, |
|
"end": 676, |
|
"text": "(Mausam et al., 2012)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "Previous work related to information extraction for narrative technologies includes the CARDI-NAL system (Marti et al., 2018; Sanghrajka et al., 2018) , as well as the conversational agent PICA (Falk et al., 2018) . They focus on querying knowledge from stories. The CARDINAL system also generates animations from input texts. However, neither of the tools can handle complex sentences. We build on the CARDINAL system. We develop a new NLP module to support complex sentences and leverage the animation engine of CAR-DINAL.", |
|
"cite_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 125, |
|
"text": "(Marti et al., 2018;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 126, |
|
"end": 150, |
|
"text": "Sanghrajka et al., 2018)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 194, |
|
"end": 213, |
|
"text": "(Falk et al., 2018)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "Recently, a number of end-to-end image generation systems have been proposed (Mansimov et al., 2015; Reed et al., 2016) . But these systems do not synthesize satisfactory images yet, and are not suitable for our application. It is hoped that the techniques proposed in this paper could be used for automatically generating labelled da-ta (e.g. (text,video) pairs) for training end-to-end text-to-animation systems.", |
|
"cite_spans": [ |
|
{ |
|
"start": 77, |
|
"end": 100, |
|
"text": "(Mansimov et al., 2015;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 101, |
|
"end": 119, |
|
"text": "Reed et al., 2016)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "We adopt a modular approach for generating animations from screenplays. The general overview of our approach is presented in Figure 1 . The system is divided into three modules: Script Parsing Module: Given an input screenplay text, this module automatically extracts the relevant text for generating the animation ( \u00a73.1). NLP Module: It processes the extracted text to get relevant information. This has two submodules:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 125, |
|
"end": 133, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Text-to-Animation System", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "\u2022 Text Simplification Module: It simplifies complex sentences using a set of linguistic rules ( \u00a73.2). \u2022 Information Extraction Module: It extracts information from the simplified sentences into pre-defined action representations ( \u00a73.4). Animation Generation Module: It generates animation based on action representations ( \u00a73.5).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text-to-Animation System", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "Typically, screenplays or movie scripts or scripts (we use the terms interchangeably), are made of several scenes, each of which corresponds to a series of consecutive motion shots. Each scene contains several functional components 1 : Headings (time and location), Descriptions (scene description, character actions), Character Cues (character name before dialog), Dialogs (conversation content), Slug Lines (actions inserted into continuous dialog) and Transitions (camera movement). In many scripts, these components are easily identifiable by indentation, capitalization and keywords. We call these scripts well-formatted, and the remaining ones ill-formatted. We want to segment the screenplays into components and are mainly interested in the Descriptions component for animation generation. Well-formatted Scripts: We initially tried ScreenPy (Winer and Young, 2017) . In order to link mentions of an entity, an accurate coreference resolution system is required. The extracted Descriptions components are processed with the NeuralCoref 2 system. Given the text, it resolves mentions (typically pronouns) to the entity they refer to in the text. To facilitate entity resolution, we prepend each Description component with the Character Cues component which appears before it in the screenplay (e.g.", |
|
"cite_spans": [ |
|
{ |
|
"start": 850, |
|
"end": 873, |
|
"text": "(Winer and Young, 2017)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Script Parsing Module", |
|
"sec_num": "3.1." |
|
}, |
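A rough sketch of how such indentation/capitalization-based segmentation could look for a well-formatted script (the thresholds, keyword list and label names are illustrative assumptions, not the paper's actual parser):

```python
import re

# Assumed transition keyword list; the text only names 'DISSOLVE' and 'CUT TO'.
TRANSITION_WORDS = ("DISSOLVE", "CUT TO", "FADE IN", "FADE OUT")

def classify_line(line: str) -> str:
    stripped = line.strip()
    indent = len(line) - len(line.lstrip(" "))
    if not stripped:
        return "BLANK"
    if re.match(r"^(INT\.|EXT\.)", stripped):      # e.g. "INT. KITCHEN - NIGHT"
        return "HEADING"
    if stripped.isupper() and any(w in stripped for w in TRANSITION_WORDS):
        return "TRANSITION"
    if stripped.isupper() and indent > 20:         # centered all-caps name
        return "CHARACTER_CUE"
    if indent > 10:                                # indented block under a cue
        return "DIALOG"
    return "DESCRIPTION"

def extract_descriptions(script: str) -> list:
    """Keep only the Descriptions components used for animation generation."""
    return [l.strip() for l in script.splitlines()
            if classify_line(l) == "DESCRIPTION"]
```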
|
{ |
|
"text": "[character]MARTHA: [dialog]\"I knew it!\" [description]She then jumps triumphantly \u2192 MART-HA. She then jumps triumphantly).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Script Parsing Module", |
|
"sec_num": "3.1." |
|
}, |
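The cue-prepending and mention resolution step can be sketched with the NeuralCoref spaCy extension (footnote 2); the model name is an assumption, while doc._.coref_resolved is NeuralCoref's documented attribute:

```python
import spacy
import neuralcoref  # github.com/huggingface/neuralcoref

nlp = spacy.load("en_core_web_sm")
neuralcoref.add_to_pipe(nlp)  # registers the doc._.coref_* extensions

def resolve_description(character_cue: str, description: str) -> str:
    # Prepend the Character Cue so pronouns in the Descriptions component
    # resolve to the named character, as described above.
    doc = nlp(character_cue + ". " + description)
    return doc._.coref_resolved

print(resolve_description("MARTHA", "She then jumps triumphantly"))
# e.g. "MARTHA. MARTHA then jumps triumphantly"
```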
|
{ |
|
"text": "In a typical text-to-animation system, one of the main tasks is to process the input text to extract", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text Simplification Module", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "Identify procedure Transform procedure Coordination search if cc and conj in dependency tree cut cc and conj link. If conj is verb, mark it as new root; else replace it with its sibling node. Pre-Correlative Conjugation locates position of keywords: \"either\", \"both\",\"neither\" removed the located word from dependency tree Appositive Clause find appos token and its head (none) glue appositive noun phrase with \"to be\" Relative Clause find relcl token and its head cut appos link, then traverse from root. Then, if no \"wh\" word present, put head part after anchor part; else, we divide them into 5 subcases (Table 2) the relevant information about actions (typically verbs) and participants (typically subject/object of the verb), which is subsequently used for generating animation. This works well for simple sentences having a single verb with one subject and one (optional) object. However, the sentences in a screenplay are complicated and sometimes informal. In this work, a sentence is said to be complicated if it deviates from easily extractable and simple subject-verb-object (and its permutations) syntactic structures and possibly has multiple actions mentioned within the same sentence with syntactic interactions between them. By syntactic structure we refer to the dependency graph of the sentence. In the case of screenplays, the challenge is to process such complicated texts. We take the text simplification approach, i.e. the system first simplifies a complicated sentence and then extracts the relevant information. Simplification reduces a complicated sentence into multiple simpler sentences, each having a single action along with its participants, making it straightforward to extract necessary information.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 607, |
|
"end": 616, |
|
"text": "(Table 2)", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Syntactic Structure", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Recently, end-to-end Neural Text Simplification (NTS) systems (Nisioi et al., 2017; Saggion, 2017) have shown reasonable accuracy. However, these systems have been trained on factual data such as Wikipedia and do not generalize well to screenplay texts. Our experiments with such a pretrained neural text simplification system did not yield good results ( \u00a75.1). Moreover, in the context of text-to-animation, there is no standard labeled corpus to train an end-to-end system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Syntactic Structure", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "There has been work on text simplification using linguistic rules-based approaches. For exam-ple, (Siddharthan, 2011) propose a set of rules to manipulate sentence structure to output simplified sentences using syntactic dependency parsing. Similarly, the YATS system (Ferr\u00e9s et al., 2016) implements a set of rules in the JAPE language (Cunningham et al., 2000) to address six syntactic structures: Passive Constructions, Appositive Phrases, Relative Clauses, Coordinated Clauses, Correlated Clauses and Adverbial Clauses. Most of the rules focus on case and tense correction, with only 1-2 rules for sentence splitting. We take inspiration from the YATS system, and our system incorporates modules to identify and transform sentence structures into simpler ones using a broader set of rules.", |
|
"cite_spans": [ |
|
{ |
|
"start": 337, |
|
"end": 362, |
|
"text": "(Cunningham et al., 2000)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Syntactic Structure", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In our system, each syntactic structure is handled by an Analyzer, which contains two processes: Identify and Transform. The Identify process takes in a sentence and determines if it contains a particular syntactic structure. Subsequently, the Transform process focuses on the first occurrence of the identified syntactic structure and then splits and assembles the sentence into simpler sentences. Both Identify and Transform use Part-of-Speech (POS) tagging and dependency parsing (Honnibal and Montani, 2017) modules implemented in spaCy 2.0 3", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Syntactic Structure", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The simplification algorithm (Algorithm 1) starts with an input sentence and recursively processes it until no further simplification is possible. It uses a queue to manage intermediate simplified sentences, and runs in a loop until the queue is empty. For each sentence, the system applies each syntactic analyzer to Identify the correspon- The sophomore runs through the kitchen.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Syntactic Structure", |
|
"sec_num": null |
|
}, |
|
{

"text": "[Table 2 (simplification examples): \"Stifler has a toothbrush hanging[acl] from his mouth.\" \u2192 \"A toothbrush hangs from Stifler's mouth.\" and \"Stifler has a toothbrush.\"; further simplified outputs include \"The sophomore runs through the kitchen.\" and \"The sophomore comes.\"]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Syntactic Structure",

"sec_num": null

},
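A compact sketch of the controller loop of Algorithm 1, assuming an Analyzer interface with the Identify/Transform split described above (class and function names are our own illustration):

```python
from collections import deque

class Analyzer:
    """One syntactic structure: Identify tests for it, Transform splits on it."""
    def identify(self, sentence: str) -> bool:
        raise NotImplementedError
    def transform(self, sentence: str) -> list:
        raise NotImplementedError

def simplify(sentence: str, analyzers: list) -> list:
    """Recursively simplify a sentence until no analyzer applies."""
    queue, results = deque([sentence]), []
    while queue:
        current = queue.popleft()
        for analyzer in analyzers:          # try each analyzer (line 13)
            if analyzer.identify(current):  # structure found (line 14)
                for simple in analyzer.transform(current):  # split (line 16)
                    queue.append(simple)    # re-queue outputs (line 19)
                break
        else:
            # No analyzer applied: assumed simple, push to results (line 21).
            results.append(current)
    return results
```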
|
{ |
|
"text": "The process is repeated with each of the Identify analyzers (line 13). If none of the analyzers can be applied, the sentence is assumed to be simple and it is pushed into the result list (line 21). We summarize linguistic rules in Table 1 and examples are given in Table 2 . Next, we describe the coordination linguistic rules. For details regarding other rules, please refer to Appendix B.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 231, |
|
"end": 238, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 265, |
|
"end": 272, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Adjective", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Coordination: Coordination is used for entities having the same syntactic relation with the head and serving the same functional role (e.g. subj, obj, etc.). It is the most important component in our simplification system. The parser tags word units such as \"and\" and \"as well as\" with the dependency label cc, and the conjugated words as conj. Our system deals with coordination based on the dependency tag of the conjugated word.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adjective", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In the case of coordination, the Identify function simply returns whether cc or conj is in the dependency graph of the input sentence. The Transform function manipulates the graph structure based on the dependency tags of the conjugated words as shown in Figure 2 . If the conjugated word is a verb, then we mark it as another root of the sentence. Cutting cc and conj edges in the graph and traversing from this new root results in a new sentence parallel to the original one. In other cases, such as the conjugation between nouns, we simply replace the noun phrases with their siblings and traverse from root again.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 255, |
|
"end": 263, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Adjective", |
|
"sec_num": null |
|
}, |
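A sketch of the coordination Identify/Transform pair on a spaCy dependency parse; it only covers conjoined verbs, and reassembles subtrees rather than literally cutting graph edges:

```python
import spacy

nlp = spacy.load("en_core_web_sm")

def identify_coordination(doc) -> bool:
    # Identify: is a cc or conj edge present in the dependency graph?
    return any(tok.dep_ in ("cc", "conj") for tok in doc)

def split_conjoined_verbs(text: str) -> list:
    doc = nlp(text)
    # A conjoined verb is treated as an additional root of the sentence.
    verbs = [t for t in doc if t.dep_ == "ROOT"]
    verbs += [t for t in doc if t.dep_ == "conj" and t.pos_ == "VERB"]
    subjects = [t for t in doc if t.dep_ in ("nsubj", "nsubjpass")]
    sentences = []
    for verb in verbs:
        keep = set(verb.subtree) | set(subjects)
        # Cut the cc and conj edges: drop coordinators and sibling verb subtrees.
        for child in verb.children:
            if child.dep_ == "cc" or (child.dep_ == "conj" and child.pos_ == "VERB"):
                keep -= set(child.subtree)
        sentences.append(" ".join(t.text for t in sorted(keep, key=lambda t: t.i)))
    return sentences

print(split_conjoined_verbs("Martha laughs and jumps into the water."))
# e.g. ['Martha laughs .', 'Martha jumps into the water']
```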
|
{ |
|
"text": "In order to generate animation, actions and participants extracted from simplified sentences are mapped to existing actions and objects in the animation engine. Due to practical reasons, it is not possible to create a library of animations for all possible actions in the world. We limit our library to a predefined list of 52 actions/animations, expanded to 92 by a dictionary of synonyms ( \u00a73.5). We also have a small library of pre-uploaded objects (such as \"campfire\", \"truck\" and others).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexical Simplification", |
|
"sec_num": "3.3." |
|
}, |
|
{ |
|
"text": "To animate unseen actions not in our list, we use a word2vec-based similarity function to find the nearest action in the list. Moreover, we use WordNet (Miller, 1995) to exclude antonyms. This helps to map non-list actions (such as \"squint at\") to the similar action in the list (e.g. \"look\"). If we fail to find a match, we check for a mapping while including the verb's preposition or syntactic object. We also use WordNet to obtain hypernyms for further checks, when the similarity function fails to find a close-enough animation. Correspondingly, for objects, we use the same similarity function and WordNet's holonyms. In-order traverse from the original root and the new root will result in simplified sentences as shown in Table 2 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 152, |
|
"end": 166, |
|
"text": "(Miller, 1995)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 730, |
|
"end": 737, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Lexical Simplification", |
|
"sec_num": "3.3." |
|
}, |
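A sketch of the nearest-action lookup with word-vector similarity plus WordNet antonym filtering; the action list stub and the 0.5 threshold are assumptions:

```python
import spacy
from nltk.corpus import wordnet as wn

nlp = spacy.load("en_core_web_md")  # a model that ships with word vectors
ACTION_LIST = ["look", "walk", "run", "talk", "sit"]  # stub of the 52-action list

def antonyms(word: str) -> set:
    return {ant.name() for syn in wn.synsets(word)
            for lemma in syn.lemmas() for ant in lemma.antonyms()}

def nearest_action(verb: str, threshold: float = 0.5):
    """Map an out-of-list verb to the closest in-list action, excluding
    antonyms; return None when nothing is close enough."""
    query = nlp(verb)[0]
    banned = antonyms(verb)
    best, best_sim = None, threshold
    for action in ACTION_LIST:
        if action in banned:
            continue
        sim = query.similarity(nlp(action)[0])
        if sim > best_sim:
            best, best_sim = action, sim
    return best

print(nearest_action("squint"))  # ideally maps to "look"
```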
|
{ |
|
"text": "Our list of actions and objects is not exhaustive. Currently, we do not cover actions which may not be visual. For out of list actions, we give the user a warning that the action cannot be animated. Nevertheless, this is a work in progress and we are working on including more animations for actions and objects in our knowledge base.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexical Simplification", |
|
"sec_num": "3.3." |
|
}, |
|
{ |
|
"text": "For each of the simplified sentences, information is extracted and populated into a predefined key-value pair structure. We will refer to the keys of this structure as Action Representation Fields (ARFs). These are similar to entities and relations in Knowledge Bases. ARFs include: owner, target, prop, action, origin action, manner, modifier location, modifier direction, start-time, duration, speed, translation, rotation, emotion, partial start time (for more details see Appendix C). This structure is inspired by the PAR (Badler et al., 2000) architecture, but adapted to our needs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 527, |
|
"end": 548, |
|
"text": "(Badler et al., 2000)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Action Representation Field (ARF): Information Extraction", |
|
"sec_num": "3.4." |
|
}, |
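The key-value structure can be pictured as a small record type; the field names follow the ARF list above, and the defaults echo the heuristics in Appendix C (everything else is an illustrative assumption):

```python
from dataclasses import dataclass
from typing import Optional

@dataclass
class ActionRepresentation:
    """One action extracted from a simplified sentence (ARF key-value pairs)."""
    owner: Optional[str] = None           # who performs the action
    target: Optional[str] = None          # what the action is performed on
    prop: Optional[str] = None            # object involved in the action
    action: Optional[str] = None          # verb mapped into the animation list
    origin_action: Optional[str] = None   # verb as it appeared in the text
    manner: Optional[str] = None
    modifier_location: Optional[str] = None
    modifier_direction: Optional[str] = None
    start_time: float = 0.0
    duration: float = 2.0                 # 2-second default per Appendix C
    speed: float = 1.0                    # scales 0.5 / 1 / 2 per Appendix C
    translation: bool = False
    rotation: bool = False
    emotion: Optional[str] = None
    partial_start_time: int = 0           # temporal id for ordering actions

arf = ActionRepresentation(owner="James", action="throw", target="a red ball")
```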
|
{ |
|
"text": "To extract the ARFs from the simplified sentences, we use a Semantic Role Labelling (SRL) model in combination with some heuristics, for example creating word lists for duration, speed, translation, rotation, emotion. We use a pretrained Semantic Role Labelling model 4 based on a Bi-directional LSTM network (He et al., 2017) with pre-trained ELMo embeddings (Peters et al., 2018) . We map information from each sentence to the knowledge base of animations and objects.", |
|
"cite_spans": [ |
|
{ |
|
"start": 309, |
|
"end": 326, |
|
"text": "(He et al., 2017)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 360, |
|
"end": 381, |
|
"text": "(Peters et al., 2018)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Action Representation Field (ARF): Information Extraction", |
|
"sec_num": "3.4." |
|
}, |
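A sketch of driving the AllenNLP SRL predictor (footnote 4) and reading its BIO tags into rough ARFs; the model path is a placeholder and the ARG-to-field mapping is a simplified stand-in for the paper's heuristics:

```python
from allennlp.predictors.predictor import Predictor

# Placeholder: substitute the path/URL of a published AllenNLP SRL archive.
predictor = Predictor.from_path("path/to/srl-model.tar.gz")

def extract_arfs(sentence: str) -> list:
    """Turn each SRL frame into a rough ARF dict (ARG0 -> owner, ARG1 -> target)."""
    out = predictor.predict(sentence=sentence)
    words = out["words"]
    arfs = []
    for frame in out["verbs"]:
        fields = {"origin_action": frame["verb"]}
        for i, tag in enumerate(frame["tags"]):   # BIO tags, e.g. "B-ARG0"
            if tag.endswith("ARG0"):
                fields.setdefault("owner", []).append(words[i])
            elif tag.endswith("ARG1"):
                fields.setdefault("target", []).append(words[i])
            elif tag.endswith("ARGM-MNR"):
                fields.setdefault("manner", []).append(words[i])
        arfs.append({k: " ".join(v) if isinstance(v, list) else v
                     for k, v in fields.items()})
    return arfs

print(extract_arfs("James gently throws a red ball to Alice"))
```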
|
{ |
|
"text": "We use the animation pipeline of the CAR-DINAL system. We plug in our NLP module in CARDINAL to generate animation. CARDINAL creates pre-visualizations of the text, both in storyboard form and animation. A storyboard is a series of pictures that demonstrates the sequence of scenes from a script. The animation is a 3-D animated video that approximately depicts the script. CARDINAL uses the Unreal game engine (Games, 2007) for generating pre-visualizations. It has a knowledge base of pre-baked animations (52 animations, plus a dictionary of synonyms, resulting in 92) and pre-uploaded objects (e.g. \"campfire\", \"tent\"). It also has 3-D models which can be used to create characters.", |
|
"cite_spans": [ |
|
{ |
|
"start": 411, |
|
"end": 424, |
|
"text": "(Games, 2007)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Animation Generation", |
|
"sec_num": "3.5." |
|
}, |
|
{ |
|
"text": "We initially used a corpus of Descriptions components from ScreenPy (Winer and Young, 2017), in order to study linguistic patterns in the movie script domain. Specifically, we used the \"heading\" and \"transition\" fields from ScreenPy's published JSON output on 1068 movie scripts scraped from IMSDb. We also scraped screenplays from SimplyScripts and ScriptORama 5 . After separating screenplays into well-formatted and illformatted, Descriptions components were extracted using our model ( \u00a73.1). This gave a corpus of Descriptions blocks from 996 screenplays.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text-to-Animation Corpus", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "The corpus contains a total of 525,708 Descriptions components. The Descriptions components contain a total of 1,402,864 sentences. Out of all the Descriptions components, 49.45 % (259,973) contain at least one verb which is in the animation list (henceforth called \"action verbs\"). Descriptions components having at least one action verb have in total 920,817 sentences. Out of the- ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text-to-Animation Corpus", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "There are no standard corpora for text-toanimation generation. It is also not clear how should such systems be evaluated and what should be the most appropriate evaluation metric. Nevertheless, it is important to assess how our system is performing. We evaluate our system using two types of evaluation: Intrinsic and Extrinsic. Intrinsic evaluation is for evaluating the NLP pipeline of our system using the BLEU metric. Extrinsic evaluation is an end-to-end qualitative evaluation of our text-to-animation generation system, done via a user study.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation and Analysis", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "To evaluate the performance of our proposed NLP pipeline, 500 Descriptions components from the test set were randomly selected. Three annotators manually translated these 500 Descriptions components into simplified sentences and extracted all the necessary ARFs from the simplified sentences. This is a time intensive process and took around two months. 30 % of the Descriptions blocks contain verbs not in the list of 92 animation verbs. There are approximately 1000 sentences in the test set, with average length of 12 words. Each Descriptions component is also annotated by the three annotators for the ARFs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Intrinsic Evaluation", |
|
"sec_num": "5.1." |
|
}, |
|
{ |
|
"text": "Taking inspiration from the text simplification community (Nisioi et al., 2017; Saggion, 2017), we use the BLEU score (Papineni et al., 2002) for evaluating our simplification and information extraction modules. For each simplified sentence s i we have 3 corresponding references r 1 i , r 2 i and r 3 i . We also evaluate using the SARI (Xu et al., 2016) score to evaluate our text simplification module.", |
|
"cite_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 141, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 338, |
|
"end": 355, |
|
"text": "(Xu et al., 2016)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Intrinsic Evaluation", |
|
"sec_num": "5.1." |
|
}, |
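Per footnote 6, corpus-level BLEU can be computed with NLTK's default parameters; a minimal sketch with three references per hypothesis:

```python
from nltk.translate.bleu_score import corpus_bleu

def corpus_bleu_score(hypotheses, references):
    """hypotheses: list of simplified sentences; references: a 3-element list
    of annotator references per hypothesis, aligned index by index."""
    hyp_tokens = [h.split() for h in hypotheses]
    ref_tokens = [[r.split() for r in refs] for refs in references]
    return corpus_bleu(ref_tokens, hyp_tokens)  # default NLTK parameters

score = corpus_bleu_score(
    ["He jumps into the water"],
    [["He jumps into the water", "He jumps in the water", "He jumps into water"]],
)
```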
|
{ |
|
"text": "Each action block a is reduced to a set of simple sentences S a = {s 1 , s 2 , ....s na }. And for the same action block a, each annotator t, t \u2208 {1, 2, 3} produces a set of simplified sen-", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Simplification", |
|
"sec_num": "5.1.1." |
|
}, |
|
|
{ |
|
"text": "Since the simplification rules in our system may not maintain the original ordering of verbs, we do not have sentence level alignment between elements in S a and R t a . For example, action block a = He laughs after he jumps into the water is reduced by our system into two simplified sentences S a = {s 1 = He jumps into the water, s 2 = He laughs} by the temporal heuristics, while annotator 3 gives us R 3 a = {r 3 1 = He laughs, r 3 2 = He jumps into the water}. In such cases, sequentially matching s i to r j will result in a wrong (hypothesis, reference) alignment which is (s 1 , r 3 1 ) and (s 2 , r 3", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Simplification", |
|
"sec_num": "5.1.1." |
|
}, |
|
{ |
|
"text": "2 ). To address this problem, for each hypothesis s i \u2208 S a , we take the corresponding reference r t i \u2208 R t a as the one with the least Levenshtein Distance (Navarro, 2001) ", |
|
"cite_spans": [ |
|
{ |
|
"start": 159, |
|
"end": 174, |
|
"text": "(Navarro, 2001)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Simplification", |
|
"sec_num": "5.1.1." |
|
}, |
|
|
{ |
|
"text": "As per this alignment, in the above example, we will have correct alignments (s 1 , r 3", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Simplification", |
|
"sec_num": "5.1.1." |
|
}, |
|
{ |
|
"text": "2 ) and (s 2 , r 3 1 ). Thus, for each simplified sentence s i we have 3 corresponding references r 1 i , r 2 i and r 3 i . The aligned sentences are used to calculate corpus level BLEU score 6 and SARI score 7 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 192, |
|
"end": 193, |
|
"text": "6", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Simplification", |
|
"sec_num": "5.1.1." |
|
}, |
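The Levenshtein alignment can be sketched with NLTK's edit_distance: each hypothesis greedily picks the closest reference in the annotator's set, which fixes order mismatches like the example above:

```python
from nltk.metrics.distance import edit_distance

def align_references(hypotheses, annotator_refs):
    """For each hypothesis s_i, pick the reference r_j with the least
    Levenshtein distance in this annotator's reference set."""
    return [min(annotator_refs, key=lambda ref: edit_distance(hyp, ref))
            for hyp in hypotheses]

S_a = ["He jumps into the water", "He laughs"]
R3_a = ["He laughs", "He jumps into the water"]
print(align_references(S_a, R3_a))
# ['He jumps into the water', 'He laughs']
```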
|
{ |
|
"text": "The evaluation results for text simplification are summarized in Table 4 . We compare against YATS (Ferr\u00e9s et al., 2016) and neural end-to-end text simplification system NTS-w2v (Nisioi et al., 2017). YATS is also a rule-based text simplification system. As shown in Table 4 , our system performs better than YATS on both the metrics, indicative of the limitations of the YATS system. A manual examination of the results also showed the same trend. However, the key point to note is that we are not aiming for text simplification in the conventional sense. Existing text simplification systems tend to summarize text and discard some of the information. Our aim is to break a complex sentence into simpler ones while preserving the information. An example of a Descriptions component with BLEU 2 scores is given in Table 3 . In the first simplified sentence, the space between Ellie and 's causes the drop in the score. But it gives exactly the same answer as both annotators. In the second sentence, the system output is the same as the annotator I's answer, so the BLEU 2 score is 1. In the last case, the score is low, as annotators possibly failed to replace her with the actual Character Cue Ellie. Qualitative examination reveals, in general, that our system gives a reasonable result for the syntactic simplification module. As exemplified, BLEU is not the perfect metric to evaluate our system, and therefore in the future we plan to explore other metrics.", |
|
"cite_spans": [ |
|
{ |
|
"start": 99, |
|
"end": 120, |
|
"text": "(Ferr\u00e9s et al., 2016)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 72, |
|
"text": "Table 4", |
|
"ref_id": "TABREF7" |
|
}, |
|
{ |
|
"start": 267, |
|
"end": 274, |
|
"text": "Table 4", |
|
"ref_id": "TABREF7" |
|
}, |
|
{ |
|
"start": 815, |
|
"end": 822, |
|
"text": "Table 3", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sentence Simplification", |
|
"sec_num": "5.1.1." |
|
}, |
|
{ |
|
"text": "We also evaluate the system's output for action representation fields against gold annotations. In our case, some of the fields can have multiple (2 or 3) words such as owner, target, prop, action, origin action, manner, location and direction. We use BLEU 1 as the evaluation metric to measure the BOW similarity between system output and ground truth references. The results are shown in Table 5 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 390, |
|
"end": 397, |
|
"text": "Table 5", |
|
"ref_id": "TABREF9" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "ARF Evaluation", |
|
"sec_num": "5.1.2." |
|
}, |
|
{ |
|
"text": "In identifying owner, target and prop, the system tends to use a fixed long mention, while annotators prefer short mentions for the same character/object. The score of prop is relatively lower than all other fields, which is caused by a systematic SRL mapping error. The relatively high accu-racy on the action field indicates the consistency between system output and annotator answers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ARF Evaluation", |
|
"sec_num": "5.1.2." |
|
}, |
|
{ |
|
"text": "Annotation on the emotion ARF is rather subjective. Responses on the this field are biased and noisy. The BLEU 1 score on this is relatively low. For the other non-textual ARFs, we use precision and recall to measure the system's behavior. Results are shown in Table 6 . These fields are subjective: annotators tend to give different responses for the same input sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 261, |
|
"end": 268, |
|
"text": "Table 6", |
|
"ref_id": "TABREF10" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "ARF Evaluation", |
|
"sec_num": "5.1.2." |
|
}, |
|
{ |
|
"text": "rotation and translation have Boolean values. Annotators agree on these two fields in most of the sentences. The system, on the other hand, fails to identify actions involving rotation. For example, in the sentence \"Carl closes CARL 's door sharply\" all four annotators think that this sentence involves rotation, which is not found by the system. This is due to the specificity of rules on identifying these two fields.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ARF Evaluation", |
|
"sec_num": "5.1.2." |
|
}, |
|
{ |
|
"text": "speed, duration and start time have high precision and low recall. This indicates the inconsistency in annotators' answers. For example, in the sentence \"Woody runs around to the back of the pizza truck\", two annotators give 2 seconds and another gives 1 second in duration. These fields are subjective and need the opinion of the script author or the director. In the future, we plan to involve script editors in the evaluation process.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ARF Evaluation", |
|
"sec_num": "5.1.2." |
|
}, |
|
{ |
|
"text": "We conducted a user study to evaluate the performance of the system qualitatively. The focus of the study was to evaluate (from the end user's perspective) the performance of the NLP component w.r.t. generating reasonable animations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extrinsic Evaluation", |
|
"sec_num": "5.2." |
|
}, |
|
{ |
|
"text": "We developed a questionnaire consisting of 20 sentence-animation video pairs. The animations were generated by our system. The questionnaire was filled by 22 participants. On an average it took around 25 minutes for a user to complete the study.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extrinsic Evaluation", |
|
"sec_num": "5.2." |
|
}, |
|
{ |
|
"text": "We asked users to evaluate, on a five-point Likert scale (Likert, 1932) , if the video shown was a reasonable animation for the text, how much of the text information was depicted in the video and how much of the information in the video was present in the text ( Table 7 ). The 68.18 % of the participants rated the overall pre-visualization as neutral or above. The rating was 64.32 % (neutral or above) for the conservation of textual information in the video, which is reasonable, given limitations of the system that are not related to the NLP component. For the last question, 75.90 % (neutral or above) agreed that the video did not have extra information. In general, there seemed to be reasonable consensus in the responses. Besides the limitations of our system, disagreement can be attributed to the ambiguity and subjectivity of the task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 71, |
|
"text": "(Likert, 1932)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 264, |
|
"end": 271, |
|
"text": "Table 7", |
|
"ref_id": "TABREF12" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Extrinsic Evaluation", |
|
"sec_num": "5.2." |
|
}, |
|
{ |
|
"text": "We also asked the participants to describe qualitatively what textual information, if any, was missing from the videos. Most of the missing information was due to limitations of the overall system rather than the NLP component: facial expression information was not depicted because the character 3-D models are deliberately designed without faces, so that animators can draw on them. Information was also missing in the videos if it referred to objects or actions that do not have a close enough match in the object list or animations list. Furthermore, the animation component only supports animations referring to a character or object as a whole, not parts, (e.g. \"Ben raises his head\" is not supported).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extrinsic Evaluation", |
|
"sec_num": "5.2." |
|
}, |
|
{ |
|
"text": "However, there were some cases where the NLP component can be improved. For example, lexical simplification failed to map the verb \"watches\" to the similar animation \"look\". In one case, syntactic simplification created only two simplified sentences for a verb which had three subjects in the original sentence. In a few cases, lexical simplification successfully mapped to the most similar animation (e.g.\"argue\" to \"talk\") but the participants were not satisfied -they were expecting a more exact animation. We plan to address these shortcomings in future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extrinsic Evaluation", |
|
"sec_num": "5.2." |
|
}, |
|
{ |
|
"text": "In this paper, we proposed a new text-toanimation system. The system uses linguistic text simplification techniques to map screenplay text to animation. Evaluating such systems is a challenge. Nevertheless, intrinsic and extrinsic evaluations show reasonable performance of the system. The proposed system is not perfect, for example, the current system does not take into account the discourse information that links the actions implied in the text, as currently the system only processes sentences independently. In the future, we would like to leverage discourse information by considering the sequence of actions which are described in the text (Modi and Titov, 2014; Modi, 2016) . This would also help to resolve ambiguity in text with regard to actions Modi, 2017) . Moreover, our system can be used for generating training data which could be used for training an end-to-end neural system. COMPLEX: Another parent , Mike Munson , sits on the bench with a tablet and uses an app to track and analyze the team 's shots. NSELSTM-B: Another parent, Mike Munson, sits on the bench with a tablet and uses an app to track. YATS: Another parent sits on the bench with a tablet and uses an app to track and examines the team' s shots. This parent is Mike Munson. OURS: Another parent is Mike Munson. Another parent sits on the bench with a tablet. Another parent uses an app. COMPLEX: Stowell believes that even documents about Lincoln's death will give people a better understanding of the man who was assassinated 150 years ago this April. NSELSTM-B: Stowell believes that the discovery about Lincoln's death will give people a better understanding of the man. YATS: Stowell believes that even documents about Lincoln' s death will give people a better reason of the man. This man was assassinated 150 years ago this April. OURS: Stowell believes. Even documents about Lincoln 's death give people a better understanding of the man. Somebody assassinates the man 150 years ago this April. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 649, |
|
"end": 671, |
|
"text": "(Modi and Titov, 2014;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 672, |
|
"end": 683, |
|
"text": "Modi, 2016)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 759, |
|
"end": 770, |
|
"text": "Modi, 2017)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "6." |
|
}, |
|
{ |
|
"text": "github.com/huggingface/neuralcoref", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://spacy.io", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "AllenNLP SRL model: https://github.com/ allenai/allennlp", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://www.simplyscripts.com and http: //www.script-o-rama.com", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We used NLTK's API with default parameters: http: //www.nltk.org/api/nltk.translate.html# nltk.translate.bleu_score.corpus_bleu 7 Implementation available at https://github. com/cocoxu/simplification/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank anonymous reviewers for their insightful comments. We would also like to thank Daniel Inversini, Isabel Sim\u00f3 Ayn\u00e9s, Carolina Ferrari, Roberto Comella and Max Grosse for their help and support in developing the Cardinal system. Mubbasir Kapadia has been funded in part by NSF IIS-1703883 and NSF S&AS-1723869.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "If input starts with '( , return True, otherwise False 4If input ends with ') , return True, otherwise False 5If |#lastindents \u2212 #currentindents| < 3, return True, otherwise False 6If the input is uppercase and contains transition words such as 'DISSOLVE', 'CUT TO'. etc, return True, otherwise False 7If the input equals to 'THE END', return True. Otherwise False. return traverse a string(root-token)Algorithm 9 Transform() in Appositive Clause Analyzer 1: procedure APPOSITIVE CLAUSE TRANSFORM PROCEDURE(anchor, head)The APPOS token and its head token.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Appendix A", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "cut edge between anchor and head token remove anchor from head's right children Appendix C: Action Representation Fields Action Representation Fields (ARFs) in the demo sentence James gently throws a red ball to Alice in the restaurant from back, extracted with SRL:owner: James target: a red ball prop: to Alice action: throw origin action: throws manner: gently modifier location: in the restaurant modifier direction: from back In this case, our output for the prop and target is not correct; they should be swapped. This is one example where this module can introduce errors.Additional ARFs, extracted heuristically:startTime: Calculated by current scene time duration: We have a pre-defined list of words that when appearing in the sentence, they will indicate a short duration (e.g \"run\" or \"fast\") and therefore the duration will be set to 1 second; in contrast, for words like \"slowly\" we assign a duration of 4 seconds; otherwise, the duration is 2 seconds.speed: Similarly to duration, we have pre-defined lists of words that would affect the speed of the pre-baked animation: \"angrily\" would result in faster movement, but \"carefully\" in slower movement. We have 3 scales: 0.5, 1, 2 which corresponds to slow, normal and fast.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "2:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We have a list of actions which would entail a movement in from one place to another, e.g. \"go\". If the value of the action exists in this list, it is set to True, otherwise False.rotation: If the action exists in our list of verbs that entail rotation, this field is True, otherwise False. Rotation refers to movement in place e.g. \"turn\" or \"sit\". emotion: We find the closet neighbor of each word in the sentence in list of words that indicate emotion, using word vector similarity. If the similarity exceeds an empirically tested threshold, then we take the corresponding emotion word as the emotion field of this action. partial start time: an important field, since it controls the sequence order of each action. It determines which actions happen in parallel and which happen sequentially. This is still an open question. We solve this problem when doing sentence simplification. Together with the input sentence, current time is also fed into each Analyzer. There are several rules in some of the Analyzers to obtain temporal information. For example, in Line 5 of the Adverbial Clause Analyzer (c.f.3), we assign different temporal sequences for simplified actions. The algorithm is shown in Algorithm 15. The sign together with specific prepositions determines the change direction of current temporal id. In the Coordination Analyzer, the current temporal id changes when it encounters two verbs sharing same subject. Then the later action will get a bigger temporal id.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "translation:", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Leveraging linguistic structure for open domain information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Gabor", |
|
"middle": [], |
|
"last": "Angeli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Melvin Jose Johnson", |
|
"middle": [], |
|
"last": "Premkumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "344--354", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gabor Angeli, Melvin Jose Johnson Premkumar, and Christopher D Manning. 2015. Leveraging linguis- tic structure for open domain information extraction. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Lan- guage Processing (Volume 1: Long Papers), volu- me 1, pages 344-354.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Embodied conversational agents. chapter Parameterized Action Representation for Virtual Human Agents", |
|
"authors": [ |
|
{ |
|
"first": "Norman", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "Badler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rama", |
|
"middle": [], |
|
"last": "Bindiganavale", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Allbeck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Schuler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liwei", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martha", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "256--284", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Norman I. Badler, Rama Bindiganavale, Jan Allbeck, William Schuler, Liwei Zhao, and Martha Palmer. 2000. Embodied conversational agents. chapter Pa- rameterized Action Representation for Virtual Hu- man Agents, pages 256-284. MIT Press, Cambrid- ge, MA, USA.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Jape: a java annotation patterns engine", |
|
"authors": [ |
|
{ |
|
"first": "Hamish", |
|
"middle": [], |
|
"last": "Cunningham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diana", |
|
"middle": [], |
|
"last": "Maynard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Valentin", |
|
"middle": [], |
|
"last": "Tablan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hamish Cunningham, Diana Maynard, and Valentin Tablan. 2000. Jape: a java annotation patterns en- gine.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Clausie: clause-based open information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Luciano", |
|
"middle": [], |
|
"last": "Del Corro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rainer", |
|
"middle": [], |
|
"last": "Gemulla", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 22nd international conference on World Wide Web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "355--366", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luciano Del Corro and Rainer Gemulla. 2013. Clau- sie: clause-based open information extraction. In Proceedings of the 22nd international conference on World Wide Web, pages 355-366. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Pica: Proactive intelligent conversational agent for interactive narratives", |
|
"authors": [ |
|
{ |
|
"first": "Jessica", |
|
"middle": [], |
|
"last": "Falk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Poulakos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mubbasir", |
|
"middle": [], |
|
"last": "Kapadia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert W", |
|
"middle": [], |
|
"last": "Sumner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 18th International Conference on Intelligent Virtual Agents", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "141--146", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jessica Falk, Steven Poulakos, Mubbasir Kapadia, and Robert W Sumner. 2018. Pica: Proactive intelligent conversational agent for interactive narratives. In Proceedings of the 18th International Conference on Intelligent Virtual Agents, pages 141-146. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Yats: Yet another text simplifier", |
|
"authors": [], |
|
"year": 2016, |
|
"venue": "Natural Language Processing and Information Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "335--342", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Ferr\u00e9s, Montserrat Marimon, Horacio Saggion, and Ahmed AbuRa'ed. 2016. Yats: Yet another text simplifier. In Natural Language Processing and In- formation Systems, pages 335-342, Cham. Springer International Publishing.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Unreal engine", |
|
"authors": [], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Epic Games. 2007. Unreal engine. Online: https://www. unrealengine. com.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Scenemaker: Intelligent multimodal visualization of natural language scripts", |
|
"authors": [ |
|
{ |
|
"first": "Eva", |
|
"middle": [], |
|
"last": "Hanser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [ |
|
"Mc" |
|
], |
|
"last": "Kevitt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Lunney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joan", |
|
"middle": [], |
|
"last": "Condell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 20th Irish Conference on Artificial Intelligence and Cognitive Science, AICS'09", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "144--153", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eva Hanser, Paul Mc Kevitt, Tom Lunney, and Joan Condell. 2010. Scenemaker: Intelligent multimodal visualization of natural language scripts. In Pro- ceedings of the 20th Irish Conference on Artificial Intelligence and Cognitive Science, AICS'09, pages 144-153, Berlin, Heidelberg. Springer-Verlag.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Visualizing natural language descriptions: A survey", |
|
"authors": [ |
|
{ |
|
"first": "Kaveh", |
|
"middle": [], |
|
"last": "Hassani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Won-Sook", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "ACM Comput. Surv", |
|
"volume": "49", |
|
"issue": "1", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/2932710" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kaveh Hassani and Won-Sook Lee. 2016. Visualizing natural language descriptions: A survey. ACM Com- put. Surv., 49(1):17:1-17:34.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Deep semantic role labeling: What works and what's next", |
|
"authors": [ |
|
{ |
|
"first": "Luheng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "473--483", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luheng He, Kenton Lee, Mike Lewis, and Luke Zettle- moyer. 2017. Deep semantic role labeling: What works and what's next. In Proceedings of the 55th Annual Meeting of the Association for Computatio- nal Linguistics (Volume 1: Long Papers), volume 1, pages 473-483.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "An improved non-monotonic transition system for dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Honnibal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1373--1378", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Honnibal and Mark Johnson. 2015. An im- proved non-monotonic transition system for depen- dency parsing. In Proceedings of the 2015 Confe- rence on Empirical Methods in Natural Language Processing, pages 1373-1378, Lisbon, Portugal. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "2017. spacy 2: Natural language understanding with bloom embeddings, convolutional neural networks and incremental parsing", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Honnibal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ines", |
|
"middle": [], |
|
"last": "Montani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Honnibal and Ines Montani. 2017. spacy 2: Natural language understanding with bloom embed- dings, convolutional neural networks and incremen- tal parsing. To appear.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Carsim: a system to visualize written road accident reports as animated 3d scenes", |
|
"authors": [ |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Johansson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anders", |
|
"middle": [], |
|
"last": "Berglund", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierre", |
|
"middle": [], |
|
"last": "Nugues", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the 2nd Workshop on Text Meaning and Interpretation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "57--64", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Richard Johansson, David Williams, Anders Berglund, and Pierre Nugues. 2004. Carsim: a system to vi- sualize written road accident reports as animated 3d scenes. In Proceedings of the 2nd Workshop on Text Meaning and Interpretation, pages 57-64. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "A technique for the measurement of attitudes", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Likert", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1932, |
|
"venue": "Archives of Psychology", |
|
"volume": "22", |
|
"issue": "140", |
|
"pages": "1--55", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Likert. 1932. A technique for the measurement of attitudes. Archives of Psychology, 22(140):1-55.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Script visualization (scriptviz): a smart system that makes writing fun", |
|
"authors": [ |
|
{ |
|
"first": "Zhi-Qiang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ka-Ming", |
|
"middle": [], |
|
"last": "Leung", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Soft Computing", |
|
"volume": "10", |
|
"issue": "1", |
|
"pages": "34--40", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/s00500-005-0461-4" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhi-Qiang Liu and Ka-Ming Leung. 2006. Script vi- sualization (scriptviz): a smart system that makes writing fun. Soft Computing, 10(1):34-40.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Automatic generation of computeranimation: using AI for movie animation", |
|
"authors": [ |
|
{ |
|
"first": "Ruqian", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Songmao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ruqian Lu and Songmao Zhang. 2002. Automatic ge- neration of computeranimation: using AI for movie animation. Springer-Verlag.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Virtual human animation in natural language visualisation", |
|
"authors": [ |
|
{ |
|
"first": "Minhua", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Kevitt", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Artif. Intell. Rev", |
|
"volume": "25", |
|
"issue": "1-2", |
|
"pages": "37--53", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/s10462-007-9042-5" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minhua Ma and Paul Kevitt. 2006. Virtual human ani- mation in natural language visualisation. Artif. In- tell. Rev., 25(1-2):37-53.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Generating images from captions with attention", |
|
"authors": [ |
|
{ |
|
"first": "Elman", |
|
"middle": [], |
|
"last": "Mansimov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emilio", |
|
"middle": [], |
|
"last": "Parisotto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [ |
|
"Lei" |
|
], |
|
"last": "Ba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elman Mansimov, Emilio Parisotto, Jimmy Lei Ba, and Ruslan Salakhutdinov. 2015. Generating ima- ges from captions with attention. arXiv preprint ar- Xiv:1511.02793.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Cardinal: Computer assisted authoring of movie scripts", |
|
"authors": [ |
|
{ |
|
"first": "Marcel", |
|
"middle": [], |
|
"last": "Marti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jodok", |
|
"middle": [], |
|
"last": "Vieli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Wito\u0144", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rushit", |
|
"middle": [], |
|
"last": "Sanghrajka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Inversini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diana", |
|
"middle": [], |
|
"last": "Wotruba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isabel", |
|
"middle": [], |
|
"last": "Simo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sasha", |
|
"middle": [], |
|
"last": "Schriber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mubbasir", |
|
"middle": [], |
|
"last": "Kapadia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Markus", |
|
"middle": [], |
|
"last": "Gross", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "23rd International Conference on Intelligent User Interfaces", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "509--519", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcel Marti, Jodok Vieli, Wojciech Wito\u0144, Rushit Sanghrajka, Daniel Inversini, Diana Wotruba, Isabel Simo, Sasha Schriber, Mubbasir Kapadia, and Mar- kus Gross. 2018. Cardinal: Computer assisted aut- horing of movie scripts. In 23rd International Con- ference on Intelligent User Interfaces, pages 509- 519. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Open language learning for information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Mausam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Schmitz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Bart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Soderland", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Etzioni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "523--534", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mausam, Michael Schmitz, Robert Bart, Stephen So- derland, and Oren Etzioni. 2012. Open language learning for information extraction. In Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning, pages 523-534. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Wordnet: A lexical database for english", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "George", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Miller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "COMMUNICATIONS OF THE ACM", |
|
"volume": "38", |
|
"issue": "", |
|
"pages": "39--41", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "George A. Miller. 1995. Wordnet: A lexical database for english. COMMUNICATIONS OF THE ACM, 38:39-41.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Event embeddings for semantic script modeling", |
|
"authors": [ |
|
{ |
|
"first": "Ashutosh", |
|
"middle": [], |
|
"last": "Modi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of The 20th SIGNLL Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashutosh Modi. 2016. Event embeddings for se- mantic script modeling. In Proceedings of The 20th SIGNLL Conference on Computational Natu- ral Language Learning.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Modeling common sense knowledge via scripts", |
|
"authors": [ |
|
{ |
|
"first": "Ashutosh", |
|
"middle": [], |
|
"last": "Modi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashutosh Modi. 2017. Modeling common sense know- ledge via scripts. Ph.D. thesis, Universit\u00e4t des Saar- landes.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Inducing neural models of script knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Ashutosh", |
|
"middle": [], |
|
"last": "Modi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Titov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Conference on Computational Natural Language Learning (CoNLL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashutosh Modi and Ivan Titov. 2014. Inducing neu- ral models of script knowledge. In Conferen- ce on Computational Natural Language Learning (CoNLL).", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Modelling semantic expectation: Using script knowledge for referent prediction", |
|
"authors": [ |
|
{ |
|
"first": "Ashutosh", |
|
"middle": [], |
|
"last": "Modi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Titov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vera", |
|
"middle": [], |
|
"last": "Demberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Asad", |
|
"middle": [], |
|
"last": "Sayeed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manfred", |
|
"middle": [], |
|
"last": "Pinkal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "31--44", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashutosh Modi, Ivan Titov, Vera Demberg, Asad Sa- yeed, and Manfred Pinkal. 2017. Modelling se- mantic expectation: Using script knowledge for re- ferent prediction. Transactions of the Association for Computational Linguistics, 5:31-44.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "A guided tour to approximate string matching", |
|
"authors": [ |
|
{ |
|
"first": "Gonzalo", |
|
"middle": [], |
|
"last": "Navarro", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "ACM computing surveys (CSUR)", |
|
"volume": "33", |
|
"issue": "1", |
|
"pages": "31--88", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gonzalo Navarro. 2001. A guided tour to approximate string matching. ACM computing surveys (CSUR), 33(1):31-88.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Neuralcoref: Coreference resolution in spacy with neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Ravenscroft", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maxwell", |
|
"middle": [], |
|
"last": "Rebo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, James Ravenscroft, Julien Chau- mond, and Maxwell Rebo. 2018. Neu- ralcoref: Coreference resolution in spacy with neural networks. Available online at https://github.com/huggingface/neuralcoref.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Optimizing statistical machine translation for text simplification", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Courtney", |
|
"middle": [], |
|
"last": "Napoles", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ellie", |
|
"middle": [], |
|
"last": "Pavlick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quanze", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "401--415", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Xu, Courtney Napoles, Ellie Pavlick, Quanze Chen, and Chris Callison-Burch. 2016. Optimizing statistical machine translation for text simplification. Transactions of the Association for Computational Linguistics, 4:401-415.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "System Architecture: Screenplays are first segmented into different functional blocks. Then, the descriptive action sentences are simplified. Simplified sentences are used to generate animation. formation extraction tasks.", |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Transform in an example coordination sentence. Firstly the dependency links of cc and conj are cut. Then we look for a noun in the left direct children of the original root LAUGHS and link the new root gives with it.", |
|
"uris": null |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "Adverbial Clause Modifier find advcl token and its head. Also conjuncts of head token cut advcl edge. If advcl token does not have subject, add subject of root as advcl's most-left child and remove prep and mark token. Then traverse from both root and advcl token Inverted Clausal Subject attr token has to be the child of head of csubj token change position of actual verb and subject Clausal Complement find ccomp token in dependency tree cut ccomp link, add subject to subordinate clause if necessary Passive Voice check presence of nsubjpass or csubjpass optionally for auxpass and agent cut auxpass link if any. Cut nsubjpass or csubjpass link. Prepend subject token to verb token's right children. Finally append suitable subject. Open Clause Complement find xcomp verb token adn actual verb token if aux token presents, cut aux link, then replace xcomp-verb in subject's children with actual-verb, traverse from actual-verb; else, cut xcomp link, traverse from xcomp-verb Adjective Clause find acl verb token and its head cut acl link. Link subject node with it. Traverser from acl node" |
|
}, |
|
"TABREF2": { |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "Linguistic rules for text simplification module" |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table><tr><td>Type</td><td colspan=\"3\">Example Input Sentence</td><td/><td/><td>System Output Sentence 1</td><td>System Output Sentence 2</td></tr><tr><td>Coordination</td><td colspan=\"5\">She LAUGHS, and[cc] gives[conj] Kevin a kiss.</td><td>She laughs.</td><td>She gives Kevin a kiss.</td></tr><tr><td>Pre-Correlative</td><td colspan=\"5\">It's followed by another squad car, both[preconj]</td><td>It's followed by another squad car,</td><td>-</td></tr><tr><td/><td colspan=\"2\">with sirens blaring.</td><td/><td/><td/><td>with sirens blaring.</td><td/></tr><tr><td>Appositive</td><td colspan=\"5\">Kevin is reading a book the Bible[appos]</td><td>Kevin reads a book</td><td/></tr><tr><td>Relative-nsubj</td><td>Frank</td><td>gestures</td><td>to</td><td>the</td><td>SALESMAN,</td><td>the SALESMAN waits on a woman.</td><td>Frank gestures to the SALESMAN.</td></tr><tr><td/><td colspan=\"5\">who[nsubj]'s waiting[relcl] on a woman</td><td/><td/></tr><tr><td>Relative-advmod</td><td colspan=\"5\">Chuck is in the stage of exposure whe-</td><td>Chuck is in the stage of exposure</td><td>the personality splits at exposure.</td></tr><tr><td/><td colspan=\"5\">re[advmod] the personality splits[relcl]</td><td/><td/></tr><tr><td>Relative-poss</td><td colspan=\"5\">The girl, whose[poss] name is[relcl] Helga, co-</td><td>The girl cowers</td><td>The girl 's name is Helga</td></tr><tr><td/><td>wers.</td><td/><td/><td/><td/><td/><td/></tr><tr><td>Relative-omit</td><td colspan=\"5\">Kim is the sexpot Peter saw[relcl] in Washington</td><td>Peter sees Kim in Washington Square</td><td>Kim is the sexpot.</td></tr><tr><td/><td colspan=\"2\">Square Park</td><td/><td/><td/><td>Park.</td><td/></tr><tr><td>Adverbial</td><td colspan=\"5\">Jim panics as[advcl] his mom reacts, shocked.</td><td>Jim panics, shocked.</td><td>Jim's mom reacts.</td></tr><tr><td>Adverbial-remove</td><td colspan=\"5\">Suddenly there's a KNOCK at the door, im-</td><td>Suddenly there 's a KNOCK at the</td><td>Immediately JIM 'S MOM enters.</td></tr><tr><td/><td colspan=\"5\">mediately after[prep] which JIM'S MOM en-</td><td>door.</td><td/></tr><tr><td/><td colspan=\"2\">ters[advcl].</td><td/><td/><td/><td/><td/></tr><tr><td>Inverted Cl. Subject</td><td colspan=\"5\">Running[csubj] towards Oz is Steve Stifler</td><td>Steve Stifler runs towards Oz.</td><td>-</td></tr><tr><td>Clausal Component</td><td colspan=\"5\">The thing is, it actually sounds[ccomp] really</td><td>The thing is.(will be eliminated by the</td><td>It actually sounds really good.</td></tr><tr><td/><td>good.</td><td/><td/><td/><td/><td>filter)</td><td/></tr><tr><td>Passive Voice</td><td colspan=\"5\">They[nsubjpass] are suddenly illuminated by the</td><td>Suddenly the glare of headlights illu-</td><td>-</td></tr><tr><td/><td colspan=\"2\">glare of headlights.</td><td/><td/><td/><td>minateds them.</td><td/></tr><tr><td>Open Clausal</td><td colspan=\"5\">The sophomore comes running[xcomp] through</td><td/><td/></tr><tr><td/><td colspan=\"2\">the kitchen.</td><td/><td/><td/><td/><td/></tr></table>", |
|
"html": null, |
|
"text": "The book is the Bible. Relative-dobj She pulls out a letter which[dobj] she hands[relcl] to Keven Shee pulls out a letter She hands a lettre to Kevin. Relative-pobj A reef encloses the cove where[pobj] he came[relcl] from.A reef encloses the cove he comes from the cove." |
|
}, |
|
"TABREF4": { |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "" |
|
}, |
|
"TABREF5": { |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table><tr><td>System output</td><td>Annotator I</td><td>Annotator II</td><td>BLEU2( %)</td></tr><tr><td>Carl touches Ellie 's shoulder</td><td>carl touches ellie's shoulder</td><td>carl touches ellie's shoulder.</td><td>38.73</td></tr><tr><td>the doctor explains</td><td>the doctor explains</td><td>the doctor is talking.</td><td>100</td></tr><tr><td>Ellie drops Ellie head in Ellie</td><td colspan=\"2\">ellie drops her head in her hands ellie drops her head in her</td><td>48.79</td></tr><tr><td>hands</td><td/><td>hands.</td><td/></tr></table>", |
|
"html": null, |
|
"text": "Carl touches Ellie's shoulder as the doctor explains. Ellie drops her head in her hands." |
|
}, |
|
"TABREF6": { |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table><tr><td/><td colspan=\"2\">BLEU SARI</td></tr><tr><td>NTS-w2v</td><td>61.45</td><td>36.04</td></tr><tr><td>YATS</td><td>58.83</td><td>48.75</td></tr><tr><td>Our System</td><td>67.68</td><td>50.65</td></tr></table>", |
|
"html": null, |
|
"text": "Differences between system output and annotator responses" |
|
}, |
|
"TABREF7": { |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table><tr><td>: Results on syntactic simplification</td></tr><tr><td>se, 42.2 % (388,597) of the sentences contain ac-</td></tr><tr><td>tion verbs. In the corpus, the average length of a</td></tr><tr><td>sentence is 12 words.</td></tr></table>", |
|
"html": null, |
|
"text": "" |
|
}, |
|
"TABREF9": { |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table><tr><td>Field</td><td>P</td><td>R</td><td>F1</td></tr><tr><td>s time</td><td colspan=\"3\">86.49 68.63 76.53</td></tr><tr><td>rot.</td><td colspan=\"3\">82.04 81.16 81.60</td></tr><tr><td colspan=\"4\">duration 94.72 73.92 83.04</td></tr><tr><td>transl.</td><td colspan=\"3\">75.49 86.47 80.61</td></tr><tr><td>speed</td><td colspan=\"3\">94.41 79.50 86.32</td></tr></table>", |
|
"html": null, |
|
"text": "Results on textual ARFs ( %)" |
|
}, |
|
"TABREF10": { |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "Result on Non-textual ARFs( %)" |
|
}, |
|
"TABREF12": { |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "User Study Results" |
|
}, |
|
"TABREF13": { |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "Example Model Outputs" |
|
} |
|
} |
|
} |
|
} |