|
{ |
|
"paper_id": "S10-1001", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:27:48.596117Z" |
|
}, |
|
"title": "SemEval-2010 Task 1: Coreference Resolution in Multiple Languages", |
|
"authors": [ |
|
{ |
|
"first": "Marta", |
|
"middle": [], |
|
"last": "Recasens", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Llu\u00eds", |
|
"middle": [], |
|
"last": "M\u00e0rquez", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Emili", |
|
"middle": [], |
|
"last": "Sapena", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Ant\u00f2nia Mart\u00ed", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Mariona", |
|
"middle": [], |
|
"last": "Taul\u00e9", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "V\u00e9ronique", |
|
"middle": [], |
|
"last": "Hoste", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Massimo", |
|
"middle": [], |
|
"last": "Poesio", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Yannick", |
|
"middle": [], |
|
"last": "Versley", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper presents the SemEval-2010 task on Coreference Resolution in Multiple Languages. The goal was to evaluate and compare automatic coreference resolution systems for six different languages (Catalan, Dutch, English, German, Italian, and Spanish) in four evaluation settings and using four different metrics. Such a rich scenario had the potential to provide insight into key issues concerning coreference resolution: (i) the portability of systems across languages, (ii) the relevance of different levels of linguistic information, and (iii) the behavior of scoring metrics.", |
|
"pdf_parse": { |
|
"paper_id": "S10-1001", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper presents the SemEval-2010 task on Coreference Resolution in Multiple Languages. The goal was to evaluate and compare automatic coreference resolution systems for six different languages (Catalan, Dutch, English, German, Italian, and Spanish) in four evaluation settings and using four different metrics. Such a rich scenario had the potential to provide insight into key issues concerning coreference resolution: (i) the portability of systems across languages, (ii) the relevance of different levels of linguistic information, and (iii) the behavior of scoring metrics.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The task of coreference resolution, defined as the identification of the expressions in a text that refer to the same discourse entity (1), has attracted considerable attention within the NLP community.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "(1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Major League Baseball sent its head of security to Chicago to review the second incident of an on-field fan attack in the last seven months. The league is reviewing security at all ballparks to crack down on spectator violence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Using coreference information has been shown to be beneficial in a number of NLP applications including Information Extraction (McCarthy and Lehnert, 1995) , Text Summarization (Steinberger et al., 2007) , Question Answering (Morton, 1999) , and Machine Translation. There have been a few evaluation campaigns on coreference resolution in the past, namely MUC (Hirschman and Chinchor, 1997) , ACE (Doddington et al., 2004) , and ARE (Orasan et al., 2008 ), yet many questions remain open:", |
|
"cite_spans": [ |
|
{ |
|
"start": 127, |
|
"end": 155, |
|
"text": "(McCarthy and Lehnert, 1995)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 177, |
|
"end": 203, |
|
"text": "(Steinberger et al., 2007)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 225, |
|
"end": 239, |
|
"text": "(Morton, 1999)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 360, |
|
"end": 390, |
|
"text": "(Hirschman and Chinchor, 1997)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 397, |
|
"end": 422, |
|
"text": "(Doddington et al., 2004)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 433, |
|
"end": 453, |
|
"text": "(Orasan et al., 2008", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 To what extent is it possible to implement a general coreference resolution system portable to different languages? How much language-specific tuning is necessary?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 How helpful are morphology, syntax and semantics for solving coreference relations? How much preprocessing is needed? Does its quality (perfect linguistic input versus noisy automatic input) really matter?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 How (dis)similar are different coreference evaluation metrics-MUC, B-CUBED, CEAF and BLANC? Do they all provide the same ranking? Are they correlated?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our goal was to address these questions in a shared task. Given six datasets in Catalan, Dutch, English, German, Italian, and Spanish, the task we present involved automatically detecting full coreference chains-composed of named entities (NEs), pronouns, and full noun phrases-in four different scenarios. For more information, the reader is referred to the task website. 1 The rest of the paper is organized as follows. Section 2 presents the corpora from which the task datasets were extracted, and the automatic tools used to preprocess them. In Section 3, we describe the task by providing information about the data format, evaluation settings, and evaluation metrics. Participating systems are described in Section 4, and their results are analyzed and compared in Section 5. Finally, Section 6 concludes.", |
|
"cite_spans": [ |
|
{ |
|
"start": 373, |
|
"end": 374, |
|
"text": "1", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this section, we first present the sources of the data used in the task. We then describe the automatic tools that predicted input annotations for the coreference resolution systems. Test #docs #sents #tokens #docs #sents #tokens #docs #sents #tokens Catalan 829 8,709 253,513 142 1,445 42,072 167 1,698 49,260 Dutch 145 2,544 46,894 23 496 9,165 72 2,410 48,007 English 229 3,648 79,060 39 741 17,044 85 1,141 24,206 German 900 19,233 331,614 199 4,129 73,145 136 2,736 50,287 Italian 80 2,951 81,400 17 551 16,904 46 1,494 41,586 Spanish 875 9,022 284,179 140 1,419 44,460 168 1,705 51,040 German The T\u00fcBa-D/Z corpus (Hinrichs et al., 2005 ) is a newspaper treebank based on data taken from the daily issues of \"die tageszeitung\" (taz). It currently comprises 794k words manually annotated with semantic and coreference information. Due to licensing restrictions of the original texts, a taz-DVD must be purchased to obtain a license. 2", |
|
"cite_spans": [ |
|
{ |
|
"start": 691, |
|
"end": 713, |
|
"text": "(Hinrichs et al., 2005", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 186, |
|
"end": 656, |
|
"text": "Test #docs #sents #tokens #docs #sents #tokens #docs #sents #tokens Catalan 829 8,709 253,513 142 1,445 42,072 167 1,698 49,260 Dutch 145 2,544 46,894 23 496 9,165 72 2,410 48,007 English 229 3,648 79,060 39 741 17,044 85 1,141 24,206 German 900 19,233 331,614 199 4,129 73,145 136 2,736 50,287 Italian 80 2,951 81,400 17 551 16,904 46 1,494 41,586 Spanish 875 9,022 284,179 140 1,419 44,460 168 1,705", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Linguistic Resources", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Italian The LiveMemories corpus (Rodr\u00edguez et al., 2010) will include texts from the Italian Wikipedia, blogs, news articles, and dialogues (MapTask). They are being annotated according to the ARRAU annotation scheme with coreference, agreement, and NE information on top of automatically parsed data. The task dataset included Wikipedia texts already annotated.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Development", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The datasets that were used in the task were extracted from the above-mentioned corpora. Table 1 summarizes the number of documents (docs), sentences (sents), and tokens in the training, development and test sets. 3", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Development", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Catalan, Spanish, English Predicted lemmas and PoS were generated using FreeLing 4 for Catalan/Spanish and SVMTagger 5 for English. Dependency information and predicate semantic roles were generated with JointParser, a syntacticsemantic parser. 6 Dutch Lemmas, PoS and NEs were automatically provided by the memory-based shallow parser for Dutch (Daelemans et al., 1999) , and dependency information by the Alpino parser (van Noord et al., 2006) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 245, |
|
"end": 246, |
|
"text": "6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 346, |
|
"end": 370, |
|
"text": "(Daelemans et al., 1999)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 421, |
|
"end": 445, |
|
"text": "(van Noord et al., 2006)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preprocessing Systems", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "German Lemmas were predicted by TreeTagger (Schmid, 1995) , PoS and morphology by RFTagger (Schmid and Laws, 2008) , and dependency information by MaltParser (Hall and Nivre, 2008) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 43, |
|
"end": 57, |
|
"text": "(Schmid, 1995)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 91, |
|
"end": 114, |
|
"text": "(Schmid and Laws, 2008)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 158, |
|
"end": 180, |
|
"text": "(Hall and Nivre, 2008)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preprocessing Systems", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Italian Lemmas and PoS were provided by TextPro, 7 and dependency information by Malt-Parser. 8", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preprocessing Systems", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Participants were asked to develop an automatic system capable of assigning a discourse entity to every mention, 9 thus identifying all the NP mentions of every discourse entity. As there is no standard annotation scheme for coreference and the source corpora differed in certain aspects, the coreference information of the task datasets was produced according to three criteria:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Only NP constituents and possessive determiners can be mentions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Mentions must be referential expressions, thus ruling out nominal predicates, appositives, expletive NPs, attributive NPs, NPs within idioms, etc.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Singletons are also considered as entities (i.e., entities with a single mention).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "To help participants build their systems, the task datasets also contained both gold-standard and automatically predicted linguistic annotations at the morphological, syntactic and semantic levels. Considerable effort was devoted to provide participants with a common and relatively simple data representation for the six languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The task datasets as well as the participants' answers were displayed in a uniform columnbased format, similar to the style used in previous CoNLL shared tasks on syntactic and semantic dependencies (2008/2009 ). 10 Each dataset was provided as a single file per language. Since coreference is a linguistic relation at the discourse level, documents constitute the basic unit, and are delimited by \"#begin document ID\" and \"#end document ID\" comment lines. Within a document, the information of each sentence is organized vertically with one token per line, and a blank line after the last token of each sentence. The information associated with each token is described in several columns (separated by \"\\t\" characters) representing the following layers of linguistic annotation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 186, |
|
"end": 209, |
|
"text": "dependencies (2008/2009", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Format", |
|
"sec_num": "3.1" |
|
}, |
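
{

    "text": "Before listing the individual columns, the overall layout can be made concrete with a minimal Python sketch (an editorial illustration, not part of the original task materials; the function name and the choice of keying documents by their ID string are assumptions). It reads a dataset file into a dictionary mapping each document ID to its sentences, where every sentence is a list of token rows split on \"\\t\"; the meaning of the individual columns is described next:\n\ndef read_documents(path):\n    # Documents are delimited by '#begin document ID' / '#end document ID' lines;\n    # sentences are separated by blank lines; each token line holds the\n    # tab-separated annotation columns listed in the remainder of this section.\n    docs, sents, tokens, doc_id = {}, [], [], None\n    with open(path, encoding='utf-8') as f:\n        for raw in f:\n            line = raw.rstrip('\\n')\n            if line.startswith('#begin document'):\n                doc_id = line[len('#begin document'):].strip()\n                sents, tokens = [], []\n            elif line.startswith('#end document'):\n                if tokens:\n                    sents.append(tokens)\n                    tokens = []\n                docs[doc_id] = sents\n            elif not line.strip():\n                if tokens:\n                    sents.append(tokens)\n                    tokens = []\n            else:\n                tokens.append(line.split('\\t'))\n    return docs\n\nThe last element of each row is then the coreference column discussed at the end of this section.",

    "cite_spans": [],

    "ref_spans": [],

    "eq_spans": [],

    "section": "Data Format",

    "sec_num": "3.1"

},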
|
{ |
|
"text": "ID (column 1). Token identifiers in the sentence. Token (column 2). Word forms. Lemma (column 3). Token lemmas.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Format", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "PoS (column 5). Coarse PoS.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Format", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Feat (column 7). Morphological features (PoS type, number, gender, case, tense, aspect, etc.) separated by a pipe character. Head (column 9). ID of the syntactic head (\"0\" if the token is the tree root). DepRel (column 11). Dependency relations corresponding to the dependencies described in the Head column (\"sentence\" if the token is the tree root). NE (column 13). NE types in open-close notation. Pred (column 15). Predicate semantic class. APreds (column 17 and subsequent ones). For each predicate in the Pred column, its semantic roles/dependencies.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Format", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "open-close notation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Coref (last column). Coreference relations in", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The above-mentioned columns are \"goldstandard columns,\" whereas columns 4, 6, 8, 10, 12, 14, 16 and the penultimate contain the same information as the respective previous column but automatically predicted-using the preprocessing systems listed in Section 2.2. Neither all layers of linguistic annotation nor all gold-standard and predicted columns were available for all six languages (underscore characters indicate missing information).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Coref (last column). Coreference relations in", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The coreference column follows an open-close notation with an entity number in parentheses (see Table 2 ). Every entity has an ID number, and every mention is marked with the ID of the entity it refers to: an opening parenthesis shows the beginning of the mention (first token), while a closing parenthesis shows the end of the mention (last token). For tokens belonging to more than one mention, a pipe character is used to separate multiple entity IDs. The resulting annotation is a wellformed nested structure (CF language).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 96, |
|
"end": 103, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Coref (last column). Coreference relations in", |
|
"sec_num": null |
|
}, |
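
{

    "text": "As an editorial illustration of the open-close notation (not part of the original task materials; the placeholder value for tokens outside any mention and the function name are assumptions), the following Python sketch recovers (entity ID, first token, last token) triples from the coreference column of one document:\n\ndef coref_mentions(coref_cells):\n    # coref_cells: the last-column value of every token, in order,\n    # e.g. ['(1', '1)', '(2)', '_'] under the notation described above.\n    open_positions, mentions = {}, []\n    for i, cell in enumerate(coref_cells):\n        if cell in ('_', '-'):\n            continue  # token belongs to no mention (placeholder assumed)\n        for part in cell.split('|'):\n            eid = part.strip('()')\n            if part.startswith('('):\n                open_positions.setdefault(eid, []).append(i)\n            if part.endswith(')'):\n                start = open_positions[eid].pop()\n                mentions.append((eid, start, i))\n    return mentions\n\nBecause the annotation is a well-formed nested structure, every closing parenthesis can be matched to the most recently opened mention of the same entity, which is what the small per-entity stack implements.",

    "cite_spans": [],

    "ref_spans": [],

    "eq_spans": [],

    "section": "Data Format",

    "sec_num": "3.1"

},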
|
{ |
|
"text": "In order to address our goal of studying the effect of different levels of linguistic information (preprocessing) on solving coreference relations, the test was divided into four evaluation settings that differed along two dimensions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Settings", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Gold-standard versus Regular setting. Only in the gold-standard setting were participants allowed to use the gold-standard columns, including the last one (of the test dataset) with true mention boundaries. In the regular setting, they were allowed to use only the automatically predicted columns. Obtaining better results in the gold setting would provide evidence for the relevance of using high-quality preprocessing information. Since not all columns were available for all six languages, the gold setting was only possible for Catalan, English, German, and Spanish.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Settings", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Closed versus Open setting. In the closed setting, systems had to be built strictly with the information provided in the task datasets. In contrast, there was no restriction on the resources that participants could utilize in the open setting: systems could be developed using any external tools and resources to predict the preprocessing information, e.g., WordNet, Wikipedia, etc. The only requirement was to use tools that had not been developed with the annotations of the test set. This setting provided an open door into tools or resources that improve performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Settings", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Since there is no agreement at present on a standard measure for coreference resolution evaluation, one of our goals was to compare the rankings produced by four different measures. The task scorer provides results in the two mentionbased metrics B 3 (Bagga and Baldwin, 1998) and CEAF-\u03c6 3 (Luo, 2005) , and the two link-based metrics MUC (Vilain et al., 1995) and BLANC (Recasens and Hovy, in prep) . The first three measures have been widely used, while BLANC is a proposal of a new measure interesting to test.", |
|
"cite_spans": [ |
|
{ |
|
"start": 290, |
|
"end": 301, |
|
"text": "(Luo, 2005)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 339, |
|
"end": 360, |
|
"text": "(Vilain et al., 1995)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 371, |
|
"end": 399, |
|
"text": "(Recasens and Hovy, in prep)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "3.3" |
|
}, |
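
{

    "text": "MUC, the oldest of the four metrics, counts, for each entity, how many coreference links are missing once the entity is partitioned by the other annotation (Vilain et al., 1995). The following Python sketch is an editorial illustration of that definition, not the official task scorer; representing entities as sets of mention identifiers is an assumption about the data structure:\n\ndef muc_score(key, response):\n    # key and response are lists of entities; each entity is a set of mention ids.\n    def half(gold, pred):\n        num = den = 0\n        for entity in gold:\n            # Partition the entity by the pred entity containing each mention;\n            # mentions missing from pred each form their own partition cell.\n            cells = {next((j for j, e in enumerate(pred) if m in e), ('miss', m))\n                     for m in entity}\n            num += len(entity) - len(cells)\n            den += len(entity) - 1\n        return num / den if den else 0.0\n    recall, precision = half(key, response), half(response, key)\n    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0\n    return recall, precision, f1\n\nFor example, muc_score([{1, 2, 3}], [{1, 2}, {3}]) yields recall 0.5 and precision 1.0. Note that singleton entities contribute nothing to either sum, which is one reason MUC behaves so differently from the mention-based metrics on the baselines discussed in Section 5.",

    "cite_spans": [],

    "ref_spans": [],

    "eq_spans": [],

    "section": "Evaluation Metrics",

    "sec_num": "3.3"

},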
|
{ |
|
"text": "The mention detection subtask is measured with recall, precision, and F 1 . Mentions are rewarded with 1 point if their boundaries coincide with those of the gold NP, with 0.5 points if their boundaries are within the gold NP including its head, and with 0 otherwise.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "3.3" |
|
}, |
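
{

    "text": "A minimal sketch of the mention detection scoring rule just described (an editorial illustration; the span representation as inclusive token offsets and the function name are assumptions, and how the partial credit enters the recall and precision counts is left out):\n\ndef mention_detection_credit(sys_span, gold_span, gold_head):\n    # Spans are (first_token, last_token) pairs, inclusive; gold_head is the\n    # token index of the gold NP's syntactic head.\n    if sys_span == gold_span:\n        return 1.0   # exact boundary match\n    (s0, s1), (g0, g1) = sys_span, gold_span\n    if g0 <= s0 and s1 <= g1 and s0 <= gold_head <= s1:\n        return 0.5   # inside the gold NP and covering its head\n    return 0.0",

    "cite_spans": [],

    "ref_spans": [],

    "eq_spans": [],

    "section": "Evaluation Metrics",

    "sec_num": "3.3"

},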
|
{ |
|
"text": "A total of twenty-two participants registered for the task and downloaded the training materials. From these, sixteen downloaded the test set but only six (out of which two task organizers) submitted valid results (corresponding to nine system runs or variants). These numbers show that the task raised considerable interest but that the final participation rate was comparatively low (slightly below 30%).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Participating Systems", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The participating systems differed in terms of architecture, machine learning method, etc. Table 3 summarizes their main properties. Systems like BART and Corry support several machine learners, but Table 3 indicates the one used for the SemEval run. The last column indicates the external resources that were employed in the open setting, thus it is empty for systems that participated only in the closed setting. For more specific details we address the reader to the system description papers in Erk and Strapparava (2010) . Table 4 shows the results obtained by two naive baseline systems: (i) SINGLETONS considers each mention as a separate entity, and (ii) ALL-IN-ONE groups all the mentions in a document into a single entity. These simple baselines reveal limitations of the evaluation metrics, like the high scores of CEAF and B 3 for SINGLETONS. Interestingly enough, the naive baseline scores turn out to be hard to beat by the participating systems, as Table 5 shows. Similarly, ALL-IN-ONE obtains high scores in terms of MUC. Table 4 also reveals differences between the distribution of entities in the datasets. Dutch is clearly the most divergent corpus mainly due to the fact that it only contains singletons for NEs. Table 5 displays the results of all systems for all languages and settings in the four evaluation metrics (the best scores in each setting are highlighted in bold). Results are presented sequentially by language and setting, and participating systems are ordered alphabetically. The participation of systems across languages and settings is rather irregular, 11 thus making it difficult to draw firm conclu- sions about the aims initially pursued by the task. In the following, we summarize the most relevant outcomes of the evaluation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 499, |
|
"end": 525, |
|
"text": "Erk and Strapparava (2010)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 199, |
|
"end": 206, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 528, |
|
"end": 535, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 965, |
|
"end": 972, |
|
"text": "Table 5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1039, |
|
"end": 1046, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 1234, |
|
"end": 1241, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Participating Systems", |
|
"sec_num": "4" |
|
}, |
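
{

    "text": "The two naive baselines are trivial to reproduce; the sketch below is an editorial illustration (representing entities as sets of mention identifiers is an assumption), and its output can for instance be fed to the muc_score sketch given in Section 3.3:\n\ndef singletons_baseline(mentions):\n    # SINGLETONS: every mention is its own entity.\n    return [{m} for m in mentions]\n\ndef all_in_one_baseline(mentions):\n    # ALL-IN-ONE: all mentions of a document form a single entity.\n    return [set(mentions)] if mentions else []\n\nBecause mention-based metrics such as CEAF and B\u00b3 give credit for every correctly resolved mention, and the task datasets include singleton entities, SINGLETONS scores high on those metrics, whereas the purely link-based MUC instead rewards ALL-IN-ONE, as Table 4 shows.",

    "cite_spans": [],

    "ref_spans": [],

    "eq_spans": [],

    "section": "Participating Systems",

    "sec_num": "4"

},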
|
{ |
|
"text": "Regarding languages, English concentrates the most participants (fifteen entries), followed by German (eight), Catalan and Spanish (seven each), Italian (five), and Dutch (three). The number of languages addressed by each system ranges from one (Corry) to six (UBIU and SUCRE); BART and RelaxCor addressed three languages, and TANL-1 five. The best overall results are obtained for English followed by German, then Catalan, Spanish and Italian, and finally Dutch. Apart from differences between corpora, there are other factors that might explain this ranking: (i) the fact that most of the systems were originally developed for English, and (ii) differences in corpus size (German having the largest corpus, and Dutch the smallest).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Evaluation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Regarding systems, there are no clear \"winners.\" Note that no language-setting was addressed by all six systems. The BART system, for instance, is either on its own or competing against a single system. It emerges from partial comparisons that SUCRE performs the best in closed\u00d7regular for English, German, and Italian, although it never outperforms the CEAF or B 3 singleton baseline. While SUCRE always obtains the best scores according to MUC and BLANC, Re-laxCor and TANL-1 usually win based on CEAF and B 3 . The Corry system presents three variants optimized for CEAF (Corry-C), MUC (Corry-M), and BLANC (Corry-B). Their results are consistent with the bias introduced in the optimization (see English:open\u00d7gold).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Evaluation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Depending on the evaluation metric then, the rankings of systems vary with considerable score differences. There is a significant positive correlation between CEAF and B 3 (Pearson's r = 0.91, p < 0.01), and a significant lack of correlation between CEAF and MUC in terms of recall (Pearson's r = 0.44, p < 0.01). This fact stresses the importance of defining appropriate metrics (or a combination of them) for coreference evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Evaluation", |
|
"sec_num": "5" |
|
}, |
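
{

    "text": "The reported correlations are plain Pearson coefficients over paired metric scores; the sketch below is an editorial illustration (which exact score columns were paired is as described in the text, and the function name is an assumption):\n\nfrom statistics import mean\n\ndef pearson_r(xs, ys):\n    # Pearson correlation between two equally long lists of scores,\n    # e.g. the CEAF recall and B\u00b3 recall of the submitted runs.\n    mx, my = mean(xs), mean(ys)\n    cov = sum((x - mx) * (y - my) for x, y in zip(xs, ys))\n    var_x = sum((x - mx) ** 2 for x in xs)\n    var_y = sum((y - my) ** 2 for y in ys)\n    return cov / (var_x * var_y) ** 0.5",

    "cite_spans": [],

    "ref_spans": [],

    "eq_spans": [],

    "section": "Results and Evaluation",

    "sec_num": "5"

},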
|
{ |
|
"text": "Finally, regarding evaluation settings, the results in the gold setting are significantly better than those in the regular. However, this might be a direct effect of the mention recognition task. Mention recognition in the regular setting falls more than 20 F 1 points with respect to the gold setting (where correct mention boundaries were given).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Evaluation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "As for the open versus closed setting, there is only one system, RelaxCor for English, that addressed the two. As expected, results show a slight improvement from closed\u00d7gold to open\u00d7gold.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Evaluation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "This paper has introduced the main features of the SemEval-2010 task on coreference resolution. The goal of the task was to evaluate and compare automatic coreference resolution systems for six different languages in four evaluation settings and using four different metrics. This complex scenario aimed at providing insight into several aspects of coreference resolution, including portability across languages, relevance of linguistic information at different levels, and behavior of alternative scoring metrics.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The task attracted considerable attention from a number of researchers, but only six teams submitted their final results. Participating systems did not run their systems for all the languages and evaluation settings, thus making direct comparisons between them very difficult. Nonetheless, we were able to observe some interesting aspects from the empirical evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "An important conclusion was the confirmation that different evaluation metrics provide different system rankings and the scores are not commensurate. Attention thus needs to be paid to coreference evaluation. The behavior and applicability of the scoring metrics requires further investigation in order to guarantee a fair evaluation when comparing systems in the future. We hope to have the opportunity to thoroughly discuss this and the rest of interesting questions raised by the task during the SemEval workshop at ACL 2010.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "An additional valuable benefit is the set of resources developed throughout the task. As task organizers, we intend to facilitate the sharing of datasets, scorers, and documentation by keeping them available for future research use. We believe that these resources will help to set future bench-marks for the research community and will contribute positively to the progress of the state of the art in coreference resolution. We will maintain and update the task website with post-SemEval contributions. Table 5 : Official results of the participating systems for all languages, settings, and metrics.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 504, |
|
"end": 511, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "http://stel.ub.edu/semeval2010-coref", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Free user license agreements for the English and German task datasets were issued to the task participants.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The German and Dutch training datasets were not completely stable during the competition period due to a few errors. Revised versions were released on March 2 and 20, respectively. As to the test datasets, the Dutch and Italian documents with formatting errors were corrected after the evaluation period, with no variations in the ranking order of systems.4 http://www.lsi.upc.es/ nlp/freeling 5 http://www.lsi.upc.edu/ nlp/SVMTool 6 http://www.lsi.upc.edu// xlluis/?x=cat:5 7 http://textpro.fbk.eu 8 http://maltparser.org", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Following the terminology of the ACE program, a mention is defined as an instance of reference to an object, and an entity is the collection of mentions referring to the same object in a document.10 http://www.cnts.ua.ac.be/conll2008", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Only 45 entries inTable 5 from 192 potential cases.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank the following people who contributed to the preparation of the task datasets: Manuel Bertran (UB), Oriol Borrega (UB), Orph\u00e9e De Clercq (U. Ghent), Francesca Delogu (U. Trento), Jes\u00fas Gim\u00e9nez (UPC), Eduard Hovy (ISI-USC), Richard Johansson (U. Trento), Xavier Llu\u00eds (UPC), Montse Nofre (UB), Llu\u00eds Padr\u00f3 (UPC), Kepa Joseba Rodr\u00edguez (U. Trento), Mihai Surdeanu (Stanford), Olga Uryupina (U. Trento), Lente Van Leuven (UB), and Rita Zaragoza (UB). We would also like to thank LDC and die tageszeitung for distributing freely the English and German datasets.This work was funded in part by the Spanish Ministry of Science and Innovation through the projects TEXT-MESS 2.0 (TIN2009-13391-C04-04), OpenMT-2 (TIN2009-14675-C03), and KNOW2 (TIN2009-14715-C04-04), and an FPU doctoral scholarship (AP2006-00994) held by M. Recasens. It also received financial support from the Seventh Framework Programme of the EU (FP7/2007-2013 under GA 247762 (FAUST), from the STEVIN program of the Nederlandse Taalunie through the COREA and SoNaR projects, and from the Provincia Autonoma di Trento through the LiveMemories project.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "TANL-1: coreference resolution by parse analysis and similarity clustering", |
|
"authors": [ |
|
{ |
|
"first": "Giuseppe", |
|
"middle": [], |
|
"last": "Attardi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefano", |
|
"middle": [ |
|
"Dei" |
|
], |
|
"last": "Rossi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Simi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of SemEval-2", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Giuseppe Attardi, Stefano Dei Rossi, and Maria Simi. 2010. TANL-1: coreference resolution by parse analysis and similarity clustering. In Proceedings of SemEval-2.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Algorithms for scoring coreference chains", |
|
"authors": [ |
|
{ |
|
"first": "Amit", |
|
"middle": [], |
|
"last": "Bagga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Breck", |
|
"middle": [], |
|
"last": "Baldwin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of the LREC Workshop on Linguistic Coreference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "563--566", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amit Bagga and Breck Baldwin. 1998. Algorithms for scoring coreference chains. In Proceedings of the LREC Workshop on Linguistic Coreference, pages 563-566.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "BART: A multilingual anaphora resolution system", |
|
"authors": [ |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Broscheit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Massimo", |
|
"middle": [], |
|
"last": "Poesio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simone", |
|
"middle": [ |
|
"Paolo" |
|
], |
|
"last": "Ponzetto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kepa Joseba", |
|
"middle": [], |
|
"last": "Rodr\u00edguez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lorenza", |
|
"middle": [], |
|
"last": "Romano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olga", |
|
"middle": [], |
|
"last": "Uryupina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yannick", |
|
"middle": [], |
|
"last": "Versley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Zanoli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of SemEval-2", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Samuel Broscheit, Massimo Poesio, Simone Paolo Ponzetto, Kepa Joseba Rodr\u00edguez, Lorenza Ro- mano, Olga Uryupina, Yannick Versley, and Roberto Zanoli. 2010. BART: A multilingual anaphora res- olution system. In Proceedings of SemEval-2.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Memory-based shallow parsing", |
|
"authors": [ |
|
{ |
|
"first": "Walter", |
|
"middle": [], |
|
"last": "Daelemans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sabine", |
|
"middle": [], |
|
"last": "Buchholz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jorn", |
|
"middle": [], |
|
"last": "Veenstra", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Proceedings of CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Walter Daelemans, Sabine Buchholz, and Jorn Veen- stra. 1999. Memory-based shallow parsing. In Pro- ceedings of CoNLL 1999.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "The Automatic Content Extraction (ACE) program -Tasks, data, and evaluation", |
|
"authors": [ |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Doddington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Przybocki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lance", |
|
"middle": [], |
|
"last": "Ramshaw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephanie", |
|
"middle": [], |
|
"last": "Strassel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Weischedel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of LREC 2004", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "837--840", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "George Doddington, Alexis Mitchell, Mark Przybocki, Lance Ramshaw, Stephanie Strassel, and Ralph Weischedel. 2004. The Automatic Content Extrac- tion (ACE) program -Tasks, data, and evaluation. In Proceedings of LREC 2004, pages 837-840.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Proceedings of SemEval-2", |
|
"authors": [ |
|
{ |
|
"first": "Katrin", |
|
"middle": [], |
|
"last": "Erk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlo", |
|
"middle": [], |
|
"last": "Strapparava", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Katrin Erk and Carlo Strapparava, editors. 2010. Pro- ceedings of SemEval-2.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "A dependencydriven parser for German dependency and constituency representations", |
|
"authors": [ |
|
{ |
|
"first": "Johan", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the ACL Workshop on Parsing German", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "47--54", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Johan Hall and Joakim Nivre. 2008. A dependency- driven parser for German dependency and con- stituency representations. In Proceedings of the ACL Workshop on Parsing German (PaGe 2008), pages 47-54.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "A unified representation for morphological, syntactic, semantic, and referential annotations", |
|
"authors": [ |
|
{ |
|
"first": "Erhard", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Hinrichs", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandra", |
|
"middle": [], |
|
"last": "K\u00fcbler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karin", |
|
"middle": [], |
|
"last": "Naumann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the ACL Workshop on Frontiers in Corpus Annotation II: Pie in the Sky", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "13--20", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erhard W. Hinrichs, Sandra K\u00fcbler, and Karin Nau- mann. 2005. A unified representation for morpho- logical, syntactic, semantic, and referential annota- tions. In Proceedings of the ACL Workshop on Fron- tiers in Corpus Annotation II: Pie in the Sky, pages 13-20.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "MUC-7 Coreference Task Definition -Version 3.0", |
|
"authors": [], |
|
"year": null, |
|
"venue": "Proceedings of MUC-7", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "MUC-7 Coreference Task Definition -Version 3.0. In Proceedings of MUC-7.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "KNACK-2002: A richly annotated corpus of Dutch written text", |
|
"authors": [ |
|
{ |
|
"first": "V\u00e9ronique", |
|
"middle": [], |
|
"last": "Hoste", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guy", |
|
"middle": [], |
|
"last": "De Pauw", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of LREC 2006", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1432--1437", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "V\u00e9ronique Hoste and Guy De Pauw. 2006. KNACK- 2002: A richly annotated corpus of Dutch written text. In Proceedings of LREC 2006, pages 1432- 1437.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "SU-CRE: A modular system for coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Hamidreza", |
|
"middle": [], |
|
"last": "Kobdani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hinrich", |
|
"middle": [], |
|
"last": "Sch\u00fctze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of SemEval-2", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hamidreza Kobdani and Hinrich Sch\u00fctze. 2010. SU- CRE: A modular system for coreference resolution. In Proceedings of SemEval-2.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "On coreference resolution performance metrics", |
|
"authors": [ |
|
{ |
|
"first": "Xiaoqiang", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of HLT-EMNLP 2005", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "25--32", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaoqiang Luo. 2005. On coreference resolution performance metrics. In Proceedings of HLT- EMNLP 2005, pages 25-32.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Using decision trees for coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Joseph", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wendy", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Mccarthy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Lehnert", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Proceedings of IJCAI 1995", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1050--1055", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joseph F. McCarthy and Wendy G. Lehnert. 1995. Us- ing decision trees for coreference resolution. In Pro- ceedings of IJCAI 1995, pages 1050-1055.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Using coreference in question answering", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Morton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Proceedings of TREC-8", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "85--89", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas S. Morton. 1999. Using coreference in ques- tion answering. In Proceedings of TREC-8, pages 85-89.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Anaphora Resolution Exercise: An overview", |
|
"authors": [ |
|
{ |
|
"first": "Constantin", |
|
"middle": [], |
|
"last": "Orasan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Cristea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Mitkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ant\u00f3nio", |
|
"middle": [], |
|
"last": "Branco", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Constantin Orasan, Dan Cristea, Ruslan Mitkov, and Ant\u00f3nio Branco. 2008. Anaphora Resolution Exer- cise: An overview. In Proceedings of LREC 2008.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Ontonotes: A unified relational semantic representation", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Sameer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Pradhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mitch", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martha", |
|
"middle": [], |
|
"last": "Marcus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lance", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Ramshaw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Weischedel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the International Conference on Semantic Computing (ICSC 2007)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "517--526", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sameer S. Pradhan, Eduard Hovy, Mitch Mar- cus, Martha Palmer, Lance Ramshaw, and Ralph Weischedel. 2007. Ontonotes: A unified rela- tional semantic representation. In Proceedings of the International Conference on Semantic Comput- ing (ICSC 2007), pages 517-526.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "BLANC: Implementing the Rand Index for Coreference Evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Marta", |
|
"middle": [], |
|
"last": "Recasens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marta Recasens and Eduard Hovy. in prep. BLANC: Implementing the Rand Index for Coreference Eval- uation.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "AnCora-CO: Coreferentially annotated corpora for Spanish and Catalan. Language Resources and Evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Marta", |
|
"middle": [], |
|
"last": "Recasens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M. Ant\u00f2nia", |
|
"middle": [], |
|
"last": "Mart\u00ed", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/s10579-009-9108-x" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marta Recasens and M. Ant\u00f2nia Mart\u00ed. 2009. AnCora- CO: Coreferentially annotated corpora for Spanish and Catalan. Language Resources and Evaluation, DOI:10.1007/s10579-009-9108-x.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Anaphoric annotation of Wikipedia and blogs in the Live Memories Corpus", |
|
"authors": [ |
|
{ |
|
"first": "Francesca", |
|
"middle": [], |
|
"last": "Kepa Joseba Rodr\u00edguez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yannick", |
|
"middle": [], |
|
"last": "Delogu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Egon", |
|
"middle": [], |
|
"last": "Versley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Massimo", |
|
"middle": [], |
|
"last": "Stemle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Poesio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of LREC 2010", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "157--163", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kepa Joseba Rodr\u00edguez, Francesca Delogu, Yannick Versley, Egon Stemle, and Massimo Poesio. 2010. Anaphoric annotation of Wikipedia and blogs in the Live Memories Corpus. In Proceedings of LREC 2010, pages 157-163.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "RelaxCor: A global relaxation labeling approach to coreference resolution for the SemEval-2 Coreference Task", |
|
"authors": [ |
|
{ |
|
"first": "Emili", |
|
"middle": [], |
|
"last": "Sapena", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llu\u00eds", |
|
"middle": [], |
|
"last": "Padr\u00f3", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jordi", |
|
"middle": [], |
|
"last": "Turmo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of SemEval-2", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emili Sapena, Llu\u00eds Padr\u00f3, and Jordi Turmo. 2010. RelaxCor: A global relaxation labeling approach to coreference resolution for the SemEval-2 Corefer- ence Task. In Proceedings of SemEval-2.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Estimation of conditional probabilities with decision trees and an application to fine-grained POS tagging", |
|
"authors": [ |
|
{ |
|
"first": "Helmut", |
|
"middle": [], |
|
"last": "Schmid", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Florian", |
|
"middle": [], |
|
"last": "Laws", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of COLING 2008", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "777--784", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Helmut Schmid and Florian Laws. 2008. Estimation of conditional probabilities with decision trees and an application to fine-grained POS tagging. In Pro- ceedings of COLING 2008, pages 777-784.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Improvements in part-ofspeech tagging with an application to German", |
|
"authors": [ |
|
{ |
|
"first": "Helmut", |
|
"middle": [], |
|
"last": "Schmid", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Proceedings of the ACL SIGDAT Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "47--50", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Helmut Schmid. 1995. Improvements in part-of- speech tagging with an application to German. In Proceedings of the ACL SIGDAT Workshop, pages 47-50.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Two uses of anaphora resolution in summarization. Information Processing and Management: an", |
|
"authors": [ |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Steinberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Massimo", |
|
"middle": [], |
|
"last": "Poesio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Mijail", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "International Journal", |
|
"volume": "43", |
|
"issue": "6", |
|
"pages": "1663--1680", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Josef Steinberger, Massimo Poesio, Mijail A. Kabad- jov, and Karel Jeek. 2007. Two uses of anaphora resolution in summarization. Information Process- ing and Management: an International Journal, 43(6):1663-1680.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Corry: A system for coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Olga", |
|
"middle": [], |
|
"last": "Uryupina", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of SemEval-2", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Olga Uryupina. 2010. Corry: A system for corefer- ence resolution. In Proceedings of SemEval-2.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Syntactic annotation of large corpora in STEVIN", |
|
"authors": [ |
|
{ |
|
"first": "Ineke", |
|
"middle": [], |
|
"last": "Gertjan Van Noord", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vincent", |
|
"middle": [], |
|
"last": "Schuurman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Vandeghinste", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gertjan van Noord, Ineke Schuurman, and Vincent Vandeghinste. 2006. Syntactic annotation of large corpora in STEVIN. In Proceedings of LREC 2006.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Dennis Connolly, and Lynette Hirschman. 1995. A modeltheoretic coreference scoring scheme", |
|
"authors": [ |
|
{ |
|
"first": "Marc", |
|
"middle": [], |
|
"last": "Vilain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Burger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Aberdeen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of MUC-6", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "45--52", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marc Vilain, John Burger, John Aberdeen, Dennis Con- nolly, and Lynette Hirschman. 1995. A model- theoretic coreference scoring scheme. In Proceed- ings of MUC-6, pages 45-52.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "UBIU: A language-independent system for coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Desislava", |
|
"middle": [], |
|
"last": "Zhekova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandra", |
|
"middle": [], |
|
"last": "K\u00fcbler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of SemEval-2", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Desislava Zhekova and Sandra K\u00fcbler. 2010. UBIU: A language-independent system for coreference res- olution. In Proceedings of SemEval-2.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF0": { |
|
"text": "Size of the task datasets.", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>2.1 Source Corpora</td></tr><tr><td>Catalan and Spanish The AnCora corpora (Re-</td></tr><tr><td>casens and Mart\u00ed, 2009) consist of a Catalan and</td></tr><tr><td>a Spanish treebank of 500k words each, mainly</td></tr><tr><td>from newspapers and news agencies (El Peri\u00f3dico,</td></tr><tr><td>EFE, ACN). Manual annotation exists for ar-</td></tr><tr><td>guments and thematic roles, predicate semantic</td></tr><tr><td>classes, NEs, WordNet nominal senses, and coref-</td></tr><tr><td>erence relations. AnCora are freely available for</td></tr><tr><td>research purposes.</td></tr><tr><td>English The OntoNotes Release 2.0 corpus</td></tr><tr><td>(Pradhan et al., 2007) covers newswire and broad-</td></tr><tr><td>cast news data: 300k words from The Wall Street</td></tr><tr><td>Journal, and 200k words from the TDT-4 col-</td></tr><tr><td>lection, respectively. OntoNotes builds on the</td></tr><tr><td>Penn Treebank for syntactic annotation and on the</td></tr><tr><td>Penn PropBank for predicate argument structures.</td></tr><tr><td>Semantic annotations include NEs, words senses</td></tr><tr><td>(linked to an ontology), and coreference informa-</td></tr><tr><td>tion. The OntoNotes corpus is distributed by the</td></tr><tr><td>Linguistic Data Consortium. 2</td></tr></table>" |
|
}, |
|
"TABREF2": { |
|
"text": "", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF4": { |
|
"text": "Main characteristics of the participating systems.", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF6": { |
|
"text": "Baseline scores.", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |