|
{ |
|
"paper_id": "S13-1003", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:41:44.121834Z" |
|
}, |
|
"title": "Coarse to Fine Grained Sense Disambiguation in Wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "Hui", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Ohio University Athens", |
|
"location": { |
|
"postCode": "45701", |
|
"region": "OH", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Razvan", |
|
"middle": [], |
|
"last": "Bunescu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Ohio University", |
|
"location": { |
|
"postCode": "45701", |
|
"settlement": "Athens", |
|
"region": "OH", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Wikipedia articles are annotated by volunteer contributors with numerous links that connect words and phrases to relevant titles. Links to general senses of a word are used concurrently with links to more specific senses, without being distinguished explicitly. We present an approach to training coarse to fine grained sense disambiguation systems in the presence of such annotation inconsistencies. Experimental results show that accounting for annotation ambiguity in Wikipedia links leads to significant improvements in disambiguation.", |
|
"pdf_parse": { |
|
"paper_id": "S13-1003", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Wikipedia articles are annotated by volunteer contributors with numerous links that connect words and phrases to relevant titles. Links to general senses of a word are used concurrently with links to more specific senses, without being distinguished explicitly. We present an approach to training coarse to fine grained sense disambiguation systems in the presence of such annotation inconsistencies. Experimental results show that accounting for annotation ambiguity in Wikipedia links leads to significant improvements in disambiguation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The vast amount of world knowledge available in Wikipedia has been shown to benefit many types of text processing tasks, such as coreference resolution (Ponzetto and Strube, 2006; Haghighi and Klein, 2009; Bryl et al., 2010; Rahman and Ng, 2011) , information retrieval (Milne, 2007; Li et al., 2007; Potthast et al., 2008; Cimiano et al., 2009) , or question answering (Ahn et al., 2004; Kaisser, 2008; Ferrucci et al., 2010) . In particular, the user contributed link structure of Wikipedia has been shown to provide useful supervision for training named entity disambiguation (Bunescu and Pasca, 2006; Cucerzan, 2007) and word sense disambiguation (Mihalcea, 2007; Ponzetto and Navigli, 2010) systems. Articles in Wikipedia often contain mentions of concepts or entities that already have a corresponding article. When contributing authors mention an existing Wikipedia entity inside an article, they are required to link at least its first mention to the corresponding article, by using links or piped links. Consider, for example, the following Wiki source annotations: The [[capital city|capital]] of Georgia is [[Atlanta] ]. The bracketed strings identify the title of the Wikipedia articles that describe the corresponding named entities. If the editor wants a different string displayed in the rendered text, then the alternative string is included in a piped link, after the title string. Based on these Wiki processing rules, the text that is rendered for the aforementioned example is: The capital of Georgia is Atlanta.", |
|
"cite_spans": [ |
|
{ |
|
"start": 152, |
|
"end": 179, |
|
"text": "(Ponzetto and Strube, 2006;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 180, |
|
"end": 205, |
|
"text": "Haghighi and Klein, 2009;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 206, |
|
"end": 224, |
|
"text": "Bryl et al., 2010;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 225, |
|
"end": 245, |
|
"text": "Rahman and Ng, 2011)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 270, |
|
"end": 283, |
|
"text": "(Milne, 2007;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 284, |
|
"end": 300, |
|
"text": "Li et al., 2007;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 301, |
|
"end": 323, |
|
"text": "Potthast et al., 2008;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 324, |
|
"end": 345, |
|
"text": "Cimiano et al., 2009)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 370, |
|
"end": 388, |
|
"text": "(Ahn et al., 2004;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 389, |
|
"end": 403, |
|
"text": "Kaisser, 2008;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 404, |
|
"end": 426, |
|
"text": "Ferrucci et al., 2010)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 579, |
|
"end": 604, |
|
"text": "(Bunescu and Pasca, 2006;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 605, |
|
"end": 620, |
|
"text": "Cucerzan, 2007)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 651, |
|
"end": 667, |
|
"text": "(Mihalcea, 2007;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 668, |
|
"end": 695, |
|
"text": "Ponzetto and Navigli, 2010)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 1079, |
|
"end": 1103, |
|
"text": "[[capital city|capital]]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1118, |
|
"end": 1128, |
|
"text": "[[Atlanta]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction and Motivation", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Since many words and names mentioned in Wikipedia articles are inherently ambiguous, their corresponding links can be seen as a useful source of supervision for training named entity and word sense disambiguation systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction and Motivation", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "For example, Wikipedia contains articles that describe possible senses of the word \"capital\", such as CAPITAL CITY, CAPITAL (ECONOMICS), FINANCIAL CAPITAL, or HUMAN CAPITAL, to name only a few. When disambiguating a word or a phrase in Wikipedia, a contributor uses the context to determine the appropriate Wikipedia title to include in the link. In the example above, the editor of the article determined that the word \"capital\" was mentioned with the political center meaning, consequently it was mapped to the article CAPITAL CITY through a piped link.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction and Motivation", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In order to use Wikipedia links for training a WSD system for a given word, one needs first to define a sense repository that specifies the possible meanings for that word, and then use the Wikipedia links to create training examples for each sense in the repository. This approach might be implemented using the following sequence of steps:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction and Motivation", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In global climate models, the state and properties of the [[atmosphere] ] are specified at a number of discrete locations General = ATMOSPHERE; Specific = ATMOSPHERE OF EARTH; Label = A \u2192 A(S) \u2192 AE", |
|
"cite_spans": [ |
|
{ |
|
"start": 58, |
|
"end": 71, |
|
"text": "[[atmosphere]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction and Motivation", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The principal natural phenomena that contribute gases to the [[Atmosphere of Earth|atmosphere]] are emissions from volcanoes General = ATMOSPHERE; Specific = ATMOSPHERE OF EARTH; Label = A \u2192 A(S) \u2192 AE An aerogravity assist is a spacecraft maneuver designed to change velocity when arriving at a body with an [[atmosphere] ] General = ATMOSPHERE; Specific = ATMOSPHERE \u22b2 generic; Label = A \u2192 A(G)", |
|
"cite_spans": [ |
|
{ |
|
"start": 308, |
|
"end": 321, |
|
"text": "[[atmosphere]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction and Motivation", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Assuming the planet's [[atmosphere] ] is close to equilibrium, it is predicted that 55 Cancri d is covered with water clouds General = ATMOSPHERE; Specific = ATMOSPHERE OF CANCRI \u22b2 missing; A \u2192 A(G)", |
|
"cite_spans": [ |
|
{ |
|
"start": 22, |
|
"end": 35, |
|
"text": "[[atmosphere]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction and Motivation", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Figure 1: Coarse and fine grained sense annotations in Wikipedia (bold). The proposed hierarchical Label (right). A(S) = ATMOSPHERE (S), A(G) = ATMOSPHERE (G), A = ATMOSPHERE, AE = ATMOSPHERE OF EARTH.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction and Motivation", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "1. Collect all Wikipedia titles that are linked from the ambiguous anchor word. 2. Create a repository of senses from all titles that have sufficient support in Wikipedia i.e., titles that are referenced at least a predefined minimum number of times using the ambiguous word as anchor. 3. Use the links extracted for each sense in the repository as labeled examples for that sense and train a WSD model to distinguish between alternative senses of the ambiguous word.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction and Motivation", |
|
"sec_num": "1" |
|
}, |
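
{

"text": "As an illustration only, the following minimal Python sketch shows how the three steps above might be realized; the (anchor, title, context) link format and all function names are our assumptions, not part of the paper:\n\nfrom collections import Counter\n\nMIN_SUPPORT = 20  # minimum number of linked anchors, as in Section 2\n\ndef build_sense_repository(links, word):\n    # links: list of (anchor_text, target_title, context) triples extracted\n    # from the Wikipedia link annotations for one ambiguous word.\n    links = [l for l in links if l[0].lower() == word]\n    # Steps 1-2: count the linked titles and keep those with sufficient support.\n    counts = Counter(title for _, title, _ in links)\n    repository = {t for t, c in counts.items() if c >= MIN_SUPPORT}\n    # Step 3: the surviving links become labeled training examples for the WSD model.\n    examples = [(context, title) for _, title, context in links if title in repository]\n    return repository, examples",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Introduction and Motivation",

"sec_num": "1"

},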
|
{ |
|
"text": "Taking the word \"atmosphere\" as an example, the first step would result in a wide array of titles, ranging from the general ATMOSPHERE and its instantiations ATMOSPHERE OF EARTH or ATMO-SPHERE OF MARS, to titles as diverse as ATMO-SPHERE (UNIT), MOOD (PSYCHOLOGY), or AT-MOSPHERE (MUSIC GROUP). In the second step, the most frequent titles for the anchor word \"atmosphere\" would be assembled into a repository R = {ATMOSPHERE, ATMOSPHERE OF EARTH, AT-MOSPHERE OF MARS, ATMOSPHERE OF VENUS, STELLAR ATMOSPHERE, ATMOSPHERE (UNIT), ATMOSPHERE (MUSIC GROUP)}. The classifier trained in the third step would use features extracted from the context to discriminate between word senses. This Wikipedia-based approach to creating training data for word sense disambiguation has a major shortcoming. Many of the training examples extracted for the title ATMOSPHERE could very well belong to more specific titles such as ATMOSPHERE OF EARTH or ATMOSPHERE OF MARS. Whenever the word \"atmosphere\" is used in a context with the sense of \"a layer of gases that may surround a ma-terial body of sufficient mass, and that is held in place by the gravity of the body,\" the contributor has the option of adding a link either to the title AT-MOSPHERE that describes this general sense of the word, or to the title of an article that describes the atmosphere of the actual celestial body that is referred in that particular context, as shown in the first 2 examples in Figure 1 . As shown in bold in Figure 1, different occurrences of the same word may be tagged with either a general or a specific link, an ambiguity that is pervasive in Wikipedia for words like \"atmosphere\" that have general senses that subsume multiple, popular specific senses. There does not seem to be a clear, general rule underlying the decision to tag a word or a phrase with a general or specific sense link in Wikipedia. We hypothesize that, in some cases, editors may be unaware that an article exists in Wikipedia for the actual reference of a word or for a more specific sense of the word, and therefore they end up using a link to an article describing the general sense of the word. There is also the possibility that more specific articles are introduced only in newer versions of Wikipedia, and thus earlier annotations were not aware of these recent articles. Furthermore, since annotating words with the most specific sense available in Wikipedia may require substantial cognitive effort, editors may often choose to link to a general sense of the word, a choice that is still correct, yet less informative than the more specific sense.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1449, |
|
"end": 1457, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1480, |
|
"end": 1486, |
|
"text": "Figure", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction and Motivation", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In order to get a sense of the potential magnitude of the general vs. specific sense annotation ambiguity, we extracted all Wikipedia link annotations for the words \"atmosphere\", \"president\", \"game\", \"dollar\", \"diamond\" and \"Corinth\", and created a special subset from those that were labeled by Wikipedia editors with the general sense links AT-MOSPHERE, PRESIDENT, GAME, DOLLAR, DIA-MOND, and CORINTH, respectively. Then, for each of the 7,079 links in this set, we used the context to manually determine the corresponding more specific title, whenever such a title exists in Wikipedia. The statistics in Tables 1 and 2 show a significant overlap between the general and specific sense categories. For example, out of the 932 links from \"atmosphere\" to ATMOSPHERE that were extracted in total, 518 were actually about the ATMOSPHERE OF EARTH, but the user linked them to the more general sense category ATMOSPHERE. On the other hand, there are 345 links to ATMOSPHERE OF EARTH that were explicitly made by the user. We manually assigned general links (G) whenever the word is used with a generic sense, or when the reference is not available in the repository of titles collected for that word because either the more specific title does not exist in Wikipedia or the specific title exists, but it does not have sufficient support -at least 20 linked anchors -in Wikipedia. We grouped the more specific links for any given sense into a special category suffixed with (S), to distinguish them from the general links (generic use, or missing reference) that were grouped into the category suffixed with (G).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 607, |
|
"end": 621, |
|
"text": "Tables 1 and 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Annotation Inconsistencies in Wikipedia", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "For many ambiguous words, the annotation inconsistencies appear when the word has senses that are in a subsumption relationship: the ATMO-SPHERE OF EARTH is an instance of ATMOSPHERE, whereas a STELLAR ATMOSPHERE is a particular type of ATMOSPHERE. Subsumed senses can be identified automatically using the category graph in Wikipedia. The word \"Corinth\" is an interesting case: the subsumption relationship between AN-CIENT CORINTH and CORINTH appears because of a temporal constraint. Furthermore, in the case of the word \"diamond\", the annotation inconsistencies are not caused by a subsumption relation between senses. Instead of linking to the DIAMOND (GEM-STONE) sense, Wikipedia contributors often link to the related DIAMOND sense indicating the mineral used in the gemstone.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotation Inconsistencies in Wikipedia", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "A supervised learning algorithm that uses the extracted links for training a WSD classification model to distinguish between categories in the sense repository assumes implicitly that the categories, and hence their training examples, are mutually disjoint. This assumption is clearly violated for words like \"atmosphere,\" consequently the learned model will have a poor performance on distinguishing between the overlapping categories. Alternatively, we can say that sense categories like ATMOSPHERE are ill defined, since their supporting dataset contains examples that could also belong to more specific sense categories such as ATMOSPHERE OF EARTH. We see two possible solutions to the problem of inconsistent link annotations. In one solution, specific senses are grouped together with the subsuming general sense, such that all categories in the resulting repository become disjoint. For \"atmosphere\", the general category ATMOSPHERE would be augmented to contain all the links previously annotated as ATMOSPHERE, ATMOSPHERE OF EARTH, AT-MOSPHERE OF MARS, ATMOSPHERE OF VENUS, or STELLAR ATMOSPHERE. This solution is straightforward to implement, however it has the disadvantage that the resulting WSD model will never link words to more specific titles in Wikipedia like ATMOSPHERE OF MARS.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotation Inconsistencies in Wikipedia", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Another solution is to reorganize the original sense repository into a hierarchical classification scheme such that sense categories at each classification level become mutually disjoint. The resulting WSD system has the advantage that it can make fine grained sense distinctions for an ambiguous word, despite the annotation inconsistencies present in the training data. The rest of this paper describes a feasible implementation for this second solution that does not require any manual annotation beyond the links that are already provided by Wikipedia volunteers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotation Inconsistencies in Wikipedia", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Sense Disambiguation Figure 2 shows our proposed hierarchical classification scheme for disambiguation, using \"atmosphere\" as the ambiguous word. Shaded leaf nodes show the final categories in the sense repository for each word, whereas the doted elliptical frames on the second level in the hierarchy denote artificial categories introduced to enable a finer grained classification into more specific senses. Thick dotted arrows illustrate the classification decisions that are made in order to obtain a fine grained disambiguation of the word. Thus, the word \"atmosphere\" is first classified to have the general sense ATMO-SPHERE, i.e. \"a layer of gases that may surround a material body of sufficient mass, and that is held in place by the gravity of the body\". In the first solution, the disambiguation process would stop here and output the general sense ATMOSPHERE. In the second solution, the disambiguation process continues and further classifies the word to be a reference to ATMOSPHERE OF EARTH. To get to this final classification, the process passes through an intermediate binary classification level where it determines whether the word has a more specific sense covered in Wikipedia, corresponding to the artificial category ATMOSPHERE (S). If the answer is no, the system stops the disambiguation process and outputs the general sense category ATMOSPHERE. This basic sense hierarchy can be replicated depending on the existence of even finer sense distinctions in Wikipedia. For example, Wikipedia articles describing atmospheres of particular stars could be used to further refine STELLAR ATMOSPHERE with two additional levels of the type Level 2 and Level 3. Overall, the proposed disambiguation scheme could be used to relabel the ATMOSPHERE links in Wikipedia with more specific, and therefore more informative, senses such as ATMOSPHERE OF EARTH. In general, the Wikipedia category graph could be used to automatically create hierarchical structures for re- lated senses of the same word.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 29, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Learning for Coarse to Fine Grained", |
|
"sec_num": "3" |
|
}, |
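
{

"text": "The decision cascade of Figure 2 can be sketched as follows; this is a sketch under our assumptions (the three trained classifiers are passed in, each exposing a predict method), not an interface prescribed by the paper:\n\ndef disambiguate(x, level1, level2, level3):\n    # Level 1: coarse sense, e.g. ATMOSPHERE vs. ATMOSPHERE (UNIT) vs. ATMOSPHERE (MUSIC GROUP).\n    coarse = level1.predict(x)\n    if coarse != 'ATMOSPHERE':\n        return coarse\n    # Level 2: binary decision between ATMOSPHERE (G) and ATMOSPHERE (S),\n    # trained with positive and unlabeled examples (Section 3.1).\n    if level2.predict(x) == 'ATMOSPHERE (G)':\n        return 'ATMOSPHERE'  # generic use, or reference missing from the repository\n    # Level 3: choose among the specific senses, e.g. ATMOSPHERE OF EARTH.\n    return level3.predict(x)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Learning for Coarse to Fine Grained Sense Disambiguation",

"sec_num": "3"

},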
|
{ |
|
"text": "Training word sense classifiers for Levels 1 and 3 is straightforward. For Level 1, Wikipedia links that are annotated by users as ATMOSPHERE, ATMO-SPHERE OF EARTH, ATMOSPHERE OF MARS, AT-MOSPHERE OF VENUS, or STELLAR ATMOSPHERE are collected as training examples for the general sense category ATMOSPHERE. Similarly, links that are annotated as ATMOSPHERE (UNIT) and ATMO-SPHERE (MUSIC GROUP) will be used as training examples for the two categories, respectively. A multiclass classifier is then trained to distinguish between the three categories at this level. For Level 3, a multiclass classifiers is trained on Wikipedia links collected for each of the 4 specific senses.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning for Coarse to Fine Grained", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "For the binary classifier at Level 2, we could use as training examples for the category ATMO-SPHERE (G) all Wikipedia links that were annotated as ATMOSPHERE, whereas for the category ATMOSPHERE (S) we could use as training examples all Wikipedia links that were annotated specifically as ATMOSPHERE OF EARTH, ATMOSPHERE OF MARS, ATMOSPHERE OF VENUS, or STELLAR ATMOSPHERE. A traditional binary classification SVM could be trained on this dataset to distinguish between the two categories. We call this approach Naive SVM, since it does not account for the fact that a significant number of the links that are annotated by Wikipedia contributors as ATMOSPHERE should actually belong to the ATMOSPHERE (S) categoryabout 60% of them, according to Table 1 . Instead, we propose treating all ATMOSPHERE links as unlabeled examples. If we consider the specific links in ATMOSPHERE (S) to be positive examples, then the problem becomes one of learning with positive and unlabeled examples.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 746, |
|
"end": 753, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Learning for Coarse to Fine Grained", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "This general type of semi-supervised learning has been studied before in the context of tasks such as text classification and information retrieval (Lee and , or bioinformatics . In this setting, the training data consists of positive examples x \u2208 P and unlabeled examples x \u2208 U . Following the notation of , we define s(x) = 1 if the example is positive and s(x) = \u22121 if the example is unlabeled. The true label of an example is y(x) = 1 if the example is positive and y(x) = \u22121 if the example is negative. Thus, x \u2208 P \u21d2 s(x) = y(x) = 1 and x \u2208 U \u21d2 s(x) = \u22121 i.e., the true label y(x) of an unlabeled example is unknown. For the experiments reported in this paper, we use our implementation of two state-of-the-art approaches to Learning with Positive and Unlabeled (LPU) examples: the Biased SVM formulation of and the Weighted Samples SVM formulation of . The original version of Biased SVM was designed to maximize the product between precision and recall. In the next section we describe a modification to the Biased SVM approach that can be used to maximize accuracy, a measure that is often used to evaluate WSD performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning with positive and unlabeled examples", |
|
"sec_num": "3.1" |
|
}, |
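
{

"text": "A minimal sketch of this data layout, assuming for illustration that the feature matrices are NumPy arrays (the toy data below is not from the paper):\n\nimport numpy as np\n\n# Toy stand-ins: P holds features of links annotated with a specific sense\n# (the positives), U holds features of links annotated with the general sense.\nP = np.random.rand(5, 3)\nU = np.random.rand(8, 3)\nX = np.vstack([P, U])\ns = np.concatenate([np.ones(len(P)), -np.ones(len(U))])  # observed labeling s(x)\n# The true label y(x) of the rows coming from U is unknown at training time.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Learning with positive and unlabeled examples",

"sec_num": "3.1"

},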
|
{ |
|
"text": "In the Biased SVM formulation (Lee and , all unlabeled examples are considered to be negative and the decision function f (x) = w T \u03c6(x) + b is learned using the standard soft-margin SVM formulation shown in Figure 3 . minimize: The capacity parameters C P and C U control how much we penalize errors on positive examples vs. errors on unlabeled examples. Since not all unlabeled examples are negative, one would want to select capacity parameters satisfying C P > C U , such that false negative errors are penalized more than false positive errors. In order to find the best capacity parameters to use during training, the Biased SVM approach runs a grid search on a separate development dataset. This search is aimed at finding values for the parameters C P and C U that maximize pr, the product between precision p = p(y = 1|f = 1) and recall r = p(f = 1|y = 1). Lee and show that maximizing the pr criterion is equivalent with maximizing the objective r 2 /p(f = 1), where both r = p(f = 1|y = 1) and p(f = 1) can be estimated using the trained decision function f (x) on the development dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 208, |
|
"end": 216, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The Biased SVM", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "1 2 w 2 + C P x\u2208P \u03be x + C U x\u2208U \u03be x subject to: s(x) w T \u03c6(x) + b \u2265 1 \u2212 \u03be x \u03be x \u2265 0, \u2200x \u2208 P \u222a U", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Biased SVM", |
|
"sec_num": "3.1.1" |
|
}, |
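
{

"text": "A minimal sketch of this formulation using scikit-learn (our choice of library; the paper uses the SVM light package). Asymmetric penalties are obtained through class weights, so that errors on positives cost C_P and errors on unlabeled examples cost C_U:\n\nfrom sklearn.svm import LinearSVC\n\ndef train_biased_svm(X, s, C_U, C_P):\n    # Soft-margin SVM that treats all unlabeled examples (s = -1) as negative.\n    # The effective per-class penalty is C * class_weight, i.e. C_P for the\n    # positive class and C_U for the unlabeled class.\n    clf = LinearSVC(C=C_U, class_weight={1: C_P / C_U, -1: 1.0})\n    clf.fit(X, s)\n    return clf",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The Biased SVM",

"sec_num": "3.1.1"

},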
|
{ |
|
"text": "Maximizing the pr criterion in the original Biased SVM formulation was motivated by the need to optimize the F measure in information retrieval settings, where F = 2pr(p + r). In the rest of this section we show that classification accuracy can be maximized using only positive and unlabeled examples, an important result for problems where classification accuracy is the target performance measure.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Biased SVM", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "The accuracy of a binary decision function f (x) is, by definition, acc = p(f = 1|y = 1) + p(f = \u22121|y = \u22121). Since the recall is r = p(f = 1|y = 1), the accuracy can be re-written as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Biased SVM", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "acc = r + 1 \u2212 p(f = 1|y = \u22121)", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "The Biased SVM", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "Using Bayes' rule twice, the false positive term p(f = 1|y = \u22121) can be re-written as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Biased SVM", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(f = 1|y = \u22121) = p(f = 1)p(y = \u22121|f = 1) p(y = \u22121) = p(f = 1) p(y = \u22121) \u00d7 (1 \u2212 p(y = 1|f = 1)) = p(f = 1) p(y = \u22121) \u2212 p(f = 1) p(y = \u22121) \u00d7 p(y = 1)p(f = 1|y = 1) p(f = 1) = p(f = 1) \u2212 p(y = 1) \u00d7 r p(y = \u22121)", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "The Biased SVM", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "Plugging identity 2 in Equation 1 leads to:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Biased SVM", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "acc = 1 + r + r \u00d7 p(y = 1) \u2212 p(f = 1) p(y = \u22121) = 1 + r \u2212 p(f = 1) p(y = \u22121)", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "The Biased SVM", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "Since p(y = \u22121) can be assimilated with a constant, Equation 3 implies that maximizing accuracy is equivalent with maximizing the criterion r \u2212 p(f = 1), where both the recall r and p(f = 1) can be estimated on the positive and unlabeled examples from a separate development dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Biased SVM", |
|
"sec_num": "3.1.1" |
|
}, |
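
{

"text": "A minimal sketch of the resulting model selection loop, reusing the train_biased_svm sketch above and assuming a development set X_dev, s_dev of positive and unlabeled examples:\n\nimport numpy as np\n\ndef accuracy_criterion(clf, X_dev, s_dev):\n    # Estimate r - p(f = 1): recall on the positives, p(f = 1) on all dev examples.\n    f = clf.predict(X_dev)\n    r = np.mean(f[s_dev == 1] == 1)\n    return r - np.mean(f == 1)\n\ndef grid_search(X, s, X_dev, s_dev, C_U_grid, ratio_grid):\n    # Return the (C_U, C_P) pair whose classifier maximizes the criterion.\n    pairs = [(C_U, C_U * j) for C_U in C_U_grid for j in ratio_grid]\n    return max(pairs, key=lambda p: accuracy_criterion(\n        train_biased_svm(X, s, *p), X_dev, s_dev))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The Biased SVM",

"sec_num": "3.1.1"

},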
|
{ |
|
"text": "In conclusion, one can use the original Biased SVM formulation to maximize r 2 /p(f = 1), which has been shown by to maximize pr, a criterion that has a similar behavior with the F-measure used in retrieval applications. Alternatively, if the target performance measure is accuracy, we can choose instead to maximize r \u2212 p(f = 1), which we have shown above to correspond to accuracy maximization. introduced two approaches for learning with positive and unlabeled data. Both approaches are based on the assumption that labeled examples {x|s(x) = 1} are selected at random from the positive examples {x|y(x) = 1} i.e., p(s = 1|x, y = 1) = p(s = 1|y = 1). Their best performing approach uses the positive and unlabeled examples to train two distinct classifiers. First, the dataset P \u222a U is split into a training set and a validation set, and a classifier g(x) is trained on the labeling s to approximate the label distribution i.e. g(x) = p(s = 1|x). The validation set is then used to estimate p(s = 1|y = 1) as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Biased SVM", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "p(s = 1|y = 1) = p(s = 1|x, y = 1) = 1 |P | x\u2208P g(x) (4)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Weighted Samples SVM", |
|
"sec_num": "3.1.2" |
|
}, |
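
{

"text": "Equation (4) amounts to averaging the calibrated scores of the first classifier over the validation positives. A one-line sketch, assuming g is a probabilistic classifier (e.g., an SVM with Platt scaling) and X_P holds the validation positives:\n\ndef estimate_c(g, X_P):\n    # Equation (4): c = p(s = 1|y = 1), approximated by the mean of g(x) over positives.\n    return g.predict_proba(X_P)[:, 1].mean()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The Weighted Samples SVM",

"sec_num": "3.1.2"

},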
|
{ |
|
"text": "The second and final classifier f (x) is trained on a dataset of weighted examples that are sampled from the original training set as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Weighted Samples SVM", |
|
"sec_num": "3.1.2" |
|
}, |
|
{ |
|
"text": "-Each positive example x \u2208 P is copied as a positive example in the new training set with weight p(y = 1|x, s = 1) = 1. -Each unlabeled example x \u2208 U is duplicated into two training examples in the new dataset: a positive example with weight p(y = 1|x, s = 0) and a negative example with weight p(y = \u22121|x, s = 0) = 1 \u2212 p(y = 1|x, s = 0). show that the weights above can be derived as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Weighted Samples SVM", |
|
"sec_num": "3.1.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(y = 1|x, s = 0) = 1\u2212p(s = 1|y = 1) p(s = 1|y = 1) \u00d7 p(s = 1|x) 1\u2212p(s = 1|x)", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "The Weighted Samples SVM", |
|
"sec_num": "3.1.2" |
|
}, |
|
{ |
|
"text": "The output of the first classifier g(x) is used to approximate the probability p(s = 1|x), whereas p(s = 1|y = 1) is estimated using Equation 4. The two classifiers g and f are trained using SVMs and a linear kernel. Platt scaling is used with the first classifier to obtain the probability estimates g(x) = p(s = 1|x), which are then converted into weights following Equations 4 and 5, and used during the training of the second classifier.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Weighted Samples SVM", |
|
"sec_num": "3.1.2" |
|
}, |
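
{

"text": "A minimal sketch of the weighted dataset construction; g and estimate_c are from the sketch above, and the final classifier can be any SVM that accepts per-sample weights, e.g. sklearn.svm.SVC(kernel='linear').fit(X, y, sample_weight=w):\n\nimport numpy as np\n\ndef weighted_training_set(g, c, X_P, X_U):\n    # Positives keep weight 1; each unlabeled example is duplicated into a\n    # positive copy weighted by p(y = 1|x, s = 0) from Equation (5) and a\n    # negative copy with the complementary weight.\n    p_s1 = g.predict_proba(X_U)[:, 1]        # g(x) approximates p(s = 1|x)\n    w_pos = (1 - c) / c * p_s1 / (1 - p_s1)  # Equation (5)\n    X = np.vstack([X_P, X_U, X_U])\n    y = np.concatenate([np.ones(len(X_P)), np.ones(len(X_U)), -np.ones(len(X_U))])\n    w = np.concatenate([np.ones(len(X_P)), w_pos, 1 - w_pos])\n    return X, y, w",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The Weighted Samples SVM",

"sec_num": "3.1.2"

},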
|
{ |
|
"text": "We ran disambiguation experiments on the 6 ambiguous words atmosphere, president, dollar, game, diamond and Corinth. The corresponding Wikipedia sense repositories have been summarized in Tables 1 and 2. All WSD classifiers used the same set of standard WSD features (Ng and Lee, 1996; Stevenson and Wilks, 2001 ), such as words and their part-ofspeech tags in a window of 3 words around the ambiguous keyword, the unigram and bigram content words that are within 2 sentences of the current sentence, the syntactic governor of the keyword, and its chains of syntactic dependencies of lengths up to two. Furthermore, for each example, a Wikipedia specific feature was computed as the cosine similarity between the context of the ambiguous word and the text of the article for the target sense or reference.", |
|
"cite_spans": [ |
|
{ |
|
"start": 267, |
|
"end": 285, |
|
"text": "(Ng and Lee, 1996;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 286, |
|
"end": 311, |
|
"text": "Stevenson and Wilks, 2001", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Evaluation", |
|
"sec_num": "4" |
|
}, |
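
{

"text": "The Wikipedia-specific feature can be sketched as follows; the paper does not specify its term weighting, so TF-IDF over the two texts is our assumption:\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\ndef wiki_similarity_feature(context, article_text):\n    # Cosine similarity between the ambiguous word's context and the text of\n    # the Wikipedia article describing the candidate sense or reference.\n    vectors = TfidfVectorizer().fit_transform([context, article_text])\n    return cosine_similarity(vectors[0:1], vectors[1:2])[0, 0]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experimental Evaluation",

"sec_num": "4"

},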
|
{ |
|
"text": "The Level 1 and Level 3 classifiers were trained using the SVM multi component of the SVM light package. 1 The WSD classifiers were evaluated in a 4-fold cross validation scenario in which 50% of the data was used for training, 25% for tuning the capacity parameter C, and 25% for testing. The final accuracy numbers, shown in Table 3 The evaluation of the binary classifiers at the second level follows the same 4-fold cross validation scheme that was used for Level 1 and Level 3 . The manual labels for specific senses and references in the unlabeled datasets are always ignored during training and tuning and used only during testing.", |
|
"cite_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 106, |
|
"text": "1", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 327, |
|
"end": 334, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Evaluation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We compare the Naive SVM, Biased SVM, and Weighted SVM in the two evaluation settings, using for all of them the same train/development/test splits of the data and the same features. We emphasize that our manual labels are used only for testing purposes -the manual labels are ignored during training and tuning, when the data is assumed to contain only positive and unlabeled examples. We implemented the Biased SVM approach on top of the binary SVM light package. The C P and C U parameters of the Biased SVM were tuned through the c and j parameters of SVM light (c = C U and j = C P /C U ). Eventually, all three methods use the development data for tuning the c and j parameters of the SVM. However, whereas the Naive SVM tunes these parameters to optimize the accuracy with respect to the noisy label s(x), the Biased SVM tunes the same parameters to maximize an estimate of the accuracy or F-measure with respect to the true label y(x). The Weighted SVM approach was implemented on top of the LibSVM 2 package. Even though the original Weighted SVM method of does not specify tuning any parameters, we noticed it gave better results when the capacity c and weight j parameters were tuned for the first classifier g(x). Table 4 shows the accuracy results of the three methods for Level 2 , whereas Table 5 shows the Fmeasure results. The Biased SVM outperforms the Naive SVM on all the words, in terms of both accuracy and F-measure. The most dramatic increases are seen for the words atmosphere, game, diamond, and Corinth. For these words, the number of positive examples is significantly smaller compared to the total number of positive and unlabeled examples. Thus, the percentage of positive examples relative to the total number of positive and unlabeled examples is 31.9% for atmosphere, 29.1% for game, 9.0% for diamond, and 11.6% for Corinth. The positive to total ratio is however significantly larger for the other two words: 67.2% for president and 91.5% for dollar. When the number of positive examples is large, the false negative noise from the unlabeled dataset in the Naive SVM approach will be relatively small, hence the good performance of Naive SVM in these cases. To check whether this is the case, we have also run experiments where we used only half of the available positive examples for the word president and one tenth of the positive examples for the word dollar, such that the positive datasets became comparable in size with the unlabeled datasets. The results for these experiments are shown in Tables 4 and 5 in the rows labeled president S and dollar S . As expected, the difference between the performance of Naive SVM and Biased SVM gets larger on these smaller datasets, especially for the word dollar.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1226, |
|
"end": 1233, |
|
"text": "Table 4", |
|
"ref_id": "TABREF7" |
|
}, |
|
{ |
|
"start": 1304, |
|
"end": 1311, |
|
"text": "Table 5", |
|
"ref_id": "TABREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Evaluation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The Weighted SVM outperforms the Naive SVM on five out of the six words, the exception being the word president. Comparatively, the Biased SVM has a more stable behavior and overall results in a more substantial improvement over the Naive SVM. Based on these initial results, we see the Biased SVM as the method of choice for learning with positive and unlabeled examples in the task of coarse to fine grained sense disambiguation in Wikipedia. In a final set of experiments, we compared the traditional flat classification approach and our proposed hierarchical classifier in terms of their overall disambiguation accuracy. In these experiments, the sense repository contains all the leaf nodes as distinct sense categories. For example, the word atmosphere would correspond to the sense repository R = {ATMOSPHERE (G), ATMOSPHERE OF EARTH, ATMOSPHERE OF MARS, ATMOSPHERE OF VENUS, STELLAR ATMOSPHERE, ATMO-SPHERE (UNIT), ATMOSPHERE (MUSIC GROUP)}. The overall accuracy results are shown in Table 6 and confirm the utility of using the LPU framework in the hierarchical model, which outperforms the traditional flat model, especially on words with low ratio of positive to unlabeled examples. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 992, |
|
"end": 999, |
|
"text": "Table 6", |
|
"ref_id": "TABREF10" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Evaluation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Annotation inconsistencies in Wikipedia were circumvented by adapting two existing approaches that use only positive and unlabeled data to train binary classifiers. This binary classification constraint led to the introduction of the artificial specific (S) category on Level 2 in our disambiguation framework. In future work, we plan to investigate a direct extension of learning with positive and unlabeled data to the case of multiclass classification, which will reduce the number of classification levels from 3 to 2. We also plan to investigate the use of unsupervised techniques in order to incorporate less popular references of a word in the hierarchical classification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Future Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We presented an approach to training coarse to fine grained sense disambiguation systems that treats annotation inconsistencies in Wikipedia under the framework of learning with positive and unlabeled examples. Furthermore, we showed that the true accuracy of a decision function can be optimized using only positive and unlabeled examples. For testing purposes, we manually annotated 7,079 links belonging to six ambiguous words 3 . Experimental results demonstrate that accounting for annotation ambiguity in Wikipedia links leads to consistent improvements in disambiguation accuracy. The manual annotations were only used for testing and were ignored during training and development. Consequently, the proposed framework of learning with positive and unlabeled examples for sense disambiguation could be applied on the entire Wikipedia without any manual annotations. By augmenting general sense links with links to more specific articles, such an application could have a significant impact on Wikipedia itself.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://svmlight.joachims.org", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Data and code will be made publicly available.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work was supported in part by the National Science Foundation IIS awards #1018613 and #1018590, and an allocation of computing time from the Ohio Supercomputer Center.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Using Wikipedia at the TREC QA track", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Ahn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Jijkoun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Mishne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Muller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "De Rijke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Schlobach", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the 13th Text Retrieval Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. Ahn, V. Jijkoun, G. Mishne, K. Muller, M. de Ri- jke, and S. Schlobach. 2004. Using Wikipedia at the TREC QA track. In Proceedings of the 13th Text Re- trieval Conference (TREC 2004).", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Using background knowledge to support coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Volha", |
|
"middle": [], |
|
"last": "Bryl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claudio", |
|
"middle": [], |
|
"last": "Giuliano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luciano", |
|
"middle": [], |
|
"last": "Serafini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kateryna", |
|
"middle": [], |
|
"last": "Tymoshenko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 2010 conference on ECAI 2010: 19th European Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "759--764", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Volha Bryl, Claudio Giuliano, Luciano Serafini, and Kateryna Tymoshenko. 2010. Using background knowledge to support coreference resolution. In Pro- ceedings of the 2010 conference on ECAI 2010: 19th European Conference on Artificial Intelligence, pages 759-764, Amsterdam, The Netherlands.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Using encyclopedic knowledge for named entity disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Razvan", |
|
"middle": [], |
|
"last": "Bunescu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marius", |
|
"middle": [], |
|
"last": "Pasca", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceesings of the 11th Conference of the European Chapter of the Association for Computational Linguistics (EACL-06)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9--16", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Razvan Bunescu and Marius Pasca. 2006. Using ency- clopedic knowledge for named entity disambiguation. In Proceesings of the 11th Conference of the European Chapter of the Association for Computational Linguis- tics (EACL-06), pages 9-16, Trento, Italy.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Explicit versus latent concept models for cross-language information retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Cimiano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antje", |
|
"middle": [], |
|
"last": "Schultz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergej", |
|
"middle": [], |
|
"last": "Sizov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Sorg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steffen", |
|
"middle": [], |
|
"last": "Staab", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "International Joint Conference on Artificial Intelligence (IJCAI-09", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1513--1518", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Cimiano, Antje Schultz, Sergej Sizov, Philipp Sorg, and Steffen Staab. 2009. Explicit versus la- tent concept models for cross-language information re- trieval. In International Joint Conference on Artificial Intelligence (IJCAI-09, pages 1513-1518, Pasadena, CA, july.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Large-scale named entity disambiguation based on Wikipedia data", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Cucerzan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "708--716", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Cucerzan. 2007. Large-scale named entity disam- biguation based on Wikipedia data. In Proceedings of the Conference on Empirical Methods in Natural Lan- guage Processing, pages 708-716.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Learning classifiers from only positive and unlabeled data", |
|
"authors": [ |
|
{ |
|
"first": "Charles", |
|
"middle": [], |
|
"last": "Elkan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Keith", |
|
"middle": [], |
|
"last": "Noto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 14th ACM SIGKDD international conference on Knowledge discovery and data mining, KDD '08", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "213--220", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Charles Elkan and Keith Noto. 2008. Learning clas- sifiers from only positive and unlabeled data. In Proceedings of the 14th ACM SIGKDD international conference on Knowledge discovery and data mining, KDD '08, pages 213-220.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Building watson: An overview of the deepqa project", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Ferrucci", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Brown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jennifer", |
|
"middle": [], |
|
"last": "Chu-Carroll", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Gondek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aditya", |
|
"middle": [], |
|
"last": "Kalyanpur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Lally", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"William" |
|
], |
|
"last": "Murdock", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Nyberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Prager", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nico", |
|
"middle": [], |
|
"last": "Schlaefer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Welty", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "AI Magazine", |
|
"volume": "31", |
|
"issue": "3", |
|
"pages": "59--79", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David A. Ferrucci, Eric W. Brown, Jennifer Chu-Carroll, James Fan, David Gondek, Aditya Kalyanpur, Adam Lally, J. William Murdock, Eric Nyberg, John M. Prager, Nico Schlaefer, and Christopher A. Welty. 2010. Building watson: An overview of the deepqa project. AI Magazine, 31(3):59-79.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Simple coreference resolution with rich syntactic and semantic features", |
|
"authors": [ |
|
{ |
|
"first": "Aria", |
|
"middle": [], |
|
"last": "Haghighi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1152--1161", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aria Haghighi and Dan Klein. 2009. Simple coreference resolution with rich syntactic and semantic features. In Proceedings of the 2009 Conference on Empiri- cal Methods in Natural Language Processing, pages 1152-1161, Singapore, August.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "The QuALiM question answering demo: Supplementing answers with paragraphs drawn from Wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Kaisser", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the ACL-08 Human Language Technology Demo Session", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "32--35", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Kaisser. 2008. The QuALiM question answering demo: Supplementing answers with paragraphs drawn from Wikipedia. In Proceedings of the ACL-08 Hu- man Language Technology Demo Session, pages 32- 35, Columbus, Ohio.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Learning with positive and unlabeled examples using weighted logistic regression", |
|
"authors": [ |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Wee Sun Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the Twentieth International Conference on Machine Learning (ICML", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "448--455", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wee Sun Lee and Bing Liu. 2003. Learning with pos- itive and unlabeled examples using weighted logistic regression. In Proceedings of the Twentieth Interna- tional Conference on Machine Learning (ICML, pages 448-455, Washington, DC, August.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Improving weak ad-hoc queries using Wikipedia as external corpus", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Luk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Ho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Chung", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 30th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "797--798", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Y. Li, R. Luk, E. Ho, and K. Chung. 2007. Improv- ing weak ad-hoc queries using Wikipedia as external corpus. In Proceedings of the 30th Annual Interna- tional ACM SIGIR Conference on Research and De- velopment in Information Retrieval, pages 797-798, Amsterdam, Netherlands.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Building text classifiers using positive and unlabeled examples", |
|
"authors": [ |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoli", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wee", |
|
"middle": [], |
|
"last": "Sun Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the Third IEEE International Conference on Data Mining, ICDM '03", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "179--186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bing Liu, Yang Dai, Xiaoli Li, Wee Sun Lee, and Philip S. Yu. 2003. Building text classifiers using pos- itive and unlabeled examples. In Proceedings of the Third IEEE International Conference on Data Mining, ICDM '03, pages 179-186, Washington, DC, USA.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Using Wikipedia for automatic word sense disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Human Language Technologies 2007: The Conference of the North American Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "196--203", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Mihalcea. 2007. Using Wikipedia for automatic word sense disambiguation. In Human Language Technolo- gies 2007: The Conference of the North American Chapter of the Association for Computational Linguis- tics, pages 196-203, Rochester, New York, April.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Computing semantic relatedness using Wikipedia link structure", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Milne", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the New Zealand Computer Science Research Student Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. Milne. 2007. Computing semantic relatedness using Wikipedia link structure. In Proceedings of the New Zealand Computer Science Research Student Confer- ence, Hamilton, New Zealand.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Integrating multiple knowledge sources to disambiguate word sense: An exemplar-based approach", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Hwee Tou Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "Proceedings of the 34th", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hwee Tou Ng and H. B. Lee. 1996. Integrating multiple knowledge sources to disambiguate word sense: An exemplar-based approach. In Proceedings of the 34th", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Annual Meeting of the Association for Computational Linguistics (ACL-96)", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "40--47", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Annual Meeting of the Association for Computational Linguistics (ACL-96), pages 40-47, Santa Cruz, CA.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Learning to find relevant biological articles without negative training examples", |
|
"authors": [ |
|
{ |
|
"first": "Keith", |
|
"middle": [], |
|
"last": "Noto", |
|
"suffix": "" |
|
}, |
|
{

"first": "Milton",

"middle": [

"H"

],

"last": "Saier",

"suffix": "Jr."

},
|
{ |
|
"first": "Charles", |
|
"middle": [], |
|
"last": "Elkan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 21st Australasian Joint Conference on Artificial Intelligence: Advances in Artificial Intelligence, AI '08", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "202--213", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Keith Noto, Milton H. Saier, Jr., and Charles Elkan. 2008. Learning to find relevant biological articles without negative training examples. In Proceedings of the 21st Australasian Joint Conference on Artificial In- telligence: Advances in Artificial Intelligence, AI '08, pages 202-213.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Knowledge-rich word sense disambiguation rivaling supervised systems", |
|
"authors": [ |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Simone", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Ponzetto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Navigli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1522--1531", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Simone Paolo Ponzetto and Roberto Navigli. 2010. Knowledge-rich word sense disambiguation rivaling supervised systems. In Proceedings of the 48th Annual Meeting of the Association for Computational Linguis- tics, pages 1522-1531, Stroudsburg, PA, USA. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Exploiting semantic role labeling, wordnet and wikipedia for coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Simone", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Ponzetto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Strube", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the Human Language Technology Conference of the North American Chapter of the Association of Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "192--199", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Simone Paolo Ponzetto and Michael Strube. 2006. Ex- ploiting semantic role labeling, wordnet and wikipedia for coreference resolution. In Proceedings of the Hu- man Language Technology Conference of the North American Chapter of the Association of Computa- tional Linguistics, pages 192-199.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Wikipedia-based multilingual retrieval model", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Potthast", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Stein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Anderka", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 30th European Conference on IR Research", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Potthast, B. Stein, and M. A. Anderka. 2008. Wikipedia-based multilingual retrieval model. In Pro- ceedings of the 30th European Conference on IR Re- search, Glasgow.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Coreference resolution with world knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Altaf", |
|
"middle": [], |
|
"last": "Rahman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vincent", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "814--824", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Altaf Rahman and Vincent Ng. 2011. Coreference res- olution with world knowledge. In Proceedings of the 49th Annual Meeting of the Association for Compu- tational Linguistics: Human Language Technologies - Volume 1, pages 814-824, Stroudsburg, PA, USA. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "The interaction of knowledge sources in word sense disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Stevenson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yorick", |
|
"middle": [], |
|
"last": "Wilks", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Computational Linguistics", |
|
"volume": "27", |
|
"issue": "3", |
|
"pages": "321--349", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark Stevenson and Yorick Wilks. 2001. The interaction of knowledge sources in word sense disambiguation. Computational Linguistics, 27(3):321-349, Septem- ber.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Hierarchical disambiguation scheme, from coarse to fine grained senses.", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"text": "Biased SVM optimization problem.", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"text": "", |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"text": "Wiki (CAPS) and manual (italics) annotations.", |
|
"type_str": "table" |
|
}, |
|
"TABREF4": { |
|
"content": "<table><tr><td/><td colspan=\"2\">atmosphere president</td><td>dollar</td></tr><tr><td>Level1</td><td>93.1%</td><td>-</td><td>94.1%</td></tr><tr><td>Level3</td><td>85.6%</td><td>82.2%</td><td>90.8%</td></tr><tr><td/><td>game</td><td>diamond</td><td>Corinth</td></tr><tr><td>Level1</td><td>82.9%</td><td>95.5%</td><td>92.7%</td></tr><tr><td>Level3</td><td>92.9%</td><td>-</td><td>-</td></tr></table>", |
|
"html": null, |
|
"num": null, |
|
"text": ", were computed by averaging the results over the 4 folds. Since the word president has only one sense on Level 1 , no classifier needed to be trained for this case. Similarly, words diamond and Corinth have only one sense on Level 3 .", |
|
"type_str": "table" |
|
}, |
|
"TABREF5": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"text": "Disambiguation accuracy at Levels 1 & 3.", |
|
"type_str": "table" |
|
}, |
|
"TABREF6": { |
|
"content": "<table><tr><td>Word</td><td colspan=\"3\">NaiveSVM BiasedSVM WeightedSVM</td></tr><tr><td>atmosphere</td><td>39.9%</td><td>79.6%</td><td>75.0%</td></tr><tr><td>president</td><td>91.9%</td><td>92.5%</td><td>89.5%</td></tr><tr><td>dollar</td><td>96.0%</td><td>97.0%</td><td>97.1%</td></tr><tr><td>game</td><td>83.8%</td><td>87.1%</td><td>84.6%</td></tr><tr><td>diamond</td><td>70.2%</td><td>74.5%</td><td>75.1%</td></tr><tr><td>Corinth</td><td>46.2%</td><td>75.1%</td><td>51.9%</td></tr><tr><td>presidentS</td><td>88.1%</td><td>90.6%</td><td>87.4%</td></tr><tr><td>dollarS</td><td>70.3%</td><td>84.9%</td><td>70.6%</td></tr></table>", |
|
"html": null, |
|
"num": null, |
|
"text": "http://www.csie.ntu.edu.tw/\u02dccjlin/libsvm", |
|
"type_str": "table" |
|
}, |
|
"TABREF7": { |
|
"content": "<table><tr><td>Word</td><td colspan=\"3\">NaiveSVM BiasedSVM WeightedSVM</td></tr><tr><td>atmosphere</td><td>30.5%</td><td>86.0%</td><td>83.2%</td></tr><tr><td>president</td><td>94.4%</td><td>95.0%</td><td>92.8%</td></tr><tr><td>dollar</td><td>97.9%</td><td>98.4%</td><td>98.5%</td></tr><tr><td>game</td><td>75.1%</td><td>81.8%</td><td>77.5%</td></tr><tr><td>diamond</td><td>8.6%</td><td>53.5%</td><td>46.3%</td></tr><tr><td>Corinth</td><td>15.3%</td><td>81.2%</td><td>68.0%</td></tr><tr><td>presidentS</td><td>90.0%</td><td>92.4%</td><td>89.5%</td></tr><tr><td>dollarS</td><td>77.9%</td><td>91.2%</td><td>78.2%</td></tr></table>", |
|
"html": null, |
|
"num": null, |
|
"text": "Disambiguation accuracy at Level 2 .", |
|
"type_str": "table" |
|
}, |
|
"TABREF8": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"text": "Disambiguation F-measure at Level 2 .", |
|
"type_str": "table" |
|
}, |
|
"TABREF10": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"text": "Flat vs. Hierarchical disambiguation accuracy.", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |