|
{ |
|
"paper_id": "S12-1049", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:24:14.144283Z" |
|
}, |
|
"title": "SemEval-2012 Task 4: Evaluating Chinese Word Similarity", |
|
"authors": [ |
|
{ |
|
"first": "Jin", |
|
"middle": [ |
|
"Yunfang" |
|
], |
|
"last": "Peng", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Leshan Normal University Peking University Leshan", |
|
"location": { |
|
"postCode": "614000, 100871", |
|
"settlement": "Beijing", |
|
"country": "China, China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Leshan Normal University Peking University Leshan", |
|
"location": { |
|
"postCode": "614000, 100871", |
|
"settlement": "Beijing", |
|
"country": "China, China" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This task focuses on evaluating word similarity computation in Chinese. We follow the way of Finkelstein et al. (2002) to select word pairs. Then we organize twenty undergraduates who are major in Chinese linguistics to annotate the data. Each pair is assigned a similarity score by each annotator. We rank the word pairs by the average value of similar scores among the twenty annotators. This data is used as gold standard. Four systems participating in this task return their results. We evaluate their results on gold standard data in term of Kendall's tau value, and the results show three of them have a positive correlation with the rank manually created while the taus' value is very small.", |
|
"pdf_parse": { |
|
"paper_id": "S12-1049", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This task focuses on evaluating word similarity computation in Chinese. We follow the way of Finkelstein et al. (2002) to select word pairs. Then we organize twenty undergraduates who are major in Chinese linguistics to annotate the data. Each pair is assigned a similarity score by each annotator. We rank the word pairs by the average value of similar scores among the twenty annotators. This data is used as gold standard. Four systems participating in this task return their results. We evaluate their results on gold standard data in term of Kendall's tau value, and the results show three of them have a positive correlation with the rank manually created while the taus' value is very small.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The goal of word similarity is to compute the similarity degree between words. It is widely used in natural language processing to alleviate data sparseness which is an open problem in this field. Many research have focus on English language (Lin, 1998; Curran and Moens, 2003; Dinu and Lapata, 2010) , some of which rely on the manual created thesaurus such as WordNet (Budanitsky and Hirst, 2006) , some of which obtain the similarity of the words via large scale corpus (Lee, 1999) , and some research integrate both thesaurus and corpus (Fujii et al., 1997) . This task tries to evaluate the approach on word similarity for Chinese language. To the best of our knowledge, this is first release of benchmark data for this study.", |
|
"cite_spans": [ |
|
{ |
|
"start": 242, |
|
"end": 253, |
|
"text": "(Lin, 1998;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 254, |
|
"end": 277, |
|
"text": "Curran and Moens, 2003;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 278, |
|
"end": 300, |
|
"text": "Dinu and Lapata, 2010)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 370, |
|
"end": 398, |
|
"text": "(Budanitsky and Hirst, 2006)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 473, |
|
"end": 484, |
|
"text": "(Lee, 1999)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 541, |
|
"end": 561, |
|
"text": "(Fujii et al., 1997)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In English language, there are two data sets: Rubenstein and Goodenough (1965) and Finkelstein et al. (2002) created a ranking of word pairs as the benchmark data. Both of them are manually annotated. In this task, we follow the way to create the data and annotate the similarity score between word pairs by twenty Chinese native speakers. Finkelstein et al. (2002) carried out a psycholinguistic experiment: they selected out 353 word pairs, then ask the annotators assign a numerical similarity score between 0 and 10 (0 denotes that words are totally unrelated, 10 denotes that words are VERY closely related) to each pair. By definition, the similarity of the word to itself should be 10. A fractional score is allowed.", |
|
"cite_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 78, |
|
"text": "Goodenough (1965)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 83, |
|
"end": 108, |
|
"text": "Finkelstein et al. (2002)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 340, |
|
"end": 365, |
|
"text": "Finkelstein et al. (2002)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "It should be noted that besides the rank of word pairs, the thesaurus such as Roget's thesaurus are often used for word similarity study (Gorman and Curran, 2006) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 137, |
|
"end": 162, |
|
"text": "(Gorman and Curran, 2006)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The paper is organized as follows. In section 2 we describe in detail the process of the data preparation. Section 3 introduces the four participating systems. Section 4 reports their results and gives a brief discussion.. And finally in section 5 we bring forward some suggestions for the next campaign and conclude the paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We use wordsim 353 (Finkelstein et al., 2002) as the original data set. First, each word pair is translated into Chinese by two undergraduates who are fluent in English. 169 word pairs are the same in their translation results. To the rest 184 word pairs, the third undergraduate student check them following the rules:", |
|
"cite_spans": [ |
|
{ |
|
"start": 19, |
|
"end": 45, |
|
"text": "(Finkelstein et al., 2002)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Set", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "(i) Single character vs. two characters. If one translator translate one English word into the Chinese word which consists only one Chinese character and the other use two characters to convey the translation, we will prefer to the later provided that these two translations are semantically same. For example, \"tiger\" is translated into \"\u864e\" and \"\u8001\u864e\", we will treat them as same and use \"\u8001\u864e\" as the final translation. This was the same case in \"drug\" (\"\u836f\" and \"\u836f\u7269\" are same translations).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Set", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "(ii) Alias. The typical instance is \"potato\", both \" \u571f\u8c46\" and \"\u9a6c\u94c3\u85af\" are the correct translations. So we will treat them as same and prefer \"\u571f\u8c46\" as the final translation because it is more general used than the latter one.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Set", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "(iii) There are five distinct word pairs in the translations and are removed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Set", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "At last, 348 word pairs are used in this task. Among these 348 word pairs, 50 ones are used as the trial data and the rest ones are used as the test data 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Set", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Each word pair is assigned the similarity score by twenty Chinese native speakers. The score ranges from 0 to 5 and 0 means two words have nothing to do with each other and 5 means they are identically in semantic meaning. The higher score means the more similar between two words. Not only integer but also real is acceptable as the annotated score. We get the average of all the scores given by the annotators for each word pair and then sort them according to the similarity scores. The distribution of word pairs on the similar score is illustrated as table 1. Score 0.0-1.0 1.0-2.0 2.0-3.0 3.0-4.0 4. Table 2 and table 3 list top ten similar word pairs and top ten un-similar word pairs individually. Standard deviation (Std. dev) and relative standard deviation (RSD) are also computed. Obviously, the relative standard deviation of top ten similar word pairs is far less than the un-similar pairs. Figure 1 illustrates the relationship between the similarity score and relative standard deviation. The digits in \"x\" axes are the average similarity score of every integer interval, for an instance, 1.506 is the average of all word pairs' similarity score between 1.0 and 2.0.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 606, |
|
"end": 625, |
|
"text": "Table 2 and table 3", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 905, |
|
"end": 913, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Manual Annotation", |
|
"sec_num": "2.2" |
|
}, |
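
{

"text": "The following is a minimal sketch, not taken from the task description, of how the gold-standard ranking and the dispersion statistics above could be computed, assuming each word pair is stored with its twenty raw annotator scores:\n\nimport statistics\n\ndef gold_standard(pair_scores):\n    # pair_scores: {(word1, word2): [twenty annotator scores in the range 0..5]} (hypothetical input format)\n    rows = []\n    for pair, scores in pair_scores.items():\n        mean = statistics.mean(scores)\n        std = statistics.pstdev(scores)        # standard deviation (Std. dev)\n        rsd = std / mean if mean > 0 else 0.0  # relative standard deviation (RSD)\n        rows.append((pair, mean, std, rsd))\n    # sort by average similarity score, highest first, to obtain the gold-standard rank\n    rows.sort(key=lambda r: r[1], reverse=True)\n    return rows",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Manual Annotation",

"sec_num": "2.2"

},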
|
{ |
|
"text": "Four systems coming from two teams participated in this task. Table 3 : Top ten un-similar word pairs MIXCC: This system used two machine readable dictionary (MRD), HIT IR-Lab Tongyici Cilin (Extended) (Cilin) and the other is Chinese Concept Dictionary (CCD). The extended CiLin consists of 12 large classes, 97 medium classes, 1,400 small classes (topics), and 17,817 small synonym sets which cover 77,343 head terms. All the items are constructed as a tree with five levels. With the increasing of levels, word senses are more finegrained. The Chinese Concept Dictionary is a Chinese WordNet produced by Peking University. Word concepts are presented as synsets corre-sponding to WordNet 1.6. Besides synonym, antonym, hypernym/hyponym, holonym/meronym, there is another semantic relation type named as attribute which happens between two words with different part-of-speeches.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 62, |
|
"end": 69, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Participating Systems", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "They first divide all word pairs into five parts and rank them according to their levels in Cilin in descending order. For each part, they computed word similarity by Jiang and Conrath (1997) ", |
|
"cite_spans": [ |
|
{ |
|
"start": 167, |
|
"end": 191, |
|
"text": "Jiang and Conrath (1997)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Participating Systems", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "meth- od 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Participating Systems", |
|
"sec_num": "3" |
|
}, |
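
{

"text": "As a rough illustration of the Jiang and Conrath measure referred to above (this sketch is ours, not the participants' code; prob and lcs are hypothetical taxonomy-access helpers):\n\nimport math\n\ndef jiang_conrath_distance(c1, c2, prob, lcs):\n    # prob(c): probability of concept c; with no sense-tagged corpus for CCD,\n    # each concept frequency can simply be fixed to 1, as noted in footnote 2\n    ic = lambda c: -math.log(prob(c))                # information content\n    return ic(c1) + ic(c2) - 2.0 * ic(lcs(c1, c2))   # lcs: lowest common subsumer\n\ndef jiang_conrath_similarity(c1, c2, prob, lcs):\n    # one common way to turn the distance into a similarity score\n    return 1.0 / (1.0 + jiang_conrath_distance(c1, c2, prob, lcs))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Participating Systems",

"sec_num": "3"

},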
|
{ |
|
"text": "MIXCD: Different form MIXCC, this system used the trial data to learn a multiple linear regression functions. The CCD was considered as a directed graph. The nodes were synsets and edges were the semantic relations between two synsets. The features for this system were derived from CCD and a corpus and listed as follows: \uf06c the shortest path between two synsets which contain the words \uf06c the rates of 5 semantic relation types \uf06c mutual information of a word pair in the corpus They used the result of multiple linear regressions to forecast the similarity of other word pairs and get the rank.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Participating Systems", |
|
"sec_num": "3" |
|
}, |
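
{

"text": "A small sketch of the regression step described for MIXCD, assuming the features listed above have already been extracted for every pair (extracting them from the CCD graph and the corpus is not shown; the function names are ours):\n\nimport numpy as np\n\ndef fit_and_predict(trial_features, trial_scores, test_features):\n    # trial_features: (n_trial, n_features) array holding the shortest-path length,\n    #                 the rates of the 5 relation types, and the mutual information\n    # trial_scores:   (n_trial,) human similarity scores from the trial data\n    X = np.hstack([trial_features, np.ones((len(trial_features), 1))])  # add an intercept column\n    coef, *_ = np.linalg.lstsq(X, trial_scores, rcond=None)             # multiple linear regression\n    Xt = np.hstack([test_features, np.ones((len(test_features), 1))])\n    return Xt @ coef  # predicted similarities, used to rank the remaining word pairs",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Participating Systems",

"sec_num": "3"

},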
|
{ |
|
"text": "This system used the method proposed by (Gabrilovich and Markovitch, 2007) . They downloaded the Wikipedia on 25th November, 2011 as the knowledge source. In order to bypass the Chinese segmentation, they extract one character (uni-gram) and two sequential characters (bi-gram) as the features.", |
|
"cite_spans": [ |
|
{ |
|
"start": 40, |
|
"end": 74, |
|
"text": "(Gabrilovich and Markovitch, 2007)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "GUO-ngram:", |
|
"sec_num": null |
|
}, |
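
{

"text": "A minimal sketch of the segmentation-free character uni-gram and bi-gram feature extraction described above (only this feature step, not the full explicit semantic analysis pipeline):\n\ndef char_ngrams(text):\n    # uni-grams: every single character; bi-grams: every pair of adjacent characters\n    unigrams = list(text)\n    bigrams = [text[i:i + 2] for i in range(len(text) - 1)]\n    return unigrams + bigrams\n\n# e.g. char_ngrams('ABCD') returns ['A', 'B', 'C', 'D', 'AB', 'BC', 'CD']",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "GUO-ngram:",

"sec_num": null

},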
|
{ |
|
"text": "This system is very similar to GUO-ngram except that the features consist of words rather than n-grams. They implemented a simple index method which searches all continuous character strings appearing in a dictionary. For example, given a text string ABCDEFG in which ABC, BC, and EF appear in the dictionary. The output of the tokenization algorithm is the three words ABC, BC, EF and the two characters E and G.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "GUO-words:", |
|
"sec_num": null |
|
}, |
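
{

"text": "One possible reading of the indexing method above, sketched under the assumption that every dictionary substring of length two or more is emitted and uncovered characters are kept as single tokens (the dictionary here is a plain set standing in for the real lexicon):\n\ndef index_tokenize(text, dictionary):\n    words, covered = [], set()\n    # collect every contiguous substring (length >= 2) that appears in the dictionary\n    for i in range(len(text)):\n        for j in range(i + 2, len(text) + 1):\n            if text[i:j] in dictionary:\n                words.append(text[i:j])\n                covered.update(range(i, j))\n    # characters not covered by any dictionary match are kept as single tokens\n    leftovers = [text[i] for i in range(len(text)) if i not in covered]\n    return words + leftovers\n\n# index_tokenize('ABCDEFG', {'ABC', 'BC', 'EF'}) returns ['ABC', 'BC', 'EF', 'D', 'G']",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "GUO-words:",

"sec_num": null

},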
|
{ |
|
"text": "Each system is required to rank these 500 word pairs according to their similarity scores. Table 4 gives the overall results obtained by each of the systems. The ranks returned by these four systems will be compared with the rank from human annotation by the Kendall Rank Correlation Coefficient:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 98, |
|
"text": "Table 4", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\uf028 \uf029 \uf028 \uf029 2, 1 1 / 2 S NN \uf070\uf073 \uf074 \uf03d\uf02d \uf02d", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Where N is the number of objects. \uf070 and \uf073 are two distinct orderings of a object in two ranks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "S \uf070\uf073 is the minimum number of adjacent transpositions needing to bring \uf070 and \uf073 (Lapata, 2006) . In this metric, tau's value ranges from -1 to +1 and -1 means that the two ranks are inverse to each other and +1 means the identical rank.", |
|
"cite_spans": [ |
|
{ |
|
"start": 79, |
|
"end": 93, |
|
"text": "(Lapata, 2006)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "( , )", |
|
"sec_num": null |
|
}, |
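
{

"text": "A small sketch of the metric as defined above: Kendall's tau between the gold rank and a system rank, counting discordant pairs, which equals the minimum number of adjacent transpositions S(\u03c0, \u03c3):\n\nfrom itertools import combinations\n\ndef kendall_tau(gold_rank, system_rank):\n    # gold_rank, system_rank: lists giving each word pair's position in the two orderings\n    n = len(gold_rank)\n    s = 0  # number of discordant pairs, i.e. S(pi, sigma)\n    for i, j in combinations(range(n), 2):\n        if (gold_rank[i] - gold_rank[j]) * (system_rank[i] - system_rank[j]) < 0:\n            s += 1\n    return 1.0 - 2.0 * s / (n * (n - 1) / 2.0)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Results",

"sec_num": "4"

},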
|
{ |
|
"text": "From table 4, we can see that except the final system, three of them got the positive tau's value. It is regret that the tau's is very small even if the MIXCC system is the best one.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "( , )", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We organize an evaluation task focuses on word similarity in Chinese language. Totally 347 word pairs are annotated similarity scores by twenty native speakers. These word pairs are ordered by the similarity scores and this rank is used as benchmark data for evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Four systems participated in this task. Except the system MIXCD, three ones got their own rank only via the corpus. Kendall's tau is used as the evaluation metric. Three of them got the positive correlation rank compared with the gold standard data Generally the tau's value is very small, it indicates that obtaining a good rank is still difficult. We will provide more word pairs and distinct them relatedness from similar, and attract more teams to participate in the interesting task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In fact there are 297 word pairs are evaluated because one pair is missed during the annotation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Because there is no sense-tagged corpus for CCD, the frequency of each concept was set to 1 in this system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This research is supported by National Natural Science Foundation of China (NSFC) under Grant No. 61003206, 60703063. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Evaluating WordNet-based Measures of Lexical Semantic Relatedness", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Budanitsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Hirst", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Computational Linguistics", |
|
"volume": "32", |
|
"issue": "1", |
|
"pages": "13--47", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Budanitsky and G. Hirst. Evaluating WordNet-based Measures of Lexical Semantic Relatedness. Compu- tational Linguistics, 2006, 32(1):13-47.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Scaling Context Space. Proceedings of ACL", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Curran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Moens", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "231--238", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Curran and M. Moens. Scaling Context Space. Pro- ceedings of ACL, 2002, pp. 231-238.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Measuring Distributional Similarity in Context. Proceedings of EMNLP", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Dinu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1162--1172", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G. Dinu and M. Lapata. Measuring Distributional Simi- larity in Context. Proceedings of EMNLP, 2010, pp. 1162-1172.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Placing Search in Context: The Concept Revisited", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Finkelstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Gabrilovich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Matias", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Rivlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Solan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Wolfman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Ruppin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "ACM Transactions on Information Systems", |
|
"volume": "20", |
|
"issue": "1", |
|
"pages": "116--131", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "L. Finkelstein, E. Gabrilovich, Y. Matias, E. Rivlin, Z. Solan, G. Wolfman, and E. Ruppin. 2002. Placing Search in Context: The Concept Revisited. ACM Transactions on Information Systems, 20(1):116-131.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Integration of Hand-Crafted and Statistical Resources in Measuring Word Similarity", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Fujii", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Hasegawa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Tokunaga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Tanaka", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Proceedings of Workshop of Automatic Information Extraction and Building of Lexical Semantic Resources for NLP Applications", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "45--51", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Fujii, T. Hasegawa, T. Tokunaga and H. Tanaka. Integration of Hand-Crafted and Statistical Resources in Measuring Word Similarity. 1997. Proceedings of Workshop of Automatic Information Extraction and Building of Lexical Semantic Resources for NLP Ap- plications. pp. 45-51.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Computing Semantic Relatedness using Wikipedia-based Explicit Semantic Analysis", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Gabrilovich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Markovitch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of IJCAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1606--1611", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "E. Gabrilovich and S. Markovitch, Computing Semantic Relatedness using Wikipedia-based Explicit Seman- tic Analysis, Proceedings of IJCAI, Hyderabad, 2007, pp. 1606-1611.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Scaling Distributional Similarity to Large Corpora", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Gorman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Curran", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "361--368", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Gorman and J. Curran. Scaling Distributional Similar- ity to Large Corpora. Proceedings of ACL, 2006, pp. 361-368.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Semantic similarity based on corpus statistics and lexical taxonomy", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Conrath", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Proceedings of International Conference on Research in Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Jiang and D. Conrath. 1997. Semantic similarity based on corpus statistics and lexical taxonomy. Pro- ceedings of International Conference on Research in Computational Linguistics, Taiwan.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Automatic Evaluation of Information Ordering: Kendall's Tau. Computational Linguistics", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "471--484", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Lapata. Automatic Evaluation of Information Order- ing: Kendall's Tau. Computational Linguistics, 2006, 32(4):471-484.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Automatic Retrieval and Clustering of Similar Words", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of ACL / COLING", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "768--774", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. Lin. Automatic Retrieval and Clustering of Similar Words. Proceedings of ACL / COLING, 1998, pp. 768-774.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Measures of Distributional Similarity. Proceedings of ACL", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "25--32", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "L. Lee. Measures of Distributional Similarity. Proceed- ings of ACL, 1999, pp. 25-32.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Contextual correlates of synonymy", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Rubenstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Goodenough", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1965, |
|
"venue": "Communications of the ACM", |
|
"volume": "8", |
|
"issue": "10", |
|
"pages": "627--633", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H. Rubenstein and J.B. Goodenough. 1965. Contextual correlates of synonymy. Communications of the ACM, 8(10):627-633.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "The relationship between RSD and similar score", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>", |
|
"num": null, |
|
"text": "" |
|
}, |
|
"TABREF4": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>", |
|
"num": null, |
|
"text": "The results of four systmes" |
|
} |
|
} |
|
} |
|
} |