|
{ |
|
"paper_id": "E03-1013", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T10:25:15.296281Z" |
|
}, |
|
"title": "Effect of Utilizing Terminology on Extraction of Protein-Protein Interaction Information from Biomedical Literature", |
|
"authors": [ |
|
{ |
|
"first": "Junko", |
|
"middle": [], |
|
"last": "Hosaka", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Judice", |
|
"middle": [ |
|
"L Y" |
|
], |
|
"last": "Koh", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Akihiko", |
|
"middle": [], |
|
"last": "Konagaya", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "As the amount of on-line scientific literature in the biomedical domain increases, automatic processing has become a promising approach for accelerating research. We are applying syntactic parsing trained on the general domain to identify proteinprotein interactions. One of the main difficulties obstructing the use of language processing is the prevalence of specialized terminology. Accordingly, we have created a specialized dictionary by compiling on-line glossaries, and have applied it for information extraction. We conducted preliminary experiments on one hundred sentences, and compared the extraction performance when (a) using only a general dictionary and (b) using this plus our specialized dictionary. Contrary to our expectation, using only the general dictionary resulted in better performance (recall 93.0%, precision 91.0%) than with the terminology-based approach (recall 92.9%, precision 89.6%).", |
|
"pdf_parse": { |
|
"paper_id": "E03-1013", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "As the amount of on-line scientific literature in the biomedical domain increases, automatic processing has become a promising approach for accelerating research. We are applying syntactic parsing trained on the general domain to identify proteinprotein interactions. One of the main difficulties obstructing the use of language processing is the prevalence of specialized terminology. Accordingly, we have created a specialized dictionary by compiling on-line glossaries, and have applied it for information extraction. We conducted preliminary experiments on one hundred sentences, and compared the extraction performance when (a) using only a general dictionary and (b) using this plus our specialized dictionary. Contrary to our expectation, using only the general dictionary resulted in better performance (recall 93.0%, precision 91.0%) than with the terminology-based approach (recall 92.9%, precision 89.6%).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "With the increasing amount of on-line literature in the biomedical domain, research can be greatly accelerated by extracting information automatically from text resources. Approaches to automatic extraction have used co-occurrence (Jenssen, 2001) , full parsing (Yakushiji, 2001) , manually built templates (Blaschke, 2001) , and a natural language system developed for a neighboring domain, with modifications e.g. regarding semantic categories (Friedman, 2001) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 231, |
|
"end": 246, |
|
"text": "(Jenssen, 2001)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 262, |
|
"end": 279, |
|
"text": "(Yakushiji, 2001)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 307, |
|
"end": 323, |
|
"text": "(Blaschke, 2001)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 446, |
|
"end": 462, |
|
"text": "(Friedman, 2001)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In order to extract information such as proteinprotein interactions from scientific text, it is insufficient to check only co-occurrences. Constructing a satisfactory set of rules for full parser is quite complex and the processing requires a tremendous amount of calculation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "One of the main difficulties in using language processing in the biomedical domain is the prevalence of specialized terminology, including protein names. It is impossible to obtain a complete list of protein names in the current rapidly developing circumstances: notations vary, and new names are steadily coined. To bypass these problems, we start with words expressing interactions, and then seek the elements which are actually interacting, based on the syntactic structure. These elements may be the proteins which interest us. We are using the Apple Pie Parser ver.5.9 1 , a syntactic parser trained on the Penn Tree Bank (PTB) (recall 77.45%, precision 75.58%).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We restricted test sentences to syntactically wellformed ones, so that we could examine the adequacy of our syntactically-based extraction rules. We assumed that a general-purpose dictionary (GPD) obtained from the PTB would be insufficient for handling biomedical literature. Therefore, we combined on-line glossaries to construct our own terminology dictionary, which we call the Medical Library Dictionary (MLD).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Preparation", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "MeSH represents unique terms, and includes synonyms as well as chemical names: We received from a biologist a list of words denoting interactions and 1000 abstracts retrieved from Medline using the PubMed 2 . These abstracts are related to Interleukin-6, a secreted protein whose main function is to mediate inflammatory response in the body. Medline is the bibliographic database of the National Library of Medicine (NLM) in the United States. PubMed is an NLM service which provides access to Medline and additional life science journals.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Test Sentences", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Out of the word list, we focused on \"activate\", as this can effectively express the interaction of two elements. We first ran the syntactic parser on the sentences containing the string \"activat* 3 \", then picked only sentences that contain the verbal \"activat*\". There were approximately 1000 such sentences. Second, we consulted the sentences annotated by two professional annotators. They marked phrases containing verbal \"activat*\" and the corresponding agents and recipients. They also evaluated the parsing results related to the phrases. We then selected 100 sentences randomly from the sentences to which both annotators gave the same marking and same evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Test Sentences", |
|
"sec_num": "2.1" |
|
}, |
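The selection step above keeps only sentences in which "activat*" occurs as a verb. Below is a minimal Python sketch of such a filter over POS-tagged sentences; the (word, tag) pair representation and the example tags are illustrative assumptions, not the Apple Pie Parser's actual output format.

```python
import re

# Keep a sentence only if some "activat*" token is tagged as a verb.
ACTIVAT = re.compile(r"activat\w*", re.IGNORECASE)
VERB_TAGS = {"VB", "VBD", "VBG", "VBN", "VBP", "VBZ"}  # PTB verb tags

def has_verbal_activat(tagged_sentence):
    return any(ACTIVAT.fullmatch(word) and tag in VERB_TAGS
               for word, tag in tagged_sentence)

# Illustrative tagged sentences (tags assumed, not parser output).
s1 = [("Cdk4", "NNP"), ("was", "VBD"), ("activated", "VBN"), (".", ".")]
s2 = [("the", "DT"), ("activated", "JJ"), ("receptor", "NN"), (".", ".")]
print(has_verbal_activat(s1), has_verbal_activat(s2))  # True False
```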
|
{ |
|
"text": "To determine the reliability of the annotators' judgment and the difficulty of the task, we calculated the KAPPA coefficient of their responses, and found it to be 0.54 (Hosaka and Umetsu, 2002) . This degree of agreement can be interpreted as \"moderate\" (Carletta, 1997) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 169, |
|
"end": 194, |
|
"text": "(Hosaka and Umetsu, 2002)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 255, |
|
"end": 271, |
|
"text": "(Carletta, 1997)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Test Sentences", |
|
"sec_num": "2.1" |
|
}, |
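The agreement figure reported above is a kappa coefficient. A minimal sketch of Cohen's kappa for two annotators follows; the binary labels are illustrative only, not the paper's annotation data.

```python
from collections import Counter

def cohens_kappa(labels_a, labels_b):
    """Cohen's kappa: chance-corrected agreement between two annotators."""
    assert len(labels_a) == len(labels_b)
    n = len(labels_a)
    p_observed = sum(a == b for a, b in zip(labels_a, labels_b)) / n
    freq_a, freq_b = Counter(labels_a), Counter(labels_b)
    p_expected = sum(freq_a[c] * freq_b[c] for c in freq_a) / (n * n)
    return (p_observed - p_expected) / (1 - p_expected)

# Toy labels (1 = phrase marked, 0 = not marked); not the paper's data.
annotator_1 = [1, 1, 0, 1, 0, 1, 0, 0, 1, 1]
annotator_2 = [1, 0, 0, 1, 0, 1, 1, 0, 1, 0]
print(round(cohens_kappa(annotator_1, annotator_2), 2))  # 0.4 on this toy data
```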
|
{ |
|
"text": "We assumed that biological, chemical, and medical terminology is used in our domain. Therefore, the MLD was compiled from four glossaries in these areas: Biochemical Glossary 4 (BG), Cancernet Dictionary5 (CD), Medical Chemistry Dictionary 6 (MCD) and Life Science Dictionary (LSD). In addition to the MLD, we used the Medical Subject Headings (MeSH 8 ). MeSH is a controlled vocabulary created by the NLM. We used the C chapter (Diseases). The dictionary size is given in The MLD contained 32,698 unique terms and the GPD 88,707 words. We then removed MLD terms which already were listed in the GPD. This removal resulted in a reduced MLD consisting of 25,772 terms (uniMLD). In addition, there were 401 duplicated terms found in both the MeSH and the MLD. In this case, we retained the words in the MLD, so that the number of MeSH terms decreased to 300,263 (uniMeSH). For the experiment, we used the combination of uniMLD and uniMeSH (MLD-M). When we used both GPD and MLD-M, we called this combination MLD+. Among the four glossaries, only the LSD had part of speech (POS), since it was a bilingual resource. The MeSH had only nouns. In the other three glossaries, the POS has not been defined.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Medical Library Dictionary", |
|
"sec_num": "2.2" |
|
}, |
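The dictionary combination described above (uniMLD = MLD minus GPD, uniMeSH = MeSH minus the overlap with the MLD, MLD-M = uniMLD plus uniMeSH, MLD+ = GPD plus MLD-M) amounts to a few set operations. A minimal sketch with tiny in-memory term sets; the terms and the lowercasing normalization are assumptions for illustration, since the paper does not specify them.

```python
def normalize(term):
    # Normalization is an assumption; the paper does not specify it.
    return term.strip().lower()

gpd  = {normalize(t) for t in ["cell", "activate", "response", "adhesion"]}
mld  = {normalize(t) for t in ["Interleukin-6", "tyrosine phosphorylation", "cell"]}
mesh = {normalize(t) for t in ["Interleukin-6", "neoplasms", "inflammation"]}

uni_mld  = mld - gpd           # MLD terms not already in the GPD
uni_mesh = mesh - uni_mld      # duplicated terms are kept on the MLD side
mld_m    = uni_mld | uni_mesh  # terminology dictionary used for the experiment
mld_plus = gpd | mld_m         # GPD combined with MLD-M

print(sorted(uni_mld))   # ['interleukin-6', 'tyrosine phosphorylation']
print(sorted(uni_mesh))  # ['inflammation', 'neoplasms']
print(len(mld_plus))
```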
|
{ |
|
"text": "Our parser included out-of-vocabulary handling We supposed, however, that appropriate POS would raise the performance Therefore, we assigned POS to these entries semi-automatically.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Medical Library Dictionary", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "We manually defined extraction rules for active and passive sentences. We converted the parsing output into XML format, and then applied the rules. The following example illustrates the procedure. The parser can print the parsing results in several ways, with or without POS. Our extraction rules do specify POS; however, for simplicity, we suppress them in the example below.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extraction Rules", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We find that ACK-2 can be activated by cell adhesion Cdc42-dependent manner.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input sentence:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We measured our system's recall and precision rates shown in Table 4 \u2022 Find a VP \"activat*\" as a starting word.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 68, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Input sentence:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Extract the highest VP containing \"activat*\" up to the point where a PP headed by \"by\" is encountered. 4 \"can be activated\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input sentence:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Find the nearest NP/NPL to the left of the \"activat*\" phrase.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input sentence:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Extract the highest NP/NPL. 4 \"ACK-2\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input sentence:", |
|
"sec_num": null |
|
}, |
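The passive-sentence rule walked through above can be sketched as a traversal over a parse tree. The snippet below uses a hand-built nested-tuple tree for the example sentence rather than the parser's real XML output, so the bracketing and helper names are illustrative assumptions only.

```python
# Simplified parse of "ACK-2 can be activated by cell adhesion ..."
# (not actual Apple Pie Parser output).
tree = ("S",
        ("NPL", ("NNPX", "ACK-2")),
        ("VP", ("MD", "can"),
               ("VP", ("VB", "be"),
                      ("VP", ("VBN", "activated"),
                             ("PP", ("IN", "by"),
                                    ("NP", ("NN", "cell"), ("NN", "adhesion")))))))

def label(node):
    return node[0]

def children(node):
    return [c for c in node[1:] if isinstance(c, tuple)]

def words(node):
    if isinstance(node, str):
        return [node]
    return [w for c in node[1:] for w in words(c)]

def has_activat(node):
    return any(w.lower().startswith("activat") for w in words(node))

def interaction_phrase(node):
    """Highest VP containing 'activat*', cut off before a PP headed by 'by'."""
    if isinstance(node, str):
        return None
    if label(node) == "VP" and has_activat(node):
        ws = words(node)
        return ws[:ws.index("by")] if "by" in ws else ws
    for child in children(node):
        found = interaction_phrase(child)
        if found:
            return found
    return None

def recipient_phrase(node):
    """Nearest NP/NPL to the left of the VP that carries 'activat*'."""
    if isinstance(node, str):
        return None
    nearest = None
    for child in children(node):
        if label(child) in ("NP", "NPL"):
            nearest = child
        if label(child) == "VP" and has_activat(child):
            return words(nearest) if nearest is not None else None
        deeper = recipient_phrase(child)
        if deeper:
            return deeper
    return None

print(" ".join(interaction_phrase(tree)))  # can be activated
print(" ".join(recipient_phrase(tree)))    # ACK-2
```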
|
{ |
|
"text": "We applied our extraction rules to two sets consisting of the parsing outputs from 100 sentences: parsing with the GPD and with the MLD+. To measure the extraction performance, we prepared a gold standard: a biologist marked phrases containing verbal \"activat*\" and its corresponding interacting entities. We regarded system extractions as correct if they contained the marked phrases.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preliminary Evaluation", |
|
"sec_num": "4" |
|
}, |
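Scoring under the containment criterion above (an extraction counts as correct if it contains the gold-marked phrase) can be sketched as follows; the per-sentence phrase lists and the toy data are assumptions, not the paper's gold standard.

```python
def evaluate(gold, system):
    """gold, system: parallel lists with one list of phrases per sentence."""
    n_gold = sum(len(g) for g in gold)
    n_sys = sum(len(s) for s in system)
    # Recall: gold phrases contained in at least one system extraction.
    covered = sum(
        sum(any(g_phrase in s_phrase for s_phrase in s) for g_phrase in g)
        for g, s in zip(gold, system)
    )
    # Precision: system extractions that contain at least one gold phrase.
    correct = sum(
        sum(any(g_phrase in s_phrase for g_phrase in g) for s_phrase in s)
        for g, s in zip(gold, system)
    )
    recall = covered / n_gold if n_gold else 0.0
    precision = correct / n_sys if n_sys else 0.0
    return recall, precision

# Toy example, not the paper's 100 test sentences.
gold = [["ACK-2", "cell adhesion"], ["Cdk4"]]
system = [["ACK-2", "adhesion manner"], ["both Cdk4 and Cdk6"]]
print(evaluate(gold, system))  # (0.666..., 0.666...)
```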
|
{ |
|
"text": "The matrix shown in Table 3 Table 4 . Extraction performance", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 20, |
|
"end": 27, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 28, |
|
"end": 35, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Preliminary Evaluation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We found that it is most difficult to extract an Agent. For this task only, use of our MLD+ improved the system's performance. For other phrases, however, the system performed slightly better when the GDP alone was used.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preliminary Evaluation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Our 100 sentences contained about 2,500 words. From the MLD-M, 236 terms (uniMeSH 48, un-iMLD 188) were identified. That is, specialized terms contributed about 9 percent of all words. If we consider that the uniMLD is about one-third the size of the GPD, as shown in Table 2 , the actual hit rate for terms turned out to be rather low. As shown in Table 4 , use of a terminology dictionary does not always raise the extraction performance. We analyzed sentences from which the information was correctly extracted when only the GPD was used but erroneously extracted when the MLD+ was used. There were six sentences with nine such cases. We found the following three reasons for negative effects: 1. A POS was incorrectly assigned for the context (three cases) 2. A term was correctly identified, but a multi-word building failed (two cases) 3. A POS was correctly assigned, but a phrase building failed (four cases) Some examples follow. In these, the categories were taken from the PTB \" . On the left is the parsing result with the GPD only, and on the right is that with the MLD+:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 268, |
|
"end": 275, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 349, |
|
"end": 356, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Effect of Specialized Terminology", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "NFL is a specific category for the parser, representing the lowest NP. 19 SS is a specific category for the parser, representing an S which is not the top S.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Effect of Specialized Terminology", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "NNPX is a specific category of the Apple Pie Parser, representing NNP or NNPS. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Effect of Specialized Terminology", |
|
"sec_num": "5" |
|
}, |
|
|
{ |
|
"text": "With GPD In the presence of Tax, both Cdk4 and Cdk6 were activated. With GPD With MLD+ Figure 1 . Failure in POS assignment", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 95, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Effect of Specialized Terminology", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In, the string \"activated\", which should be a verb, was assigned falsely as an adjective. In the LSD, \"activated\" is listed as both POS. This suggests that \"activated\" is more often used as an adjective in this context in the general domain.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Effect of Specialized Terminology", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We recently found that ... PI3K was activated in vitro by direct tyrosine phosphorylation. With GPD With MLD+ Figure 2 . Failure in multi-word building InFigure 2, the POS of \"tyrosine\" was correctly assigned. However, the system failed to build a multi-word-term with \"phosphorylation\".", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 110, |
|
"end": 118, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Effect of Specialized Terminology", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Further, the appearance of ... suggested that CNF1 activated the Cdc42... .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Effect of Specialized Terminology", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In Figure 3 , \"CNF 1 \" got the right POS. However, the preceding \"that\" is falsely assigned as a determiner. Nouns may often be used with determiners in the general domain.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 11, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Figure 3. Failure in phrase construction", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this experiment, information extraction with a general dictionary resulted in slightly better performance than that with specialized dictionary. Even if a POS is correctly assigned, parsing can fail if the parser is trained on a different domain. To retrain a parser, an annotated corpus is needed, though a construction of such a corpus will be time consuming In the meantime, we believe the best way is to represent domainspecific structures manually through rules. We observed cases where a term was correctly recognized but the system failed to identify a multiword-term. To cope with this problem, we will further integrate terminology dictionaries, such as the Unified Medical Language System 12 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We conducted this experiment with a small set of syntactically well-formed sentences. To examine the validity of the result, we are planning further tests with more sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "http://www.cs.nyu.eduks/projects/proteus/app/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://www.nlm.nih.goviresearch/unils/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thank Dr. I. Kurochkin for his biomedical advice and Dr. M. Seligman for reading the draft.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgement", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "The potential use of SUISEKI as a protein interaction discovery tool", |
|
"authors": [ |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Blaschke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alfonso", |
|
"middle": [], |
|
"last": "Valencia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Genome Informatics", |
|
"volume": "12", |
|
"issue": "", |
|
"pages": "123--134", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Blaschke, Christian and Valencia, Alfonso. 2001. The potential use of SUISEKI as a protein interaction discovery tool. Genome Informatics, 12: 123-134.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "The reliability of a Dialogue Structure Coding Scheme", |
|
"authors": [ |
|
{ |
|
"first": "Jean", |
|
"middle": [], |
|
"last": "Carletta", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Computational Linguistics", |
|
"volume": "23", |
|
"issue": "1", |
|
"pages": "13--31", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Carletta, Jean, et al. 1997. The reliability of a Dia- logue Structure Coding Scheme. Computational Linguistics, 23(1): 13-31.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "GENIES: a naturallanguage processing system for the extraction of molecular pathways from journal articles", |
|
"authors": [ |
|
{ |
|
"first": "Carol", |
|
"middle": [], |
|
"last": "Friedman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proc. of ISMB", |
|
"volume": "17", |
|
"issue": "", |
|
"pages": "74--82", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Friedman, Carol, et.al . 2001. GENIES: a natural- language processing system for the extraction of molecular pathways from journal articles. Proc. of ISMB, 17(Supp1.1): S74-S82.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Toward the extraction of protein-protein interaction information from immunology literature", |
|
"authors": [ |
|
{ |
|
"first": "Junko", |
|
"middle": [], |
|
"last": "Hosaka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryo", |
|
"middle": [], |
|
"last": "Umetsu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proc. of IPSJ-SIG-NL", |
|
"volume": "150", |
|
"issue": "", |
|
"pages": "15--20", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hosaka, Junko and Umetsu, Ryo. 2002. Toward the extraction of protein-protein interaction informa- tion from immunology literature. Proc. of IPSJ- SIG-NL,150: 15-20.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "A literature network of human genes for high-throughput analysis of gene expression", |
|
"authors": [ |
|
{ |
|
"first": "Tor-Kristian", |
|
"middle": [], |
|
"last": "Jenssen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Nature Genetics", |
|
"volume": "28", |
|
"issue": "", |
|
"pages": "21--28", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jenssen, Tor-Kristian, et al. 2001. A literature net- work of human genes for high-throughput analysis of gene expression. Nature Genetics, 28: 21-28.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Event extraction from biomedical papers using a full parser", |
|
"authors": [ |
|
{ |
|
"first": "Akane", |
|
"middle": [], |
|
"last": "Yakushiji", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proc. of PSB", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "408--419", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yakushiji, Akane, et al. 2001. Event extraction from biomedical papers using a full parser. Proc. of PSB, 6: 408-419.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF0": { |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"3\">Dictionary Source glossary Number of terms</td></tr><tr><td>MLD</td><td>BG</td><td>723</td></tr><tr><td/><td>CD</td><td>2,414</td></tr><tr><td/><td>MCD</td><td>122</td></tr><tr><td/><td>LSD</td><td>32,405</td></tr><tr><td>MeSH</td><td>MeSH</td><td>300,263</td></tr><tr><td colspan=\"3\">Table 1. Size of terminology dictionaries</td></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"text": "The number of terms for 2 httn://www.ncbi.nlm.nih.eoy/entrez/uticry.fcei 3 \"*\" indicates any string. ://www.fhsu.cduichcmistry/twicsag1ossary/biochcmelossary.htm 5 http://www.caneer.eoy/dictionary/ 6 tiltp://www.chem.qmw.ac.uldiupacimedchem/ 7 http://isd.eharmskyoto-u.ac.ip/index.html 8 http://www.nlm.nih.eoy/mestilmeshhome.htnal" |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"3\">summarizes the dictionary sizes:</td></tr><tr><td>Dictionary</td><td/><td>Number of terms</td></tr><tr><td>GPD</td><td/><td>88,707</td></tr><tr><td>MLD+ MLD-M</td><td>uniMLD uniMeSH</td><td>25,772 119,599</td></tr><tr><td colspan=\"3\">Table 2. Size of dictionaries used for experiment</td></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"text": "" |
|
} |
|
} |
|
} |
|
} |