|
{ |
|
"paper_id": "2022", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T16:22:08.659149Z" |
|
}, |
|
"title": "Unsupervised Knowledge Graph Generation Using Semantic Similarity Matching", |
|
"authors": [ |
|
{ |
|
"first": "Lixian", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "York University", |
|
"location": { |
|
"country": "Canada" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Amin", |
|
"middle": [], |
|
"last": "Omidvar", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "York University", |
|
"location": { |
|
"country": "Canada" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Zongyang", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "York University", |
|
"location": { |
|
"country": "Canada" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ameeta", |
|
"middle": [], |
|
"last": "Agrawal", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Portland State University", |
|
"location": { |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Aijun", |
|
"middle": [], |
|
"last": "An", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "York University", |
|
"location": { |
|
"country": "Canada" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Knowledge Graphs (KGs) are directed labeled graphs representing entities and the relationships between them. Most prior work focuses on supervised or semi-supervised approaches which require large amounts of annotated data. While unsupervised approaches do not need labeled training data, most existing methods either generate too many redundant relations or require manual mapping of the extracted relations to a known schema. To address these limitations, we propose an unsupervised method for KG generation that requires neither labeled data nor manual mapping to the predefined relation schema. Instead, our method leverages sentence-level semantic similarity for automatically generating relations between pairs of entities. Our proposed method outperforms two baseline systems when evaluated over four datasets.", |
|
"pdf_parse": { |
|
"paper_id": "2022", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Knowledge Graphs (KGs) are directed labeled graphs representing entities and the relationships between them. Most prior work focuses on supervised or semi-supervised approaches which require large amounts of annotated data. While unsupervised approaches do not need labeled training data, most existing methods either generate too many redundant relations or require manual mapping of the extracted relations to a known schema. To address these limitations, we propose an unsupervised method for KG generation that requires neither labeled data nor manual mapping to the predefined relation schema. Instead, our method leverages sentence-level semantic similarity for automatically generating relations between pairs of entities. Our proposed method outperforms two baseline systems when evaluated over four datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "A knowledge graph (KG) is a directed labeled graph in which nodes represent entities and edges are labeled by well-defined relationships between entities. Formally, given a set E of entities and a set R of relations, a knowledge graph is a set T of triples, where T \u2286 E \u00d7 R \u00d7 E. A triple t \u2208 T can be expressed as (e h , r, e t ), where e h \u2208 E, r \u2208 R, e t \u2208 E, and e h and e t are referred to as the head entity and the tail entity, respectively. As a structured representation of world knowledge, knowledge graphs have been used in a number of applications such as Web search (Singhal, 2012; Wang et al., 2019a) , question answering (Huang et al., 2019) and recommender systems (Wang et al., 2019b) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 578, |
|
"end": 593, |
|
"text": "(Singhal, 2012;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 594, |
|
"end": 613, |
|
"text": "Wang et al., 2019a)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 635, |
|
"end": 655, |
|
"text": "(Huang et al., 2019)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 680, |
|
"end": 700, |
|
"text": "(Wang et al., 2019b)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
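The formal definition above maps directly onto a simple data structure. A minimal illustrative sketch (ours, not from the paper) representing a knowledge graph as a set of (head, relation, tail) triples:

```python
from typing import NamedTuple, Set

class Triple(NamedTuple):
    head: str      # e_h, the head entity
    relation: str  # r, a relation from the schema R
    tail: str      # e_t, the tail entity

# A knowledge graph is a set T of triples, T ⊆ E × R × E.
kg: Set[Triple] = set()
kg.add(Triple("Barack Obama", "born in", "Hawaii"))
```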
|
{ |
|
"text": "Knowledge graphs can be constructed automatically from text. Most of the automatic KG generation methods are supervised or semi-supervised, where a large set of labeled data is required to train a KG generation model (e.g., PCNN (Zeng et al., 2015) , OLLIE (Schmitz et al., 2012) Figure 1 : A KG generated using Stanford OpenIE (left) and our method (right) for the input sentence \"Barack Obama was born in Hawaii\". (Fader et al., 2011) ). However, creating labeled data is labor-intensive and the generated graph is limited to the specific domain of the training corpus. In addition, supervised methods can only extract a predefined set of relations occurring in the training data and the model needs to be re-trained to work with other new relation schemas.", |
|
"cite_spans": [ |
|
{ |
|
"start": 229, |
|
"end": 248, |
|
"text": "(Zeng et al., 2015)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 257, |
|
"end": 279, |
|
"text": "(Schmitz et al., 2012)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 416, |
|
"end": 436, |
|
"text": "(Fader et al., 2011)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 280, |
|
"end": 288, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Unsupervised KG models (e.g., Stanford Ope-nIE (Angeli et al., 2015) ), on the other hand, do not need labeled training corpus. They often use syntactic parsing and a set of rules to extract relationships between two entities in a sentence. Although not normally confined to a predefined set of relations, too many unuseful or inaccurate relations can be generated. In Figure 1 , the left graph presents an example KG using triples generated with Stanford OpenIE (Angeli et al., 2015) , while the right graph presents the KG generated using our proposed method, both using the same single input sentence. In addition, in case only relations in a predefined set need to be generated, the unsupervised methods do not normally provide a mechanism to map the extracted relation to a known one in the set of relations", |
|
"cite_spans": [ |
|
{ |
|
"start": 47, |
|
"end": 68, |
|
"text": "(Angeli et al., 2015)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 463, |
|
"end": 484, |
|
"text": "(Angeli et al., 2015)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 369, |
|
"end": 377, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In a project to build knowledge graphs from news articles where no labeled data are given, we propose an unsupervised knowledge graph generation method using semantic similarity (KGSS) that does not need a labeled set of training data nor a complicated set of syntactic rules for KG generation. The method can work with any set of relations that a user prefers, and uses semantic similarity matching to automatically identify the relation between two entities. A salient feature of our method is the use of a pretrained language model (Reimers and Gurevych, 2019) to compute and measure the similarity between the sentence embedding and the embedding of candidate triples formed by the two entities and a candidate relation. The best matching candidate relation is identified as the relation between the two entities.", |
|
"cite_spans": [ |
|
{ |
|
"start": 535, |
|
"end": 563, |
|
"text": "(Reimers and Gurevych, 2019)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Since most supervised models underperform in low-resource settings where no or very limited labeled data are provided, our proposed unsupervised approach can extract useful relations from unlabeled data and can also be used to create a labeled data set for distant supervised learning, which can potentially lead to better results. In this paper, we focus on describing and evaluating the unsupervised method.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The contributions of this paper are as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We propose a novel unsupervised KG generation system that requires no labeled data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Our method is flexible and can work with any set of relations. The results of the empirical evaluation (automatic as well as human) demonstrate that our system significantly outperforms two state-of-the-art unsupervised methods for KG generation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 To facilitate research in KG construction or information extraction from news articles, we develop a new dataset called NewsKG21 1 that was created from recent news articles.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Research on KG construction falls under supervised, semi-supervised, or unsupervised categories. For the supervised methods, we name two of them. Bastos et al. (2021) propose the RECON model to extract relations from a sentence and align them to the KG, using a graph neural network for obtaining the sentence representations. Then a neural classifier is adopted to predict the relation of each entity pair in the sentence. Another supervised learning method for KG construction is SpERT (Eberts and Ulges, 2020) , which is a span-based deep learning model with the attention mechanism, targeting to extract entities and relations jointly. Semi-supervised approaches such as ReVerb (Fader et al., 2011) , OLLIE (Schmitz et al., 2012) , and Stanford OpenIE (Angeli et al., 2015) , to name a few, leverage linguistic features (e.g., dependency trees and POS tags) with many human-defined patterns and existing knowledge bases (e.g., Wikidata (Vrande\u010di\u0107 and Kr\u00f6tzsch, 2014) , DBpedia (Auer et al., 2007) ) to extract triples. These systems have a supervision component. For example, Stanford OpenIE uses distant supervision to create a noisy corpus of sentences annotated with relation mentions and train a logistic regression classifier to decide which action to perform on an edge on the parse tree when extracting relations. However, these systems miss many potential triples in a sentence since they use verbs as a signal to identify triples, whereas many relational triples may not be connected with a verb. They also tend to generate redundant triples and require manual mapping of the extracted relations to a fixed relation schema. The earliest unsupervised approaches (i.e., heuristics approaches) (Suchanek et al., 2007; Auer et al., 2007; Bollacker et al., 2008) were applied to Wikipedia data, building the pioneering Knowledge Graphs (e.g., YAGO, DBpedia, Freebase). However, these approaches leverage additional Figure 3 : A demo of our system. (1) An input box for users to enter text. (2) A button for users to select their preferred relation schema; if nothing is imported, a default relation schema is used. (3) Users can select the type of entities to be extracted; if nothing is selected, both Named Entity and Noun will be extracted. (4) A submit button. (5) An interactive KG will be generated and visualized where the users can drag the nodes around to modify the presentation of the graph as desired.", |
|
"cite_spans": [ |
|
{ |
|
"start": 146, |
|
"end": 166, |
|
"text": "Bastos et al. (2021)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 488, |
|
"end": 512, |
|
"text": "(Eberts and Ulges, 2020)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 682, |
|
"end": 702, |
|
"text": "(Fader et al., 2011)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 711, |
|
"end": 733, |
|
"text": "(Schmitz et al., 2012)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 756, |
|
"end": 777, |
|
"text": "(Angeli et al., 2015)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 940, |
|
"end": 970, |
|
"text": "(Vrande\u010di\u0107 and Kr\u00f6tzsch, 2014)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 981, |
|
"end": 1000, |
|
"text": "(Auer et al., 2007)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 1704, |
|
"end": 1727, |
|
"text": "(Suchanek et al., 2007;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1728, |
|
"end": 1746, |
|
"text": "Auer et al., 2007;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 1747, |
|
"end": 1770, |
|
"text": "Bollacker et al., 2008)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1923, |
|
"end": 1931, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "knowledge to construct the graph, for example, the Wikipedia hierarchical categories in (Suchanek et al., 2007) . Another drawback of these approaches is that they are slow and costly to build the KG. The resultant KGs are also restricted to a specific domain of corpus. MAMA , an unsupervised KG construction model, uses the attention weight matrices of a pre-trained language model (e.g., BERT (Devlin et al., 2018)) to extract the candidate triples. For mapping the extracted relations to a fixed schema, they follow the method of Stanford OpenIE (Angeli et al., 2015) requiring some manual annotations. Goswami et al. (2020) propose the RE-Flex framework for unsupervised relation extraction, where given a set of relations, each of them is rewritten as a cloze template (e.g., the cloze template of DraftBy is X was created by Y, where X and Y denote subject and object respectively.). Then the cloze template is semantically matched with the context (e.g., \"Bill Gates founded Microsoft\") to determine if the context has the relation or not. Another simliar work is proposed in (Tran et al., 2020) where the importance of the feature ENTITY TYPE for relation extraction is emphasized in their model called EType+. However, the feed-forward neural network classifier which is incorporated in their EType+ model makes their method not entirely unsupervised.", |
|
"cite_spans": [ |
|
{ |
|
"start": 88, |
|
"end": 111, |
|
"text": "(Suchanek et al., 2007)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 396, |
|
"end": 418, |
|
"text": "(Devlin et al., 2018))", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 550, |
|
"end": 571, |
|
"text": "(Angeli et al., 2015)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 607, |
|
"end": 628, |
|
"text": "Goswami et al. (2020)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 1084, |
|
"end": 1103, |
|
"text": "(Tran et al., 2020)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Given a document, our system generates a knowledge graph from the document. Figure 2 illustrates an overview of our system, KGSS, which consists of four modules: entity extraction, entity tuple formation and filtering, relation extraction, and KG storage and visualization, and Figure 3 illustrates the user interface of our system and visualizes a KG generated given an input paragraph based on a relation schema in TACRED* with 6 additional relations: loc:province_of, loc:country_of, loc:city_of, org:is_part_of, per:position_held and per:friend. Since our proposed system is unsupervised, it can flexibly work with any user-specified relation schema.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 84, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 278, |
|
"end": 286, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Proposed Model: KGSS", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The first step in our system is co-reference resolution, which identifies and replaces different expressions of the same real-world entity with the same expression. We use an end-to-end neural coreference resolution model (Lee et al., 2017) from AllenNLP (Gardner et al., 2018) for this task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 222, |
|
"end": 240, |
|
"text": "(Lee et al., 2017)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 255, |
|
"end": 277, |
|
"text": "(Gardner et al., 2018)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Entity Extraction", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "In the second step, our system extracts all entities. We allow the user to specify in the user interface whether they would like to extract only named entities or also include other noun phrases. A named entity (NE) refers to a real-world object associated with a name, for example -a person, an organization, or a location (e.g., Barack Obama, Apple Inc., New York City). We use a transitionbased algorithm (Lample et al., 2016) from the spaCy 2 library to detect all the NEs in a given sentence. There are 18 categories of NEs, such as PER (for person), ORG (for organization), and LOC (for location) in the spaCy en_core_web_lg pipeline for the NER task. We keep the NEs in all categories. In addition, if noun phrases are to be included, we extract all noun phrases (also called noun chunks) as candidate entities.", |
|
"cite_spans": [ |
|
{ |
|
"start": 408, |
|
"end": 429, |
|
"text": "(Lample et al., 2016)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Entity Extraction", |
|
"sec_num": "3.1" |
|
}, |
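A minimal sketch of this entity extraction step using spaCy, assuming the en_core_web_lg pipeline has been downloaded; note that spaCy's actual label names are PERSON, ORG, GPE/LOC, etc. The include_noun_chunks flag mirrors the user option described above.

```python
import spacy

nlp = spacy.load("en_core_web_lg")  # assumes the model has been downloaded

def extract_entities(sentence: str, include_noun_chunks: bool = False):
    """Return candidate entities as (text, type) pairs: named entities,
    optionally plus noun chunks labeled NOUN."""
    doc = nlp(sentence)
    entities = [(ent.text, ent.label_) for ent in doc.ents]
    if include_noun_chunks:
        entities += [(chunk.text, "NOUN") for chunk in doc.noun_chunks]
    return entities

print(extract_entities("Barack Obama was born in Honolulu and graduated from Columbia University."))
```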
|
{ |
|
"text": "After extracting entities, we form a set of entity tuples for each sentence as follows. For each sentence s in the input document, let E = (e 1 , e 2 , ..., e k ) be the list of identified entities in s, where e i occurs before e j in s when i < j. The set T of entity tuples for s contains all pairs \u27e8e i , e j \u27e9 such that e i occurs before e j in s, that is, T = {\u27e8e i , e j \u27e9|i < j}. We refer to this tuple formation rule as TF1. Thus, for a sentence containing k extracted entities, there are k(k\u22121) 2 entity tuples in its T . As an example, consider the sentence \"Barack Obama was born in Honolulu and graduated from Columbia University.\". The list of extracted entities is Barack Obama, Honolulu, Columbia University, and the set of entity tuples is \u27e8Barack Obama, Honolulu\u27e9, \u27e8Barack Obama, Columbia University\u27e9, and \u27e8Honolulu, Columbia University\u27e9.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Entity Tuple Formation and Filtering", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "However, not all entity tuples lead to generation of good relations between the two entities. Thus, we use some heuristic rules to filter out unpromising tuples. Recall that NEs have categories. We use N E P ER to denote an NE in the person category, N E ORG an organization NE, and N E LOC a location NE. In addition, we denote all noun phrases as N E N OU N . Not all the combinations of entities will yield meaningful relations between them. For instance, a location subject is most likely to not have a relation with its non-location object (Wang, 2020) . Thus, we leverage the NE types and apply the following rules to keep quality candidate tuples and filter out some invalid ones: Rule TF2: keep all the tuples whose head entity is a N E P ER , a N E ORG or a N E LOC , and Rule TF3: if the first entity is a N E LOC , keep the tuple if the second entity is also a N E LOC ; otherwise remove the tuple.", |
|
"cite_spans": [ |
|
{ |
|
"start": 545, |
|
"end": 557, |
|
"text": "(Wang, 2020)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Entity Tuple Formation and Filtering", |
|
"sec_num": "3.2" |
|
}, |
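A sketch of rules TF1-TF3 described above, assuming entities are given as (text, type) pairs in sentence order with type labels collapsed to PERSON, ORG, LOC, and NOUN:

```python
from itertools import combinations

PERSON, ORG, LOC, NOUN = "PERSON", "ORG", "LOC", "NOUN"

def form_and_filter_tuples(entities):
    """TF1: form all ordered pairs <e_i, e_j> with i < j.
    TF2: keep only tuples whose head entity is a person, organization, or location.
    TF3: if the head is a location, keep the tuple only if the tail is also a location."""
    kept = []
    for (head, h_type), (tail, t_type) in combinations(entities, 2):  # preserves sentence order (TF1)
        if h_type not in (PERSON, ORG, LOC):   # TF2
            continue
        if h_type == LOC and t_type != LOC:    # TF3
            continue
        kept.append((head, tail))
    return kept

ents = [("Barack Obama", PERSON), ("Honolulu", LOC), ("Columbia University", ORG)]
print(form_and_filter_tuples(ents))
# [('Barack Obama', 'Honolulu'), ('Barack Obama', 'Columbia University')]
```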
|
{ |
|
"text": "Thus, after applying filtering rules, the final set of entity tuples from the previous example is \u27e8Barack Obama, Honolulu\u27e9 and \u27e8Barack Obama, Columbia University\u27e9. Tuple \u27e8Honolulu, Columbia University\u27e9 is filtered out due to Rule TF2, which is beneficial because a relation between Honolulu and Columbia University is not visibly helpful.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Entity Tuple Formation and Filtering", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We denote the final set of entity tuples for a sentence after applying the filtering rules as F . Each tuple in F is in the format of head-tail, denoted as \u27e8e h , e t \u27e9. Our algorithm for finding the relation between e h and e t is based on semantic matching.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Relation Extraction", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Given a tuple \u27e8e h , e t \u27e9, its sentence s and a set of pre-defined relations R = (r 1 , r 2 , . . . , r n ), we collect all the tokens between e h and e t in s (including e h and e t ) and name this sequence of tokens as P sub . For each relation r i in R, we also construct a sequence of tokens as \"e h r i e t \" and name it R i . Using a state-of-the-art embedding model, Sentence-BERT (SBERT) 3 (Reimers and Gurevych, 2019), we compute the semantic similarity between P sub and R i by obtaining the embeddings of P sub and R i and computing their cosine similarity. We do this for all the r i 's in R and select the relation r i whose R i has the highest similarity score with P sub . If this highest similarity score is higher than a threshold 4 , then r i is selected as the relation between e h and e t . This generates a triple (e h , r i , e t ) for the knowledge graph. This process is repeated for all the entity tuples for sentence s and for all sentences in the input document. A triple is removed if it has been generated from a previous sentence. Figure 4 shows an example sentence, its two entities \u27e8Barack Obama, Columbia University\u27e9, the P sub formed by the two entities, the R i 's and the generated triple for the entity tuple. Note that even though the P sub span is considerably long, SBERT helps generate the correct relation in this case because of contextual knowledge encoded within such pretrained language models, thus validating the effectiveness of using semantic similarity in Figure 4 : An example for Relation Extraction phase. At the top is the sentence with e h and e t denoting head and tail entities, respectively. P sub is the part of sentence between e h and e t . R i 's are the sequences formed by the two entities and a relation. The final extracted triple for the two entities is also shown. KG relation extraction.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1062, |
|
"end": 1070, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1508, |
|
"end": 1516, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Relation Extraction", |
|
"sec_num": "3.3" |
|
}, |
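A minimal sketch of the semantic matching step, assuming a recent version of the sentence-transformers library with the distilbert-base-nli-stsb-mean-tokens model and the 0.8 threshold mentioned in the footnotes; the relation names passed in are illustrative, not the paper's full schema:

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("distilbert-base-nli-stsb-mean-tokens")

def extract_relation(p_sub, head, tail, relations, threshold=0.8):
    """Return (e_h, r_i, e_t) for the best-matching relation r_i, or None if no
    candidate reaches the threshold. p_sub is the sentence span between (and
    including) the two entities."""
    candidates = [f"{head} {r} {tail}" for r in relations]      # the R_i sequences
    emb_p = model.encode(p_sub, convert_to_tensor=True)
    emb_r = model.encode(candidates, convert_to_tensor=True)
    scores = util.cos_sim(emb_p, emb_r)[0]                      # cosine similarities
    best = int(scores.argmax())
    if float(scores[best]) >= threshold:
        return (head, relations[best], tail)
    return None

print(extract_relation(
    "Barack Obama was born in Honolulu and graduated from Columbia University",
    "Barack Obama", "Columbia University",
    ["educated at", "born in", "employee of"]))
```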
|
{ |
|
"text": "To further improve relation extraction in the news domain, we apply the following pattern-based rules based on our observation of their occurrence frequency in news articles: (1) Relation Extraction Rule 1 (RE1): if an entity tuple contains a noun phrase and a named entity of type Person (N E P ER ) and the noun phrase is immediately before a N E P ER in the sentence (such as in \"U.S. President Biden\"), we assign \"job title\" as the relation; (2) Relation Extraction Rule 2 (RE2): if the two entities in a tuple appear as N E LOC , N E LOC in the sentence (such as in \"Seattle, Washington\"), the \"is part of\" relation is generated; and Relation Extraction Rule 3 (RE3): relation \"job title\" is generated in the tuple with the pattern N E P ER , noun phrase (such as in \"Caitlin, a cardiothoracic nurse\").", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Optional Pattern-Based Rules", |
|
"sec_num": "3.4" |
|
}, |
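A sketch of how these optional rules (RE1-RE3) could be applied to adjacent entity pairs, using the same type labels as above; the direction of the generated "job title" triples is our assumption, since the paper only names the relation:

```python
def apply_pattern_rules(head, h_type, tail, t_type, adjacent):
    """Return a (head, relation, tail) triple if a pattern rule fires for an
    adjacent entity pair, else None."""
    if not adjacent:
        return None
    if h_type == "NOUN" and t_type == "PERSON":   # RE1: "U.S. President Biden"
        return (tail, "job title", head)          # assumed direction: person -> title
    if h_type == "LOC" and t_type == "LOC":       # RE2: "Seattle, Washington"
        return (head, "is part of", tail)
    if h_type == "PERSON" and t_type == "NOUN":   # RE3: "Caitlin, a cardiothoracic nurse"
        return (head, "job title", tail)
    return None

print(apply_pattern_rules("Seattle", "LOC", "Washington", "LOC", adjacent=True))
```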
|
{ |
|
"text": "We would like to emphasize that these rules are optional and even without these heuristics, our method outperforms the other unsupervised approaches, as demonstrated in Table 4 in section 5.3. Please also note that these rules may not be 100% accurate, but none of the existing KG generation methods is 100% accurate. These optional heuristics can better extract relations when two entities are next to each other in a sentence, where SBERT may not have enough information to correctly identify the relation between the two entities. We will show that these rules lead to a better overall result on news domains. Our goal here is to demonstrate that optional domain specific rules can be used to further improve the quality of the generated triples. If our purpose is to generate more labeled data for distant supervision, the use of these rules can reduce the overall noise ratio.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 169, |
|
"end": 176, |
|
"text": "Table 4", |
|
"ref_id": "TABREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Optional Pattern-Based Rules", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "We evaluate our KG system by comparing the generated triples to manually annotated triples from three benchmark information extraction datasets and a new dataset on the news domain, all for English language.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Datasets", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The three benchmark datasets are: (i) TACRED (Zhang et al., 2017) , (ii) NYT (Riedel et al., 2010) , and (iii) WEBNLG (Gardent et al., 2017) . Only their test datasets are used in our evaluation because our method does not need training. Each of the datasets contains a set of independent sentences and one or more ground truth triples for each sentence. TACRED has 41 relations originally from the TAC KBP yearly challenges 5 with a newly created relation called \"no_relation\" 6 . This dataset was manually constructed from an underlying corpus from TAC KBP where each sentence is labeled with a single ground truth triple and a standard evaluation tool is provided. NYT and WEBNLG datasets have 24 and 246 predefined relations, respectively. In both datasets, a sentence may have more than one ground truth triple. The statistics of the three benchmark datasets and our manuallycreated dataset are given in Table 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 45, |
|
"end": 65, |
|
"text": "(Zhang et al., 2017)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 77, |
|
"end": 98, |
|
"text": "(Riedel et al., 2010)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 118, |
|
"end": 140, |
|
"text": "(Gardent et al., 2017)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 909, |
|
"end": 916, |
|
"text": "Table 1", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Benchmark Datasets", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Our goal in this research is to create a KG from news articles in order to build question-answering tools for editors of a news agency. The benchmark datasets we can obtain are not completely in the news domain. To evaluate our method on the news domain, we created a new dataset named NewsKG21. Another reason for us to develop a new KG generation dataset is that many public benchmark KG datasets are of poor quality since they were created mostly via crowdsourcing (e.g., in the TACRED dataset, the ground truth label for \"AIG SELLS ALICO TO METLIFE\" is ('ALICO', 'parents', 'AIG'), which is wrong). The evaluation results based on such datasets may be misleading. As a result, we carefully created a new dataset with as little noise as possible.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "New Dataset: NewsKG21", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Four volunteers assisted in the creation of this dataset. One is an author of this paper, and the others are senior undergraduate Computer Science students. We selected 685 sentences from news articles published in 2021 in CNN, CBC, USNEWS, The Star, and Wikipedia News. From the 685 sentences, 1247 unique triples were manually generated. We divided the dataset into two parts: a test data set containing 271 sentences and 705 ground truth triples and a training set with 414 sentences and 542 ground truth triples. To prevent bias and advantages for a certain system, no system was engaged in the dataset creation process. Only the testing set is used to assess all unsupervised models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "New Dataset: NewsKG21", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We compare our system with two other stateof-the-art unsupervised systems 7 , Stanford Ope-nIE (Angeli et al., 2015) and MAMA (with the BERT LARGE option) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 95, |
|
"end": 116, |
|
"text": "(Angeli et al., 2015)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines and Metrics", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Entity tuple extraction: To compare the extracted entities with those in the ground truth data, we use Token Set Ratio 8 , to calculate the similarity between two entities. Given an extracted entity E and the ground truth entity G, Token Set Ratio is defined as 2M", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines and Metrics", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "T where T is the total number of tokens in both E and G (that is, |E| + |G| where |X| is the number of tokens in entity X), M is the number of matched tokens between E and G, and tokens are separated by spaces in the entity (that is, tokens are basically the words in the entity). For example, if E is \"Trudeau\" and G is \"Justin Trudeau\", the token set ratio is 2/3. This entity matching method is used for all the evaluated methods. Empirically, the threshold of string similarity is set to 0.9 for all the systems. The need for partial matching over exact matching is motivated by the observation that some gold standard annotations in the benchmark datasets are incompletely-matched entities. For example, \"Apollo 12\" appears as an entity in the original text, but it appears as \"Apollo\" in the gold standard triple in a benchmark dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines and Metrics", |
|
"sec_num": "5.1" |
|
}, |
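A simplified sketch of the entity matching score: the paper uses the fuzzywuzzy library's token_set_ratio, which is a related but not identical formulation; this version computes the 2M/T definition given in the text directly:

```python
def token_overlap_ratio(extracted: str, gold: str) -> float:
    """2M / T, where M is the number of matched tokens and T = |E| + |G|."""
    e_tokens, g_tokens = extracted.split(), gold.split()
    matched = sum(1 for tok in e_tokens if tok in g_tokens)
    return 2 * matched / (len(e_tokens) + len(g_tokens))

print(token_overlap_ratio("Trudeau", "Justin Trudeau"))  # 2*1 / (1+2) = 2/3 ≈ 0.67
```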
|
{ |
|
"text": "Triple generation: For a fair comparison, we also map the extracted relations from all the methods (including Stanford OpenIE and MAMA) to each of the dataset's relations using the same method, i.e., using SBERT embeddings for computing the cosine similarity between extracted relations and predefined relations in the schema, and selecting the one with the highest similarity score. We chose this relation mapping approach for Stanford OpenIE and MAMA instead of their original manual relation mapping techniques, which are irreproducible in our experiments. For the TACRED* dataset, we calculate precision, recall, and F-score with the provided standard evaluation script. As the TACRED* dataset also contains pronouns and nouns as entities in the ground truth triples, we also extract these in addition to the named entities and omit the coreference resolution in our system for this dataset in order to have a fair comparison because both baselines can detect pronouns and nouns as entities. In our system, the user can choose types of entities that can be identified. For the NYT and WEBNLG datasets, we calculate the standard F1 score as F 1 = (2 * p * r)/(p + r), with p = c m and r = c g , where c denotes the number of correctly extracted triples, m is the total number of extracted triples, and g is the number of triples in the annotated dataset. Table 2 presents the results of KG triple generation over the four datasets. We note that our method KGSS consistently outperforms both unsupervised baselines across all the datasets by considerable margins on all the three metrics. One possible explanation for the improvement gains achieved by KGSS as compared to the unsupervised baselines is that the baseline methods tend to extract triples using verbs as signals which causes them to miss many triples, whereas our method generates the triples using semantic similarity from sentence embeddings. The baseline models also generate redundant triples which lowers their precision.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1358, |
|
"end": 1365, |
|
"text": "Table 2", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Baselines and Metrics", |
|
"sec_num": "5.1" |
|
}, |
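For reference, a minimal sketch of the triple-level scoring used for NYT and WEBNLG; exact set matching is used here for brevity, whereas the paper matches entities with the partial Token Set Ratio described above:

```python
def triple_scores(extracted, gold):
    """Precision p = c/m, recall r = c/g, F1 = 2*p*r/(p+r), where c is the number of
    correctly extracted triples, m the number extracted, g the number of gold triples."""
    c = len(set(extracted) & set(gold))
    p = c / len(extracted) if extracted else 0.0
    r = c / len(gold) if gold else 0.0
    f1 = 2 * p * r / (p + r) if (p + r) else 0.0
    return p, r, f1

print(triple_scores(
    [("Barack Obama", "born in", "Honolulu")],
    [("Barack Obama", "born in", "Honolulu"), ("Barack Obama", "educated at", "Columbia University")]))
```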
|
{ |
|
"text": "It is worth noting that among the four datasets, WEBNLG is the most challenging one for KGSS, with much lower performance than that on other datasets. This is most likely because of the large number of relation types in its schema (more than 200 as compared to other datasets having less than 100 relations). We conjecture that some relations may be too semantically similar for SBERT to distinguish from each other. In terms of qualitative analysis, looking at the visual KG shown in Figure 3 generated for an excerpt from a Wikipedia article, we notice that all mentions of 'Bill Gates' and 'Gates' get correctly resolved to a single entity, i.e., 'Bill Gates', (and similarly, 'Microsoft Corporation' and 'Microsoft' get resolved to 'Microsoft Corporation') which helps prevent generating redundant triples. Another strength of the system can be seen in the form of triples such as \u27e8Bill Gates, friend, Paul Allen\u27e9, \u27e8Albuquerque, city of, New Mexico\u27e9 and \u27e8Seattle, city of, Washington\u27e9. Also, all the various positions held by Gates are captured well, thus highlighting the role of such systems as helpful tools for summarizing long pieces of unstructured text into a concise visual representation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 485, |
|
"end": 493, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "In Table 3 , we evaluate the three systems on the NewsKG21 dataset one the task of entity tuple extraction, which means that we only compare the performance of systems generating pairs of head and tail entities to the ground truth in the dataset. We see that our method is better than Stanford Ope-nIE and MAMA which is most likely attributed to our entity tuple filtering rules (TF1, 2, and 3) that can remove some noisy entity pairs while preserving a large number of meaningful tuples.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 3", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Experiments", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We also evaluate the three relation extraction rules described in Section 3.4. The results in Table 4 show that each rule helps to enhance the performance of our system as all the three measures increase as we apply more rules. The F-score is increased by around 7% after applying the three rules all together. One significant point to notice is that our system outperforms the other two unsupervised methods even when no heuristic rules are used.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 102, |
|
"text": "Table 4", |
|
"ref_id": "TABREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Experiments", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "By analyzing the generated triples, we realized that some incorrect triples can be avoided if we consider the entity types of a relation in relation extraction. For example, the spouse relation can only connect two entities of the person type. Thus, we add the type of the tail entity in each relation in our relation schema. Note the head entity type is already in the schema, similar to the schema in the TACRED dataset. With such information in the relation schema, we are able to eliminate some candidate relations given an entity tuple. For example, if the entity tuple is \"Trump, New York\", any relations whose head and tail entity types do not match Person and Location (such as the spouse relation) are not considered as candidates. Table 4 demonstrates that by using the tail entity type for each relation in the schema, we can raise the F-score of our system by 4% points. This is another advantage of our system, which uses an entity-type aware method for eliminating unpromising triple extraction results, which the Stanford OpenIE and MAMA systems do not have. In addition, we run an ablation test on the NewsKG21 dataset using the tuple filtering criteria specified in section 3.2. As seen in Table 5 , each rule contributes to the improvement of overall performance of our system. One interesting finding is that, of the three systems, MAMA gets the lowest score on the NewsKG21 dataset since it extracts entity tuples based on information contained in a pre-trained language model BERT. As such, MAMA will approach its KG generation limit if the input articles are not from the language model's underlying corpus, such as our NewsKG21 dataset which is produced from the recent news stories. Table 6 : Results of human evaluation on the performance of triple extraction on NewsKG21.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 741, |
|
"end": 748, |
|
"text": "Table 4", |
|
"ref_id": "TABREF8" |
|
}, |
|
{ |
|
"start": 1207, |
|
"end": 1214, |
|
"text": "Table 5", |
|
"ref_id": "TABREF10" |
|
}, |
|
{ |
|
"start": 1707, |
|
"end": 1714, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Experiments", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "In addition to automatic evaluation, we conduct human evaluation of our proposed system's triple extraction performance by comparing it to two baseline models: Stanford OpenIE and MAMA. Five human evaluators participated in our study, none of whom was told beforehand which systems they were assessing; more specifically, the names of each model were hidden. We chose 30 sentences at random from the NEWSKG21 dataset, and each participant graded the quality of triples generated by each system on each sentence based on the following criteria: (i) how accurate the extracted triples are in regard to the original text; and (ii) how thoroughly the extracted triples cover the true relations in the original sentence. Each evaluator was asked to assign a score from 0 to 1 to each generated triple on precision and to the set of triples generated from a sentence on recall, with 0 indicating entirely incorrect, 1 indicating completely accurate, and a value in between indicating partially correct. The results in Table 6 show that Stanford OpenIE performs much better on human evaluation than on automatic evaluation. This is because only evaluating the system based on automatically match with the ground truth in the dataset may not accurately reflect the performance of a system. However, the results in Table 6 confirm that our system outperforms the two baseline models.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1012, |
|
"end": 1019, |
|
"text": "Table 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1306, |
|
"end": 1313, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Human Evaluation", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "Although unsupervised approaches may allow more interpretable and flexible methods, they are not without limitations. The effectiveness of our unsupervised algorithm is partly dependent on the accuracy of the existing NER tools that we incor-porate into our pipeline. Similarly, the semantic matching phase's performance may be less effective when the relation schema contains similar relation names. In addition, if training data are available, supervised methods can achieve much better results as shown in Table 9 in Appendix C. Nevertheless, our unsupervised method can work when no training data are available and can potentially be used to create labeled data (although noisy) for distant supervised learning to bootstrap knowledge graph generation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 509, |
|
"end": 516, |
|
"text": "Table 9", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Human Evaluation", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "We presented a novel unsupervised method for knowledge graph generation without the need for labeled data or manual mapping of extracted relations to a predefined relation schema (as in two previous unsupervised methods). A salient feature of the method is that it uses semantic similarity matching to find relations between entities. In addition, our system can work with any set of relations that the user prefers, flexibility that other methods, especially the supervised ones, do not have. We also created a new data set from news articles that will be shared with the community.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Our evaluation results demonstrate the effectiveness of our system which significantly outperforms two state-of-the-art unsupervised models over four different datasets. We also develop an open source interactive KG generation and visualization tool. As future work, we will evaluate effectiveness of using our method for bootstrapping knowledge graph generation with distant supervision.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "A Experiments on TACRED dataset including no_relation relationship Table 7 compares our system's performance to Stanford OpenIE and MAMA on the TACRED dataset, which includes the relation: no_relation. In this experiment, if the relation confidence rate returned from SBERT is less than 0.8, our system will return no_relation. Although the total performance of all three systems decreases, our system still outperforms the other two cutting-edge models. Table 7 : The performance of triple extraction on TA-CRED including relationship \"no_relation\".", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 74, |
|
"text": "Table 7", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 455, |
|
"end": 462, |
|
"text": "Table 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "For entity extraction, we compare the performance of the named entity recognition (NER) systems", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Comparing performance of different algorithms on entity extraction", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The NewsKG21 dataset and the code for our KG generation and visualization are available under the open source license at https://github.com/lixianliu12/KGSS", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://spacy.io/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We use distilbert-base-nli-stsb-mean-tokens as the pretrained model.4 We set this threshold to 0.8 in our experiments based on the following experiment in the NYT dataset: beginning at 0 and increasing by 0.2 on each test until the threshold reaches 1, and we found that setting the threshold at 0.8 yielded the best F-score results. We use this threshold for all the other datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://tac.nist.gov/6 The results of the evaluation including the \"no_relation\" instances can be found in Appendix A.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Although Stanford OpenIE was trained in a semisupervised way, we use their pre-trained version and do not fine-tune it on our training dataset. Thus, we consider our use of their method as unsupervised.8 https://pypi.org/project/fuzzywuzzy/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://spacy.io/ 10 https://stanfordnlp.github.io/stanza/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank our volunteer annotators Iris Chang, Rhitabrat Pokharel, and Andrew Jeon for their help in creating our NewsKG21 dataset. We are thankful to the anonymous reviewers for their helpful suggestions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "C Performance of the supervised KG models Table 9 shows the performance of the state of the art supervised KG models: TransEN (Huang et al., 2020) on the TACRED dataset, and AaR (Liu et al., 2021) on the NYT and WEBNLG datasets. All the models are trained on the training data of each dataset and evaluated on the test data of the corresponding dataset. The results are taken from the references. Table 9 : The performance of the state of the art supervised KG models on the TACRED, NYT, and WEBNLG datasets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 126, |
|
"end": 146, |
|
"text": "(Huang et al., 2020)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 178, |
|
"end": 196, |
|
"text": "(Liu et al., 2021)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 42, |
|
"end": 49, |
|
"text": "Table 9", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 397, |
|
"end": 404, |
|
"text": "Table 9", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Leveraging linguistic structure for open domain information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Gabor", |
|
"middle": [], |
|
"last": "Angeli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Melvin Jose Johnson", |
|
"middle": [], |
|
"last": "Premkumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "344--354", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gabor Angeli, Melvin Jose Johnson Premkumar, and Christopher D Manning. 2015. Leveraging linguistic structure for open domain information extraction. In Proceedings of the 53rd Annual Meeting of the As- sociation for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 344-354.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Dbpedia: A nucleus for a web of open data", |
|
"authors": [ |
|
{ |
|
"first": "S\u00f6ren", |
|
"middle": [], |
|
"last": "Auer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Bizer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georgi", |
|
"middle": [], |
|
"last": "Kobilarov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jens", |
|
"middle": [], |
|
"last": "Lehmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Cyganiak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zachary", |
|
"middle": [], |
|
"last": "Ives", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "The semantic web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "722--735", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S\u00f6ren Auer, Christian Bizer, Georgi Kobilarov, Jens Lehmann, Richard Cyganiak, and Zachary Ives. 2007. Dbpedia: A nucleus for a web of open data. In The semantic web, pages 722-735. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "RECON: relation extraction using knowledge graph context in a graph neural network", |
|
"authors": [ |
|
{ |
|
"first": "Anson", |
|
"middle": [], |
|
"last": "Bastos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abhishek", |
|
"middle": [], |
|
"last": "Nadgeri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kuldeep", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isaiah", |
|
"middle": [ |
|
"Onando" |
|
], |
|
"last": "Mulang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "'", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saeedeh", |
|
"middle": [], |
|
"last": "Shekarpour", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johannes", |
|
"middle": [], |
|
"last": "Hoffart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manohar", |
|
"middle": [], |
|
"last": "Kaul", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "WWW '21: The Web Conference 2021, Virtual Event / Ljubljana", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1673--1685", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anson Bastos, Abhishek Nadgeri, Kuldeep Singh, Isa- iah Onando Mulang', Saeedeh Shekarpour, Johannes Hoffart, and Manohar Kaul. 2021. RECON: relation extraction using knowledge graph context in a graph neural network. In WWW '21: The Web Conference 2021, Virtual Event / Ljubljana, Slovenia, April 19- 23, 2021, pages 1673-1685. ACM / IW3C2.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Freebase: a collaboratively created graph database for structuring human knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Kurt", |
|
"middle": [], |
|
"last": "Bollacker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Evans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Praveen", |
|
"middle": [], |
|
"last": "Paritosh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Sturge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamie", |
|
"middle": [], |
|
"last": "Taylor", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 2008 ACM SIG-MOD international conference on Management of data", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1247--1250", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kurt Bollacker, Colin Evans, Praveen Paritosh, Tim Sturge, and Jamie Taylor. 2008. Freebase: a collabo- ratively created graph database for structuring human knowledge. In Proceedings of the 2008 ACM SIG- MOD international conference on Management of data, pages 1247-1250.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristinax", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristi- nax Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Span-based joint entity and relation extraction with transformer pre-training", |
|
"authors": [ |
|
{ |
|
"first": "Markus", |
|
"middle": [], |
|
"last": "Eberts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adrian", |
|
"middle": [], |
|
"last": "Ulges", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "-Including 10th Conference on Prestigious Applications of Artificial Intelligence", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "2006--2013", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Markus Eberts and Adrian Ulges. 2020. Span-based joint entity and relation extraction with transformer pre-training. In ECAI 2020 -24th European Confer- ence on Artificial Intelligence, 29 August-8 Septem- ber 2020, Santiago de Compostela, Spain, August 29 -September 8, 2020 -Including 10th Conference on Prestigious Applications of Artificial Intelligence (PAIS 2020), volume 325 of Frontiers in Artificial In- telligence and Applications, pages 2006-2013. IOS Press.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Identifying relations for open information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Fader", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Soderland", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Etzioni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 2011 conference on empirical methods in natural language processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1535--1545", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anthony Fader, Stephen Soderland, and Oren Etzioni. 2011. Identifying relations for open information ex- traction. In Proceedings of the 2011 conference on empirical methods in natural language processing, pages 1535-1545.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Creating training corpora for nlg micro-planning", |
|
"authors": [ |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Gardent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anastasia", |
|
"middle": [], |
|
"last": "Shimorina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shashi", |
|
"middle": [], |
|
"last": "Narayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laura", |
|
"middle": [], |
|
"last": "Perez-Beltrachini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "55th annual meeting of the Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Claire Gardent, Anastasia Shimorina, Shashi Narayan, and Laura Perez-Beltrachini. 2017. Creating training corpora for nlg micro-planning. In 55th annual meet- ing of the Association for Computational Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Allennlp: A deep semantic natural language processing platform", |
|
"authors": [ |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joel", |
|
"middle": [], |
|
"last": "Grus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oyvind", |
|
"middle": [], |
|
"last": "Tafjord", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pradeep", |
|
"middle": [], |
|
"last": "Dasigi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nelson", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Schmitz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1803.07640" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matt Gardner, Joel Grus, Mark Neumann, Oyvind Tafjord, Pradeep Dasigi, Nelson Liu, Matthew Pe- ters, Michael Schmitz, and Luke Zettlemoyer. 2018. Allennlp: A deep semantic natural language process- ing platform. arXiv preprint arXiv:1803.07640.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Unsupervised relation extraction from language models using constrained cloze completion", |
|
"authors": [ |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Goswami", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Akshata", |
|
"middle": [], |
|
"last": "Bhat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hadar", |
|
"middle": [], |
|
"last": "Ohana", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Theodoros", |
|
"middle": [], |
|
"last": "Rekatsinas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ankur Goswami, Akshata Bhat, Hadar Ohana, and Theodoros Rekatsinas. 2020. Unsupervised relation extraction from language models using constrained cloze completion. CoRR, abs/2010.06804.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Relation classification via knowledge graph enhanced transformer encoder. Knowledge-Based Systems", |
|
"authors": [ |
|
{ |
|
"first": "Wenti", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiyu", |
|
"middle": [], |
|
"last": "Mao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhan", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Long", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "206", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wenti Huang, Yiyu Mao, Zhan Yang, Lei Zhu, and Jun Long. 2020. Relation classification via knowledge graph enhanced transformer encoder. Knowledge- Based Systems, 206:106321.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Knowledge graph embedding based question answering", |
|
"authors": [ |
|
{ |
|
"first": "Xiao", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingyuan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dingcheng", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ping", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Twelfth ACM International Conference on Web Search and Data Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "105--113", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiao Huang, Jingyuan Zhang, Dingcheng Li, and Ping Li. 2019. Knowledge graph embedding based ques- tion answering. In Proceedings of the Twelfth ACM International Conference on Web Search and Data Mining, pages 105-113.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Neural architectures for named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miguel", |
|
"middle": [], |
|
"last": "Ballesteros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandeep", |
|
"middle": [], |
|
"last": "Subramanian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kazuya", |
|
"middle": [], |
|
"last": "Kawakami", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "260--270", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N16-1030" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Lample, Miguel Ballesteros, Sandeep Sub- ramanian, Kazuya Kawakami, and Chris Dyer. 2016. Neural architectures for named entity recognition. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, pages 260-270, San Diego, California. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "End-to-end neural coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luheng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "188--197", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D17-1018" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kenton Lee, Luheng He, Mike Lewis, and Luke Zettle- moyer. 2017. End-to-end neural coreference reso- lution. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 188-197, Copenhagen, Denmark. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Attention as relation: learning supervised multi-head self-attention for relation extraction", |
|
"authors": [ |
|
{ |
|
"first": "Jie", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shaowei", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bingquan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiaxin", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Na", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tong", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the Twenty-Ninth International Conference on International Joint Conferences on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3787--3793", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jie Liu, Shaowei Chen, Bingquan Wang, Jiaxin Zhang, Na Li, and Tong Xu. 2021. Attention as relation: learning supervised multi-head self-attention for rela- tion extraction. In Proceedings of the Twenty-Ninth International Conference on International Joint Con- ferences on Artificial Intelligence, pages 3787-3793.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Stanza: A python natural language processing toolkit for many human languages", |
|
"authors": [ |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Qi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuhao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuhui", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Bolton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2003.07082" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peng Qi, Yuhao Zhang, Yuhui Zhang, Jason Bolton, and Christopher D Manning. 2020. Stanza: A python natural language processing toolkit for many human languages. arXiv preprint arXiv:2003.07082.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Sentence-bert: Sentence embeddings using siamese bert-networks", |
|
"authors": [ |
|
{ |
|
"first": "Nils", |
|
"middle": [], |
|
"last": "Reimers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1908.10084" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nils Reimers and Iryna Gurevych. 2019. Sentence-bert: Sentence embeddings using siamese bert-networks. arXiv preprint arXiv:1908.10084.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Modeling relations and their mentions without labeled text", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Riedel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Limin", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Joint European Conference on Machine Learning and Knowledge Discovery in Databases", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "148--163", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Riedel, Limin Yao, and Andrew McCallum. 2010. Modeling relations and their mentions with- out labeled text. In Joint European Conference on Machine Learning and Knowledge Discovery in Databases, pages 148-163. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Open language learning for information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Schmitz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Soderland", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Bart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Etzioni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 2012 joint conference on empirical methods in natural language processing and computational natural language learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "523--534", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Schmitz, Stephen Soderland, Robert Bart, Oren Etzioni, et al. 2012. Open language learning for in- formation extraction. In Proceedings of the 2012 joint conference on empirical methods in natural language processing and computational natural lan- guage learning, pages 523-534.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Introducing the Knowledge Graph: things, not strings", |
|
"authors": [ |
|
{ |
|
"first": "Amit", |
|
"middle": [], |
|
"last": "Singhal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amit Singhal. 2012. Introducing the Knowl- edge Graph: things, not strings. https: //blog.google/products/search/ introducing-knowledge-graph-things-not/. [Online; accessed 01-July-2021].", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Yago: a core of semantic knowledge", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Fabian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gjergji", |
|
"middle": [], |
|
"last": "Suchanek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerhard", |
|
"middle": [], |
|
"last": "Kasneci", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 16th international conference on World Wide Web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "697--706", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fabian M Suchanek, Gjergji Kasneci, and Gerhard Weikum. 2007. Yago: a core of semantic knowledge. In Proceedings of the 16th international conference on World Wide Web, pages 697-706.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Revisiting unsupervised relation extraction", |
|
"authors": [ |
|
{ |
|
"first": "Phong", |
|
"middle": [], |
|
"last": "Thy Thy Tran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sophia", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ananiadou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7498--7505", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.669" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thy Thy Tran, Phong Le, and Sophia Ananiadou. 2020. Revisiting unsupervised relation extraction. In Pro- ceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 7498- 7505, Online. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Wikidata: a free collaborative knowledgebase", |
|
"authors": [ |
|
{ |
|
"first": "Denny", |
|
"middle": [], |
|
"last": "Vrande\u010di\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Markus", |
|
"middle": [], |
|
"last": "Kr\u00f6tzsch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Communications of the ACM", |
|
"volume": "57", |
|
"issue": "10", |
|
"pages": "78--85", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Denny Vrande\u010di\u0107 and Markus Kr\u00f6tzsch. 2014. Wiki- data: a free collaborative knowledgebase. Communi- cations of the ACM, 57(10):78-85.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Language models are open knowledge graphs", |
|
"authors": [ |
|
{ |
|
"first": "Chenguang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiao", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dawn", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2010.11967" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chenguang Wang, Xiao Liu, and Dawn Song. 2020. Language models are open knowledge graphs. arXiv preprint arXiv:2010.11967.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Knowledge graph construction and applications for web search and beyond", |
|
"authors": [ |
|
{ |
|
"first": "Peilu", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfang", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qi", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Data Intelligence", |
|
"volume": "1", |
|
"issue": "4", |
|
"pages": "333--349", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peilu Wang, Hao Jiang, Jingfang Xu, and Qi Zhang. 2019a. Knowledge graph construction and applica- tions for web search and beyond. Data Intelligence, 1(4):333-349.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Kgat: Knowledge graph attention network for recommendation", |
|
"authors": [ |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiangnan", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yixin", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meng", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tat-Seng", |
|
"middle": [], |
|
"last": "Chua", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "950--958", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiang Wang, Xiangnan He, Yixin Cao, Meng Liu, and Tat-Seng Chua. 2019b. Kgat: Knowledge graph at- tention network for recommendation. In Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pages 950- 958.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Unsupervised and supervised learning of complexrelation instances extraction in natural language", |
|
"authors": [ |
|
{ |
|
"first": "Zina", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zina Wang. 2020. Unsupervised and supervised learn- ing of complexrelation instances extraction in natural language. Master's thesis, Delft University of Tech- nology.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Distant supervision for relation extraction via piecewise convolutional neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Daojian", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yubo", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 conference on empirical methods in natural language processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1753--1762", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daojian Zeng, Kang Liu, Yubo Chen, and Jun Zhao. 2015. Distant supervision for relation extraction via piecewise convolutional neural networks. In Proceed- ings of the 2015 conference on empirical methods in natural language processing, pages 1753-1762.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Position-aware attention and supervised data improve slot filling", |
|
"authors": [ |
|
{ |
|
"first": "Yuhao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Zhong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gabor", |
|
"middle": [], |
|
"last": "Angeli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "35--45", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuhao Zhang, Victor Zhong, Danqi Chen, Gabor Angeli, and Christopher D Manning. 2017. Position-aware attention and supervised data improve slot filling. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 35- 45.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"text": "An overview of KGSS, our proposed unsupervised KG generation system.", |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"content": "<table><tr><td/><td>e h</td><td>e t</td></tr><tr><td>P sub</td><td>Semantic Similarity Computation</td><td>Barack Obama age Columbia University R i 's</td></tr><tr><td colspan=\"2\">Barack Obama was born in</td><td>Barack Obama employee of Columbia University</td></tr><tr><td colspan=\"2\">Honolulu and graduated from</td><td>Barack Obama graduated from Columbia University</td></tr><tr><td>Columbia University</td><td/><td>Barack Obama spouse Columbia University</td></tr><tr><td/><td/><td>Barack Obama siblings Columbia University</td></tr><tr><td/><td/><td>\u2026</td></tr><tr><td/><td/><td>\u2026</td></tr><tr><td>triple</td><td colspan=\"2\">(Barack Obama, graduated_from, Columbia University)</td></tr></table>", |
|
"num": null, |
|
"text": "Sentence: Barack Obama was born in Honolulu and graduated from Columbia University.", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"content": "<table/>", |
|
"num": null, |
|
"text": "Dataset statistics. TACRED* is a subset of TACRED without instances containing triples with \"no_relation\".", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF5": { |
|
"content": "<table/>", |
|
"num": null, |
|
"text": "The results of KG triple extraction.", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF7": { |
|
"content": "<table><tr><td>System</td><td colspan=\"2\">P % R % F1 %</td></tr><tr><td>Stanford OpenIE MAMA KGSS (without rules)</td><td>7.1 2.1 10.5 12.1 11.3 6.1</td><td>8.7 3.2 11.2</td></tr><tr><td colspan=\"2\">KGSS with RE 1 KGSS with RE 1 & 2 KGSS with RE 1, 2 & 3 KGSS with 3 REs & tail type 24.6 20.4 13.1 15.7 16.1 19.3 16.5 20.1</td><td>14.3 17.5 18.1 22.3</td></tr></table>", |
|
"num": null, |
|
"text": "Results of entity tuple extraction (e h , e t ) on NewsKG21", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF8": { |
|
"content": "<table><tr><td>: Results of triple extraction (e h , r, e t ) on NewsKG21 dataset, without relation extraction rules (top) and with relation extraction rules (bottom). Adding rules improves the performance.</td></tr></table>", |
|
"num": null, |
|
"text": "", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF10": { |
|
"content": "<table><tr><td>System</td><td>P %</td><td>R %</td><td>F1 %</td></tr><tr><td colspan=\"4\">Stanford OpenIE 33.5 \u00b1 9.0 MAMA 2.7 \u00b1 2.6 KGSS 34.1 \u00b1 10.0 37.8 \u00b1 12.7 35.9 34.6 \u00b1 15.9 34.0 10.3 \u00b1 6.9 4.3</td></tr></table>", |
|
"num": null, |
|
"text": "KGSS's performance on triple extraction with various tuple filtering methods on NewsKG21.", |
|
"type_str": "table", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |