# yago45en.py — Hugging Face dataset loading script for YAGO 4.5 (English subset).
# Provenance (reconstructed from file-viewer artifacts that had leaked into the
# source): repo "yago45en", author "wikipunk", commit message "update script",
# commit e0dd3d8, file size 2.94 kB.
import os
from datasets import DatasetBuilder, SplitGenerator, DownloadConfig, load_dataset, DownloadManager, DatasetInfo
from rdflib import Graph, URIRef, Literal, BNode
from rdflib.namespace import RDF, RDFS, OWL, XSD, Namespace, NamespaceManager
from datasets.features import Features, Value
# RDF namespaces used throughout this script: schema.org vocabulary and the
# YAGO resource namespace (entity IRIs).
SCHEMA = Namespace('http://schema.org/')
YAGO = Namespace('http://yago-knowledge.org/resource/')
class YAGO45DatasetBuilder(DatasetBuilder):
    """Builder for a subset of YAGO 4.5 that keeps only English labels.

    Yields one example per RDF triple, with the subject, predicate and
    object each rendered as a plain string.
    """

    VERSION = "1.0.0"

    # Taxonomy graph parsed once per process in _split_generators.
    # NOTE(review): this is a mutable class-level attribute shared by all
    # builder instances in the same process; that appears intentional here
    # (the comment below says "in every process") but is worth confirming.
    taxonomy = Graph(bind_namespaces="core")

    def _info(self):
        """Return the dataset metadata: description, citation, license and
        the (subject, predicate, object) string feature schema."""
        # Removed stray debug print("INFO") left over from development.
        return DatasetInfo(
            description="A subset of the YAGO 4.5 dataset maintaining only English labels",
            citation="@article{suchanek2023integrating,title={Integrating the Wikidata Taxonomy into YAGO},author={Suchanek, Fabian M and Alam, Mehwish and Bonald, Thomas and Paris, Pierre-Henri and Soria, Jules},journal={arXiv preprint arXiv:2308.11884},year={2023}}",
            homepage="https://yago-knowledge.org/",
            license="https://creativecommons.org/licenses/by-sa/3.0/",
            features=Features({
                'subject': Value('string'),
                'predicate': Value('string'),
                'object': Value('string')
            })
        )

    def _split_generators(self, dl_manager):
        """Download the data files and define the single 'train' split.

        Returns one SplitGenerator whose gen_kwargs carry the .nt chunk
        paths and the prefix->namespace mappings parsed from the taxonomy.
        """
        # Cache downloads under ./raw (absolute path so all workers agree).
        dl_manager.download_config = DownloadConfig(cache_dir=os.path.abspath("raw"))
        # NOTE(review): the extracted paths returned by download_and_extract
        # are discarded; everything below reads from dl_manager.manual_dir
        # instead — confirm the archive contents actually land there.
        dl_manager.download_and_extract(["facts.tar.gz", "yago-taxonomy.ttl"])
        # Load yago-taxonomy.ttl file in every process.
        self.taxonomy.parse(os.path.join(dl_manager.manual_dir, 'yago-taxonomy.ttl'),
                            format='turtle')
        # Extract prefix -> namespace mappings so each chunk graph can
        # re-bind the same prefixes.
        prefix_mappings = dict(self.taxonomy.namespaces())
        # Every N-Triples chunk file in manual_dir belongs to the train split.
        chunk_paths = [os.path.join(dl_manager.manual_dir, chunk)
                       for chunk in os.listdir(dl_manager.manual_dir)
                       if chunk.endswith('.nt')]
        return [SplitGenerator(name="train",
                               gen_kwargs={'chunk_paths': chunk_paths,
                                           'prefix_mappings': prefix_mappings})]

    def _generate_examples(self, chunk_paths, prefix_mappings):
        """Parse each N-Triples chunk and yield (key, example) pairs.

        BUG FIX: example keys must be unique across the whole split, but the
        original code restarted enumerate() at 0 for every chunk file, which
        produced duplicate keys whenever there was more than one chunk. A
        single counter now runs across all chunks.
        """
        key = 0  # split-wide running key, never reset between chunks
        for chunk_path in chunk_paths:
            graph = Graph(bind_namespaces="core")
            # Re-bind the taxonomy's prefixes on the chunk graph for
            # consistent namespace handling.
            for prefix, namespace in prefix_mappings.items():
                graph.bind(prefix, namespace)
            graph.parse(chunk_path, format='nt')
            # Yield individual triples from the graph as string rows.
            for s, p, o in graph:
                yield key, {
                    'subject': str(s),
                    'predicate': str(p),
                    'object': str(o)
                }
                key += 1