# -*- coding: utf-8 -*-
import csv

import datasets

_DESCRIPTION = """datasets-for-simcse"""

_CITATION = ''

GITHUB_HOME = ''


class DatasetsForSimCSEConfig(datasets.BuilderConfig):
    """BuilderConfig carrying the feature schema, download URL and citation info."""

    def __init__(self, features, data_url, citation, url, label_classes=(0, 1), **kwargs):
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.features = features
        self.label_classes = label_classes
        self.data_url = data_url
        self.citation = citation
        self.url = url


class DatasetsForSimCSE(datasets.GeneratorBasedBuilder):
    """NLI triplet corpus (sent0, sent1, hard_neg) used for supervised SimCSE training."""

    BUILDER_CONFIGS = [
        DatasetsForSimCSEConfig(
            name="nli_for_simcse",
            description=_DESCRIPTION,
            features=datasets.Features({
                "sent0": datasets.Value("string"),
                "sent1": datasets.Value("string"),
                "hard_neg": datasets.Value("string"),
            }),
            data_url='https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/nli_for_simcse.csv',
            citation=_CITATION,
            url=GITHUB_HOME,
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=self.config.description,
            features=self.config.features,
            homepage=self.config.url,
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager):
        # Download the raw CSV and expose it as a single split.
        filepath = dl_manager.download(self.config.data_url)
        return [
            datasets.SplitGenerator(
                name='nli_for_simcse',
                gen_kwargs={"filepath": filepath},
            )
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        with open(filepath, 'r', encoding="utf-8") as f:
            # Use the csv module rather than str.split(','): the sentences
            # may themselves contain commas inside quoted fields, and the
            # reader also handles trailing newlines.
            reader = csv.reader(f)
            next(reader)  # skip the header row
            for idx, row in enumerate(reader):
                sent0, sent1, hard_neg = row
                yield idx, {'sent0': sent0, 'sent1': sent1, 'hard_neg': hard_neg}
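

# Usage sketch (assumption: this script is saved locally as
# `datasets_for_simcse.py`; the path below is illustrative, not part of the
# original script). It loads the single 'nli_for_simcse' split defined above:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("path/to/datasets_for_simcse.py", "nli_for_simcse")
#     print(ds["nli_for_simcse"][0])
#     # -> {'sent0': '...', 'sent1': '...', 'hard_neg': '...'}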