# -*- coding: utf-8 -*-
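"""Hugging Face `datasets` loading script for the princeton-nlp
datasets-for-simcse NLI triplet corpus (columns: sent0, sent1, hard_neg)."""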

import csv
import datasets

_DESCRIPTION = """NLI triplets (sent0, sent1 entailment, hard_neg contradiction) used as supervised training data for SimCSE."""

_CITATION = ''

GITHUB_HOME = ''

class DatasetsForSimCSEConfig(datasets.BuilderConfig):
    """BuilderConfig for datasets-for-simcse."""

    def __init__(self, features, data_url, citation, url, label_classes=(0, 1), **kwargs):
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.features = features
        self.label_classes = label_classes
        self.data_url = data_url
        self.citation = citation
        self.url = url


class DatasetsForSimCSE(datasets.GeneratorBasedBuilder):
    """The Natural Language Inference Chinese(NLI_zh) Corpus."""

    BUILDER_CONFIGS = [
        DatasetsForSimCSEConfig(
            name="nli_for_simcse",
            description=_DESCRIPTION,
            features=datasets.Features({
                "sent0": datasets.Value("string"),
                "sent1": datasets.Value("string"),
                "hard_neg": datasets.Value("string"),
            }),
            data_url='https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/nli_for_simcse.csv',
            citation=_CITATION,
            url=GITHUB_HOME,
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=self.config.description,
            features=self.config.features,
            homepage=self.config.url,
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager):
        filepath = dl_manager.download(self.config.data_url)
        # The CSV is distributed as a single file with no train/dev/test
        # partition, so everything is exposed under one named split.
        return [datasets.SplitGenerator(
            name='nli_for_simcse',
            gen_kwargs={"filepath": filepath},
        )]

    def _generate_examples(self, filepath):
        """Yields examples in the raw (text) form."""
        with open(filepath, 'r', encoding="utf-8") as f:
            reader = csv.reader(f)
            next(reader)  # skip the header row: sent0,sent1,hard_neg
            for idx, row in enumerate(reader):
                # csv.reader handles quoted fields, so commas inside the
                # sentences do not split a triplet into extra columns.
                sent0, sent1, hard_neg = row
                yield idx, {'sent0': sent0, 'sent1': sent1, 'hard_neg': hard_neg}
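

# Usage sketch (assumption: this script is saved locally as
# datasets_for_simcse.py; recent versions of `datasets` may also require
# trust_remote_code=True for script-based builders):
#
#     from datasets import load_dataset
#     ds = load_dataset("datasets_for_simcse.py", "nli_for_simcse",
#                       split="nli_for_simcse")
#     print(ds[0])  # {'sent0': ..., 'sent1': ..., 'hard_neg': ...}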