File size: 2,101 Bytes
9fdd2c3
 
 
 
d4d492a
9fdd2c3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2ad750f
9fdd2c3
 
 
 
 
 
d4d492a
 
8b3fbc3
d4d492a
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
# -*- coding: utf-8 -*-
import os
import json
import datasets
import pandas as pd

_DESCRIPTION = """datasets-for-simcse"""

_CITATION = ''

GITHUB_HOME = ''

class DatasetsForSimCSEConfig(datasets.BuilderConfig):
    """BuilderConfig for the datasets-for-simcse loading script.

    Bundles everything one configuration needs: the feature schema, the
    download URL for its data file, and citation/homepage metadata.
    """

    def __init__(self, features, data_url, citation, url, label_classes=(0, 1), **kwargs):
        """Store per-config metadata.

        Args:
            features: `datasets.Features` describing each example.
            data_url: URL of the raw data file to download.
            citation: BibTeX (or empty string) for the dataset.
            url: Homepage for the dataset.
            label_classes: Tuple of possible labels; defaults to ``(0, 1)``.
            **kwargs: Forwarded to ``datasets.BuilderConfig`` (``name``,
                ``description``, ...).
        """
        # Every config of this script is pinned to version 1.0.0.
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.url = url
        self.citation = citation
        self.data_url = data_url
        self.label_classes = label_classes
        self.features = features


class DatasetsForSimCSE(datasets.GeneratorBasedBuilder):
    """Loader for the princeton-nlp/datasets-for-simcse NLI triplet corpus.

    Each example is a (sent0, sent1, hard_neg) string triplet used for
    supervised SimCSE training, read from a single remote CSV file.
    """

    BUILDER_CONFIGS = [
        DatasetsForSimCSEConfig(
            name="nli_for_simcse",
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "sent0": datasets.Value("string"),
                    "sent1": datasets.Value("string"),
                    "hard_neg": datasets.Value("string"),
                }
            ),
            data_url='https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/nli_for_simcse.csv',
            citation=_CITATION,
            url=GITHUB_HOME,
        )
    ]

    def _info(self):
        """Build the DatasetInfo from the active config's metadata."""
        return datasets.DatasetInfo(
            description=self.config.description,
            features=self.config.features,
            homepage=self.config.url,
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager):
        """Download the CSV and expose it as a single 'train' split."""
        downloaded_path = dl_manager.download(self.config.data_url)
        train_split = datasets.SplitGenerator(
            name='train',
            gen_kwargs={"filepath": downloaded_path},
        )
        return [train_split]

    def _generate_examples(self, filepath):
        """Yield (index, example) pairs from the downloaded CSV file."""
        frame = pd.read_csv(filepath, sep=',')
        # Default RangeIndex makes enumerate equivalent to the frame index.
        for key, record in enumerate(frame.to_dict(orient="records")):
            yield key, {
                'sent0': record['sent0'],
                'sent1': record['sent1'],
                'hard_neg': record['hard_neg'],
            }