alexwww94 committed
Commit 9fdd2c3 · 1 Parent(s): ef37e89

Create datasets-for-simcse

Files changed (1)
  1. datasets-for-simcse +63 -0
datasets-for-simcse ADDED
@@ -0,0 +1,63 @@
+ # -*- coding: utf-8 -*-
+
+ import csv
+
+ import datasets
+
+ _DESCRIPTION = """datasets-for-simcse"""
+
+ _CITATION = ''
+
+ GITHUB_HOME = ''
+
+
+ class DatasetsForSimCSEConfig(datasets.BuilderConfig):
+     """BuilderConfig for datasets-for-simcse."""
+
+     def __init__(self, features, data_url, citation, url, label_classes=(0, 1), **kwargs):
+         super().__init__(version=datasets.Version("1.0.0"), **kwargs)
+         self.features = features
+         self.label_classes = label_classes
+         self.data_url = data_url
+         self.citation = citation
+         self.url = url
+
+
+ class DatasetsForSimCSE(datasets.GeneratorBasedBuilder):
+     """NLI triplets (sent0, sent1, hard_neg) prepared for SimCSE training."""
+
+     BUILDER_CONFIGS = [
+         DatasetsForSimCSEConfig(
+             name="nli_for_simcse",
+             description=_DESCRIPTION,
+             features=datasets.Features({"sent0": datasets.Value("string"),
+                                         "sent1": datasets.Value("string"),
+                                         "hard_neg": datasets.Value("string")}),
+             data_url='https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/nli_for_simcse.csv',
+             citation=_CITATION,
+             url=GITHUB_HOME,
+         )
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=self.config.description,
+             features=self.config.features,
+             homepage=self.config.url,
+             citation=self.config.citation,
+         )
+
+     def _split_generators(self, dl_manager):
+         filepath = dl_manager.download(self.config.data_url)
+         # Single split, named after the config rather than datasets.Split.TRAIN.
+         return [datasets.SplitGenerator(
+             name='nli_for_simcse',
+             gen_kwargs={
+                 "filepath": filepath,
+             })]
+
+     def _generate_examples(self, filepath):
+         """This function returns the examples in the raw (text) form."""
+         with open(filepath, 'r', encoding="utf-8") as f:
+             # Use csv.reader rather than a naive str.split(','): the
+             # sentences themselves can contain commas, and split(',')
+             # would also leave a trailing newline on hard_neg.
+             reader = csv.reader(f)
+             next(reader)  # skip the header row
+             for idx, row in enumerate(reader):
+                 sent0, sent1, hard_neg = row
+                 yield idx, {'sent0': sent0, 'sent1': sent1, 'hard_neg': hard_neg}
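For reference, a minimal usage sketch (not part of the commit), assuming the script above is saved locally as datasets-for-simcse.py. Older datasets releases run such loading scripts directly; more recent ones restrict script-based loading, so the trust_remote_code flag may be needed.

    from datasets import load_dataset

    # Point load_dataset at the local script and select the config defined above.
    # Local path is an assumption; add trust_remote_code=True if your
    # datasets version requires it for loading scripts.
    ds = load_dataset("./datasets-for-simcse.py", "nli_for_simcse")

    # The script yields a single split whose name matches the config.
    triplets = ds["nli_for_simcse"]
    print(triplets[0])  # -> {'sent0': '...', 'sent1': '...', 'hard_neg': '...'}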