# -*- coding: utf-8 -*-
"""
@author: XuMing([email protected])
@description: Natural Language Inference (NLI) Chinese Corpus (nli_zh) loading script for SimCLUE.
"""
import os
import json
import datasets


_DESCRIPTION = """SimCLUE: 3,000,000+ examples for Chinese semantic understanding and matching (中文语义理解与匹配数据集)"""
GITHUB_HOME = "https://github.com/CLUEbenchmark/SimCLUE"
_CITATION = "https://github.com/CLUEbenchmark/SimCLUE"
_DATA_URL = "https://storage.googleapis.com/cluebenchmark/tasks/simclue_public.zip"


class SimCLUEConfig(datasets.BuilderConfig):
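    """BuilderConfig for the SimCLUE subsets (one config per data file)."""
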
def __init__(self, features, data_url, citation, url, label_classes=(0, 1), **kwargs):
super().__init__(version=datasets.Version("1.0.0"), **kwargs)
self.features = features
self.label_classes = label_classes
self.data_url = data_url
self.citation = citation
self.url = url


class SimCLUE(datasets.GeneratorBasedBuilder):
"""The Natural Language Inference Chinese(NLI_zh) Corpus."""
part_file = {'train_rank': 'train_rank.json',
'train_pair': 'train_pair.json',
'corpus': 'corpus.txt',
'train_pair_postive': 'train_pair_postive.json',
'dev': 'dev.json',
'test_public': 'test_public.json'}
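    # Map each config name to the datasets.Split it is exposed as.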
part_split = {'train_rank': datasets.Split.TRAIN,
'train_pair': datasets.Split.TRAIN,
'corpus': datasets.Split.TRAIN,
'train_pair_postive': datasets.Split.TRAIN,
'dev': datasets.Split.VALIDATION,
'test_public': datasets.Split.TEST}
BUILDER_CONFIGS = [
SimCLUEConfig(
name="train_rank",
description=_DESCRIPTION,
features=datasets.Features({"query": datasets.Value("string"),
"title": datasets.Value("string"),
"neg_title": datasets.Value("string")}),
data_url=_DATA_URL,
citation=_CITATION,
url=GITHUB_HOME,
),
SimCLUEConfig(
name="train_pair",
description=_DESCRIPTION,
features=datasets.Features({"sentence1": datasets.Value("string"),
"sentence2": datasets.Value("string"),
"label": datasets.Value("int32")}),
data_url=_DATA_URL,
citation=_CITATION,
url=GITHUB_HOME,
),
SimCLUEConfig(
name="corpus",
description=_DESCRIPTION,
features=datasets.Features({"sentence1": datasets.Value("string")}),
data_url=_DATA_URL,
citation=_CITATION,
url=GITHUB_HOME,
),
SimCLUEConfig(
name="train_pair_postive",
description=_DESCRIPTION,
features=datasets.Features({"sentence1": datasets.Value("string"),
"sentence2": datasets.Value("string"),
"label": datasets.Value("int32")}),
data_url=_DATA_URL,
citation=_CITATION,
url=GITHUB_HOME,
),
SimCLUEConfig(
name="dev",
description=_DESCRIPTION,
features=datasets.Features({"sentence1": datasets.Value("string"),
"sentence2": datasets.Value("string"),
"label": datasets.Value("int32")}),
data_url=_DATA_URL,
citation=_CITATION,
url=GITHUB_HOME,
),
SimCLUEConfig(
name="test_public",
description=_DESCRIPTION,
features=datasets.Features({"sentence1": datasets.Value("string"),
"sentence2": datasets.Value("string"),
"label": datasets.Value("int32")}),
data_url=_DATA_URL,
citation=_CITATION,
url=GITHUB_HOME,
),
]

    def _info(self):
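        """Return DatasetInfo assembled from the selected BuilderConfig."""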
return datasets.DatasetInfo(
description=self.config.description,
features=self.config.features,
homepage=self.config.url,
citation=self.config.citation,
)

    def _split_generators(self, dl_manager):
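        """Download and extract the data archive, returning the single split this config maps to."""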
dl_dir = dl_manager.download_and_extract(self.config.data_url)
return [datasets.SplitGenerator(
name=self.part_split[self.config.name],
gen_kwargs={
"filepath": os.path.join(dl_dir, self.part_file[self.config.name]),
})]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        with open(filepath, 'r', encoding="utf-8") as f:
            for idx, row in enumerate(f):
                if self.config.name == "corpus":
                    # corpus.txt is plain text with one sentence per line, not JSON lines.
                    yield idx, {"sentence1": row.strip()}
                else:
                    yield idx, json.loads(row)
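

# Example usage (a sketch, not part of the loader; assumes this script is saved locally as
# simclue.py and that the installed `datasets` version still supports local loading scripts):
#   from datasets import load_dataset
#   dev = load_dataset("path/to/simclue.py", "dev", split="validation")
#   print(dev[0])  # expected keys: 'sentence1', 'sentence2', 'label'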