Datasets:
Update readme and dataset name (#2)

* update script
* update README
- README.md +30 -0
- jsnli.py +4 -5
- tests/jsnli_test.py +2 -2
README.md
CHANGED
@@ -1,3 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
# Dataset Card for JSNLI
|
2 |
|
3 |
[](https://github.com/shunk031/huggingface-datasets_jsnli/actions/workflows/ci.yaml)
|
|
|
1 |
+
---
|
2 |
+
language:
|
3 |
+
- ja
|
4 |
+
|
5 |
+
license:
|
6 |
+
- cc-by-sa-4.0
|
7 |
+
|
8 |
+
multilinguality:
|
9 |
+
- monolingual
|
10 |
+
|
11 |
+
task_categories:
|
12 |
+
- text-classification
|
13 |
+
|
14 |
+
task_ids:
|
15 |
+
- natural-language-inference
|
16 |
+
- multi-input-text-classification
|
17 |
+
|
18 |
+
tags:
|
19 |
+
- natural-language-inference
|
20 |
+
- nli
|
21 |
+
- jsnli
|
22 |
+
|
23 |
+
datasets:
|
24 |
+
- without-filtering
|
25 |
+
- with-filtering
|
26 |
+
|
27 |
+
metrics:
|
28 |
+
- accuracy
|
29 |
+
---
|
30 |
+
|
31 |
# Dataset Card for JSNLI
|
32 |
|
33 |
[](https://github.com/shunk031/huggingface-datasets_jsnli/actions/workflows/ci.yaml)
|
jsnli.py
CHANGED
@@ -30,15 +30,14 @@ _URL = "https://nlp.ist.i.kyoto-u.ac.jp/DLcounter/lime.cgi?down=https://nlp.ist.
|
|
30 |
|
31 |
class JSNLIDataset(ds.GeneratorBasedBuilder):
|
32 |
VERSION = ds.Version("1.1.0") # type: ignore
|
33 |
-
DEFAULT_CONFIG_NAME: str = "w_filtering" # type: ignore
|
34 |
BUILDER_CONFIG = [
|
35 |
ds.BuilderConfig(
|
36 |
-
name="w_filtering",
|
37 |
version=VERSION, # type: ignore
|
38 |
description="SNLIの学習データに機械翻訳を適用した後、BLEUスコアの閾値0.1でフィルタリングを施したもの。BERTにこの学習データを学習させることにより、93.0%の精度を記録した。(533,005ペア)",
|
39 |
),
|
40 |
ds.BuilderConfig(
|
41 |
-
name="wo_filtering",
|
42 |
version=VERSION, # type: ignore
|
43 |
description="SNLIの学習データに機械翻訳を適用したもの。フィルタリングは行っていない。(548,014ペア)",
|
44 |
),
|
@@ -70,9 +69,9 @@ class JSNLIDataset(ds.GeneratorBasedBuilder):
|
|
70 |
train_wo_filtering_path = os.path.join(jsnli_dir, "train_wo_filtering.tsv")
|
71 |
|
72 |
dev_path = os.path.join(jsnli_dir, "dev.tsv")
|
73 |
-
if "w_filtering" in self.config.name:
|
74 |
tng_path = train_w_filtering_path
|
75 |
-
elif "wo_filtering" in self.config.name:
|
76 |
tng_path = train_wo_filtering_path
|
77 |
else:
|
78 |
raise ValueError(f"Invalid config name: {self.config.name}")
|
|
|
30 |
|
31 |
class JSNLIDataset(ds.GeneratorBasedBuilder):
|
32 |
VERSION = ds.Version("1.1.0") # type: ignore
|
|
|
33 |
BUILDER_CONFIG = [
|
34 |
ds.BuilderConfig(
|
35 |
+
name="with-filtering",
|
36 |
version=VERSION, # type: ignore
|
37 |
description="SNLIの学習データに機械翻訳を適用した後、BLEUスコアの閾値0.1でフィルタリングを施したもの。BERTにこの学習データを学習させることにより、93.0%の精度を記録した。(533,005ペア)",
|
38 |
),
|
39 |
ds.BuilderConfig(
|
40 |
+
name="without-filtering",
|
41 |
version=VERSION, # type: ignore
|
42 |
description="SNLIの学習データに機械翻訳を適用したもの。フィルタリングは行っていない。(548,014ペア)",
|
43 |
),
|
|
|
69 |
train_wo_filtering_path = os.path.join(jsnli_dir, "train_wo_filtering.tsv")
|
70 |
|
71 |
dev_path = os.path.join(jsnli_dir, "dev.tsv")
|
72 |
+
if "with-filtering" in self.config.name:
|
73 |
tng_path = train_w_filtering_path
|
74 |
+
elif "without-filtering" in self.config.name:
|
75 |
tng_path = train_wo_filtering_path
|
76 |
else:
|
77 |
raise ValueError(f"Invalid config name: {self.config.name}")
|
tests/jsnli_test.py
CHANGED
@@ -10,8 +10,8 @@ def dataset_path() -> str:
|
|
10 |
@pytest.mark.parametrize(
|
11 |
"dataset_name, expected_num_train,",
|
12 |
(
|
13 |
-
("wo_filtering", 548014),
|
14 |
-
("w_filtering", 533005),
|
15 |
),
|
16 |
)
|
17 |
def test_load_dataset(
|
|
|
10 |
@pytest.mark.parametrize(
|
11 |
"dataset_name, expected_num_train,",
|
12 |
(
|
13 |
+
("without-filtering", 548014),
|
14 |
+
("with-filtering", 533005),
|
15 |
),
|
16 |
)
|
17 |
def test_load_dataset(
|