shunk031 committed · Commit e8929fb (unverified) · Parent: 28c2721

Initialize (#1)


* add python files

* add poetry files

* add settings for CI

* add .gitignore

* add README.md

* update CI

* add CI settings

.github/workflows/ci.yaml ADDED
@@ -0,0 +1,47 @@
+name: CI
+
+on:
+  push:
+    branches: [main]
+    paths-ignore:
+      - 'README.md'
+  pull_request:
+    branches: [main]
+    paths-ignore:
+      - 'README.md'
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ['3.8', '3.9', '3.10']
+
+    steps:
+      - uses: actions/checkout@v2
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Install dependencies
+        run: |
+          pip install -U pip setuptools wheel poetry
+          poetry install
+      - name: Format
+        run: |
+          poetry run black --check .
+      - name: Lint
+        run: |
+          poetry run flake8 . --ignore=E501,W503,E203
+      - name: Type check
+        run: |
+          poetry run mypy . \
+            --ignore-missing-imports \
+            --no-strict-optional \
+            --no-site-packages \
+            --cache-dir=/dev/null
+
+      - name: Run tests
+        run: |
+          poetry run pytest --color=yes -rf
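The Format, Lint, and Type check steps are plain Poetry invocations and can be reproduced locally with the same commands. As one illustration, here is a minimal sketch (an addition for this write-up, not part of the commit) that drives the same type-check configuration from Python via mypy's programmatic API, assuming mypy is installed in the active environment:

```python
# Local equivalent of the CI "Type check" step, using mypy's public Python API.
# Assumes mypy is available (e.g. after `poetry install`).
from mypy import api

stdout, stderr, exit_status = api.run(
    [
        ".",
        "--ignore-missing-imports",
        "--no-strict-optional",
        "--no-site-packages",
        "--cache-dir=/dev/null",
    ]
)
print(stdout)
raise SystemExit(exit_status)  # non-zero when type errors were found
```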
.github/workflows/push_to_hub.yaml ADDED
@@ -0,0 +1,26 @@
+name: Sync to Hugging Face Hub
+
+on:
+  workflow_run:
+    workflows:
+      - CI
+    branches:
+      - main
+    types:
+      - completed
+
+jobs:
+  push_to_hub:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v2
+
+      - name: Push to Hugging Face Hub
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_USERNAME: ${{ secrets.HF_USERNAME }}
+        run: |
+          git fetch --unshallow
+          git push --force https://${HF_USERNAME}:${HF_TOKEN}@huggingface.co/datasets/${HF_USERNAME}/jsnli main
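This workflow force-pushes the repository to the Hub whenever the CI workflow completes on main, so it depends on the HF_TOKEN and HF_USERNAME repository secrets being set. A hedged sanity check, not part of the commit, for verifying the token before relying on the sync (assumes the `huggingface_hub` package is installed and HF_TOKEN is exported in the environment):

```python
# Verify that the Hugging Face token in HF_TOKEN is valid.
import os

from huggingface_hub import HfApi

user_info = HfApi().whoami(token=os.environ["HF_TOKEN"])  # raises if the token is invalid
print(user_info["name"])  # the account the token belongs to
```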
.gitignore ADDED
@@ -0,0 +1,171 @@
+# Created by https://www.toptal.com/developers/gitignore/api/python
+# Edit at https://www.toptal.com/developers/gitignore?templates=python
+
+### Python ###
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+#   For a library or package, you might want to ignore these files since the code is
+#   intended to run in multiple environments; otherwise, check them in:
+.python-version
+
+# pipenv
+#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+#   However, in case of collaboration, if having platform-specific dependencies or dependencies
+#   having no cross-platform support, pipenv may install dependencies that don't work, or not
+#   install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+#   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+#   This is especially recommended for binary packages to ensure reproducibility, and is more
+#   commonly ignored for libraries.
+#   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+#   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+#   in version control.
+#   https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+#  and can be added to the global gitignore or merged into this file.  For a more nuclear
+#  option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+### Python Patch ###
+# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
+poetry.toml
+
+
+# End of https://www.toptal.com/developers/gitignore/api/python
README.md ADDED
@@ -0,0 +1,3 @@
+# Dataset Card for JSNLI
+
+[![CI](https://github.com/shunk031/huggingface-datasets_jsnli/actions/workflows/ci.yaml/badge.svg)](https://github.com/shunk031/huggingface-datasets_jsnli/actions/workflows/ci.yaml)
jsnli.py ADDED
@@ -0,0 +1,111 @@
+import logging
+import os
+
+import datasets as ds
+
+logger = logging.getLogger(__name__)
+
+_CITATION = """\
+- 吉越 卓見, 河原 大輔, 黒橋 禎夫: 機械翻訳を用いた自然言語推論データセットの多言語化, 第244回自然言語処理研究会, (2020.7.3).
+- Samuel R. Bowman, Gabor Angeli, Christopher Potts, and Christopher D. Manning. 2015. A large annotated corpus for learning natural language inference. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing (EMNLP).
+- Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. "From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions." Transactions of the Association for Computational Linguistics 2 (2014): 67-78.
+"""
+
+_DESCRIPTION = """\
+== Japanese SNLI (JSNLI) Dataset ==
+
+A natural language inference dataset built by translating the SNLI corpus into Japanese.
+The training data was machine-translated from the original and then filtered automatically.
+The evaluation data was filtered by two-stage crowdsourcing: whether each sentence reads as natural Japanese, and whether the post-translation label still matches the original label.
+"""
+
+_HOMEPAGE = "https://nlp.ist.i.kyoto-u.ac.jp/?%E6%97%A5%E6%9C%AC%E8%AA%9ESNLI%28JSNLI%29%E3%83%87%E3%83%BC%E3%82%BF%E3%82%BB%E3%83%83%E3%83%88"
+
+_LICENSE = """\
+CC BY-SA 4.0
+"""
+
+_URL = "https://nlp.ist.i.kyoto-u.ac.jp/DLcounter/lime.cgi?down=https://nlp.ist.i.kyoto-u.ac.jp/nl-resource/JSNLI/jsnli_1.1.zip&name=JSNLI.zip"
+
+
+class JSNLIDataset(ds.GeneratorBasedBuilder):
+    VERSION = ds.Version("1.1.0")  # type: ignore
+    DEFAULT_CONFIG_NAME: str = "w_filtering"  # type: ignore
+    BUILDER_CONFIGS = [
+        ds.BuilderConfig(
+            name="w_filtering",
+            version=VERSION,  # type: ignore
+            description="SNLI training data machine-translated into Japanese, then filtered with a BLEU-score threshold of 0.1; a BERT model trained on this data reached 93.0% accuracy. (533,005 pairs)",
+        ),
+        ds.BuilderConfig(
+            name="wo_filtering",
+            version=VERSION,  # type: ignore
+            description="SNLI training data machine-translated into Japanese, with no filtering applied. (548,014 pairs)",
+        ),
+    ]
+
+    def _info(self) -> ds.DatasetInfo:
+        features = ds.Features(
+            {
+                "label": ds.Value("string"),
+                "premise": ds.Value("string"),
+                "hypothesis": ds.Value("string"),
+            }
+        )
+        return ds.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager: ds.DownloadManager):
+        jsnli_base_dir = dl_manager.download_and_extract(_URL)
+        jsnli_dir = os.path.join(
+            jsnli_base_dir, f"jsnli_{self.VERSION.major}.{self.VERSION.minor}"  # type: ignore
+        )
+
+        train_w_filtering_path = os.path.join(jsnli_dir, "train_w_filtering.tsv")
+        train_wo_filtering_path = os.path.join(jsnli_dir, "train_wo_filtering.tsv")
+
+        dev_path = os.path.join(jsnli_dir, "dev.tsv")
+        if "w_filtering" in self.config.name:
+            tng_path = train_w_filtering_path
+        elif "wo_filtering" in self.config.name:
+            tng_path = train_wo_filtering_path
+        else:
+            raise ValueError(f"Invalid config name: {self.config.name}")
+
+        tng_gen_kwargs = {
+            "tsv_path": tng_path,
+        }
+        val_gen_kwargs = {
+            "tsv_path": dev_path,
+        }
+
+        return [
+            ds.SplitGenerator(
+                name=ds.Split.TRAIN,  # type: ignore
+                gen_kwargs=tng_gen_kwargs,  # type: ignore
+            ),
+            ds.SplitGenerator(
+                name=ds.Split.VALIDATION,  # type: ignore
+                gen_kwargs=val_gen_kwargs,  # type: ignore
+            ),
+        ]
+
+    def _generate_examples(  # type: ignore
+        self,
+        tsv_path: str,
+    ):
+        # Each line is "label\tpremise\thypothesis"; the files are UTF-8 Japanese text.
+        with open(tsv_path, "r", encoding="utf-8") as rf:
+            for sentence_id, line in enumerate(rf):
+                label, premise, hypothesis = line.replace("\n", "").split("\t")
+
+                example_dict = {
+                    "label": label,
+                    "premise": premise,
+                    "hypothesis": hypothesis,
+                }
+                yield sentence_id, example_dict
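A minimal usage sketch (an addition for this write-up, not part of the commit): loading the dataset through this script with the default `w_filtering` config, mirroring what `tests/jsnli_test.py` below asserts.

```python
# Load JSNLI via the local loading script; row counts are taken from the tests below.
import datasets as ds

dataset = ds.load_dataset(path="jsnli.py", name="w_filtering")
print(dataset["train"].num_rows)       # 533005
print(dataset["validation"].num_rows)  # 3916
print(dataset["train"][0])             # {'label': ..., 'premise': ..., 'hypothesis': ...}
```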
poetry.lock ADDED
(diff too large to render)
 
pyproject.toml ADDED
@@ -0,0 +1,23 @@
+[tool.poetry]
+name = "huggingface-datasets-jsnli"
+version = "0.1.0"
+description = ""
+authors = ["Shunsuke KITADA <[email protected]>"]
+readme = "README.md"
+packages = []
+
+[tool.poetry.dependencies]
+python = ">=3.8.1,<4.0"
+datasets = "^2.7.1"
+
+
+[tool.poetry.group.dev.dependencies]
+black = "^22.10.0"
+isort = "^5.10.1"
+flake8 = "^6.0.0"
+mypy = "^0.991"
+pytest = "^7.2.0"
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
tests/__init__.py ADDED
(empty file)
tests/jsnli_test.py ADDED
@@ -0,0 +1,26 @@
+import datasets as ds
+import pytest
+
+
+@pytest.fixture
+def dataset_path() -> str:
+    return "jsnli.py"
+
+
+@pytest.mark.parametrize(
+    "dataset_name, expected_num_train",
+    (
+        ("wo_filtering", 548014),
+        ("w_filtering", 533005),
+    ),
+)
+def test_load_dataset(
+    dataset_path: str,
+    dataset_name: str,
+    expected_num_train: int,
+    expected_num_valid: int = 3916,
+):
+    dataset = ds.load_dataset(path=dataset_path, name=dataset_name)
+
+    assert dataset["train"].num_rows == expected_num_train  # type: ignore
+    assert dataset["validation"].num_rows == expected_num_valid  # type: ignore
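In CI the suite runs as `poetry run pytest --color=yes -rf`. The same run can also be triggered programmatically, e.g. from a small helper script (a sketch, not part of the commit):

```python
# Run the test suite with the same flags as CI and propagate the exit code.
import pytest

raise SystemExit(pytest.main(["--color=yes", "-rf"]))
```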