Divyanshu committed on
Commit
021e040
1 Parent(s): 0df1e49
Files changed (39)
  1. .history/IE-SemParse_20230707234104.py +162 -0
  2. .history/IE-SemParse_20230707234108.py +162 -0
  3. .history/IE-SemParse_20230707234111.py +162 -0
  4. .history/IE-SemParse_20230707234114.py +162 -0
  5. .history/IE-SemParse_20230707234133.py +162 -0
  6. .history/IE-SemParse_20230707234139.py +162 -0
  7. .history/IE-SemParse_20230707234141.py +162 -0
  8. .history/IE-SemParse_20230707234143.py +162 -0
  9. .history/README_20230707215813.md +3 -0
  10. .history/README_20230707233453.md +234 -0
  11. .history/README_20230707233505.md +234 -0
  12. .history/README_20230707233537.md +234 -0
  13. .history/README_20230707233553.md +234 -0
  14. .history/README_20230707233556.md +234 -0
  15. .history/README_20230707233559.md +234 -0
  16. .history/README_20230707233606.md +234 -0
  17. .history/README_20230707233630.md +234 -0
  18. .history/README_20230707233641.md +234 -0
  19. .history/README_20230707233647.md +234 -0
  20. .history/README_20230707233654.md +232 -0
  21. .history/README_20230707233704.md +232 -0
  22. .history/README_20230707233708.md +232 -0
  23. .history/README_20230707233722.md +229 -0
  24. .history/README_20230707233725.md +226 -0
  25. .history/README_20230707233734.md +214 -0
  26. .history/README_20230707233737.md +214 -0
  27. .history/README_20230707233747.md +214 -0
  28. .history/README_20230707233751.md +214 -0
  29. .history/README_20230707233753.md +214 -0
  30. .history/README_20230707233759.md +214 -0
  31. .history/README_20230707233801.md +214 -0
  32. .history/README_20230707233805.md +214 -0
  33. .history/README_20230707233808.md +214 -0
  34. .history/README_20230707233812.md +214 -0
  35. .history/README_20230707233815.md +214 -0
  36. .history/r_20230707233419 +0 -0
  37. .history/r_20230707233422 +0 -0
  38. IE-SemParse.py +5 -5
  39. README.md +212 -1
.history/IE-SemParse_20230707234104.py ADDED
@@ -0,0 +1,162 @@
+ # coding=utf-8
+
+
+ # Lint as: python3
+ """IndicXNLI: The Cross-Lingual NLI Corpus for Indic Languages."""
+
+
+ import os
+ import json
+
+ import pandas as pd
+
+ import datasets
+
+ from datasets import DownloadManager
+
+
+ _CITATION = """\
+ @misc{aggarwal2023evaluating,
+ title={Evaluating Inter-Bilingual Semantic Parsing for Indian Languages},
+ author={Divyanshu Aggarwal and Vivek Gupta and Anoop Kunchukuttan},
+ year={2023},
+ eprint={2304.13005},
+ archivePrefix={arXiv},
+ primaryClass={cs.CL}
+ }
+ """
+
+ _DESCRIPTION = """\
+ IE-SemParse is an Inter-bilingual Seq2seq Semantic parsing dataset for 11 distinct Indian languages
+ """
+
+ _LANGUAGES = (
+     'hi',
+     'bn',
+     'mr',
+     'as',
+     'ta',
+     'te',
+     'or',
+     'ml',
+     'pa',
+     'gu',
+     'kn'
+ )
+
+
+ _DATASETS = (
+     'itop',
+     'indic-atis',
+     'indic-TOP'
+ )
+
+
+ _URL = "https://huggingface.co/datasets/Divyanshu/IE-SemParse/resolve/main/"
+
+
+ class IESemParseConfig(datasets.BuilderConfig):
+     """BuilderConfig for IE-SemParse."""
+
+     def __init__(self, dataset: str, language: str, **kwargs):
+         """BuilderConfig for IE-SemParse.
+
+         Args:
+             language: One of hi, bn, mr, as, ta, te, or, ml, pa, gu, kn
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(IESemParseConfig, self).__init__(**kwargs)
+
+         self.dataset = dataset
+         self.language = language
+         self.languages = _LANGUAGES
+         self.datasets = _DATASETS
+
+         self._URLS = [os.path.join(
+             _URL, "unfiltered_data", dataset, f"{language}.json")]
+
+
+ class IE-SemParse(datasets.GeneratorBasedBuilder):
+     """IE-SemParse: Inter-Bilingual Semantic Parsing Dataset for Indic Languages. Version 1.0."""
+
+     VERSION = datasets.Version("1.0.0", "")
+     BUILDER_CONFIG_CLASS = IESemParseConfig
+     BUILDER_CONFIGS = [
+         IESemParseConfig(
+             name=f"{dataset}_{language}",
+             language=language,
+             dataset=dataset,
+             version=datasets.Version("1.0.0", ""),
+             description=f"Plain text import of IE-SemParse for the {language} language for {dataset} dataset",
+         )
+         for language, dataset in zip(_LANGUAGES, _DATASETS)
+     ]
+
+     def _info(self):
+         dl_manager = datasets.DownloadManager()
+
+         urls_to_download = self.config._URLS
+
+         filepath = dl_manager.download_and_extract(urls_to_download)[0]
+
+         with open(filepath, "r") as f:
+             data = json.load(f)
+             data = data[list(data.keys())[0]]
+
+         features = datasets.Features(
+             {k: datasets.Value("string") for k in data[0].keys()}
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             # No default supervised_keys (as we have to pass both premise
+             # and hypothesis as input).
+             supervised_keys=None,
+             homepage="https://github.com/divyanshuaggarwal/IE-SemParse",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         urls_to_download = self.config._URLS
+
+         downloaded_file = dl_manager.download_and_extract(urls_to_download)[0]
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "split_key": "train",
+                     "filepath": downloaded_file,
+                     "data_format": "IE-SemParse"
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "split_key": "test",
+                     "filepath": downloaded_file,
+                     "data_format": "IE-SemParse"
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "split_key": "val",
+                     "filepath": downloaded_file,
+                     "data_format": "IE-SemParse"
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, data_format, split_key, filepath):
+         """This function returns the examples in the raw (text) form."""
+
+         with open(filepath, "r") as f:
+             data = json.load(f)
+             data = data[split_key]
+
+         for idx, row in enumerate(data):
+             yield idx, {
+                 k: v for k, v in row.items()
+             }
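The `BUILDER_CONFIGS` list above pairs languages and datasets with `zip`, which stops at the shorter tuple, so this snapshot defines three configs rather than one per dataset/language combination. A minimal, standalone sketch of that pairing, reusing the `_LANGUAGES` and `_DATASETS` values from the script (whether the full cross product was intended is an assumption):

```python
from itertools import product

_LANGUAGES = ('hi', 'bn', 'mr', 'as', 'ta', 'te', 'or', 'ml', 'pa', 'gu', 'kn')
_DATASETS = ('itop', 'indic-atis', 'indic-TOP')

# zip() truncates to the shorter tuple, so the comprehension above yields three names.
zip_names = [f"{dataset}_{language}" for language, dataset in zip(_LANGUAGES, _DATASETS)]
print(zip_names)           # ['itop_hi', 'indic-atis_bn', 'indic-TOP_mr']

# A config for every dataset/language pair (33 names) would need itertools.product instead.
product_names = [f"{dataset}_{language}" for dataset, language in product(_DATASETS, _LANGUAGES)]
print(len(product_names))  # 33
```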
.history/IE-SemParse_20230707234108.py ADDED
@@ -0,0 +1,162 @@
.history/IE-SemParse_20230707234111.py ADDED
@@ -0,0 +1,162 @@
.history/IE-SemParse_20230707234114.py ADDED
@@ -0,0 +1,162 @@
.history/IE-SemParse_20230707234133.py ADDED
@@ -0,0 +1,162 @@
.history/IE-SemParse_20230707234139.py ADDED
@@ -0,0 +1,162 @@
.history/IE-SemParse_20230707234141.py ADDED
@@ -0,0 +1,162 @@
+ # coding=utf-8
+
+
+ # Lint as: python3
+ """IndicXNLI: The Cross-Lingual NLI Corpus for Indic Languages."""
+
+
+ import os
+ import json
+
+ import pandas as pd
+
+ import datasets
+
+ from datasets import DownloadManager
+
+
+ _CITATION = """\
+ @misc{aggarwal2023evaluating,
+ title={Evaluating Inter-Bilingual Semantic Parsing for Indian Languages},
+ author={Divyanshu Aggarwal and Vivek Gupta and Anoop Kunchukuttan},
+ year={2023},
+ eprint={2304.13005},
+ archivePrefix={arXiv},
+ primaryClass={cs.CL}
+ }
+ """
+
+ _DESCRIPTION = """\
+ IE-SemParse is an Inter-bilingual Seq2seq Semantic parsing dataset for 11 distinct Indian languages
+ """
+
+ _LANGUAGES = (
+     'hi',
+     'bn',
+     'mr',
+     'as',
+     'ta',
+     'te',
+     'or',
+     'ml',
+     'pa',
+     'gu',
+     'kn'
+ )
+
+
+ _DATASETS = (
+     'itop',
+     'indic-atis',
+     'indic-TOP'
+ )
+
+
+ _URL = "https://huggingface.co/datasets/Divyanshu/IE-SemParse/resolve/main/"
+
+
+ class IE_SemParseConfig(datasets.BuilderConfig):
+     """BuilderConfig for IE-SemParse."""
+
+     def __init__(self, dataset: str, language: str, **kwargs):
+         """BuilderConfig for IE-SemParse.
+
+         Args:
+             language: One of hi, bn, mr, as, ta, te, or, ml, pa, gu, kn
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(IE_SemParseConfig, self).__init__(**kwargs)
+
+         self.dataset = dataset
+         self.language = language
+         self.languages = _LANGUAGES
+         self.datasets = _DATASETS
+
+         self._URLS = [os.path.join(
+             _URL, "unfiltered_data", dataset, f"{language}.json")]
+
+
+ class IE_SemParse(datasets.GeneratorBasedBuilder):
+     """IE-SemParse: Inter-Bilingual Semantic Parsing Dataset for Indic Languages. Version 1.0."""
+
+     VERSION = datasets.Version("1.0.0", "")
+     BUILDER_CONFIG_CLASS = IE_SemParseConfig
+     BUILDER_CONFIGS = [
+         IE_SemParseConfig(
+             name=f"{dataset}_{language}",
+             language=language,
+             dataset=dataset,
+             version=datasets.Version("1.0.0", ""),
+             description=f"Plain text import of IE-SemParse for the {language} language for {dataset} dataset",
+         )
+         for language, dataset in zip(_LANGUAGES, _DATASETS)
+     ]
+
+     def _info(self):
+         dl_manager = datasets.DownloadManager()
+
+         urls_to_download = self.config._URLS
+
+         filepath = dl_manager.download_and_extract(urls_to_download)[0]
+
+         with open(filepath, "r") as f:
+             data = json.load(f)
+             data = data[list(data.keys())[0]]
+
+         features = datasets.Features(
+             {k: datasets.Value("string") for k in data[0].keys()}
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             # No default supervised_keys (as we have to pass both premise
+             # and hypothesis as input).
+             supervised_keys=None,
+             homepage="https://github.com/divyanshuaggarwal/IE-SemParse",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         urls_to_download = self.config._URLS
+
+         downloaded_file = dl_manager.download_and_extract(urls_to_download)[0]
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "split_key": "train",
+                     "filepath": downloaded_file,
+                     "data_format": "IE-SemParse"
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "split_key": "test",
+                     "filepath": downloaded_file,
+                     "data_format": "IE-SemParse"
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "split_key": "val",
+                     "filepath": downloaded_file,
+                     "data_format": "IE-SemParse"
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, data_format, split_key, filepath):
+         """This function returns the examples in the raw (text) form."""
+
+         with open(filepath, "r") as f:
+             data = json.load(f)
+             data = data[split_key]
+
+         for idx, row in enumerate(data):
+             yield idx, {
+                 k: v for k, v in row.items()
+             }
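A short usage sketch for the loader in its final form above, assuming the `itop_hi` config name produced by the `f"{dataset}_{language}"` scheme and the repository id taken from `_URL`; the exact set of available configs follows the `zip` pairing shown earlier:

```python
from datasets import load_dataset

# Config names follow the f"{dataset}_{language}" pattern from BUILDER_CONFIGS;
# "itop_hi" is the Hindi iTOP pairing. Recent releases of `datasets` may also
# require trust_remote_code=True for script-based loaders such as this one.
dataset = load_dataset("Divyanshu/IE-SemParse", "itop_hi")

print(dataset)              # DatasetDict with train, test and validation splits
print(dataset["train"][0])  # one raw example; every field is loaded as a string
```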
.history/IE-SemParse_20230707234143.py ADDED
@@ -0,0 +1,162 @@
.history/README_20230707215813.md ADDED
@@ -0,0 +1,3 @@
+ ---
+ license: mit
+ ---
.history/README_20230707233453.md ADDED
@@ -0,0 +1,234 @@
+ ---
+ annotations_creators:
+ - machine-generated
+ language_creators:
+ - machine-generated
+ language:
+ - as
+ - bn
+ - gu
+ - hi
+ - kn
+ - ml
+ - mr
+ - or
+ - pa
+ - ta
+ - te
+ license:
+ - cc0-1.0
+ multilinguality:
+ - multilingual
+ pretty_name: IndicXNLI
+ size_categories:
+ - 1M<n<10M
+ source_datasets:
+ - original
+ task_categories:
+ - text-classification
+ task_ids:
+ - natural-language-inference
+ ---
+
+ # Dataset Card for "IndicXNLI"
+
+ ## Table of Contents
+
+ - [Dataset Card for "IndicXNLI"](#dataset-card-for-indicxnli)
+ - [Table of Contents](#table-of-contents)
+ - [Dataset Description](#dataset-description)
+ - [Dataset Summary](#dataset-summary)
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
+ - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+ - [Data Instances](#data-instances)
+ - [Data Fields](#data-fields)
+ - [Data Splits](#data-splits)
+ - [Dataset usage](#dataset-usage)
+ - [Dataset Creation](#dataset-creation)
+ - [Curation Rationale](#curation-rationale)
+ - [Source Data](#source-data)
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
+ - [Human Verification Process](#human-verification-process)
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
+ - [Social Impact of Dataset](#social-impact-of-dataset)
+ - [Discussion of Biases](#discussion-of-biases)
+ - [Other Known Limitations](#other-known-limitations)
+ - [Dataset Curators](#dataset-curators)
+ - [Licensing Information](#licensing-information)
+ - [Citation Information](#citation-information)
+
+ ## Dataset Description
+
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IndicXNLI>
+ - **Paper:** [IndicXNLI: Evaluating Multilingual Inference for Indian Languages](https://arxiv.org/abs/2204.08776)
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
+
+ ### Dataset Summary
+
+ INDICXNLI is similar to existing
+ XNLI dataset in shape/form, but focusses on Indic language family. INDICXNLI include NLI
+ data for eleven major Indic languages that includes
+ Assamese (‘as’), Gujarat (‘gu’), Kannada (‘kn’),
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
+ (‘hi’), and Bengali (‘bn’).
+
+ ### Supported Tasks and Leaderboards
+
+ **Tasks:** Natural Language Inference
+
+ **Leaderboards:** Currently there is no Leaderboard for this dataset.
+
+ ### Languages
+
+ - `Assamese (as)`
+ - `Bengali (bn)`
+ - `Gujarati (gu)`
+ - `Kannada (kn)`
+ - `Hindi (hi)`
+ - `Malayalam (ml)`
+ - `Marathi (mr)`
+ - `Oriya (or)`
+ - `Punjabi (pa)`
+ - `Tamil (ta)`
+ - `Telugu (te)`
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ One example from the `hi` dataset is given below in JSON format.
+
+ ```python
+ {'premise': 'अवधारणात्मक रूप से क्रीम स्किमिंग के दो बुनियादी आयाम हैं-उत्पाद और भूगोल।',
+ 'hypothesis': 'उत्पाद और भूगोल क्रीम स्किमिंग का काम करते हैं।',
+ 'label': 1 (neutral) }
+ ```
+
+ ### Data Fields
+
+ - `premise (string)`: Premise Sentence
+ - `hypothesis (string)`: Hypothesis Sentence
+ - `label (integer)`: Integer label `0` if hypothesis `entails` the premise, `2` if hypothesis `negates` the premise and `1` otherwise.
+
+ ### Data Splits
+
+ <!-- Below is the dataset split given for `hi` dataset.
+
+ ```python
+ DatasetDict({
+ train: Dataset({
+ features: ['premise', 'hypothesis', 'label'],
+ num_rows: 392702
+ })
+ test: Dataset({
+ features: ['premise', 'hypothesis', 'label'],
+ num_rows: 5010
+ })
+ validation: Dataset({
+ features: ['premise', 'hypothesis', 'label'],
+ num_rows: 2490
+ })
+ })
+
+ ``` -->
+
+ Language | ISO 639-1 Code |Train | Dev | Test |
+ --------------|----------------|-------|-----|------|
+ Assamese | as | 392,702 | 5,010 | 2,490 |
+ Bengali | bn | 392,702 | 5,010 | 2,490 |
+ Gujarati | gu | 392,702 | 5,010 | 2,490 |
+ Hindi | hi | 392,702 | 5,010 | 2,490 |
+ Kannada | kn | 392,702 | 5,010 | 2,490 |
+ Malayalam | ml |392,702 | 5,010 | 2,490 |
+ Marathi | mr |392,702 | 5,010 | 2,490 |
+ Oriya | or | 392,702 | 5,010 | 2,490 |
+ Punjabi | pa | 392,702 | 5,010 | 2,490 |
+ Tamil | ta | 392,702 | 5,010 | 2,490 |
+ Telugu | te | 392,702 | 5,010 | 2,490 |
+
+ <!-- The dataset split remains same across all languages. -->
+
+ ## Dataset usage
+
+ Code snippet for using the dataset using datasets library.
+
+ ```python
+ from datasets import load_dataset
+
+ dataset = load_dataset("Divyanshu/indicxnli")
+ ```
+
+ ## Dataset Creation
+
+ Machine translation of XNLI english dataset to 11 listed Indic Languages.
+
+ ### Curation Rationale
+
+ [More information needed]
+
+ ### Source Data
+
+ [XNLI dataset](https://cims.nyu.edu/~sbowman/xnli/)
+
+ #### Initial Data Collection and Normalization
+
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
+
+ #### Who are the source language producers?
+
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
+
+ #### Human Verification Process
+
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
+
+ ## Considerations for Using the Data
+
+ ### Social Impact of Dataset
+
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
+
+ ### Discussion of Biases
+
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
+
+ ### Other Known Limitations
+
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
+
+ ### Dataset Curators
+
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
+
+ ### Licensing Information
+
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
+
+ ### Citation Information
+
+ If you use any of the datasets, models or code modules, please cite the following paper:
+
+ ```
+ @misc{https://doi.org/10.48550/arxiv.2204.08776,
+ doi = {10.48550/ARXIV.2204.08776},
+
+ url = {https://arxiv.org/abs/2204.08776},
+
+ author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
+
+ keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},
+
+ title = {IndicXNLI: Evaluating Multilingual Inference for Indian Languages},
+
+ publisher = {arXiv},
+
+ year = {2022},
+
+ copyright = {Creative Commons Attribution 4.0 International}
+ }
+ ```
+
+ <!-- ### Contributions -->
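A small sketch of the label convention described in the Data Fields section above; the integer meanings come from the card, while the human-readable names are assumptions:

```python
# 0 = hypothesis entails the premise, 2 = hypothesis negates it, 1 = neither (per the card).
LABEL_NAMES = {0: "entailment", 1: "neutral", 2: "contradiction"}

example = {"premise": "...", "hypothesis": "...", "label": 1}
print(LABEL_NAMES[example["label"]])  # neutral
```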
.history/README_20230707233505.md ADDED
@@ -0,0 +1,234 @@
.history/README_20230707233537.md ADDED
@@ -0,0 +1,234 @@
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - machine-generated
6
+ language:
7
+ - as
8
+ - bn
9
+ - gu
10
+ - hi
11
+ - kn
12
+ - ml
13
+ - mr
14
+ - or
15
+ - pa
16
+ - ta
17
+ - te
18
+ license:
19
+ - cc0-1.0
20
+ multilinguality:
21
+ - multilingual
22
+ pretty_name: IE-SemParse
23
+ size_categories:
24
+ - 1M<n<10M
25
+ source_datasets:
26
+ - original
27
+ task_categories:
28
+ - text-classification
29
+ task_ids:
30
+ - natural-language-inference
31
+ ---
32
+
33
+ # Dataset Card for "IE-SemParse"
34
+
35
+ ## Table of Contents
36
+
37
+ - [Dataset Card for "IE-SemParse"](#dataset-card-for-ie-semparse)
38
+ - [Table of Contents](#table-of-contents)
39
+ - [Dataset Description](#dataset-description)
40
+ - [Dataset Summary](#dataset-summary)
41
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
42
+ - [Languages](#languages)
43
+ - [Dataset Structure](#dataset-structure)
44
+ - [Data Instances](#data-instances)
45
+ - [Data Fields](#data-fields)
46
+ - [Data Splits](#data-splits)
47
+ - [Dataset usage](#dataset-usage)
48
+ - [Dataset Creation](#dataset-creation)
49
+ - [Curation Rationale](#curation-rationale)
50
+ - [Source Data](#source-data)
51
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
52
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
53
+ - [Human Verification Process](#human-verification-process)
54
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
55
+ - [Social Impact of Dataset](#social-impact-of-dataset)
56
+ - [Discussion of Biases](#discussion-of-biases)
57
+ - [Other Known Limitations](#other-known-limitations)
58
+ - [Dataset Curators](#dataset-curators)
59
+ - [Licensing Information](#licensing-information)
60
+ - [Citation Information](#citation-information)
61
+
62
+ ## Dataset Description
63
+
64
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IE-SemParse>
65
+ - **Paper:** [Evaluating Inter-Bilingual Semantic Parsing for Indian Languages](https://arxiv.org/abs/2304.13005)
66
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
67
+
68
+ ### Dataset Summary
69
+
70
+ INDICXNLI is similar to the existing
71
+ XNLI dataset in shape/form, but focuses on the Indic language family. INDICXNLI includes NLI
72
+ data for eleven major Indic languages:
73
+ Assamese (‘as’), Gujarati (‘gu’), Kannada (‘kn’),
74
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
75
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
76
+ (‘hi’), and Bengali (‘bn’).
77
+
78
+ ### Supported Tasks and Leaderboards
79
+
80
+ **Tasks:** Natural Language Inference
81
+
82
+ **Leaderboards:** Currently there is no Leaderboard for this dataset.
83
+
84
+ ### Languages
85
+
86
+ - `Assamese (as)`
87
+ - `Bengali (bn)`
88
+ - `Gujarati (gu)`
89
+ - `Kannada (kn)`
90
+ - `Hindi (hi)`
91
+ - `Malayalam (ml)`
92
+ - `Marathi (mr)`
93
+ - `Oriya (or)`
94
+ - `Punjabi (pa)`
95
+ - `Tamil (ta)`
96
+ - `Telugu (te)`
97
+
98
+ ## Dataset Structure
99
+
100
+ ### Data Instances
101
+
102
+ One example from the `hi` dataset is given below in JSON format.
103
+
104
+ ```python
105
+ {'premise': 'अवधारणात्मक रूप से क्रीम स्किमिंग के दो बुनियादी आयाम हैं-उत्पाद और भूगोल।',
106
+ 'hypothesis': 'उत्पाद और भूगोल क्रीम स्किमिंग का काम करते हैं।',
107
+ 'label': 1 (neutral) }
108
+ ```
109
+
110
+ ### Data Fields
111
+
112
+ - `premise (string)`: Premise Sentence
113
+ - `hypothesis (string)`: Hypothesis Sentence
114
+ - `label (integer)`: Integer label `0` if hypothesis `entails` the premise, `2` if hypothesis `negates` the premise and `1` otherwise.
115
+
116
+ ### Data Splits
117
+
118
+ <!-- Below is the dataset split given for `hi` dataset.
119
+
120
+ ```python
121
+ DatasetDict({
122
+ train: Dataset({
123
+ features: ['premise', 'hypothesis', 'label'],
124
+ num_rows: 392702
125
+ })
126
+ test: Dataset({
127
+ features: ['premise', 'hypothesis', 'label'],
128
+ num_rows: 5010
129
+ })
130
+ validation: Dataset({
131
+ features: ['premise', 'hypothesis', 'label'],
132
+ num_rows: 2490
133
+ })
134
+ })
135
+
136
+ ``` -->
137
+
138
+ Language | ISO 639-1 Code |Train | Dev | Test |
139
+ --------------|----------------|-------|-----|------|
140
+ Assamese | as | 392,702 | 5,010 | 2,490 |
141
+ Bengali | bn | 392,702 | 5,010 | 2,490 |
142
+ Gujarati | gu | 392,702 | 5,010 | 2,490 |
143
+ Hindi | hi | 392,702 | 5,010 | 2,490 |
144
+ Kannada | kn | 392,702 | 5,010 | 2,490 |
145
+ Malayalam | ml |392,702 | 5,010 | 2,490 |
146
+ Marathi | mr |392,702 | 5,010 | 2,490 |
147
+ Oriya | or | 392,702 | 5,010 | 2,490 |
148
+ Punjabi | pa | 392,702 | 5,010 | 2,490 |
149
+ Tamil | ta | 392,702 | 5,010 | 2,490 |
150
+ Telugu | te | 392,702 | 5,010 | 2,490 |
151
+
152
+ <!-- The dataset split remains same across all languages. -->
153
+
154
+ ## Dataset usage
155
+
156
+ Code snippet for loading the dataset with the `datasets` library:
157
+
158
+ ```python
159
+ from datasets import load_dataset
160
+
161
+ dataset = load_dataset("Divyanshu/IE-SemParse")
162
+ ```
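+ The underlying loading script organises the data by sub-dataset (itop, indic-atis, indic-TOP) and
+ Indic language, so a specific configuration can also be requested. The configuration naming scheme
+ is not documented in this card, so the name below is only an assumed `<dataset>_<language>` example,
+ not a verified identifier:
+
+ ```python
+ from datasets import load_dataset
+
+ # "itop_hi" is a hypothetical configuration name (itop sub-dataset, Hindi);
+ # adjust it to the naming scheme actually exposed by the loading script.
+ itop_hi = load_dataset("Divyanshu/IE-SemParse", "itop_hi")
+ ```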
163
+
164
+ ## Dataset Creation
165
+
166
+ Machine translation of XNLI english dataset to 11 listed Indic Languages.
167
+
168
+ ### Curation Rationale
169
+
170
+ [More information needed]
171
+
172
+ ### Source Data
173
+
174
+ [XNLI dataset](https://cims.nyu.edu/~sbowman/xnli/)
175
+
176
+ #### Initial Data Collection and Normalization
177
+
178
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
179
+
180
+ #### Who are the source language producers?
181
+
182
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
183
+
184
+ #### Human Verification Process
185
+
186
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
187
+
188
+ ## Considerations for Using the Data
189
+
190
+ ### Social Impact of Dataset
191
+
192
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
193
+
194
+ ### Discussion of Biases
195
+
196
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
197
+
198
+ ### Other Known Limitations
199
+
200
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
201
+
202
+ ### Dataset Curators
203
+
204
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
205
+
206
+ ### Licensing Information
207
+
208
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
209
+
210
+ ### Citation Information
211
+
212
+ If you use any of the datasets, models or code modules, please cite the following paper:
213
+
214
+ ```
215
+ @misc{https://doi.org/10.48550/arxiv.2204.08776,
216
+ doi = {10.48550/ARXIV.2204.08776},
217
+
218
+ url = {https://arxiv.org/abs/2204.08776},
219
+
220
+ author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
221
+
222
+ keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},
223
+
224
+ title = {IE-SemParse: Evaluating Multilingual Inference for Indian Languages},
225
+
226
+ publisher = {arXiv},
227
+
228
+ year = {2022},
229
+
230
+ copyright = {Creative Commons Attribution 4.0 International}
231
+ }
232
+ ```
233
+
234
+ <!-- ### Contributions -->
.history/README_20230707233553.md ADDED
@@ -0,0 +1,234 @@
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - machine-generated
6
+ language:
7
+ - as
8
+ - bn
9
+ - gu
10
+ - hi
11
+ - kn
12
+ - ml
13
+ - mr
14
+ - or
15
+ - pa
16
+ - ta
17
+ - te
18
+ license:
19
+ - cc0-1.0
20
+ multilinguality:
21
+ - multilingual
22
+ pretty_name: IE-SemParse
23
+ size_categories:
24
+ - 1M<n<10M
25
+ source_datasets:
26
+ - original
27
+ task_categories:
28
+ - text-classification
29
+ task_ids:
30
+ - Sema
31
+ ---
32
+
33
+ # Dataset Card for "IE-SemParse"
34
+
35
+ ## Table of Contents
36
+
37
+ - [Dataset Card for "IE-SemParse"](#dataset-card-for-ie-semparse)
38
+ - [Table of Contents](#table-of-contents)
39
+ - [Dataset Description](#dataset-description)
40
+ - [Dataset Summary](#dataset-summary)
41
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
42
+ - [Languages](#languages)
43
+ - [Dataset Structure](#dataset-structure)
44
+ - [Data Instances](#data-instances)
45
+ - [Data Fields](#data-fields)
46
+ - [Data Splits](#data-splits)
47
+ - [Dataset usage](#dataset-usage)
48
+ - [Dataset Creation](#dataset-creation)
49
+ - [Curation Rationale](#curation-rationale)
50
+ - [Source Data](#source-data)
51
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
52
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
53
+ - [Human Verification Process](#human-verification-process)
54
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
55
+ - [Social Impact of Dataset](#social-impact-of-dataset)
56
+ - [Discussion of Biases](#discussion-of-biases)
57
+ - [Other Known Limitations](#other-known-limitations)
58
+ - [Dataset Curators](#dataset-curators)
59
+ - [Licensing Information](#licensing-information)
60
+ - [Citation Information](#citation-information)
61
+
62
+ ## Dataset Description
63
+
64
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IE-SemParse>
65
+ - **Paper:** [Evaluating Inter-Bilingual Semantic Parsing for Indian Languages](https://arxiv.org/abs/2304.13005)
66
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
67
+
68
+ ### Dataset Summary
69
+
70
+ INDICXNLI is similar to the existing
71
+ XNLI dataset in shape/form, but focuses on the Indic language family. INDICXNLI includes NLI
72
+ data for eleven major Indic languages:
73
+ Assamese (‘as’), Gujarati (‘gu’), Kannada (‘kn’),
74
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
75
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
76
+ (‘hi’), and Bengali (‘bn’).
77
+
78
+ ### Supported Tasks and Leaderboards
79
+
80
+ **Tasks:** Natural Language Inference
81
+
82
+ **Leaderboards:** Currently there is no Leaderboard for this dataset.
83
+
84
+ ### Languages
85
+
86
+ - `Assamese (as)`
87
+ - `Bengali (bn)`
88
+ - `Gujarati (gu)`
89
+ - `Kannada (kn)`
90
+ - `Hindi (hi)`
91
+ - `Malayalam (ml)`
92
+ - `Marathi (mr)`
93
+ - `Oriya (or)`
94
+ - `Punjabi (pa)`
95
+ - `Tamil (ta)`
96
+ - `Telugu (te)`
97
+
98
+ ## Dataset Structure
99
+
100
+ ### Data Instances
101
+
102
+ One example from the `hi` dataset is given below in JSON format.
103
+
104
+ ```python
105
+ {'premise': 'अवधारणात्मक रूप से क्रीम स्किमिंग के दो बुनियादी आयाम हैं-उत्पाद और भूगोल।',
106
+ 'hypothesis': 'उत्पाद और भूगोल क्रीम स्किमिंग का काम करते हैं।',
107
+ 'label': 1 (neutral) }
108
+ ```
109
+
110
+ ### Data Fields
111
+
112
+ - `premise (string)`: Premise Sentence
113
+ - `hypothesis (string)`: Hypothesis Sentence
114
+ - `label (integer)`: Integer label `0` if hypothesis `entails` the premise, `2` if hypothesis `negates` the premise and `1` otherwise.
115
+
116
+ ### Data Splits
117
+
118
+ <!-- Below is the dataset split given for `hi` dataset.
119
+
120
+ ```python
121
+ DatasetDict({
122
+ train: Dataset({
123
+ features: ['premise', 'hypothesis', 'label'],
124
+ num_rows: 392702
125
+ })
126
+ test: Dataset({
127
+ features: ['premise', 'hypothesis', 'label'],
128
+ num_rows: 5010
129
+ })
130
+ validation: Dataset({
131
+ features: ['premise', 'hypothesis', 'label'],
132
+ num_rows: 2490
133
+ })
134
+ })
135
+
136
+ ``` -->
137
+
138
+ Language | ISO 639-1 Code |Train | Dev | Test |
139
+ --------------|----------------|-------|-----|------|
140
+ Assamese | as | 392,702 | 5,010 | 2,490 |
141
+ Bengali | bn | 392,702 | 5,010 | 2,490 |
142
+ Gujarati | gu | 392,702 | 5,010 | 2,490 |
143
+ Hindi | hi | 392,702 | 5,010 | 2,490 |
144
+ Kannada | kn | 392,702 | 5,010 | 2,490 |
145
+ Malayalam | ml |392,702 | 5,010 | 2,490 |
146
+ Marathi | mr |392,702 | 5,010 | 2,490 |
147
+ Oriya | or | 392,702 | 5,010 | 2,490 |
148
+ Punjabi | pa | 392,702 | 5,010 | 2,490 |
149
+ Tamil | ta | 392,702 | 5,010 | 2,490 |
150
+ Telugu | te | 392,702 | 5,010 | 2,490 |
151
+
152
+ <!-- The dataset split remains same across all languages. -->
153
+
154
+ ## Dataset usage
155
+
156
+ Code snippet for loading the dataset with the `datasets` library:
157
+
158
+ ```python
159
+ from datasets import load_dataset
160
+
161
+ dataset = load_dataset("Divyanshu/IE-SemParse")
162
+ ```
163
+
164
+ ## Dataset Creation
165
+
166
+ Machine translation of XNLI english dataset to 11 listed Indic Languages.
167
+
168
+ ### Curation Rationale
169
+
170
+ [More information needed]
171
+
172
+ ### Source Data
173
+
174
+ [XNLI dataset](https://cims.nyu.edu/~sbowman/xnli/)
175
+
176
+ #### Initial Data Collection and Normalization
177
+
178
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
179
+
180
+ #### Who are the source language producers?
181
+
182
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
183
+
184
+ #### Human Verification Process
185
+
186
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
187
+
188
+ ## Considerations for Using the Data
189
+
190
+ ### Social Impact of Dataset
191
+
192
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
193
+
194
+ ### Discussion of Biases
195
+
196
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
197
+
198
+ ### Other Known Limitations
199
+
200
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
201
+
202
+ ### Dataset Curators
203
+
204
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
205
+
206
+ ### Licensing Information
207
+
208
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
209
+
210
+ ### Citation Information
211
+
212
+ If you use any of the datasets, models or code modules, please cite the following paper:
213
+
214
+ ```
215
+ @misc{https://doi.org/10.48550/arxiv.2204.08776,
216
+ doi = {10.48550/ARXIV.2204.08776},
217
+
218
+ url = {https://arxiv.org/abs/2204.08776},
219
+
220
+ author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
221
+
222
+ keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},
223
+
224
+ title = {IE-SemParse: Evaluating Multilingual Inference for Indian Languages},
225
+
226
+ publisher = {arXiv},
227
+
228
+ year = {2022},
229
+
230
+ copyright = {Creative Commons Attribution 4.0 International}
231
+ }
232
+ ```
233
+
234
+ <!-- ### Contributions -->
.history/README_20230707233556.md ADDED
@@ -0,0 +1,234 @@
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - machine-generated
6
+ language:
7
+ - as
8
+ - bn
9
+ - gu
10
+ - hi
11
+ - kn
12
+ - ml
13
+ - mr
14
+ - or
15
+ - pa
16
+ - ta
17
+ - te
18
+ license:
19
+ - cc0-1.0
20
+ multilinguality:
21
+ - multilingual
22
+ pretty_name: IE-SemParse
23
+ size_categories:
24
+ - 1M<n<10M
25
+ source_datasets:
26
+ - original
27
+ task_categories:
28
+ - text-classification
29
+ task_ids:
30
+ - Semantic
31
+ ---
32
+
33
+ # Dataset Card for "IE-SemParse"
34
+
35
+ ## Table of Contents
36
+
37
+ - [Dataset Card for "IE-SemParse"](#dataset-card-for-ie-semparse)
38
+ - [Table of Contents](#table-of-contents)
39
+ - [Dataset Description](#dataset-description)
40
+ - [Dataset Summary](#dataset-summary)
41
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
42
+ - [Languages](#languages)
43
+ - [Dataset Structure](#dataset-structure)
44
+ - [Data Instances](#data-instances)
45
+ - [Data Fields](#data-fields)
46
+ - [Data Splits](#data-splits)
47
+ - [Dataset usage](#dataset-usage)
48
+ - [Dataset Creation](#dataset-creation)
49
+ - [Curation Rationale](#curation-rationale)
50
+ - [Source Data](#source-data)
51
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
52
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
53
+ - [Human Verification Process](#human-verification-process)
54
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
55
+ - [Social Impact of Dataset](#social-impact-of-dataset)
56
+ - [Discussion of Biases](#discussion-of-biases)
57
+ - [Other Known Limitations](#other-known-limitations)
58
+ - [Dataset Curators](#dataset-curators)
59
+ - [Licensing Information](#licensing-information)
60
+ - [Citation Information](#citation-information)
61
+
62
+ ## Dataset Description
63
+
64
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IE-SemParse>
65
+ - **Paper:** [Evaluating Inter-Bilingual Semantic Parsing for Indian Languages](https://arxiv.org/abs/2304.13005)
66
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
67
+
68
+ ### Dataset Summary
69
+
70
+ INDICXNLI is similar to the existing
71
+ XNLI dataset in shape/form, but focuses on the Indic language family. INDICXNLI includes NLI
72
+ data for eleven major Indic languages:
73
+ Assamese (‘as’), Gujarati (‘gu’), Kannada (‘kn’),
74
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
75
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
76
+ (‘hi’), and Bengali (‘bn’).
77
+
78
+ ### Supported Tasks and Leaderboards
79
+
80
+ **Tasks:** Natural Language Inference
81
+
82
+ **Leaderboards:** Currently there is no Leaderboard for this dataset.
83
+
84
+ ### Languages
85
+
86
+ - `Assamese (as)`
87
+ - `Bengali (bn)`
88
+ - `Gujarati (gu)`
89
+ - `Kannada (kn)`
90
+ - `Hindi (hi)`
91
+ - `Malayalam (ml)`
92
+ - `Marathi (mr)`
93
+ - `Oriya (or)`
94
+ - `Punjabi (pa)`
95
+ - `Tamil (ta)`
96
+ - `Telugu (te)`
97
+
98
+ ## Dataset Structure
99
+
100
+ ### Data Instances
101
+
102
+ One example from the `hi` dataset is given below in JSON format.
103
+
104
+ ```python
105
+ {'premise': 'अवधारणात्मक रूप से क्रीम स्किमिंग के दो बुनियादी आयाम हैं-उत्पाद और भूगोल।',
106
+ 'hypothesis': 'उत्पाद और भूगोल क्रीम स्किमिंग का काम करते हैं।',
107
+ 'label': 1 (neutral) }
108
+ ```
109
+
110
+ ### Data Fields
111
+
112
+ - `premise (string)`: Premise Sentence
113
+ - `hypothesis (string)`: Hypothesis Sentence
114
+ - `label (integer)`: Integer label `0` if hypothesis `entails` the premise, `2` if hypothesis `negates` the premise and `1` otherwise.
115
+
116
+ ### Data Splits
117
+
118
+ <!-- Below is the dataset split given for `hi` dataset.
119
+
120
+ ```python
121
+ DatasetDict({
122
+ train: Dataset({
123
+ features: ['premise', 'hypothesis', 'label'],
124
+ num_rows: 392702
125
+ })
126
+ test: Dataset({
127
+ features: ['premise', 'hypothesis', 'label'],
128
+ num_rows: 5010
129
+ })
130
+ validation: Dataset({
131
+ features: ['premise', 'hypothesis', 'label'],
132
+ num_rows: 2490
133
+ })
134
+ })
135
+
136
+ ``` -->
137
+
138
+ Language | ISO 639-1 Code |Train | Dev | Test |
139
+ --------------|----------------|-------|-----|------|
140
+ Assamese | as | 392,702 | 5,010 | 2,490 |
141
+ Bengali | bn | 392,702 | 5,010 | 2,490 |
142
+ Gujarati | gu | 392,702 | 5,010 | 2,490 |
143
+ Hindi | hi | 392,702 | 5,010 | 2,490 |
144
+ Kannada | kn | 392,702 | 5,010 | 2,490 |
145
+ Malayalam | ml |392,702 | 5,010 | 2,490 |
146
+ Marathi | mr |392,702 | 5,010 | 2,490 |
147
+ Oriya | or | 392,702 | 5,010 | 2,490 |
148
+ Punjabi | pa | 392,702 | 5,010 | 2,490 |
149
+ Tamil | ta | 392,702 | 5,010 | 2,490 |
150
+ Telugu | te | 392,702 | 5,010 | 2,490 |
151
+
152
+ <!-- The dataset split remains same across all languages. -->
153
+
154
+ ## Dataset usage
155
+
156
+ Code snippet for loading the dataset with the `datasets` library:
157
+
158
+ ```python
159
+ from datasets import load_dataset
160
+
161
+ dataset = load_dataset("Divyanshu/IE-SemParse")
162
+ ```
163
+
164
+ ## Dataset Creation
165
+
166
+ Machine translation of XNLI english dataset to 11 listed Indic Languages.
167
+
168
+ ### Curation Rationale
169
+
170
+ [More information needed]
171
+
172
+ ### Source Data
173
+
174
+ [XNLI dataset](https://cims.nyu.edu/~sbowman/xnli/)
175
+
176
+ #### Initial Data Collection and Normalization
177
+
178
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
179
+
180
+ #### Who are the source language producers?
181
+
182
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
183
+
184
+ #### Human Verification Process
185
+
186
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
187
+
188
+ ## Considerations for Using the Data
189
+
190
+ ### Social Impact of Dataset
191
+
192
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
193
+
194
+ ### Discussion of Biases
195
+
196
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
197
+
198
+ ### Other Known Limitations
199
+
200
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
201
+
202
+ ### Dataset Curators
203
+
204
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
205
+
206
+ ### Licensing Information
207
+
208
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
209
+
210
+ ### Citation Information
211
+
212
+ If you use any of the datasets, models or code modules, please cite the following paper:
213
+
214
+ ```
215
+ @misc{https://doi.org/10.48550/arxiv.2204.08776,
216
+ doi = {10.48550/ARXIV.2204.08776},
217
+
218
+ url = {https://arxiv.org/abs/2204.08776},
219
+
220
+ author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
221
+
222
+ keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},
223
+
224
+ title = {IE-SemParse: Evaluating Multilingual Inference for Indian Languages},
225
+
226
+ publisher = {arXiv},
227
+
228
+ year = {2022},
229
+
230
+ copyright = {Creative Commons Attribution 4.0 International}
231
+ }
232
+ ```
233
+
234
+ <!-- ### Contributions -->
.history/README_20230707233559.md ADDED
@@ -0,0 +1,234 @@
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - machine-generated
6
+ language:
7
+ - as
8
+ - bn
9
+ - gu
10
+ - hi
11
+ - kn
12
+ - ml
13
+ - mr
14
+ - or
15
+ - pa
16
+ - ta
17
+ - te
18
+ license:
19
+ - cc0-1.0
20
+ multilinguality:
21
+ - multilingual
22
+ pretty_name: IE-SemParse
23
+ size_categories:
24
+ - 1M<n<10M
25
+ source_datasets:
26
+ - original
27
+ task_categories:
28
+ - text-classification
29
+ task_ids:
30
+ - Semantic-Parsing
31
+ ---
32
+
33
+ # Dataset Card for "IE-SemParse"
34
+
35
+ ## Table of Contents
36
+
37
+ - [Dataset Card for "IE-SemParse"](#dataset-card-for-ie-semparse)
38
+ - [Table of Contents](#table-of-contents)
39
+ - [Dataset Description](#dataset-description)
40
+ - [Dataset Summary](#dataset-summary)
41
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
42
+ - [Languages](#languages)
43
+ - [Dataset Structure](#dataset-structure)
44
+ - [Data Instances](#data-instances)
45
+ - [Data Fields](#data-fields)
46
+ - [Data Splits](#data-splits)
47
+ - [Dataset usage](#dataset-usage)
48
+ - [Dataset Creation](#dataset-creation)
49
+ - [Curation Rationale](#curation-rationale)
50
+ - [Source Data](#source-data)
51
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
52
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
53
+ - [Human Verification Process](#human-verification-process)
54
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
55
+ - [Social Impact of Dataset](#social-impact-of-dataset)
56
+ - [Discussion of Biases](#discussion-of-biases)
57
+ - [Other Known Limitations](#other-known-limitations)
58
+ - [Dataset Curators](#dataset-curators)
59
+ - [Licensing Information](#licensing-information)
60
+ - [Citation Information](#citation-information)
61
+
62
+ ## Dataset Description
63
+
64
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IE-SemParse>
65
+ - **Paper:** [Evaluating Inter-Bilingual Semantic Parsing for Indian Languages](https://arxiv.org/abs/2304.13005)
66
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
67
+
68
+ ### Dataset Summary
69
+
70
+ INDICXNLI is similar to the existing
71
+ XNLI dataset in shape/form, but focuses on the Indic language family. INDICXNLI includes NLI
72
+ data for eleven major Indic languages:
73
+ Assamese (‘as’), Gujarati (‘gu’), Kannada (‘kn’),
74
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
75
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
76
+ (‘hi’), and Bengali (‘bn’).
77
+
78
+ ### Supported Tasks and Leaderboards
79
+
80
+ **Tasks:** Natural Language Inference
81
+
82
+ **Leaderboards:** Currently there is no Leaderboard for this dataset.
83
+
84
+ ### Languages
85
+
86
+ - `Assamese (as)`
87
+ - `Bengali (bn)`
88
+ - `Gujarati (gu)`
89
+ - `Kannada (kn)`
90
+ - `Hindi (hi)`
91
+ - `Malayalam (ml)`
92
+ - `Marathi (mr)`
93
+ - `Oriya (or)`
94
+ - `Punjabi (pa)`
95
+ - `Tamil (ta)`
96
+ - `Telugu (te)`
97
+
98
+ ## Dataset Structure
99
+
100
+ ### Data Instances
101
+
102
+ One example from the `hi` dataset is given below in JSON format.
103
+
104
+ ```python
105
+ {'premise': 'अवधारणात्मक रूप से क्रीम स्किमिंग के दो बुनियादी आयाम हैं-उत्पाद और भूगोल।',
106
+ 'hypothesis': 'उत्पाद और भूगोल क्रीम स्किमिंग का काम करते हैं।',
107
+ 'label': 1 (neutral) }
108
+ ```
109
+
110
+ ### Data Fields
111
+
112
+ - `premise (string)`: Premise Sentence
113
+ - `hypothesis (string)`: Hypothesis Sentence
114
+ - `label (integer)`: Integer label `0` if hypothesis `entails` the premise, `2` if hypothesis `negates` the premise and `1` otherwise.
115
+
116
+ ### Data Splits
117
+
118
+ <!-- Below is the dataset split given for `hi` dataset.
119
+
120
+ ```python
121
+ DatasetDict({
122
+ train: Dataset({
123
+ features: ['premise', 'hypothesis', 'label'],
124
+ num_rows: 392702
125
+ })
126
+ test: Dataset({
127
+ features: ['premise', 'hypothesis', 'label'],
128
+ num_rows: 5010
129
+ })
130
+ validation: Dataset({
131
+ features: ['premise', 'hypothesis', 'label'],
132
+ num_rows: 2490
133
+ })
134
+ })
135
+
136
+ ``` -->
137
+
138
+ Language | ISO 639-1 Code |Train | Dev | Test |
139
+ --------------|----------------|-------|-----|------|
140
+ Assamese | as | 392,702 | 5,010 | 2,490 |
141
+ Bengali | bn | 392,702 | 5,010 | 2,490 |
142
+ Gujarati | gu | 392,702 | 5,010 | 2,490 |
143
+ Hindi | hi | 392,702 | 5,010 | 2,490 |
144
+ Kannada | kn | 392,702 | 5,010 | 2,490 |
145
+ Malayalam | ml |392,702 | 5,010 | 2,490 |
146
+ Marathi | mr |392,702 | 5,010 | 2,490 |
147
+ Oriya | or | 392,702 | 5,010 | 2,490 |
148
+ Punjabi | pa | 392,702 | 5,010 | 2,490 |
149
+ Tamil | ta | 392,702 | 5,010 | 2,490 |
150
+ Telugu | te | 392,702 | 5,010 | 2,490 |
151
+
152
+ <!-- The dataset split remains same across all languages. -->
153
+
154
+ ## Dataset usage
155
+
156
+ Code snippet for loading the dataset with the `datasets` library:
157
+
158
+ ```python
159
+ from datasets import load_dataset
160
+
161
+ dataset = load_dataset("Divyanshu/IE-SemParse")
162
+ ```
163
+
164
+ ## Dataset Creation
165
+
166
+ Machine translation of XNLI english dataset to 11 listed Indic Languages.
167
+
168
+ ### Curation Rationale
169
+
170
+ [More information needed]
171
+
172
+ ### Source Data
173
+
174
+ [XNLI dataset](https://cims.nyu.edu/~sbowman/xnli/)
175
+
176
+ #### Initial Data Collection and Normalization
177
+
178
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
179
+
180
+ #### Who are the source language producers?
181
+
182
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
183
+
184
+ #### Human Verification Process
185
+
186
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
187
+
188
+ ## Considerations for Using the Data
189
+
190
+ ### Social Impact of Dataset
191
+
192
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
193
+
194
+ ### Discussion of Biases
195
+
196
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
197
+
198
+ ### Other Known Limitations
199
+
200
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
201
+
202
+ ### Dataset Curators
203
+
204
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
205
+
206
+ ### Licensing Information
207
+
208
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
209
+
210
+ ### Citation Information
211
+
212
+ If you use any of the datasets, models or code modules, please cite the following paper:
213
+
214
+ ```
215
+ @misc{https://doi.org/10.48550/arxiv.2204.08776,
216
+ doi = {10.48550/ARXIV.2204.08776},
217
+
218
+ url = {https://arxiv.org/abs/2204.08776},
219
+
220
+ author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
221
+
222
+ keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},
223
+
224
+ title = {IE-SemParse: Evaluating Multilingual Inference for Indian Languages},
225
+
226
+ publisher = {arXiv},
227
+
228
+ year = {2022},
229
+
230
+ copyright = {Creative Commons Attribution 4.0 International}
231
+ }
232
+ ```
233
+
234
+ <!-- ### Contributions -->
.history/README_20230707233606.md ADDED
@@ -0,0 +1,234 @@
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - machine-generated
6
+ language:
7
+ - as
8
+ - bn
9
+ - gu
10
+ - hi
11
+ - kn
12
+ - ml
13
+ - mr
14
+ - or
15
+ - pa
16
+ - ta
17
+ - te
18
+ license:
19
+ - cc0-1.0
20
+ multilinguality:
21
+ - multilingual
22
+ pretty_name: IE-SemParse
23
+ size_categories:
24
+ - 1M<n<10M
25
+ source_datasets:
26
+ - original
27
+ task_categories:
28
+ - text2text
29
+ task_ids:
30
+ - Semantic-Parsing
31
+ ---
32
+
33
+ # Dataset Card for "IE-SemParse"
34
+
35
+ ## Table of Contents
36
+
37
+ - [Dataset Card for "IE-SemParse"](#dataset-card-for-ie-semparse)
38
+ - [Table of Contents](#table-of-contents)
39
+ - [Dataset Description](#dataset-description)
40
+ - [Dataset Summary](#dataset-summary)
41
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
42
+ - [Languages](#languages)
43
+ - [Dataset Structure](#dataset-structure)
44
+ - [Data Instances](#data-instances)
45
+ - [Data Fields](#data-fields)
46
+ - [Data Splits](#data-splits)
47
+ - [Dataset usage](#dataset-usage)
48
+ - [Dataset Creation](#dataset-creation)
49
+ - [Curation Rationale](#curation-rationale)
50
+ - [Source Data](#source-data)
51
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
52
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
53
+ - [Human Verification Process](#human-verification-process)
54
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
55
+ - [Social Impact of Dataset](#social-impact-of-dataset)
56
+ - [Discussion of Biases](#discussion-of-biases)
57
+ - [Other Known Limitations](#other-known-limitations)
58
+ - [Dataset Curators](#dataset-curators)
59
+ - [Licensing Information](#licensing-information)
60
+ - [Citation Information](#citation-information)
61
+
62
+ ## Dataset Description
63
+
64
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IE-SemParse>
65
+ - **Paper:** [Evaluating Inter-Bilingual Semantic Parsing for Indian Languages](https://arxiv.org/abs/2304.13005)
66
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
67
+
68
+ ### Dataset Summary
69
+
70
+ INDICXNLI is similar to the existing
71
+ XNLI dataset in shape/form, but focuses on the Indic language family. INDICXNLI includes NLI
72
+ data for eleven major Indic languages:
73
+ Assamese (‘as’), Gujarati (‘gu’), Kannada (‘kn’),
74
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
75
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
76
+ (‘hi’), and Bengali (‘bn’).
77
+
78
+ ### Supported Tasks and Leaderboards
79
+
80
+ **Tasks:** Natural Language Inference
81
+
82
+ **Leaderboards:** Currently there is no Leaderboard for this dataset.
83
+
84
+ ### Languages
85
+
86
+ - `Assamese (as)`
87
+ - `Bengali (bn)`
88
+ - `Gujarati (gu)`
89
+ - `Kannada (kn)`
90
+ - `Hindi (hi)`
91
+ - `Malayalam (ml)`
92
+ - `Marathi (mr)`
93
+ - `Oriya (or)`
94
+ - `Punjabi (pa)`
95
+ - `Tamil (ta)`
96
+ - `Telugu (te)`
97
+
98
+ ## Dataset Structure
99
+
100
+ ### Data Instances
101
+
102
+ One example from the `hi` dataset is given below in JSON format.
103
+
104
+ ```python
105
+ {'premise': 'अवधारणात्मक रूप से क्रीम स्किमिंग के दो बुनियादी आयाम हैं-उत्पाद और भूगोल।',
106
+ 'hypothesis': 'उत्पाद और भूगोल क्रीम स्किमिंग का काम करते हैं।',
107
+ 'label': 1 (neutral) }
108
+ ```
109
+
110
+ ### Data Fields
111
+
112
+ - `premise (string)`: Premise Sentence
113
+ - `hypothesis (string)`: Hypothesis Sentence
114
+ - `label (integer)`: Integer label `0` if hypothesis `entails` the premise, `2` if hypothesis `negates` the premise and `1` otherwise.
115
+
116
+ ### Data Splits
117
+
118
+ <!-- Below is the dataset split given for `hi` dataset.
119
+
120
+ ```python
121
+ DatasetDict({
122
+ train: Dataset({
123
+ features: ['premise', 'hypothesis', 'label'],
124
+ num_rows: 392702
125
+ })
126
+ test: Dataset({
127
+ features: ['premise', 'hypothesis', 'label'],
128
+ num_rows: 5010
129
+ })
130
+ validation: Dataset({
131
+ features: ['premise', 'hypothesis', 'label'],
132
+ num_rows: 2490
133
+ })
134
+ })
135
+
136
+ ``` -->
137
+
138
+ Language | ISO 639-1 Code |Train | Dev | Test |
139
+ --------------|----------------|-------|-----|------|
140
+ Assamese | as | 392,702 | 5,010 | 2,490 |
141
+ Bengali | bn | 392,702 | 5,010 | 2,490 |
142
+ Gujarati | gu | 392,702 | 5,010 | 2,490 |
143
+ Hindi | hi | 392,702 | 5,010 | 2,490 |
144
+ Kannada | kn | 392,702 | 5,010 | 2,490 |
145
+ Malayalam | ml |392,702 | 5,010 | 2,490 |
146
+ Marathi | mr |392,702 | 5,010 | 2,490 |
147
+ Oriya | or | 392,702 | 5,010 | 2,490 |
148
+ Punjabi | pa | 392,702 | 5,010 | 2,490 |
149
+ Tamil | ta | 392,702 | 5,010 | 2,490 |
150
+ Telugu | te | 392,702 | 5,010 | 2,490 |
151
+
152
+ <!-- The dataset split remains same across all languages. -->
153
+
154
+ ## Dataset usage
155
+
156
+ Code snippet for loading the dataset with the `datasets` library:
157
+
158
+ ```python
159
+ from datasets import load_dataset
160
+
161
+ dataset = load_dataset("Divyanshu/IE-SemParse")
162
+ ```
163
+
164
+ ## Dataset Creation
165
+
166
+ Machine translation of XNLI english dataset to 11 listed Indic Languages.
167
+
168
+ ### Curation Rationale
169
+
170
+ [More information needed]
171
+
172
+ ### Source Data
173
+
174
+ [XNLI dataset](https://cims.nyu.edu/~sbowman/xnli/)
175
+
176
+ #### Initial Data Collection and Normalization
177
+
178
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
179
+
180
+ #### Who are the source language producers?
181
+
182
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
183
+
184
+ #### Human Verification Process
185
+
186
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
187
+
188
+ ## Considerations for Using the Data
189
+
190
+ ### Social Impact of Dataset
191
+
192
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
193
+
194
+ ### Discussion of Biases
195
+
196
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
197
+
198
+ ### Other Known Limitations
199
+
200
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
201
+
202
+ ### Dataset Curators
203
+
204
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
205
+
206
+ ### Licensing Information
207
+
208
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
209
+
210
+ ### Citation Information
211
+
212
+ If you use any of the datasets, models or code modules, please cite the following paper:
213
+
214
+ ```
215
+ @misc{https://doi.org/10.48550/arxiv.2204.08776,
216
+ doi = {10.48550/ARXIV.2204.08776},
217
+
218
+ url = {https://arxiv.org/abs/2204.08776},
219
+
220
+ author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
221
+
222
+ keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},
223
+
224
+ title = {IE-SemParse: Evaluating Multilingual Inference for Indian Languages},
225
+
226
+ publisher = {arXiv},
227
+
228
+ year = {2022},
229
+
230
+ copyright = {Creative Commons Attribution 4.0 International}
231
+ }
232
+ ```
233
+
234
+ <!-- ### Contributions -->
.history/README_20230707233630.md ADDED
@@ -0,0 +1,234 @@
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - machine-generated
6
+ language:
7
+ - as
8
+ - bn
9
+ - gu
10
+ - hi
11
+ - kn
12
+ - ml
13
+ - mr
14
+ - or
15
+ - pa
16
+ - ta
17
+ - te
18
+ license:
19
+ - cc0-1.0
20
+ multilinguality:
21
+ - multilingual
22
+ pretty_name: IE-SemParse
23
+ size_categories:
24
+ - 1M<n<10M
25
+ source_datasets:
26
+ - original
27
+ task_categories:
28
+ - text2text
29
+ task_ids:
30
+ - Semantic-Parsing
31
+ ---
32
+
33
+ # Dataset Card for "IE-SemParse"
34
+
35
+ ## Table of Contents
36
+
37
+ - [Dataset Card for "IE-SemParse"](#dataset-card-for-ie-semparse)
38
+ - [Table of Contents](#table-of-contents)
39
+ - [Dataset Description](#dataset-description)
40
+ - [Dataset Summary](#dataset-summary)
41
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
42
+ - [Languages](#languages)
43
+ - [Dataset Structure](#dataset-structure)
44
+ - [Data Instances](#data-instances)
45
+ - [Data Fields](#data-fields)
46
+ - [Data Splits](#data-splits)
47
+ - [Dataset usage](#dataset-usage)
48
+ - [Dataset Creation](#dataset-creation)
49
+ - [Curation Rationale](#curation-rationale)
50
+ - [Source Data](#source-data)
51
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
52
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
53
+ - [Human Verification Process](#human-verification-process)
54
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
55
+ - [Social Impact of Dataset](#social-impact-of-dataset)
56
+ - [Discussion of Biases](#discussion-of-biases)
57
+ - [Other Known Limitations](#other-known-limitations)
58
+ - [Dataset Curators](#dataset-curators)
59
+ - [Licensing Information](#licensing-information)
60
+ - [Citation Information](#citation-information)
61
+
62
+ ## Dataset Description
63
+
64
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IE-SemParse>
65
+ - **Paper:** [Evaluating Inter-Bilingual Semantic Parsing for Indian Languages](https://arxiv.org/abs/2304.13005)
66
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
67
+
68
+ ### Dataset Summary
69
+
70
+ INDICXNLI is similar to the existing
71
+ XNLI dataset in shape/form, but focuses on the Indic language family. INDICXNLI includes NLI
72
+ data for eleven major Indic languages:
73
+ Assamese (‘as’), Gujarati (‘gu’), Kannada (‘kn’),
74
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
75
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
76
+ (‘hi’), and Bengali (‘bn’).
77
+
78
+ ### Supported Tasks and Leaderboards
79
+
80
+ **Tasks:** Natural Language Inference
81
+
82
+ **Leaderboards:** Currently there is no Leaderboard for this dataset.
83
+
84
+ ### Languages
85
+
86
+ - `Assamese (as)`
87
+ - `Bengali (bn)`
88
+ - `Gujarati (gu)`
89
+ - `Kannada (kn)`
90
+ - `Hindi (hi)`
91
+ - `Malayalam (ml)`
92
+ - `Marathi (mr)`
93
+ - `Oriya (or)`
94
+ - `Punjabi (pa)`
95
+ - `Tamil (ta)`
96
+ - `Telugu (te)`
97
+
98
+ ## Dataset Structure
99
+
100
+ ### Data Instances
101
+
102
+ One example from the `hi` dataset is given below in JSON format.
103
+
104
+ ```python
105
+ {'premise': 'अवधारणात्मक रूप से क्रीम स्किमिंग के दो बुनियादी आयाम हैं-उत्पाद और भूगोल।',
106
+ 'hypothesis': 'उत्पाद और भूगोल क्रीम स्किमिंग का काम करते हैं।',
107
+ 'label': 1 (neutral) }
108
+ ```
109
+
110
+ ### Data Fields
111
+
112
+ - `premise (string)`: Premise Sentence
113
+ - `hypothesis (string)`: Hypothesis Sentence
114
+ - `label (integer)`: Integer label `0` if hypothesis `entails` the premise, `2` if hypothesis `negates` the premise and `1` otherwise.
115
+
116
+ ### Data Splits
117
+
118
+ <!-- Below is the dataset split given for `hi` dataset.
119
+
120
+ ```python
121
+ DatasetDict({
122
+ train: Dataset({
123
+ features: ['premise', 'hypothesis', 'label'],
124
+ num_rows: 392702
125
+ })
126
+ test: Dataset({
127
+ features: ['premise', 'hypothesis', 'label'],
128
+ num_rows: 5010
129
+ })
130
+ validation: Dataset({
131
+ features: ['premise', 'hypothesis', 'label'],
132
+ num_rows: 2490
133
+ })
134
+ })
135
+
136
+ ``` -->
137
+
138
+ Language | ISO 639-1 Code |Train | Dev | Test |
139
+ --------------|----------------|-------|-----|------|
140
+ Assamese | as | 392,702 | 5,010 | 2,490 |
141
+ Bengali | bn | 392,702 | 5,010 | 2,490 |
142
+ Gujarati | gu | 392,702 | 5,010 | 2,490 |
143
+ Hindi | hi | 392,702 | 5,010 | 2,490 |
144
+ Kannada | kn | 392,702 | 5,010 | 2,490 |
145
+ Malayalam | ml |392,702 | 5,010 | 2,490 |
146
+ Marathi | mr |392,702 | 5,010 | 2,490 |
147
+ Oriya | or | 392,702 | 5,010 | 2,490 |
148
+ Punjabi | pa | 392,702 | 5,010 | 2,490 |
149
+ Tamil | ta | 392,702 | 5,010 | 2,490 |
150
+ Telugu | te | 392,702 | 5,010 | 2,490 |
151
+
152
+ <!-- The dataset split remains same across all languages. -->
153
+
154
+ ## Dataset usage
155
+
156
+ Code snippet for loading the dataset with the `datasets` library:
157
+
158
+ ```python
159
+ from datasets import load_dataset
160
+
161
+ dataset = load_dataset("Divyanshu/IE-SemParse")
162
+ ```
163
+
164
+ ## Dataset Creation
165
+
166
+ Machine translation of XNLI english dataset to 11 listed Indic Languages.
167
+
168
+ ### Curation Rationale
169
+
170
+ [More information needed]
171
+
172
+ ### Source Data
173
+
174
+ [XNLI dataset](https://cims.nyu.edu/~sbowman/xnli/)
175
+
176
+ #### Initial Data Collection and Normalization
177
+
178
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
179
+
180
+ #### Who are the source language producers?
181
+
182
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
183
+
184
+ #### Human Verification Process
185
+
186
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
187
+
188
+ ## Considerations for Using the Data
189
+
190
+ ### Social Impact of Dataset
191
+
192
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
193
+
194
+ ### Discussion of Biases
195
+
196
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
197
+
198
+ ### Other Known Limitations
199
+
200
+ [Detailed in the paper](https://arxiv.org/abs/2204.08776)
201
+
202
+ ### Dataset Curators
203
+
204
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
205
+
206
+ ### Licensing Information
207
+
208
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
209
+
210
+ ### Citation Information
211
+
212
+ If you use any of the datasets, models or code modules, please cite the following paper:
213
+
214
+ ```
215
+ @misc{https://doi.org/10.48550/arxiv.2204.08776,
216
+ doi = {10.48550/ARXIV.2204.08776},
217
+
218
+ url = {https://arxiv.org/abs/2204.08776},
219
+
220
+ author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
221
+
222
+ keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},
223
+
224
+ title = {IE-SemParse: Evaluating Multilingual Inference for Indian Languages},
225
+
226
+ publisher = {arXiv},
227
+
228
+ year = {2022},
229
+
230
+ copyright = {Creative Commons Attribution 4.0 International}
231
+ }
232
+ ```
233
+
234
+ <!-- ### Contributions -->
.history/README_20230707233641.md ADDED
@@ -0,0 +1,234 @@
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - machine-generated
6
+ language:
7
+ - as
8
+ - bn
9
+ - gu
10
+ - hi
11
+ - kn
12
+ - ml
13
+ - mr
14
+ - or
15
+ - pa
16
+ - ta
17
+ - te
18
+ license:
19
+ - cc0-1.0
20
+ multilinguality:
21
+ - multilingual
22
+ pretty_name: IE-SemParse
23
+ size_categories:
24
+ - 1M<n<10M
25
+ source_datasets:
26
+ - original
27
+ task_categories:
28
+ - text2text
29
+ task_ids:
30
+ - Semantic-Parsing
31
+ ---
32
+
33
+ # Dataset Card for "IE-SemParse"
34
+
35
+ ## Table of Contents
36
+
37
+ - [Dataset Card for "IE-SemParse"](#dataset-card-for-ie-semparse)
38
+ - [Table of Contents](#table-of-contents)
39
+ - [Dataset Description](#dataset-description)
40
+ - [Dataset Summary](#dataset-summary)
41
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
42
+ - [Languages](#languages)
43
+ - [Dataset Structure](#dataset-structure)
44
+ - [Data Instances](#data-instances)
45
+ - [Data Fields](#data-fields)
46
+ - [Data Splits](#data-splits)
47
+ - [Dataset usage](#dataset-usage)
48
+ - [Dataset Creation](#dataset-creation)
49
+ - [Curation Rationale](#curation-rationale)
50
+ - [Source Data](#source-data)
51
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
52
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
53
+ - [Human Verification Process](#human-verification-process)
54
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
55
+ - [Social Impact of Dataset](#social-impact-of-dataset)
56
+ - [Discussion of Biases](#discussion-of-biases)
57
+ - [Other Known Limitations](#other-known-limitations)
58
+ - [Dataset Curators](#dataset-curators)
59
+ - [Licensing Information](#licensing-information)
60
+ - [Citation Information](#citation-information)
61
+
62
+ ## Dataset Description
63
+
64
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IE-SemParse>
65
+ - **Paper:** [Evaluating Inter-Bilingual Semantic Parsing for Indian Languages](https://arxiv.org/abs/2304.13005)
66
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
67
+
68
+ ### Dataset Summary
69
+
70
+ INDICXNLI is similar to the existing
71
+ XNLI dataset in shape/form, but focuses on the Indic language family. INDICXNLI includes NLI
72
+ data for eleven major Indic languages:
73
+ Assamese (‘as’), Gujarati (‘gu’), Kannada (‘kn’),
74
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
75
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
76
+ (‘hi’), and Bengali (‘bn’).
77
+
78
+ ### Supported Tasks and Leaderboards
79
+
80
+ **Tasks:** Natural Language Inference
81
+
82
+ **Leaderboards:** Currently there is no Leaderboard for this dataset.
83
+
84
+ ### Languages
85
+
86
+ - `Assamese (as)`
87
+ - `Bengali (bn)`
88
+ - `Gujarati (gu)`
89
+ - `Kannada (kn)`
90
+ - `Hindi (hi)`
91
+ - `Malayalam (ml)`
92
+ - `Marathi (mr)`
93
+ - `Oriya (or)`
94
+ - `Punjabi (pa)`
95
+ - `Tamil (ta)`
96
+ - `Telugu (te)`
97
+
98
+ ## Dataset Structure
99
+
100
+ ### Data Instances
101
+
102
+ One example from the `hi` dataset is given below in JSON format.
103
+
104
+ ```python
105
+ {'premise': 'अवधारणात्मक रूप से क्रीम स्किमिंग के दो बुनियादी आयाम हैं-उत्पाद और भूगोल।',
106
+ 'hypothesis': 'उत्पाद और भूगोल क्रीम स्किमिंग का काम करते हैं।',
107
+ 'label': 1 (neutral) }
108
+ ```
109
+
110
+ ### Data Fields
111
+
112
+ - `premise (string)`: Premise Sentence
113
+ - `hypothesis (string)`: Hypothesis Sentence
114
+ - `label (integer)`: Integer label `0` if hypothesis `entails` the premise, `2` if hypothesis `negates` the premise and `1` otherwise.
115
+
116
+ ### Data Splits
117
+
118
+ <!-- Below is the dataset split given for `hi` dataset.
119
+
120
+ ```python
121
+ DatasetDict({
122
+ train: Dataset({
123
+ features: ['premise', 'hypothesis', 'label'],
124
+ num_rows: 392702
125
+ })
126
+ test: Dataset({
127
+ features: ['premise', 'hypothesis', 'label'],
128
+ num_rows: 5010
129
+ })
130
+ validation: Dataset({
131
+ features: ['premise', 'hypothesis', 'label'],
132
+ num_rows: 2490
133
+ })
134
+ })
135
+
136
+ ``` -->
137
+
138
+ Language | ISO 639-1 Code |Train | Dev | Test |
139
+ --------------|----------------|-------|-----|------|
140
+ Assamese | as | 392,702 | 5,010 | 2,490 |
141
+ Bengali | bn | 392,702 | 5,010 | 2,490 |
142
+ Gujarati | gu | 392,702 | 5,010 | 2,490 |
143
+ Hindi | hi | 392,702 | 5,010 | 2,490 |
144
+ Kannada | kn | 392,702 | 5,010 | 2,490 |
145
+ Malayalam | ml |392,702 | 5,010 | 2,490 |
146
+ Marathi | mr |392,702 | 5,010 | 2,490 |
147
+ Oriya | or | 392,702 | 5,010 | 2,490 |
148
+ Punjabi | pa | 392,702 | 5,010 | 2,490 |
149
+ Tamil | ta | 392,702 | 5,010 | 2,490 |
150
+ Telugu | te | 392,702 | 5,010 | 2,490 |
151
+
152
+ <!-- The dataset split remains same across all languages. -->
153
+
154
+ ## Dataset usage
155
+
156
+ Code snippet for loading the dataset with the `datasets` library:
157
+
158
+ ```python
159
+ from datasets import load_dataset
160
+
161
+ dataset = load_dataset("Divyanshu/IE-SemParse")
162
+ ```
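+ The object returned by `load_dataset` is a `DatasetDict`. A minimal sketch for inspecting it,
+ assuming the usual train/validation/test split names shown in this card:
+
+ ```python
+ from datasets import load_dataset
+
+ dataset = load_dataset("Divyanshu/IE-SemParse")
+
+ # Print the available splits with their row counts, then look at the
+ # first training example (a plain Python dict of field name -> value).
+ print(dataset)
+ print(dataset["train"][0])
+ ```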
163
+
164
+ ## Dataset Creation
165
+
166
+ Machine translation of XNLI english dataset to 11 listed Indic Languages.
167
+
168
+ ### Curation Rationale
169
+
170
+ [More information needed]
171
+
172
+ ### Source Data
173
+
174
+ [XNLI dataset](https://cims.nyu.edu/~sbowman/xnli/)
175
+
176
+ #### Initial Data Collection and Normalization
177
+
178
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
179
+
180
+ #### Who are the source language producers?
181
+
182
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
183
+
184
+ #### Human Verification Process
185
+
186
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
187
+
188
+ ## Considerations for Using the Data
189
+
190
+ ### Social Impact of Dataset
191
+
192
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
193
+
194
+ ### Discussion of Biases
195
+
196
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
197
+
198
+ ### Other Known Limitations
199
+
200
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
201
+
202
+ ### Dataset Curators
203
+
204
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
205
+
206
+ ### Licensing Information
207
+
208
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
209
+
210
+ ### Citation Information
211
+
212
+ If you use any of the datasets, models or code modules, please cite the following paper:
213
+
214
+ ```
215
+ @misc{https://doi.org/10.48550/arxiv.2304.13005,
216
+ doi = {10.48550/ARXIV.2304.13005},
217
+
218
+ url = {https://arxiv.org/abs/2304.13005},
219
+
220
+ author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
221
+
222
+ keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},
223
+
224
+ title = {Evaluating Inter-Bilingual Semantic Parsing for Indian Languages},
225
+
226
+ publisher = {arXiv},
227
+
228
+ year = {2023},
229
+
230
+ copyright = {Creative Commons Attribution 4.0 International}
231
+ }
232
+ ```
233
+
234
+ <!-- ### Contributions -->
.history/README_20230707233647.md ADDED
@@ -0,0 +1,234 @@
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - machine-generated
6
+ language:
7
+ - as
8
+ - bn
9
+ - gu
10
+ - hi
11
+ - kn
12
+ - ml
13
+ - mr
14
+ - or
15
+ - pa
16
+ - ta
17
+ - te
18
+ license:
19
+ - cc0-1.0
20
+ multilinguality:
21
+ - multilingual
22
+ pretty_name: IE-SemParse
23
+ size_categories:
24
+ - 1M<n<10M
25
+ source_datasets:
26
+ - original
27
+ task_categories:
28
+ - text2text
29
+ task_ids:
30
+ - Semantic-Parsing
31
+ ---
32
+
33
+ # Dataset Card for "IE-SemParse"
34
+
35
+ ## Table of Contents
36
+
37
+ - [Dataset Card for "IE-SemParse"](#dataset-card-for-ie-semparse)
38
+ - [Table of Contents](#table-of-contents)
39
+ - [Dataset Description](#dataset-description)
40
+ - [Dataset Summary](#dataset-summary)
41
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
42
+ - [Languages](#languages)
43
+ - [Dataset Structure](#dataset-structure)
44
+ - [Data Instances](#data-instances)
45
+ - [Data Fields](#data-fields)
46
+ - [Data Splits](#data-splits)
47
+ - [Dataset usage](#dataset-usage)
48
+ - [Dataset Creation](#dataset-creation)
49
+ - [Curation Rationale](#curation-rationale)
50
+ - [Source Data](#source-data)
51
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
52
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
53
+ - [Human Verification Process](#human-verification-process)
54
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
55
+ - [Social Impact of Dataset](#social-impact-of-dataset)
56
+ - [Discussion of Biases](#discussion-of-biases)
57
+ - [Other Known Limitations](#other-known-limitations)
58
+ - [Dataset Curators](#dataset-curators)
59
+ - [Licensing Information](#licensing-information)
60
+ - [Citation Information](#citation-information)
61
+
62
+ ## Dataset Description
63
+
64
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IE-SemParse>
65
+ - **Paper:** [Evaluating Inter-Bilingual Semantic Parsing for Indian Languages](https://arxiv.org/abs/2304.13005)
66
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
67
+
68
+ ### Dataset Summary
69
+
70
+ IE-SemParse is similar to the existing
71
+ XNLI dataset in shape/form, but focuses on the Indic language family. IE-SemParse includes
72
+ data for eleven major Indic languages:
73
+ Assamese (‘as’), Gujarati (‘gu’), Kannada (‘kn’),
74
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
75
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
76
+ (‘hi’), and Bengali (‘bn’).
77
+
78
+ ### Supported Tasks and Leaderboards
79
+
80
+ **Tasks:** Natural Language Inference
81
+
82
+ **Leaderboards:** Currently there is no Leaderboard for this dataset.
83
+
84
+ ### Languages
85
+
86
+ - `Assamese (as)`
87
+ - `Bengali (bn)`
88
+ - `Gujarati (gu)`
89
+ - `Kannada (kn)`
90
+ - `Hindi (hi)`
91
+ - `Malayalam (ml)`
92
+ - `Marathi (mr)`
93
+ - `Oriya (or)`
94
+ - `Punjabi (pa)`
95
+ - `Tamil (ta)`
96
+ - `Telugu (te)`
97
+
98
+ ## Dataset Structure
99
+
100
+ ### Data Instances
101
+
102
+ One example from the `hi` dataset is given below in JSON format.
103
+
104
+ ```python
105
+ {'premise': 'अवधारणात्मक रूप से क्रीम स्किमिंग के दो बुनियादी आयाम हैं-उत्पाद और भूगोल।',
106
+ 'hypothesis': 'उत्पाद और भूगोल क्रीम स्किमिंग का काम करते हैं।',
107
+ 'label': 1 (neutral) }
108
+ ```
109
+
110
+ ### Data Fields
111
+
112
+ - `premise (string)`: Premise Sentence
113
+ - `hypothesis (string)`: Hypothesis Sentence
114
+ - `label (integer)`: Integer label `0` if hypothesis `entails` the premise, `2` if hypothesis `negates` the premise and `1` otherwise.
115
+
116
+ ### Data Splits
117
+
118
+ <!-- Below is the dataset split given for `hi` dataset.
119
+
120
+ ```python
121
+ DatasetDict({
122
+ train: Dataset({
123
+ features: ['premise', 'hypothesis', 'label'],
124
+ num_rows: 392702
125
+ })
126
+ test: Dataset({
127
+ features: ['premise', 'hypothesis', 'label'],
128
+ num_rows: 5010
129
+ })
130
+ validation: Dataset({
131
+ features: ['premise', 'hypothesis', 'label'],
132
+ num_rows: 2490
133
+ })
134
+ })
135
+
136
+ ``` -->
137
+
138
+ Language | ISO 639-1 Code |Train | Dev | Test |
139
+ --------------|----------------|-------|-----|------|
140
+ Assamese | as | 392,702 | 5,010 | 2,490 |
141
+ Bengali | bn | 392,702 | 5,010 | 2,490 |
142
+ Gujarati | gu | 392,702 | 5,010 | 2,490 |
143
+ Hindi | hi | 392,702 | 5,010 | 2,490 |
144
+ Kannada | kn | 392,702 | 5,010 | 2,490 |
145
+ Malayalam | ml |392,702 | 5,010 | 2,490 |
146
+ Marathi | mr |392,702 | 5,010 | 2,490 |
147
+ Oriya | or | 392,702 | 5,010 | 2,490 |
148
+ Punjabi | pa | 392,702 | 5,010 | 2,490 |
149
+ Tamil | ta | 392,702 | 5,010 | 2,490 |
150
+ Telugu | te | 392,702 | 5,010 | 2,490 |
151
+
152
+ <!-- The dataset split remains same across all languages. -->
153
+
154
+ ## Dataset usage
155
+
156
+ Code snippet for loading the dataset with the `datasets` library.
157
+
158
+ ```python
159
+ from datasets import load_dataset
160
+
161
+ dataset = load_dataset("Divyanshu/IE-SemParse")
162
+ ```
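+
+ The repository exposes multiple configurations (one per source dataset and Indic language, per the accompanying loading script). The following is a minimal sketch, assuming configurations are discovered at runtime rather than hard-coded; the concrete configuration names are not guaranteed here and should be taken from `get_dataset_config_names`.
+
+ ```python
+ from datasets import get_dataset_config_names, load_dataset
+
+ # List the available configurations (source-dataset / language pairs) exposed by the repo.
+ configs = get_dataset_config_names("Divyanshu/IE-SemParse")
+ print(configs)
+
+ # Load the first configuration as an illustration; pick the pair you need in practice.
+ dataset = load_dataset("Divyanshu/IE-SemParse", configs[0])
+ print(dataset)              # available splits and row counts
+ print(dataset["train"][0])  # one example, assuming a "train" split is present
+ ```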
163
+
164
+ ## Dataset Creation
165
+
166
+ Machine translation of the English source semantic parsing datasets into the 11 listed Indic languages.
167
+
168
+ ### Curation Rationale
169
+
170
+ [More information needed]
171
+
172
+ ### Source Data
173
+
174
+ English semantic parsing datasets ([detailed in the paper](https://arxiv.org/abs/2304.13005))
175
+
176
+ #### Initial Data Collection and Normalization
177
+
178
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
179
+
180
+ #### Who are the source language producers?
181
+
182
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
183
+
184
+ #### Human Verification Process
185
+
186
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
187
+
188
+ ## Considerations for Using the Data
189
+
190
+ ### Social Impact of Dataset
191
+
192
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
193
+
194
+ ### Discussion of Biases
195
+
196
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
197
+
198
+ ### Other Known Limitations
199
+
200
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
201
+
202
+ ### Dataset Curators
203
+
204
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
205
+
206
+ ### Licensing Information
207
+
208
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
209
+
210
+ ### Citation Information
211
+
212
+ If you use any of the datasets, models or code modules, please cite the following paper:
213
+
214
+ ```
215
+ @misc{aggarwal2023iesemparse,
+ doi = {10.48550/arXiv.2304.13005},
+
+ url = {https://arxiv.org/abs/2304.13005},
+
+ author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
+
+ keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences},
+
+ title = {Evaluating Inter-Bilingual Semantic Parsing for Indian Languages},
+
+ publisher = {arXiv},
+
+ year = {2023},
+
+ copyright = {Creative Commons Attribution 4.0 International}
+ }
232
+ ```
233
+
234
+ <!-- ### Contributions -->
.history/README_20230707233654.md ADDED
@@ -0,0 +1,232 @@
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - machine-generated
6
+ language:
7
+ - as
8
+ - bn
9
+ - gu
10
+ - hi
11
+ - kn
12
+ - ml
13
+ - mr
14
+ - or
15
+ - pa
16
+ - ta
17
+ - te
18
+ license:
19
+ - cc0-1.0
20
+ multilinguality:
21
+ - multilingual
22
+ pretty_name: IE-SemParse
23
+ size_categories:
24
+ - 1M<n<10M
25
+ source_datasets:
26
+ - original
27
+ task_categories:
28
+ - text2text
29
+ task_ids:
30
+ - Semantic-Parsing
31
+ ---
32
+
33
+ # Dataset Card for "IE-SemParse"
34
+
35
+ ## Table of Contents
36
+
37
+ - [Dataset Card for "IE-SemParse"](#dataset-card-for-ie-semparse)
38
+ - [Table of Contents](#table-of-contents)
39
+ - [Dataset Description](#dataset-description)
40
+ - [Dataset Summary](#dataset-summary)
41
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
42
+ - [Languages](#languages)
43
+ - [Dataset Structure](#dataset-structure)
44
+ - [Data Instances](#data-instances)
45
+ - [Data Fields](#data-fields)
46
+ - [Data Splits](#data-splits)
47
+ - [Dataset usage](#dataset-usage)
48
+ - [Dataset Creation](#dataset-creation)
49
+ - [Curation Rationale](#curation-rationale)
50
+ - [Source Data](#source-data)
51
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
52
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
53
+ - [Human Verification Process](#human-verification-process)
54
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
55
+ - [Social Impact of Dataset](#social-impact-of-dataset)
56
+ - [Discussion of Biases](#discussion-of-biases)
57
+ - [Other Known Limitations](#other-known-limitations)
58
+ - [Dataset Curators](#dataset-curators)
59
+ - [Licensing Information](#licensing-information)
60
+ - [Citation Information](#citation-information)
61
+
62
+ ## Dataset Description
63
+
64
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IE-SemParse>
65
+ - **Paper:** [Evaluating Inter-Bilingual Semantic Parsing for Indian Languages](https://arxiv.org/abs/2304.13005)
66
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
67
+
68
+ ### Dataset Summary
69
+
70
+ IE-SemParse is an Inter-Bilingual Semantic Parsing dataset for eleven major Indic languages that includes
71
+ Assamese (‘as’), Gujarati (‘gu’), Kannada (‘kn’),
72
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
73
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
74
+ (‘hi’), and Bengali (‘bn’).
75
+
76
+ ### Supported Tasks and Leaderboards
77
+
78
+ **Tasks:** Inter-Bilingual Semantic Parsing (seq2seq parse generation)
79
+
80
+ **Leaderboards:** Currently there is no Leaderboard for this dataset.
81
+
82
+ ### Languages
83
+
84
+ - `Assamese (as)`
85
+ - `Bengali (bn)`
86
+ - `Gujarati (gu)`
87
+ - `Kannada (kn)`
88
+ - `Hindi (hi)`
89
+ - `Malayalam (ml)`
90
+ - `Marathi (mr)`
91
+ - `Oriya (or)`
92
+ - `Punjabi (pa)`
93
+ - `Tamil (ta)`
94
+ - `Telugu (te)`
95
+
96
+ ## Dataset Structure
97
+
98
+ ### Data Instances
99
+
100
+ One example from the `hi` dataset is given below in JSON format.
101
+
102
+ ```python
103
+ {'premise': 'अवधारणात्मक रूप से क्रीम स्किमिंग के दो बुनियादी आयाम हैं-उत्पाद और भूगोल।',
104
+ 'hypothesis': 'उत्पाद और भूगोल क्रीम स्किमिंग का काम करते हैं।',
105
+ 'label': 1 (neutral) }
106
+ ```
107
+
108
+ ### Data Fields
109
+
110
+ - `premise (string)`: Premise Sentence
111
+ - `hypothesis (string)`: Hypothesis Sentence
112
+ - `label (integer)`: Integer label `0` if hypothesis `entails` the premise, `2` if hypothesis `negates` the premise and `1` otherwise.
113
+
114
+ ### Data Splits
115
+
116
+ <!-- Below is the dataset split given for `hi` dataset.
117
+
118
+ ```python
119
+ DatasetDict({
120
+ train: Dataset({
121
+ features: ['premise', 'hypothesis', 'label'],
122
+ num_rows: 392702
123
+ })
124
+ test: Dataset({
125
+ features: ['premise', 'hypothesis', 'label'],
126
+ num_rows: 5010
127
+ })
128
+ validation: Dataset({
129
+ features: ['premise', 'hypothesis', 'label'],
130
+ num_rows: 2490
131
+ })
132
+ })
133
+
134
+ ``` -->
135
+
136
+ Language | ISO 639-1 Code |Train | Dev | Test |
137
+ --------------|----------------|-------|-----|------|
138
+ Assamese | as | 392,702 | 5,010 | 2,490 |
139
+ Bengali | bn | 392,702 | 5,010 | 2,490 |
140
+ Gujarati | gu | 392,702 | 5,010 | 2,490 |
141
+ Hindi | hi | 392,702 | 5,010 | 2,490 |
142
+ Kannada | kn | 392,702 | 5,010 | 2,490 |
143
+ Malayalam | ml |392,702 | 5,010 | 2,490 |
144
+ Marathi | mr |392,702 | 5,010 | 2,490 |
145
+ Oriya | or | 392,702 | 5,010 | 2,490 |
146
+ Punjabi | pa | 392,702 | 5,010 | 2,490 |
147
+ Tamil | ta | 392,702 | 5,010 | 2,490 |
148
+ Telugu | te | 392,702 | 5,010 | 2,490 |
149
+
150
+ <!-- The dataset split remains same across all languages. -->
151
+
152
+ ## Dataset usage
153
+
154
+ Code snippet for using the dataset using datasets library.
155
+
156
+ ```python
157
+ from datasets import load_dataset
158
+
159
+ dataset = load_dataset("Divyanshu/IE-SemParse")
160
+ ```
161
+
162
+ ## Dataset Creation
163
+
164
+ Machine translation of XNLI english dataset to 11 listed Indic Languages.
165
+
166
+ ### Curation Rationale
167
+
168
+ [More information needed]
169
+
170
+ ### Source Data
171
+
172
+ [XNLI dataset](https://cims.nyu.edu/~sbowman/xnli/)
173
+
174
+ #### Initial Data Collection and Normalization
175
+
176
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
177
+
178
+ #### Who are the source language producers?
179
+
180
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
181
+
182
+ #### Human Verification Process
183
+
184
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
185
+
186
+ ## Considerations for Using the Data
187
+
188
+ ### Social Impact of Dataset
189
+
190
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
191
+
192
+ ### Discussion of Biases
193
+
194
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
195
+
196
+ ### Other Known Limitations
197
+
198
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
199
+
200
+ ### Dataset Curators
201
+
202
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
203
+
204
+ ### Licensing Information
205
+
206
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
207
+
208
+ ### Citation Information
209
+
210
+ If you use any of the datasets, models or code modules, please cite the following paper:
211
+
212
+ ```
213
+ @misc{https://doi.org/10.48550/arxiv.2204.08776,
214
+ doi = {10.48550/ARXIV.2204.08776},
215
+
216
+ url = {https://arxiv.org/abs/2304.13005},
217
+
218
+ author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
219
+
220
+ keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},
221
+
222
+ title = {Evaluating Inter-Bilingual Semantic Parsing for Indian Languages},
223
+
224
+ publisher = {arXiv},
225
+
226
+ year = {2023},
227
+
228
+ copyright = {Creative Commons Attribution 4.0 International}
229
+ }
230
+ ```
231
+
232
+ <!-- ### Contributions -->
.history/README_20230707233704.md ADDED
@@ -0,0 +1,232 @@
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - machine-generated
6
+ language:
7
+ - as
8
+ - bn
9
+ - gu
10
+ - hi
11
+ - kn
12
+ - ml
13
+ - mr
14
+ - or
15
+ - pa
16
+ - ta
17
+ - te
18
+ license:
19
+ - cc0-1.0
20
+ multilinguality:
21
+ - multilingual
22
+ pretty_name: IE-SemParse
23
+ size_categories:
24
+ - 1M<n<10M
25
+ source_datasets:
26
+ - original
27
+ task_categories:
28
+ - text2text
29
+ task_ids:
30
+ - Semantic-Parsing
31
+ ---
32
+
33
+ # Dataset Card for "IE-SemParse"
34
+
35
+ ## Table of Contents
36
+
37
+ - [Dataset Card for "IE-SemParse"](#dataset-card-for-ie-semparse)
38
+ - [Table of Contents](#table-of-contents)
39
+ - [Dataset Description](#dataset-description)
40
+ - [Dataset Summary](#dataset-summary)
41
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
42
+ - [Languages](#languages)
43
+ - [Dataset Structure](#dataset-structure)
44
+ - [Data Instances](#data-instances)
45
+ - [Data Fields](#data-fields)
46
+ - [Data Splits](#data-splits)
47
+ - [Dataset usage](#dataset-usage)
48
+ - [Dataset Creation](#dataset-creation)
49
+ - [Curation Rationale](#curation-rationale)
50
+ - [Source Data](#source-data)
51
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
52
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
53
+ - [Human Verification Process](#human-verification-process)
54
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
55
+ - [Social Impact of Dataset](#social-impact-of-dataset)
56
+ - [Discussion of Biases](#discussion-of-biases)
57
+ - [Other Known Limitations](#other-known-limitations)
58
+ - [Dataset Curators](#dataset-curators)
59
+ - [Licensing Information](#licensing-information)
60
+ - [Citation Information](#citation-information)
61
+
62
+ ## Dataset Description
63
+
64
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IE-SemParse>
65
+ - **Paper:** [Evaluating Inter-Bilingual Semantic Parsing for Indian Languages](https://arxiv.org/abs/2304.13005)
66
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
67
+
68
+ ### Dataset Summary
69
+
70
+ IE-SemParse is an Inter-Bilingual Semantic Parsing dataset for eleven major Indic languages that includes
71
+ Assamese (‘as’), Gujarati (‘gu’), Kannada (‘kn’),
72
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
73
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
74
+ (‘hi’), and Bengali (‘bn’).
75
+
76
+ ### Supported Tasks and Leaderboards
77
+
78
+ **Tasks:** Inter-Bilingual Semantic Parsing (seq2seq parse generation)
79
+
80
+ **Leaderboards:** Currently there is no Leaderboard for this dataset.
81
+
82
+ ### Languages
83
+
84
+ - `Assamese (as)`
85
+ - `Bengali (bn)`
86
+ - `Gujarati (gu)`
87
+ - `Kannada (kn)`
88
+ - `Hindi (hi)`
89
+ - `Malayalam (ml)`
90
+ - `Marathi (mr)`
91
+ - `Oriya (or)`
92
+ - `Punjabi (pa)`
93
+ - `Tamil (ta)`
94
+ - `Telugu (te)`
95
+
96
+ ## Dataset Structure
97
+
98
+ ### Data Instances
99
+
100
+ One example from the `hi` dataset is given below in JSON format.
101
+
102
+ ```python
103
+ {'premise': 'अवधारणात्मक रूप से क्रीम स्किमिंग के दो बुनियादी आयाम हैं-उत्पाद और भूगोल।',
104
+ 'hypothesis': 'उत्पाद और भूगोल क्रीम स्किमिंग का काम करते हैं।',
105
+ 'label': 1 (neutral) }
106
+ ```
107
+
108
+ ### Data Fields
109
+
110
+ - `premise (string)`: Premise Sentence
111
+ - `hypothesis (string)`: Hypothesis Sentence
112
+ - `label (integer)`: Integer label `0` if hypothesis `entails` the premise, `2` if hypothesis `negates` the premise and `1` otherwise.
113
+
114
+ ### Data Splits
115
+
116
+ <!-- Below is the dataset split given for `hi` dataset.
117
+
118
+ ```python
119
+ DatasetDict({
120
+ train: Dataset({
121
+ features: ['premise', 'hypothesis', 'label'],
122
+ num_rows: 392702
123
+ })
124
+ test: Dataset({
125
+ features: ['premise', 'hypothesis', 'label'],
126
+ num_rows: 5010
127
+ })
128
+ validation: Dataset({
129
+ features: ['premise', 'hypothesis', 'label'],
130
+ num_rows: 2490
131
+ })
132
+ })
133
+
134
+ ``` -->
135
+
136
+ Language | ISO 639-1 Code |Train | Dev | Test |
137
+ --------------|----------------|-------|-----|------|
138
+ Assamese | as | 392,702 | 5,010 | 2,490 |
139
+ Bengali | bn | 392,702 | 5,010 | 2,490 |
140
+ Gujarati | gu | 392,702 | 5,010 | 2,490 |
141
+ Hindi | hi | 392,702 | 5,010 | 2,490 |
142
+ Kannada | kn | 392,702 | 5,010 | 2,490 |
143
+ Malayalam | ml |392,702 | 5,010 | 2,490 |
144
+ Marathi | mr |392,702 | 5,010 | 2,490 |
145
+ Oriya | or | 392,702 | 5,010 | 2,490 |
146
+ Punjabi | pa | 392,702 | 5,010 | 2,490 |
147
+ Tamil | ta | 392,702 | 5,010 | 2,490 |
148
+ Telugu | te | 392,702 | 5,010 | 2,490 |
149
+
150
+ <!-- The dataset split remains same across all languages. -->
151
+
152
+ ## Dataset usage
153
+
154
+ Code snippet for using the dataset using datasets library.
155
+
156
+ ```python
157
+ from datasets import load_dataset
158
+
159
+ dataset = load_dataset("Divyanshu/IE-SemParse")
160
+ ```
161
+
162
+ ## Dataset Creation
163
+
164
+ Machine translation of XNLI english dataset to 11 listed Indic Languages.
165
+
166
+ ### Curation Rationale
167
+
168
+ [More information needed]
169
+
170
+ ### Source Data
171
+
172
+ [XNLI dataset](https://cims.nyu.edu/~sbowman/xnli/)
173
+
174
+ #### Initial Data Collection and Normalization
175
+
176
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
177
+
178
+ #### Who are the source language producers?
179
+
180
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
181
+
182
+ #### Human Verification Process
183
+
184
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
185
+
186
+ ## Considerations for Using the Data
187
+
188
+ ### Social Impact of Dataset
189
+
190
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
191
+
192
+ ### Discussion of Biases
193
+
194
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
195
+
196
+ ### Other Known Limitations
197
+
198
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
199
+
200
+ ### Dataset Curators
201
+
202
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
203
+
204
+ ### Licensing Information
205
+
206
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
207
+
208
+ ### Citation Information
209
+
210
+ If you use any of the datasets, models or code modules, please cite the following paper:
211
+
212
+ ```
213
+ @misc{https://doi.org/10.48550/arxiv.2204.08776,
214
+ doi = {10.48550/ARXIV.2204.08776},
215
+
216
+ url = {https://arxiv.org/abs/2304.13005},
217
+
218
+ author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
219
+
220
+ keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},
221
+
222
+ title = {Evaluating Inter-Bilingual Semantic Parsing for Indian Languages},
223
+
224
+ publisher = {arXiv},
225
+
226
+ year = {2023},
227
+
228
+ copyright = {Creative Commons Attribution 4.0 International}
229
+ }
230
+ ```
231
+
232
+ <!-- ### Contributions -->
.history/README_20230707233708.md ADDED
@@ -0,0 +1,232 @@
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - machine-generated
6
+ language:
7
+ - as
8
+ - bn
9
+ - gu
10
+ - hi
11
+ - kn
12
+ - ml
13
+ - mr
14
+ - or
15
+ - pa
16
+ - ta
17
+ - te
18
+ license:
19
+ - cc0-1.0
20
+ multilinguality:
21
+ - multilingual
22
+ pretty_name: IE-SemParse
23
+ size_categories:
24
+ - 1M<n<10M
25
+ source_datasets:
26
+ - original
27
+ task_categories:
28
+ - text2text
29
+ task_ids:
30
+ - Semantic-Parsing
31
+ ---
32
+
33
+ # Dataset Card for "IE-SemParse"
34
+
35
+ ## Table of Contents
36
+
37
+ - [Dataset Card for "IE-SemParse"](#dataset-card-for-ie-semparse)
38
+ - [Table of Contents](#table-of-contents)
39
+ - [Dataset Description](#dataset-description)
40
+ - [Dataset Summary](#dataset-summary)
41
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
42
+ - [Languages](#languages)
43
+ - [Dataset Structure](#dataset-structure)
44
+ - [Data Instances](#data-instances)
45
+ - [Data Fields](#data-fields)
46
+ - [Data Splits](#data-splits)
47
+ - [Dataset usage](#dataset-usage)
48
+ - [Dataset Creation](#dataset-creation)
49
+ - [Curation Rationale](#curation-rationale)
50
+ - [Source Data](#source-data)
51
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
52
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
53
+ - [Human Verification Process](#human-verification-process)
54
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
55
+ - [Social Impact of Dataset](#social-impact-of-dataset)
56
+ - [Discussion of Biases](#discussion-of-biases)
57
+ - [Other Known Limitations](#other-known-limitations)
58
+ - [Dataset Curators](#dataset-curators)
59
+ - [Licensing Information](#licensing-information)
60
+ - [Citation Information](#citation-information)
61
+
62
+ ## Dataset Description
63
+
64
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IE-SemParse>
65
+ - **Paper:** [Evaluating Inter-Bilingual Semantic Parsing for Indian Languages](https://arxiv.org/abs/2304.13005)
66
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
67
+
68
+ ### Dataset Summary
69
+
70
+ IE-SemParse is an InterBilingual Semantic Parsing Dataset for eleven major Indic languages that includes
71
+ Assamese (‘as’), Gujarati (‘gu’), Kannada (‘kn’),
72
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
73
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
74
+ (‘hi’), and Bengali (‘bn’).
75
+
76
+ ### Supported Tasks and Leaderboards
77
+
78
+ **Tasks:** Inter-Bilingual Semantic Parsing (seq2seq parse generation)
79
+
80
+ **Leaderboards:** Currently there is no Leaderboard for this dataset.
81
+
82
+ ### Languages
83
+
84
+ - `Assamese (as)`
85
+ - `Bengali (bn)`
86
+ - `Gujarati (gu)`
87
+ - `Kannada (kn)`
88
+ - `Hindi (hi)`
89
+ - `Malayalam (ml)`
90
+ - `Marathi (mr)`
91
+ - `Oriya (or)`
92
+ - `Punjabi (pa)`
93
+ - `Tamil (ta)`
94
+ - `Telugu (te)`
95
+
96
+ ## Dataset Structure
97
+
98
+ ### Data Instances
99
+
100
+ One example from the `hi` dataset is given below in JSON format.
101
+
102
+ ```python
103
+ {'premise': 'अवधारणात्मक रूप से क्रीम स्किमिंग के दो बुनियादी आयाम हैं-उत्पाद और भूगोल।',
104
+ 'hypothesis': 'उत्पाद और भूगोल क्रीम स्किमिंग का काम करते हैं।',
105
+ 'label': 1 (neutral) }
106
+ ```
107
+
108
+ ### Data Fields
109
+
110
+ - `premise (string)`: Premise Sentence
111
+ - `hypothesis (string)`: Hypothesis Sentence
112
+ - `label (integer)`: Integer label `0` if hypothesis `entails` the premise, `2` if hypothesis `negates` the premise and `1` otherwise.
113
+
114
+ ### Data Splits
115
+
116
+ <!-- Below is the dataset split given for `hi` dataset.
117
+
118
+ ```python
119
+ DatasetDict({
120
+ train: Dataset({
121
+ features: ['premise', 'hypothesis', 'label'],
122
+ num_rows: 392702
123
+ })
124
+ test: Dataset({
125
+ features: ['premise', 'hypothesis', 'label'],
126
+ num_rows: 5010
127
+ })
128
+ validation: Dataset({
129
+ features: ['premise', 'hypothesis', 'label'],
130
+ num_rows: 2490
131
+ })
132
+ })
133
+
134
+ ``` -->
135
+
136
+ Language | ISO 639-1 Code |Train | Dev | Test |
137
+ --------------|----------------|-------|-----|------|
138
+ Assamese | as | 392,702 | 5,010 | 2,490 |
139
+ Bengali | bn | 392,702 | 5,010 | 2,490 |
140
+ Gujarati | gu | 392,702 | 5,010 | 2,490 |
141
+ Hindi | hi | 392,702 | 5,010 | 2,490 |
142
+ Kannada | kn | 392,702 | 5,010 | 2,490 |
143
+ Malayalam | ml |392,702 | 5,010 | 2,490 |
144
+ Marathi | mr |392,702 | 5,010 | 2,490 |
145
+ Oriya | or | 392,702 | 5,010 | 2,490 |
146
+ Punjabi | pa | 392,702 | 5,010 | 2,490 |
147
+ Tamil | ta | 392,702 | 5,010 | 2,490 |
148
+ Telugu | te | 392,702 | 5,010 | 2,490 |
149
+
150
+ <!-- The dataset split remains same across all languages. -->
151
+
152
+ ## Dataset usage
153
+
154
+ Code snippet for using the dataset using datasets library.
155
+
156
+ ```python
157
+ from datasets import load_dataset
158
+
159
+ dataset = load_dataset("Divyanshu/IE-SemParse")
160
+ ```
161
+
162
+ ## Dataset Creation
163
+
164
+ Machine translation of XNLI english dataset to 11 listed Indic Languages.
165
+
166
+ ### Curation Rationale
167
+
168
+ [More information needed]
169
+
170
+ ### Source Data
171
+
172
+ [XNLI dataset](https://cims.nyu.edu/~sbowman/xnli/)
173
+
174
+ #### Initial Data Collection and Normalization
175
+
176
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
177
+
178
+ #### Who are the source language producers?
179
+
180
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
181
+
182
+ #### Human Verification Process
183
+
184
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
185
+
186
+ ## Considerations for Using the Data
187
+
188
+ ### Social Impact of Dataset
189
+
190
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
191
+
192
+ ### Discussion of Biases
193
+
194
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
195
+
196
+ ### Other Known Limitations
197
+
198
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
199
+
200
+ ### Dataset Curators
201
+
202
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
203
+
204
+ ### Licensing Information
205
+
206
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
207
+
208
+ ### Citation Information
209
+
210
+ If you use any of the datasets, models or code modules, please cite the following paper:
211
+
212
+ ```
213
+ @misc{https://doi.org/10.48550/arxiv.2204.08776,
214
+ doi = {10.48550/ARXIV.2204.08776},
215
+
216
+ url = {https://arxiv.org/abs/2304.13005},
217
+
218
+ author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
219
+
220
+ keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},
221
+
222
+ title = {Evaluating Inter-Bilingual Semantic Parsing for Indian Languages},
223
+
224
+ publisher = {arXiv},
225
+
226
+ year = {2023},
227
+
228
+ copyright = {Creative Commons Attribution 4.0 International}
229
+ }
230
+ ```
231
+
232
+ <!-- ### Contributions -->
.history/README_20230707233722.md ADDED
@@ -0,0 +1,229 @@
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - machine-generated
6
+ language:
7
+ - as
8
+ - bn
9
+ - gu
10
+ - hi
11
+ - kn
12
+ - ml
13
+ - mr
14
+ - or
15
+ - pa
16
+ - ta
17
+ - te
18
+ license:
19
+ - cc0-1.0
20
+ multilinguality:
21
+ - multilingual
22
+ pretty_name: IE-SemParse
23
+ size_categories:
24
+ - 1M<n<10M
25
+ source_datasets:
26
+ - original
27
+ task_categories:
28
+ - text2text
29
+ task_ids:
30
+ - Semantic-Parsing
31
+ ---
32
+
33
+ # Dataset Card for "IE-SemParse"
34
+
35
+ ## Table of Contents
36
+
37
+ - [Dataset Card for "IE-SemParse"](#dataset-card-for-ie-semparse)
38
+ - [Table of Contents](#table-of-contents)
39
+ - [Dataset Description](#dataset-description)
40
+ - [Dataset Summary](#dataset-summary)
41
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
42
+ - [Languages](#languages)
43
+ - [Dataset Structure](#dataset-structure)
44
+ - [Data Instances](#data-instances)
45
+ - [Data Fields](#data-fields)
46
+ - [Data Splits](#data-splits)
47
+ - [Dataset usage](#dataset-usage)
48
+ - [Dataset Creation](#dataset-creation)
49
+ - [Curation Rationale](#curation-rationale)
50
+ - [Source Data](#source-data)
51
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
52
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
53
+ - [Human Verification Process](#human-verification-process)
54
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
55
+ - [Social Impact of Dataset](#social-impact-of-dataset)
56
+ - [Discussion of Biases](#discussion-of-biases)
57
+ - [Other Known Limitations](#other-known-limitations)
58
+ - [Dataset Curators](#dataset-curators)
59
+ - [Licensing Information](#licensing-information)
60
+ - [Citation Information](#citation-information)
61
+
62
+ ## Dataset Description
63
+
64
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IE-SemParse>
65
+ - **Paper:** [Evaluating Inter-Bilingual Semantic Parsing for Indian Languages](https://arxiv.org/abs/2304.13005)
66
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
67
+
68
+ ### Dataset Summary
69
+
70
+ IE-SemParse is an InterBilingual Semantic Parsing Dataset for eleven major Indic languages that includes
71
+ Assamese (‘as’), Gujarati (‘gu’), Kannada (‘kn’),
72
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
73
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
74
+ (‘hi’), and Bengali (‘bn’).
75
+
76
+ ### Supported Tasks and Leaderboards
77
+
78
+ **Tasks:** Inter-Bilingual Semantic Parsing (seq2seq parse generation)
79
+
80
+ **Leaderboards:** Currently there is no Leaderboard for this dataset.
81
+
82
+ ### Languages
83
+
84
+ - `Assamese (as)`
85
+ - `Bengali (bn)`
86
+ - `Gujarati (gu)`
87
+ - `Kannada (kn)`
88
+ - `Hindi (hi)`
89
+ - `Malayalam (ml)`
90
+ - `Marathi (mr)`
91
+ - `Oriya (or)`
92
+ - `Punjabi (pa)`
93
+ - `Tamil (ta)`
94
+ - `Telugu (te)`
95
+
96
+ ## Dataset Structure
97
+
98
+ ### Data Instances
99
+
100
+ One example from the `hi` dataset is given below in JSON format.
101
+
102
+ ```python
103
+ {'premise': 'अवधारणात्मक रूप से क्रीम स्किमिंग के दो बुनियादी आयाम हैं-उत्पाद और भूगोल।',
104
+ 'hypothesis': 'उत्पाद और भूगोल क्रीम स्किमिंग का काम करते हैं।',
105
+ 'label': 1 (neutral) }
106
+ ```
107
+
108
+ ### Data Fields
109
+
110
+
111
+ ### Data Splits
112
+
113
+ <!-- Below is the dataset split given for `hi` dataset.
114
+
115
+ ```python
116
+ DatasetDict({
117
+ train: Dataset({
118
+ features: ['premise', 'hypothesis', 'label'],
119
+ num_rows: 392702
120
+ })
121
+ test: Dataset({
122
+ features: ['premise', 'hypothesis', 'label'],
123
+ num_rows: 5010
124
+ })
125
+ validation: Dataset({
126
+ features: ['premise', 'hypothesis', 'label'],
127
+ num_rows: 2490
128
+ })
129
+ })
130
+
131
+ ``` -->
132
+
133
+ Language | ISO 639-1 Code |Train | Dev | Test |
134
+ --------------|----------------|-------|-----|------|
135
+ Assamese | as | 392,702 | 5,010 | 2,490 |
136
+ Bengali | bn | 392,702 | 5,010 | 2,490 |
137
+ Gujarati | gu | 392,702 | 5,010 | 2,490 |
138
+ Hindi | hi | 392,702 | 5,010 | 2,490 |
139
+ Kannada | kn | 392,702 | 5,010 | 2,490 |
140
+ Malayalam | ml |392,702 | 5,010 | 2,490 |
141
+ Marathi | mr |392,702 | 5,010 | 2,490 |
142
+ Oriya | or | 392,702 | 5,010 | 2,490 |
143
+ Punjabi | pa | 392,702 | 5,010 | 2,490 |
144
+ Tamil | ta | 392,702 | 5,010 | 2,490 |
145
+ Telugu | te | 392,702 | 5,010 | 2,490 |
146
+
147
+ <!-- The dataset split remains same across all languages. -->
148
+
149
+ ## Dataset usage
150
+
151
+ Code snippet for using the dataset using datasets library.
152
+
153
+ ```python
154
+ from datasets import load_dataset
155
+
156
+ dataset = load_dataset("Divyanshu/IE-SemParse")
157
+ ```
158
+
159
+ ## Dataset Creation
160
+
161
+ Machine translation of XNLI english dataset to 11 listed Indic Languages.
162
+
163
+ ### Curation Rationale
164
+
165
+ [More information needed]
166
+
167
+ ### Source Data
168
+
169
+ [XNLI dataset](https://cims.nyu.edu/~sbowman/xnli/)
170
+
171
+ #### Initial Data Collection and Normalization
172
+
173
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
174
+
175
+ #### Who are the source language producers?
176
+
177
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
178
+
179
+ #### Human Verification Process
180
+
181
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
182
+
183
+ ## Considerations for Using the Data
184
+
185
+ ### Social Impact of Dataset
186
+
187
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
188
+
189
+ ### Discussion of Biases
190
+
191
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
192
+
193
+ ### Other Known Limitations
194
+
195
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
196
+
197
+ ### Dataset Curators
198
+
199
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
200
+
201
+ ### Licensing Information
202
+
203
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
204
+
205
+ ### Citation Information
206
+
207
+ If you use any of the datasets, models or code modules, please cite the following paper:
208
+
209
+ ```
210
+ @misc{https://doi.org/10.48550/arxiv.2204.08776,
211
+ doi = {10.48550/ARXIV.2204.08776},
212
+
213
+ url = {https://arxiv.org/abs/2304.13005},
214
+
215
+ author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
216
+
217
+ keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},
218
+
219
+ title = {Evaluating Inter-Bilingual Semantic Parsing for Indian Languages},
220
+
221
+ publisher = {arXiv},
222
+
223
+ year = {2023},
224
+
225
+ copyright = {Creative Commons Attribution 4.0 International}
226
+ }
227
+ ```
228
+
229
+ <!-- ### Contributions -->
.history/README_20230707233725.md ADDED
@@ -0,0 +1,226 @@
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - machine-generated
6
+ language:
7
+ - as
8
+ - bn
9
+ - gu
10
+ - hi
11
+ - kn
12
+ - ml
13
+ - mr
14
+ - or
15
+ - pa
16
+ - ta
17
+ - te
18
+ license:
19
+ - cc0-1.0
20
+ multilinguality:
21
+ - multilingual
22
+ pretty_name: IE-SemParse
23
+ size_categories:
24
+ - 1M<n<10M
25
+ source_datasets:
26
+ - original
27
+ task_categories:
28
+ - text2text
29
+ task_ids:
30
+ - Semantic-Parsing
31
+ ---
32
+
33
+ # Dataset Card for "IE-SemParse"
34
+
35
+ ## Table of Contents
36
+
37
+ - [Dataset Card for "IE-SemParse"](#dataset-card-for-ie-semparse)
38
+ - [Table of Contents](#table-of-contents)
39
+ - [Dataset Description](#dataset-description)
40
+ - [Dataset Summary](#dataset-summary)
41
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
42
+ - [Languages](#languages)
43
+ - [Dataset Structure](#dataset-structure)
44
+ - [Data Instances](#data-instances)
45
+ - [Data Splits](#data-splits)
46
+ - [Dataset usage](#dataset-usage)
47
+ - [Dataset Creation](#dataset-creation)
48
+ - [Curation Rationale](#curation-rationale)
49
+ - [Source Data](#source-data)
50
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
51
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
52
+ - [Human Verification Process](#human-verification-process)
53
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
54
+ - [Social Impact of Dataset](#social-impact-of-dataset)
55
+ - [Discussion of Biases](#discussion-of-biases)
56
+ - [Other Known Limitations](#other-known-limitations)
57
+ - [Dataset Curators](#dataset-curators)
58
+ - [Licensing Information](#licensing-information)
59
+ - [Citation Information](#citation-information)
60
+
61
+ ## Dataset Description
62
+
63
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IE-SemParse>
64
+ - **Paper:** [Evaluating Inter-Bilingual Semantic Parsing for Indian Languages](https://arxiv.org/abs/2304.13005)
65
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
66
+
67
+ ### Dataset Summary
68
+
69
+ IE-SemParse is an InterBilingual Semantic Parsing Dataset for eleven major Indic languages that includes
70
+ Assamese (‘as’), Gujarati (‘gu’), Kannada (‘kn’),
71
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
72
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
73
+ (‘hi’), and Bengali (‘bn’).
74
+
75
+ ### Supported Tasks and Leaderboards
76
+
77
+ **Tasks:** Inter-Bilingual Semantic Parsing (seq2seq parse generation)
78
+
79
+ **Leaderboards:** Currently there is no Leaderboard for this dataset.
80
+
81
+ ### Languages
82
+
83
+ - `Assamese (as)`
84
+ - `Bengali (bn)`
85
+ - `Gujarati (gu)`
86
+ - `Kannada (kn)`
87
+ - `Hindi (hi)`
88
+ - `Malayalam (ml)`
89
+ - `Marathi (mr)`
90
+ - `Oriya (or)`
91
+ - `Punjabi (pa)`
92
+ - `Tamil (ta)`
93
+ - `Telugu (te)`
94
+
95
+ ## Dataset Structure
96
+
97
+ ### Data Instances
98
+
99
+ One example from the `hi` dataset is given below in JSON format.
100
+
101
+ ```python
102
+ {'premise': 'अवधारणात्मक रूप से क्रीम स्किमिंग के दो बुनियादी आयाम हैं-उत्पाद और भूगोल।',
103
+ 'hypothesis': 'उत्पाद और भूगोल क्रीम स्किमिंग का काम करते हैं।',
104
+ 'label': 1 (neutral) }
105
+ ```
106
+
107
+
108
+ ### Data Splits
109
+
110
+ <!-- Below is the dataset split given for `hi` dataset.
111
+
112
+ ```python
113
+ DatasetDict({
114
+ train: Dataset({
115
+ features: ['premise', 'hypothesis', 'label'],
116
+ num_rows: 392702
117
+ })
118
+ test: Dataset({
119
+ features: ['premise', 'hypothesis', 'label'],
120
+ num_rows: 5010
121
+ })
122
+ validation: Dataset({
123
+ features: ['premise', 'hypothesis', 'label'],
124
+ num_rows: 2490
125
+ })
126
+ })
127
+
128
+ ``` -->
129
+
130
+ Language | ISO 639-1 Code |Train | Dev | Test |
131
+ --------------|----------------|-------|-----|------|
132
+ Assamese | as | 392,702 | 5,010 | 2,490 |
133
+ Bengali | bn | 392,702 | 5,010 | 2,490 |
134
+ Gujarati | gu | 392,702 | 5,010 | 2,490 |
135
+ Hindi | hi | 392,702 | 5,010 | 2,490 |
136
+ Kannada | kn | 392,702 | 5,010 | 2,490 |
137
+ Malayalam | ml |392,702 | 5,010 | 2,490 |
138
+ Marathi | mr |392,702 | 5,010 | 2,490 |
139
+ Oriya | or | 392,702 | 5,010 | 2,490 |
140
+ Punjabi | pa | 392,702 | 5,010 | 2,490 |
141
+ Tamil | ta | 392,702 | 5,010 | 2,490 |
142
+ Telugu | te | 392,702 | 5,010 | 2,490 |
143
+
144
+ <!-- The dataset split remains same across all languages. -->
145
+
146
+ ## Dataset usage
147
+
148
+ Code snippet for using the dataset using datasets library.
149
+
150
+ ```python
151
+ from datasets import load_dataset
152
+
153
+ dataset = load_dataset("Divyanshu/IE-SemParse")
154
+ ```
155
+
156
+ ## Dataset Creation
157
+
158
+ Machine translation of XNLI english dataset to 11 listed Indic Languages.
159
+
160
+ ### Curation Rationale
161
+
162
+ [More information needed]
163
+
164
+ ### Source Data
165
+
166
+ [XNLI dataset](https://cims.nyu.edu/~sbowman/xnli/)
167
+
168
+ #### Initial Data Collection and Normalization
169
+
170
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
171
+
172
+ #### Who are the source language producers?
173
+
174
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
175
+
176
+ #### Human Verification Process
177
+
178
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
179
+
180
+ ## Considerations for Using the Data
181
+
182
+ ### Social Impact of Dataset
183
+
184
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
185
+
186
+ ### Discussion of Biases
187
+
188
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
189
+
190
+ ### Other Known Limitations
191
+
192
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
193
+
194
+ ### Dataset Curators
195
+
196
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
197
+
198
+ ### Licensing Information
199
+
200
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
201
+
202
+ ### Citation Information
203
+
204
+ If you use any of the datasets, models or code modules, please cite the following paper:
205
+
206
+ ```
207
+ @misc{https://doi.org/10.48550/arxiv.2204.08776,
208
+ doi = {10.48550/ARXIV.2204.08776},
209
+
210
+ url = {https://arxiv.org/abs/2304.13005},
211
+
212
+ author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
213
+
214
+ keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},
215
+
216
+ title = {Evaluating Inter-Bilingual Semantic Parsing for Indian Languages},
217
+
218
+ publisher = {arXiv},
219
+
220
+ year = {2023},
221
+
222
+ copyright = {Creative Commons Attribution 4.0 International}
223
+ }
224
+ ```
225
+
226
+ <!-- ### Contributions -->
.history/README_20230707233734.md ADDED
@@ -0,0 +1,214 @@
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - machine-generated
6
+ language:
7
+ - as
8
+ - bn
9
+ - gu
10
+ - hi
11
+ - kn
12
+ - ml
13
+ - mr
14
+ - or
15
+ - pa
16
+ - ta
17
+ - te
18
+ license:
19
+ - cc0-1.0
20
+ multilinguality:
21
+ - multilingual
22
+ pretty_name: IE-SemParse
23
+ size_categories:
24
+ - 1M<n<10M
25
+ source_datasets:
26
+ - original
27
+ task_categories:
28
+ - text2text
29
+ task_ids:
30
+ - Semantic-Parsing
31
+ ---
32
+
33
+ # Dataset Card for "IE-SemParse"
34
+
35
+ ## Table of Contents
36
+
37
+ - [Dataset Card for "IE-SemParse"](#dataset-card-for-ie-semparse)
38
+ - [Table of Contents](#table-of-contents)
39
+ - [Dataset Description](#dataset-description)
40
+ - [Dataset Summary](#dataset-summary)
41
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
42
+ - [Languages](#languages)
43
+ - [Data Splits](#data-splits)
44
+ - [Dataset usage](#dataset-usage)
45
+ - [Dataset Creation](#dataset-creation)
46
+ - [Curation Rationale](#curation-rationale)
47
+ - [Source Data](#source-data)
48
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
49
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
50
+ - [Human Verification Process](#human-verification-process)
51
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
52
+ - [Social Impact of Dataset](#social-impact-of-dataset)
53
+ - [Discussion of Biases](#discussion-of-biases)
54
+ - [Other Known Limitations](#other-known-limitations)
55
+ - [Dataset Curators](#dataset-curators)
56
+ - [Licensing Information](#licensing-information)
57
+ - [Citation Information](#citation-information)
58
+
59
+ ## Dataset Description
60
+
61
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IE-SemParse>
62
+ - **Paper:** [Evaluating Inter-Bilingual Semantic Parsing for Indian Languages](https://arxiv.org/abs/2304.13005)
63
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
64
+
65
+ ### Dataset Summary
66
+
67
+ IE-SemParse is an InterBilingual Semantic Parsing Dataset for eleven major Indic languages that includes
68
+ Assamese (‘as’), Gujarati (‘gu’), Kannada (‘kn’),
69
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
70
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
71
+ (‘hi’), and Bengali (‘bn’).
72
+
73
+ ### Supported Tasks and Leaderboards
74
+
75
+ **Tasks:** Inter-Bilingual Semantic Parsing (seq2seq parse generation)
76
+
77
+ **Leaderboards:** Currently there is no Leaderboard for this dataset.
78
+
79
+ ### Languages
80
+
81
+ - `Assamese (as)`
82
+ - `Bengali (bn)`
83
+ - `Gujarati (gu)`
84
+ - `Kannada (kn)`
85
+ - `Hindi (hi)`
86
+ - `Malayalam (ml)`
87
+ - `Marathi (mr)`
88
+ - `Oriya (or)`
89
+ - `Punjabi (pa)`
90
+ - `Tamil (ta)`
91
+ - `Telugu (te)`
92
+
93
+
94
+
95
+
96
+ ### Data Splits
97
+
98
+ <!-- Below is the dataset split given for `hi` dataset.
99
+
100
+ ```python
101
+ DatasetDict({
102
+ train: Dataset({
103
+ features: ['premise', 'hypothesis', 'label'],
104
+ num_rows: 392702
105
+ })
106
+ test: Dataset({
107
+ features: ['premise', 'hypothesis', 'label'],
108
+ num_rows: 5010
109
+ })
110
+ validation: Dataset({
111
+ features: ['premise', 'hypothesis', 'label'],
112
+ num_rows: 2490
113
+ })
114
+ })
115
+
116
+ ``` -->
117
+
118
+ Language | ISO 639-1 Code |Train | Dev | Test |
119
+ --------------|----------------|-------|-----|------|
120
+ Assamese | as | 392,702 | 5,010 | 2,490 |
121
+ Bengali | bn | 392,702 | 5,010 | 2,490 |
122
+ Gujarati | gu | 392,702 | 5,010 | 2,490 |
123
+ Hindi | hi | 392,702 | 5,010 | 2,490 |
124
+ Kannada | kn | 392,702 | 5,010 | 2,490 |
125
+ Malayalam | ml |392,702 | 5,010 | 2,490 |
126
+ Marathi | mr |392,702 | 5,010 | 2,490 |
127
+ Oriya | or | 392,702 | 5,010 | 2,490 |
128
+ Punjabi | pa | 392,702 | 5,010 | 2,490 |
129
+ Tamil | ta | 392,702 | 5,010 | 2,490 |
130
+ Telugu | te | 392,702 | 5,010 | 2,490 |
131
+
132
+ <!-- The dataset split remains same across all languages. -->
133
+
134
+ ## Dataset usage
135
+
136
+ Code snippet for using the dataset using datasets library.
137
+
138
+ ```python
139
+ from datasets import load_dataset
140
+
141
+ dataset = load_dataset("Divyanshu/IE-SemParse")
142
+ ```
143
+
144
+ ## Dataset Creation
145
+
146
+ Machine translation of XNLI english dataset to 11 listed Indic Languages.
147
+
148
+ ### Curation Rationale
149
+
150
+ [More information needed]
151
+
152
+ ### Source Data
153
+
154
+ [XNLI dataset](https://cims.nyu.edu/~sbowman/xnli/)
155
+
156
+ #### Initial Data Collection and Normalization
157
+
158
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
159
+
160
+ #### Who are the source language producers?
161
+
162
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
163
+
164
+ #### Human Verification Process
165
+
166
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
167
+
168
+ ## Considerations for Using the Data
169
+
170
+ ### Social Impact of Dataset
171
+
172
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
173
+
174
+ ### Discussion of Biases
175
+
176
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
177
+
178
+ ### Other Known Limitations
179
+
180
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
181
+
182
+ ### Dataset Curators
183
+
184
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
185
+
186
+ ### Licensing Information
187
+
188
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
189
+
190
+ ### Citation Information
191
+
192
+ If you use any of the datasets, models or code modules, please cite the following paper:
193
+
194
+ ```
195
+ @misc{https://doi.org/10.48550/arxiv.2204.08776,
196
+ doi = {10.48550/ARXIV.2204.08776},
197
+
198
+ url = {https://arxiv.org/abs/2304.13005},
199
+
200
+ author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
201
+
202
+ keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},
203
+
204
+ title = {Evaluating Inter-Bilingual Semantic Parsing for Indian Languages},
205
+
206
+ publisher = {arXiv},
207
+
208
+ year = {2023},
209
+
210
+ copyright = {Creative Commons Attribution 4.0 International}
211
+ }
212
+ ```
213
+
214
+ <!-- ### Contributions -->
.history/README_20230707233737.md ADDED
@@ -0,0 +1,214 @@
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - machine-generated
6
+ language:
7
+ - as
8
+ - bn
9
+ - gu
10
+ - hi
11
+ - kn
12
+ - ml
13
+ - mr
14
+ - or
15
+ - pa
16
+ - ta
17
+ - te
18
+ license:
19
+ - cc0-1.0
20
+ multilinguality:
21
+ - multilingual
22
+ pretty_name: IE-SemParse
23
+ size_categories:
24
+ - 1M<n<10M
25
+ source_datasets:
26
+ - original
27
+ task_categories:
28
+ - text2text
29
+ task_ids:
30
+ - Semantic-Parsing
31
+ ---
32
+
33
+ # Dataset Card for "IE-SemParse"
34
+
35
+ ## Table of Contents
36
+
37
+ - [Dataset Card for "IE-SemParse"](#dataset-card-for-ie-semparse)
38
+ - [Table of Contents](#table-of-contents)
39
+ - [Dataset Description](#dataset-description)
40
+ - [Dataset Summary](#dataset-summary)
41
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
42
+ - [Languages](#languages)
43
+ - [Data Splits](#data-splits)
44
+ - [Dataset usage](#dataset-usage)
45
+ - [Dataset Creation](#dataset-creation)
46
+ - [Curation Rationale](#curation-rationale)
47
+ - [Source Data](#source-data)
48
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
49
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
50
+ - [Human Verification Process](#human-verification-process)
51
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
52
+ - [Social Impact of Dataset](#social-impact-of-dataset)
53
+ - [Discussion of Biases](#discussion-of-biases)
54
+ - [Other Known Limitations](#other-known-limitations)
55
+ - [Dataset Curators](#dataset-curators)
56
+ - [Licensing Information](#licensing-information)
57
+ - [Citation Information](#citation-information)
58
+
59
+ ## Dataset Description
60
+
61
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IE-SemParse>
62
+ - **Paper:** [Evaluating Inter-Bilingual Semantic Parsing for Indian Languages](https://arxiv.org/abs/2304.13005)
63
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
64
+
65
+ ### Dataset Summary
66
+
67
+ IE-SemParse is an InterBilingual Semantic Parsing Dataset for eleven major Indic languages that includes
68
+ Assamese (‘as’), Gujarati (‘gu’), Kannada (‘kn’),
69
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
70
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
71
+ (‘hi’), and Bengali (‘bn’).
72
+
73
+ ### Supported Tasks and Leaderboards
74
+
75
+ **Tasks:** Inter-Bilingual Semantic Parsing (seq2seq parse generation)
76
+
77
+ **Leaderboards:** Currently there is no Leaderboard for this dataset.
78
+
79
+ ### Languages
80
+
81
+ - `Assamese (as)`
82
+ - `Bengali (bn)`
83
+ - `Gujarati (gu)`
84
+ - `Kannada (kn)`
85
+ - `Hindi (hi)`
86
+ - `Malayalam (ml)`
87
+ - `Marathi (mr)`
88
+ - `Oriya (or)`
89
+ - `Punjabi (pa)`
90
+ - `Tamil (ta)`
91
+ - `Telugu (te)`
92
+
93
+ ...
94
+
95
+
96
+ ### Data Splits
97
+
98
+ <!-- Below is the dataset split given for `hi` dataset.
99
+
100
+ ```python
101
+ DatasetDict({
102
+ train: Dataset({
103
+ features: ['premise', 'hypothesis', 'label'],
104
+ num_rows: 392702
105
+ })
106
+ test: Dataset({
107
+ features: ['premise', 'hypothesis', 'label'],
108
+ num_rows: 5010
109
+ })
110
+ validation: Dataset({
111
+ features: ['premise', 'hypothesis', 'label'],
112
+ num_rows: 2490
113
+ })
114
+ })
115
+
116
+ ``` -->
117
+
118
+ Language | ISO 639-1 Code |Train | Dev | Test |
119
+ --------------|----------------|-------|-----|------|
120
+ Assamese | as | 392,702 | 5,010 | 2,490 |
121
+ Bengali | bn | 392,702 | 5,010 | 2,490 |
122
+ Gujarati | gu | 392,702 | 5,010 | 2,490 |
123
+ Hindi | hi | 392,702 | 5,010 | 2,490 |
124
+ Kannada | kn | 392,702 | 5,010 | 2,490 |
125
+ Malayalam | ml |392,702 | 5,010 | 2,490 |
126
+ Marathi | mr |392,702 | 5,010 | 2,490 |
127
+ Oriya | or | 392,702 | 5,010 | 2,490 |
128
+ Punjabi | pa | 392,702 | 5,010 | 2,490 |
129
+ Tamil | ta | 392,702 | 5,010 | 2,490 |
130
+ Telugu | te | 392,702 | 5,010 | 2,490 |
131
+
132
+ <!-- The dataset split remains same across all languages. -->
133
+
134
+ ## Dataset usage
135
+
136
+ Code snippet for using the dataset using datasets library.
137
+
138
+ ```python
139
+ from datasets import load_dataset
140
+
141
+ dataset = load_dataset("Divyanshu/IE-SemParse")
142
+ ```
143
+
144
+ ## Dataset Creation
145
+
146
+ Machine translation of XNLI english dataset to 11 listed Indic Languages.
147
+
148
+ ### Curation Rationale
149
+
150
+ [More information needed]
151
+
152
+ ### Source Data
153
+
154
+ [XNLI dataset](https://cims.nyu.edu/~sbowman/xnli/)
155
+
156
+ #### Initial Data Collection and Normalization
157
+
158
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
159
+
160
+ #### Who are the source language producers?
161
+
162
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
163
+
164
+ #### Human Verification Process
165
+
166
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
167
+
168
+ ## Considerations for Using the Data
169
+
170
+ ### Social Impact of Dataset
171
+
172
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
173
+
174
+ ### Discussion of Biases
175
+
176
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
177
+
178
+ ### Other Known Limitations
179
+
180
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
181
+
182
+ ### Dataset Curators
183
+
184
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
185
+
186
+ ### Licensing Information
187
+
188
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
189
+
190
+ ### Citation Information
+
+ If you use any of the datasets, models or code modules, please cite the following paper:
+
+ ```
+ @misc{https://doi.org/10.48550/arxiv.2304.13005,
+   doi = {10.48550/ARXIV.2304.13005},
+   url = {https://arxiv.org/abs/2304.13005},
+   author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
+   keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences},
+   title = {Evaluating Inter-Bilingual Semantic Parsing for Indian Languages},
+   publisher = {arXiv},
+   year = {2023},
+   copyright = {Creative Commons Attribution 4.0 International}
+ }
+ ```
+
+ <!-- ### Contributions -->
.history/README_20230707233747.md ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - machine-generated
6
+ language:
7
+ - as
8
+ - bn
9
+ - gu
10
+ - hi
11
+ - kn
12
+ - ml
13
+ - mr
14
+ - or
15
+ - pa
16
+ - ta
17
+ - te
18
+ license:
19
+ - cc0-1.0
20
+ multilinguality:
21
+ - multilingual
22
+ pretty_name: IE-SemParse
23
+ size_categories:
24
+ - 1M<n<10M
25
+ source_datasets:
26
+ - original
27
+ task_categories:
28
+ - text2text
29
+ task_ids:
30
+ - Semantic-Parsing
31
+ ---
32
+
33
+ # Dataset Card for "IE-SemParse"
34
+
35
+ ## Table of Contents
36
+
37
+ - [Dataset Card for "IE-SemParse"](#dataset-card-for-ie-semparse)
38
+ - [Table of Contents](#table-of-contents)
39
+ - [Dataset Description](#dataset-description)
40
+ - [Dataset Summary](#dataset-summary)
41
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
42
+ - [Languages](#languages)
43
+ - [Data Splits](#data-splits)
44
+ - [Dataset usage](#dataset-usage)
45
+ - [Dataset Creation](#dataset-creation)
46
+ - [Curation Rationale](#curation-rationale)
47
+ - [Source Data](#source-data)
48
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
49
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
50
+ - [Human Verification Process](#human-verification-process)
51
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
52
+ - [Social Impact of Dataset](#social-impact-of-dataset)
53
+ - [Discussion of Biases](#discussion-of-biases)
54
+ - [Other Known Limitations](#other-known-limitations)
55
+ - [Dataset Curators](#dataset-curators)
56
+ - [Licensing Information](#licensing-information)
57
+ - [Citation Information](#citation-information)
58
+
59
+ ## Dataset Description
60
+
61
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IE-SemParse>
62
+ - **Paper:** [Evaluating Inter-Bilingual Semantic Parsing for Indian Languages](https://arxiv.org/abs/2304.13005)
63
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
64
+
65
+ ### Dataset Summary
66
+
67
+ IE-SemParse is an InterBilingual Semantic Parsing Dataset for eleven major Indic languages that includes
68
+ Assamese (‘as’), Gujarat (‘gu’), Kannada (‘kn’),
69
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
70
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
71
+ (‘hi’), and Bengali (‘bn’).
72
+
73
+ ### Supported Tasks and Leaderboards
74
+
75
+ **Tasks:** Natural Language Inference
76
+
77
+ **Leaderboards:** Currently there is no Leaderboard for this dataset.
78
+
79
+ ### Languages
80
+
81
+ - `Assamese (as)`
82
+ - `Bengali (bn)`
83
+ - `Gujarati (gu)`
84
+ - `Kannada (kn)`
85
+ - `Hindi (hi)`
86
+ - `Malayalam (ml)`
87
+ - `Marathi (mr)`
88
+ - `Oriya (or)`
89
+ - `Punjabi (pa)`
90
+ - `Tamil (ta)`
91
+ - `Telugu (te)`
92
+
93
+ ...
94
+
95
+
96
+ ### Data Splits
97
+
98
+ <!-- Below is the dataset split given for `hi` dataset.
99
+
100
+ ```python
101
+ DatasetDict({
102
+ train: Dataset({
103
+ features: ['utterance', 'hypothesis', 'label'],
104
+ num_rows: 392702
105
+ })
106
+ test: Dataset({
107
+ features: ['premise', 'hypothesis', 'label'],
108
+ num_rows: 5010
109
+ })
110
+ validation: Dataset({
111
+ features: ['premise', 'hypothesis', 'label'],
112
+ num_rows: 2490
113
+ })
114
+ })
115
+
116
+ ``` -->
117
+
118
+ Language | ISO 639-1 Code |Train | Dev | Test |
119
+ --------------|----------------|-------|-----|------|
120
+ Assamese | as | 392,702 | 5,010 | 2,490 |
121
+ Bengali | bn | 392,702 | 5,010 | 2,490 |
122
+ Gujarati | gu | 392,702 | 5,010 | 2,490 |
123
+ Hindi | hi | 392,702 | 5,010 | 2,490 |
124
+ Kannada | kn | 392,702 | 5,010 | 2,490 |
125
+ Malayalam | ml |392,702 | 5,010 | 2,490 |
126
+ Marathi | mr |392,702 | 5,010 | 2,490 |
127
+ Oriya | or | 392,702 | 5,010 | 2,490 |
128
+ Punjabi | pa | 392,702 | 5,010 | 2,490 |
129
+ Tamil | ta | 392,702 | 5,010 | 2,490 |
130
+ Telugu | te | 392,702 | 5,010 | 2,490 |
131
+
132
+ <!-- The dataset split remains same across all languages. -->
133
+
134
+ ## Dataset usage
135
+
136
+ Code snippet for using the dataset using datasets library.
137
+
138
+ ```python
139
+ from datasets import load_dataset
140
+
141
+ dataset = load_dataset("Divyanshu/indicxnli")
142
+ ```
143
+
144
+ ## Dataset Creation
145
+
146
+ Machine translation of XNLI english dataset to 11 listed Indic Languages.
147
+
148
+ ### Curation Rationale
149
+
150
+ [More information needed]
151
+
152
+ ### Source Data
153
+
154
+ [XNLI dataset](https://cims.nyu.edu/~sbowman/xnli/)
155
+
156
+ #### Initial Data Collection and Normalization
157
+
158
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
159
+
160
+ #### Who are the source language producers?
161
+
162
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
163
+
164
+ #### Human Verification Process
165
+
166
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
167
+
168
+ ## Considerations for Using the Data
169
+
170
+ ### Social Impact of Dataset
171
+
172
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
173
+
174
+ ### Discussion of Biases
175
+
176
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
177
+
178
+ ### Other Known Limitations
179
+
180
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
181
+
182
+ ### Dataset Curators
183
+
184
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
185
+
186
+ ### Licensing Information
187
+
188
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
189
+
190
+ ### Citation Information
191
+
192
+ If you use any of the datasets, models or code modules, please cite the following paper:
193
+
194
+ ```
195
+ @misc{https://doi.org/10.48550/arxiv.2204.08776,
196
+ doi = {10.48550/ARXIV.2204.08776},
197
+
198
+ url = {https://arxiv.org/abs/2304.13005},
199
+
200
+ author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
201
+
202
+ keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},
203
+
204
+ title = {IE-SemParse: Evaluating Multilingual Inference for Indian Languages},
205
+
206
+ publisher = {arXiv},
207
+
208
+ year = {2022},
209
+
210
+ copyright = {Creative Commons Attribution 4.0 International}
211
+ }
212
+ ```
213
+
214
+ <!-- ### Contributions -->
.history/README_20230707233751.md ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - machine-generated
6
+ language:
7
+ - as
8
+ - bn
9
+ - gu
10
+ - hi
11
+ - kn
12
+ - ml
13
+ - mr
14
+ - or
15
+ - pa
16
+ - ta
17
+ - te
18
+ license:
19
+ - cc0-1.0
20
+ multilinguality:
21
+ - multilingual
22
+ pretty_name: IE-SemParse
23
+ size_categories:
24
+ - 1M<n<10M
25
+ source_datasets:
26
+ - original
27
+ task_categories:
28
+ - text2text
29
+ task_ids:
30
+ - Semantic-Parsing
31
+ ---
32
+
33
+ # Dataset Card for "IE-SemParse"
34
+
35
+ ## Table of Contents
36
+
37
+ - [Dataset Card for "IE-SemParse"](#dataset-card-for-ie-semparse)
38
+ - [Table of Contents](#table-of-contents)
39
+ - [Dataset Description](#dataset-description)
40
+ - [Dataset Summary](#dataset-summary)
41
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
42
+ - [Languages](#languages)
43
+ - [Data Splits](#data-splits)
44
+ - [Dataset usage](#dataset-usage)
45
+ - [Dataset Creation](#dataset-creation)
46
+ - [Curation Rationale](#curation-rationale)
47
+ - [Source Data](#source-data)
48
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
49
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
50
+ - [Human Verification Process](#human-verification-process)
51
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
52
+ - [Social Impact of Dataset](#social-impact-of-dataset)
53
+ - [Discussion of Biases](#discussion-of-biases)
54
+ - [Other Known Limitations](#other-known-limitations)
55
+ - [Dataset Curators](#dataset-curators)
56
+ - [Licensing Information](#licensing-information)
57
+ - [Citation Information](#citation-information)
58
+
59
+ ## Dataset Description
60
+
61
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IE-SemParse>
62
+ - **Paper:** [Evaluating Inter-Bilingual Semantic Parsing for Indian Languages](https://arxiv.org/abs/2304.13005)
63
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
64
+
65
+ ### Dataset Summary
66
+
67
+ IE-SemParse is an InterBilingual Semantic Parsing Dataset for eleven major Indic languages that includes
68
+ Assamese (‘as’), Gujarat (‘gu’), Kannada (‘kn’),
69
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
70
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
71
+ (‘hi’), and Bengali (‘bn’).
72
+
73
+ ### Supported Tasks and Leaderboards
74
+
75
+ **Tasks:** Natural Language Inference
76
+
77
+ **Leaderboards:** Currently there is no Leaderboard for this dataset.
78
+
79
+ ### Languages
80
+
81
+ - `Assamese (as)`
82
+ - `Bengali (bn)`
83
+ - `Gujarati (gu)`
84
+ - `Kannada (kn)`
85
+ - `Hindi (hi)`
86
+ - `Malayalam (ml)`
87
+ - `Marathi (mr)`
88
+ - `Oriya (or)`
89
+ - `Punjabi (pa)`
90
+ - `Tamil (ta)`
91
+ - `Telugu (te)`
92
+
93
+ ...
94
+
95
+
96
+ ### Data Splits
97
+
98
+ <!-- Below is the dataset split given for `hi` dataset.
99
+
100
+ ```python
101
+ DatasetDict({
102
+ train: Dataset({
103
+ features: ['utterance', 'logical form', 'label'],
104
+ num_rows: 392702
105
+ })
106
+ test: Dataset({
107
+ features: ['premise', 'hypothesis', 'label'],
108
+ num_rows: 5010
109
+ })
110
+ validation: Dataset({
111
+ features: ['premise', 'hypothesis', 'label'],
112
+ num_rows: 2490
113
+ })
114
+ })
115
+
116
+ ``` -->
117
+
118
+ Language | ISO 639-1 Code |Train | Dev | Test |
119
+ --------------|----------------|-------|-----|------|
120
+ Assamese | as | 392,702 | 5,010 | 2,490 |
121
+ Bengali | bn | 392,702 | 5,010 | 2,490 |
122
+ Gujarati | gu | 392,702 | 5,010 | 2,490 |
123
+ Hindi | hi | 392,702 | 5,010 | 2,490 |
124
+ Kannada | kn | 392,702 | 5,010 | 2,490 |
125
+ Malayalam | ml |392,702 | 5,010 | 2,490 |
126
+ Marathi | mr |392,702 | 5,010 | 2,490 |
127
+ Oriya | or | 392,702 | 5,010 | 2,490 |
128
+ Punjabi | pa | 392,702 | 5,010 | 2,490 |
129
+ Tamil | ta | 392,702 | 5,010 | 2,490 |
130
+ Telugu | te | 392,702 | 5,010 | 2,490 |
131
+
132
+ <!-- The dataset split remains same across all languages. -->
133
+
134
+ ## Dataset usage
135
+
136
+ Code snippet for using the dataset using datasets library.
137
+
138
+ ```python
139
+ from datasets import load_dataset
140
+
141
+ dataset = load_dataset("Divyanshu/indicxnli")
142
+ ```
143
+
144
+ ## Dataset Creation
145
+
146
+ Machine translation of XNLI english dataset to 11 listed Indic Languages.
147
+
148
+ ### Curation Rationale
149
+
150
+ [More information needed]
151
+
152
+ ### Source Data
153
+
154
+ [XNLI dataset](https://cims.nyu.edu/~sbowman/xnli/)
155
+
156
+ #### Initial Data Collection and Normalization
157
+
158
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
159
+
160
+ #### Who are the source language producers?
161
+
162
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
163
+
164
+ #### Human Verification Process
165
+
166
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
167
+
168
+ ## Considerations for Using the Data
169
+
170
+ ### Social Impact of Dataset
171
+
172
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
173
+
174
+ ### Discussion of Biases
175
+
176
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
177
+
178
+ ### Other Known Limitations
179
+
180
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
181
+
182
+ ### Dataset Curators
183
+
184
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
185
+
186
+ ### Licensing Information
187
+
188
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
189
+
190
+ ### Citation Information
191
+
192
+ If you use any of the datasets, models or code modules, please cite the following paper:
193
+
194
+ ```
195
+ @misc{https://doi.org/10.48550/arxiv.2204.08776,
196
+ doi = {10.48550/ARXIV.2204.08776},
197
+
198
+ url = {https://arxiv.org/abs/2304.13005},
199
+
200
+ author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
201
+
202
+ keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},
203
+
204
+ title = {IE-SemParse: Evaluating Multilingual Inference for Indian Languages},
205
+
206
+ publisher = {arXiv},
207
+
208
+ year = {2022},
209
+
210
+ copyright = {Creative Commons Attribution 4.0 International}
211
+ }
212
+ ```
213
+
214
+ <!-- ### Contributions -->
.history/README_20230707233753.md ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - machine-generated
6
+ language:
7
+ - as
8
+ - bn
9
+ - gu
10
+ - hi
11
+ - kn
12
+ - ml
13
+ - mr
14
+ - or
15
+ - pa
16
+ - ta
17
+ - te
18
+ license:
19
+ - cc0-1.0
20
+ multilinguality:
21
+ - multilingual
22
+ pretty_name: IE-SemParse
23
+ size_categories:
24
+ - 1M<n<10M
25
+ source_datasets:
26
+ - original
27
+ task_categories:
28
+ - text2text
29
+ task_ids:
30
+ - Semantic-Parsing
31
+ ---
32
+
33
+ # Dataset Card for "IE-SemParse"
34
+
35
+ ## Table of Contents
36
+
37
+ - [Dataset Card for "IE-SemParse"](#dataset-card-for-ie-semparse)
38
+ - [Table of Contents](#table-of-contents)
39
+ - [Dataset Description](#dataset-description)
40
+ - [Dataset Summary](#dataset-summary)
41
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
42
+ - [Languages](#languages)
43
+ - [Data Splits](#data-splits)
44
+ - [Dataset usage](#dataset-usage)
45
+ - [Dataset Creation](#dataset-creation)
46
+ - [Curation Rationale](#curation-rationale)
47
+ - [Source Data](#source-data)
48
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
49
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
50
+ - [Human Verification Process](#human-verification-process)
51
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
52
+ - [Social Impact of Dataset](#social-impact-of-dataset)
53
+ - [Discussion of Biases](#discussion-of-biases)
54
+ - [Other Known Limitations](#other-known-limitations)
55
+ - [Dataset Curators](#dataset-curators)
56
+ - [Licensing Information](#licensing-information)
57
+ - [Citation Information](#citation-information)
58
+
59
+ ## Dataset Description
60
+
61
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IE-SemParse>
62
+ - **Paper:** [Evaluating Inter-Bilingual Semantic Parsing for Indian Languages](https://arxiv.org/abs/2304.13005)
63
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
64
+
65
+ ### Dataset Summary
66
+
67
+ IE-SemParse is an InterBilingual Semantic Parsing Dataset for eleven major Indic languages that includes
68
+ Assamese (‘as’), Gujarat (‘gu’), Kannada (‘kn’),
69
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
70
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
71
+ (‘hi’), and Bengali (‘bn’).
72
+
73
+ ### Supported Tasks and Leaderboards
74
+
75
+ **Tasks:** Natural Language Inference
76
+
77
+ **Leaderboards:** Currently there is no Leaderboard for this dataset.
78
+
79
+ ### Languages
80
+
81
+ - `Assamese (as)`
82
+ - `Bengali (bn)`
83
+ - `Gujarati (gu)`
84
+ - `Kannada (kn)`
85
+ - `Hindi (hi)`
86
+ - `Malayalam (ml)`
87
+ - `Marathi (mr)`
88
+ - `Oriya (or)`
89
+ - `Punjabi (pa)`
90
+ - `Tamil (ta)`
91
+ - `Telugu (te)`
92
+
93
+ ...
94
+
95
+
96
+ ### Data Splits
97
+
98
+ <!-- Below is the dataset split given for `hi` dataset.
99
+
100
+ ```python
101
+ DatasetDict({
102
+ train: Dataset({
103
+ features: ['utterance', 'logical form', 'intent'],
104
+ num_rows: 392702
105
+ })
106
+ test: Dataset({
107
+ features: ['premise', 'hypothesis', 'label'],
108
+ num_rows: 5010
109
+ })
110
+ validation: Dataset({
111
+ features: ['premise', 'hypothesis', 'label'],
112
+ num_rows: 2490
113
+ })
114
+ })
115
+
116
+ ``` -->
117
+
118
+ Language | ISO 639-1 Code |Train | Dev | Test |
119
+ --------------|----------------|-------|-----|------|
120
+ Assamese | as | 392,702 | 5,010 | 2,490 |
121
+ Bengali | bn | 392,702 | 5,010 | 2,490 |
122
+ Gujarati | gu | 392,702 | 5,010 | 2,490 |
123
+ Hindi | hi | 392,702 | 5,010 | 2,490 |
124
+ Kannada | kn | 392,702 | 5,010 | 2,490 |
125
+ Malayalam | ml |392,702 | 5,010 | 2,490 |
126
+ Marathi | mr |392,702 | 5,010 | 2,490 |
127
+ Oriya | or | 392,702 | 5,010 | 2,490 |
128
+ Punjabi | pa | 392,702 | 5,010 | 2,490 |
129
+ Tamil | ta | 392,702 | 5,010 | 2,490 |
130
+ Telugu | te | 392,702 | 5,010 | 2,490 |
131
+
132
+ <!-- The dataset split remains same across all languages. -->
133
+
134
+ ## Dataset usage
135
+
136
+ Code snippet for using the dataset using datasets library.
137
+
138
+ ```python
139
+ from datasets import load_dataset
140
+
141
+ dataset = load_dataset("Divyanshu/indicxnli")
142
+ ```
143
+
144
+ ## Dataset Creation
145
+
146
+ Machine translation of XNLI english dataset to 11 listed Indic Languages.
147
+
148
+ ### Curation Rationale
149
+
150
+ [More information needed]
151
+
152
+ ### Source Data
153
+
154
+ [XNLI dataset](https://cims.nyu.edu/~sbowman/xnli/)
155
+
156
+ #### Initial Data Collection and Normalization
157
+
158
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
159
+
160
+ #### Who are the source language producers?
161
+
162
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
163
+
164
+ #### Human Verification Process
165
+
166
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
167
+
168
+ ## Considerations for Using the Data
169
+
170
+ ### Social Impact of Dataset
171
+
172
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
173
+
174
+ ### Discussion of Biases
175
+
176
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
177
+
178
+ ### Other Known Limitations
179
+
180
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
181
+
182
+ ### Dataset Curators
183
+
184
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
185
+
186
+ ### Licensing Information
187
+
188
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
189
+
190
+ ### Citation Information
191
+
192
+ If you use any of the datasets, models or code modules, please cite the following paper:
193
+
194
+ ```
195
+ @misc{https://doi.org/10.48550/arxiv.2204.08776,
196
+ doi = {10.48550/ARXIV.2204.08776},
197
+
198
+ url = {https://arxiv.org/abs/2304.13005},
199
+
200
+ author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
201
+
202
+ keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},
203
+
204
+ title = {IE-SemParse: Evaluating Multilingual Inference for Indian Languages},
205
+
206
+ publisher = {arXiv},
207
+
208
+ year = {2022},
209
+
210
+ copyright = {Creative Commons Attribution 4.0 International}
211
+ }
212
+ ```
213
+
214
+ <!-- ### Contributions -->
.history/README_20230707233759.md ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - machine-generated
6
+ language:
7
+ - as
8
+ - bn
9
+ - gu
10
+ - hi
11
+ - kn
12
+ - ml
13
+ - mr
14
+ - or
15
+ - pa
16
+ - ta
17
+ - te
18
+ license:
19
+ - cc0-1.0
20
+ multilinguality:
21
+ - multilingual
22
+ pretty_name: IE-SemParse
23
+ size_categories:
24
+ - 1M<n<10M
25
+ source_datasets:
26
+ - original
27
+ task_categories:
28
+ - text2text
29
+ task_ids:
30
+ - Semantic-Parsing
31
+ ---
32
+
33
+ # Dataset Card for "IE-SemParse"
34
+
35
+ ## Table of Contents
36
+
37
+ - [Dataset Card for "IE-SemParse"](#dataset-card-for-ie-semparse)
38
+ - [Table of Contents](#table-of-contents)
39
+ - [Dataset Description](#dataset-description)
40
+ - [Dataset Summary](#dataset-summary)
41
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
42
+ - [Languages](#languages)
43
+ - [Data Splits](#data-splits)
44
+ - [Dataset usage](#dataset-usage)
45
+ - [Dataset Creation](#dataset-creation)
46
+ - [Curation Rationale](#curation-rationale)
47
+ - [Source Data](#source-data)
48
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
49
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
50
+ - [Human Verification Process](#human-verification-process)
51
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
52
+ - [Social Impact of Dataset](#social-impact-of-dataset)
53
+ - [Discussion of Biases](#discussion-of-biases)
54
+ - [Other Known Limitations](#other-known-limitations)
55
+ - [Dataset Curators](#dataset-curators)
56
+ - [Licensing Information](#licensing-information)
57
+ - [Citation Information](#citation-information)
58
+
59
+ ## Dataset Description
60
+
61
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IE-SemParse>
62
+ - **Paper:** [Evaluating Inter-Bilingual Semantic Parsing for Indian Languages](https://arxiv.org/abs/2304.13005)
63
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
64
+
65
+ ### Dataset Summary
66
+
67
+ IE-SemParse is an InterBilingual Semantic Parsing Dataset for eleven major Indic languages that includes
68
+ Assamese (‘as’), Gujarat (‘gu’), Kannada (‘kn’),
69
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
70
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
71
+ (‘hi’), and Bengali (‘bn’).
72
+
73
+ ### Supported Tasks and Leaderboards
74
+
75
+ **Tasks:** Natural Language Inference
76
+
77
+ **Leaderboards:** Currently there is no Leaderboard for this dataset.
78
+
79
+ ### Languages
80
+
81
+ - `Assamese (as)`
82
+ - `Bengali (bn)`
83
+ - `Gujarati (gu)`
84
+ - `Kannada (kn)`
85
+ - `Hindi (hi)`
86
+ - `Malayalam (ml)`
87
+ - `Marathi (mr)`
88
+ - `Oriya (or)`
89
+ - `Punjabi (pa)`
90
+ - `Tamil (ta)`
91
+ - `Telugu (te)`
92
+
93
+ ...
94
+
95
+
96
+ ### Data Splits
97
+
98
+ <!-- Below is the dataset split given for `hi` dataset.
99
+
100
+ ```python
101
+ DatasetDict({
102
+ train: Dataset({
103
+ features: ['utterance', 'logical form', 'intent'],
104
+ num_rows: 392702
105
+ })
106
+ test: Dataset({
107
+ features: ['utterance', 'logical form', 'intent'],
108
+ num_rows: 5010
109
+ })
110
+ validation: Dataset({
111
+ features: ['premise', 'hypothesis', 'label'],
112
+ num_rows: 2490
113
+ })
114
+ })
115
+
116
+ ``` -->
117
+
118
+ Language | ISO 639-1 Code |Train | Dev | Test |
119
+ --------------|----------------|-------|-----|------|
120
+ Assamese | as | 392,702 | 5,010 | 2,490 |
121
+ Bengali | bn | 392,702 | 5,010 | 2,490 |
122
+ Gujarati | gu | 392,702 | 5,010 | 2,490 |
123
+ Hindi | hi | 392,702 | 5,010 | 2,490 |
124
+ Kannada | kn | 392,702 | 5,010 | 2,490 |
125
+ Malayalam | ml |392,702 | 5,010 | 2,490 |
126
+ Marathi | mr |392,702 | 5,010 | 2,490 |
127
+ Oriya | or | 392,702 | 5,010 | 2,490 |
128
+ Punjabi | pa | 392,702 | 5,010 | 2,490 |
129
+ Tamil | ta | 392,702 | 5,010 | 2,490 |
130
+ Telugu | te | 392,702 | 5,010 | 2,490 |
131
+
132
+ <!-- The dataset split remains same across all languages. -->
133
+
134
+ ## Dataset usage
135
+
136
+ Code snippet for using the dataset using datasets library.
137
+
138
+ ```python
139
+ from datasets import load_dataset
140
+
141
+ dataset = load_dataset("Divyanshu/indicxnli")
142
+ ```
143
+
144
+ ## Dataset Creation
145
+
146
+ Machine translation of XNLI english dataset to 11 listed Indic Languages.
147
+
148
+ ### Curation Rationale
149
+
150
+ [More information needed]
151
+
152
+ ### Source Data
153
+
154
+ [XNLI dataset](https://cims.nyu.edu/~sbowman/xnli/)
155
+
156
+ #### Initial Data Collection and Normalization
157
+
158
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
159
+
160
+ #### Who are the source language producers?
161
+
162
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
163
+
164
+ #### Human Verification Process
165
+
166
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
167
+
168
+ ## Considerations for Using the Data
169
+
170
+ ### Social Impact of Dataset
171
+
172
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
173
+
174
+ ### Discussion of Biases
175
+
176
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
177
+
178
+ ### Other Known Limitations
179
+
180
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
181
+
182
+ ### Dataset Curators
183
+
184
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
185
+
186
+ ### Licensing Information
187
+
188
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
189
+
190
+ ### Citation Information
191
+
192
+ If you use any of the datasets, models or code modules, please cite the following paper:
193
+
194
+ ```
195
+ @misc{https://doi.org/10.48550/arxiv.2204.08776,
196
+ doi = {10.48550/ARXIV.2204.08776},
197
+
198
+ url = {https://arxiv.org/abs/2304.13005},
199
+
200
+ author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
201
+
202
+ keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},
203
+
204
+ title = {IE-SemParse: Evaluating Multilingual Inference for Indian Languages},
205
+
206
+ publisher = {arXiv},
207
+
208
+ year = {2022},
209
+
210
+ copyright = {Creative Commons Attribution 4.0 International}
211
+ }
212
+ ```
213
+
214
+ <!-- ### Contributions -->
.history/README_20230707233801.md ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - machine-generated
6
+ language:
7
+ - as
8
+ - bn
9
+ - gu
10
+ - hi
11
+ - kn
12
+ - ml
13
+ - mr
14
+ - or
15
+ - pa
16
+ - ta
17
+ - te
18
+ license:
19
+ - cc0-1.0
20
+ multilinguality:
21
+ - multilingual
22
+ pretty_name: IE-SemParse
23
+ size_categories:
24
+ - 1M<n<10M
25
+ source_datasets:
26
+ - original
27
+ task_categories:
28
+ - text2text
29
+ task_ids:
30
+ - Semantic-Parsing
31
+ ---
32
+
33
+ # Dataset Card for "IE-SemParse"
34
+
35
+ ## Table of Contents
36
+
37
+ - [Dataset Card for "IE-SemParse"](#dataset-card-for-ie-semparse)
38
+ - [Table of Contents](#table-of-contents)
39
+ - [Dataset Description](#dataset-description)
40
+ - [Dataset Summary](#dataset-summary)
41
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
42
+ - [Languages](#languages)
43
+ - [Data Splits](#data-splits)
44
+ - [Dataset usage](#dataset-usage)
45
+ - [Dataset Creation](#dataset-creation)
46
+ - [Curation Rationale](#curation-rationale)
47
+ - [Source Data](#source-data)
48
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
49
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
50
+ - [Human Verification Process](#human-verification-process)
51
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
52
+ - [Social Impact of Dataset](#social-impact-of-dataset)
53
+ - [Discussion of Biases](#discussion-of-biases)
54
+ - [Other Known Limitations](#other-known-limitations)
55
+ - [Dataset Curators](#dataset-curators)
56
+ - [Licensing Information](#licensing-information)
57
+ - [Citation Information](#citation-information)
58
+
59
+ ## Dataset Description
60
+
61
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IE-SemParse>
62
+ - **Paper:** [Evaluating Inter-Bilingual Semantic Parsing for Indian Languages](https://arxiv.org/abs/2304.13005)
63
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
64
+
65
+ ### Dataset Summary
66
+
67
+ IE-SemParse is an InterBilingual Semantic Parsing Dataset for eleven major Indic languages that includes
68
+ Assamese (‘as’), Gujarat (‘gu’), Kannada (‘kn’),
69
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
70
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
71
+ (‘hi’), and Bengali (‘bn’).
72
+
73
+ ### Supported Tasks and Leaderboards
74
+
75
+ **Tasks:** Natural Language Inference
76
+
77
+ **Leaderboards:** Currently there is no Leaderboard for this dataset.
78
+
79
+ ### Languages
80
+
81
+ - `Assamese (as)`
82
+ - `Bengali (bn)`
83
+ - `Gujarati (gu)`
84
+ - `Kannada (kn)`
85
+ - `Hindi (hi)`
86
+ - `Malayalam (ml)`
87
+ - `Marathi (mr)`
88
+ - `Oriya (or)`
89
+ - `Punjabi (pa)`
90
+ - `Tamil (ta)`
91
+ - `Telugu (te)`
92
+
93
+ ...
94
+
95
+
96
+ ### Data Splits
97
+
98
+ <!-- Below is the dataset split given for `hi` dataset.
99
+
100
+ ```python
101
+ DatasetDict({
102
+ train: Dataset({
103
+ features: ['utterance', 'logical form', 'intent'],
104
+ num_rows: 392702
105
+ })
106
+ test: Dataset({
107
+ features: ['utterance', 'logical form', 'intent'],
108
+ num_rows: 5010
109
+ })
110
+ validation: Dataset({
111
+ features: ['utterance', 'logical form', 'intent'],
112
+ num_rows: 2490
113
+ })
114
+ })
115
+
116
+ ``` -->
117
+
118
+ Language | ISO 639-1 Code |Train | Dev | Test |
119
+ --------------|----------------|-------|-----|------|
120
+ Assamese | as | 392,702 | 5,010 | 2,490 |
121
+ Bengali | bn | 392,702 | 5,010 | 2,490 |
122
+ Gujarati | gu | 392,702 | 5,010 | 2,490 |
123
+ Hindi | hi | 392,702 | 5,010 | 2,490 |
124
+ Kannada | kn | 392,702 | 5,010 | 2,490 |
125
+ Malayalam | ml |392,702 | 5,010 | 2,490 |
126
+ Marathi | mr |392,702 | 5,010 | 2,490 |
127
+ Oriya | or | 392,702 | 5,010 | 2,490 |
128
+ Punjabi | pa | 392,702 | 5,010 | 2,490 |
129
+ Tamil | ta | 392,702 | 5,010 | 2,490 |
130
+ Telugu | te | 392,702 | 5,010 | 2,490 |
131
+
132
+ <!-- The dataset split remains same across all languages. -->
133
+
134
+ ## Dataset usage
135
+
136
+ Code snippet for using the dataset using datasets library.
137
+
138
+ ```python
139
+ from datasets import load_dataset
140
+
141
+ dataset = load_dataset("Divyanshu/indicxnli")
142
+ ```
143
+
144
+ ## Dataset Creation
145
+
146
+ Machine translation of XNLI english dataset to 11 listed Indic Languages.
147
+
148
+ ### Curation Rationale
149
+
150
+ [More information needed]
151
+
152
+ ### Source Data
153
+
154
+ [XNLI dataset](https://cims.nyu.edu/~sbowman/xnli/)
155
+
156
+ #### Initial Data Collection and Normalization
157
+
158
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
159
+
160
+ #### Who are the source language producers?
161
+
162
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
163
+
164
+ #### Human Verification Process
165
+
166
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
167
+
168
+ ## Considerations for Using the Data
169
+
170
+ ### Social Impact of Dataset
171
+
172
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
173
+
174
+ ### Discussion of Biases
175
+
176
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
177
+
178
+ ### Other Known Limitations
179
+
180
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
181
+
182
+ ### Dataset Curators
183
+
184
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
185
+
186
+ ### Licensing Information
187
+
188
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
189
+
190
+ ### Citation Information
191
+
192
+ If you use any of the datasets, models or code modules, please cite the following paper:
193
+
194
+ ```
195
+ @misc{https://doi.org/10.48550/arxiv.2204.08776,
196
+ doi = {10.48550/ARXIV.2204.08776},
197
+
198
+ url = {https://arxiv.org/abs/2304.13005},
199
+
200
+ author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
201
+
202
+ keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},
203
+
204
+ title = {IE-SemParse: Evaluating Multilingual Inference for Indian Languages},
205
+
206
+ publisher = {arXiv},
207
+
208
+ year = {2022},
209
+
210
+ copyright = {Creative Commons Attribution 4.0 International}
211
+ }
212
+ ```
213
+
214
+ <!-- ### Contributions -->
.history/README_20230707233805.md ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - machine-generated
6
+ language:
7
+ - as
8
+ - bn
9
+ - gu
10
+ - hi
11
+ - kn
12
+ - ml
13
+ - mr
14
+ - or
15
+ - pa
16
+ - ta
17
+ - te
18
+ license:
19
+ - cc0-1.0
20
+ multilinguality:
21
+ - multilingual
22
+ pretty_name: IE-SemParse
23
+ size_categories:
24
+ - 1M<n<10M
25
+ source_datasets:
26
+ - original
27
+ task_categories:
28
+ - text2text
29
+ task_ids:
30
+ - Semantic-Parsing
31
+ ---
32
+
33
+ # Dataset Card for "IE-SemParse"
34
+
35
+ ## Table of Contents
36
+
37
+ - [Dataset Card for "IE-SemParse"](#dataset-card-for-ie-semparse)
38
+ - [Table of Contents](#table-of-contents)
39
+ - [Dataset Description](#dataset-description)
40
+ - [Dataset Summary](#dataset-summary)
41
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
42
+ - [Languages](#languages)
43
+ - [Data Splits](#data-splits)
44
+ - [Dataset usage](#dataset-usage)
45
+ - [Dataset Creation](#dataset-creation)
46
+ - [Curation Rationale](#curation-rationale)
47
+ - [Source Data](#source-data)
48
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
49
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
50
+ - [Human Verification Process](#human-verification-process)
51
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
52
+ - [Social Impact of Dataset](#social-impact-of-dataset)
53
+ - [Discussion of Biases](#discussion-of-biases)
54
+ - [Other Known Limitations](#other-known-limitations)
55
+ - [Dataset Curators](#dataset-curators)
56
+ - [Licensing Information](#licensing-information)
57
+ - [Citation Information](#citation-information)
58
+
59
+ ## Dataset Description
60
+
61
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IE-SemParse>
62
+ - **Paper:** [Evaluating Inter-Bilingual Semantic Parsing for Indian Languages](https://arxiv.org/abs/2304.13005)
63
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
64
+
65
+ ### Dataset Summary
66
+
67
+ IE-SemParse is an InterBilingual Semantic Parsing Dataset for eleven major Indic languages that includes
68
+ Assamese (‘as’), Gujarat (‘gu’), Kannada (‘kn’),
69
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
70
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
71
+ (‘hi’), and Bengali (‘bn’).
72
+
73
+ ### Supported Tasks and Leaderboards
74
+
75
+ **Tasks:** Natural Language Inference
76
+
77
+ **Leaderboards:** Currently there is no Leaderboard for this dataset.
78
+
79
+ ### Languages
80
+
81
+ - `Assamese (as)`
82
+ - `Bengali (bn)`
83
+ - `Gujarati (gu)`
84
+ - `Kannada (kn)`
85
+ - `Hindi (hi)`
86
+ - `Malayalam (ml)`
87
+ - `Marathi (mr)`
88
+ - `Oriya (or)`
89
+ - `Punjabi (pa)`
90
+ - `Tamil (ta)`
91
+ - `Telugu (te)`
92
+
93
+ ...
94
+
95
+
96
+ ### Data Splits
97
+
98
+ <!-- Below is the dataset split given for `hi` dataset.
99
+
100
+ ```python
101
+ DatasetDict({
102
+ train: Dataset({
103
+ features: ['utterance', 'logical form', 'intent'],
104
+ num_rows:
105
+ })
106
+ test: Dataset({
107
+ features: ['utterance', 'logical form', 'intent'],
108
+ num_rows: 5010
109
+ })
110
+ validation: Dataset({
111
+ features: ['utterance', 'logical form', 'intent'],
112
+ num_rows: 2490
113
+ })
114
+ })
115
+
116
+ ``` -->
117
+
118
+ Language | ISO 639-1 Code |Train | Dev | Test |
119
+ --------------|----------------|-------|-----|------|
120
+ Assamese | as | 392,702 | 5,010 | 2,490 |
121
+ Bengali | bn | 392,702 | 5,010 | 2,490 |
122
+ Gujarati | gu | 392,702 | 5,010 | 2,490 |
123
+ Hindi | hi | 392,702 | 5,010 | 2,490 |
124
+ Kannada | kn | 392,702 | 5,010 | 2,490 |
125
+ Malayalam | ml |392,702 | 5,010 | 2,490 |
126
+ Marathi | mr |392,702 | 5,010 | 2,490 |
127
+ Oriya | or | 392,702 | 5,010 | 2,490 |
128
+ Punjabi | pa | 392,702 | 5,010 | 2,490 |
129
+ Tamil | ta | 392,702 | 5,010 | 2,490 |
130
+ Telugu | te | 392,702 | 5,010 | 2,490 |
131
+
132
+ <!-- The dataset split remains same across all languages. -->
133
+
134
+ ## Dataset usage
135
+
136
+ Code snippet for using the dataset using datasets library.
137
+
138
+ ```python
139
+ from datasets import load_dataset
140
+
141
+ dataset = load_dataset("Divyanshu/indicxnli")
142
+ ```
143
+
144
+ ## Dataset Creation
145
+
146
+ Machine translation of XNLI english dataset to 11 listed Indic Languages.
147
+
148
+ ### Curation Rationale
149
+
150
+ [More information needed]
151
+
152
+ ### Source Data
153
+
154
+ [XNLI dataset](https://cims.nyu.edu/~sbowman/xnli/)
155
+
156
+ #### Initial Data Collection and Normalization
157
+
158
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
159
+
160
+ #### Who are the source language producers?
161
+
162
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
163
+
164
+ #### Human Verification Process
165
+
166
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
167
+
168
+ ## Considerations for Using the Data
169
+
170
+ ### Social Impact of Dataset
171
+
172
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
173
+
174
+ ### Discussion of Biases
175
+
176
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
177
+
178
+ ### Other Known Limitations
179
+
180
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
181
+
182
+ ### Dataset Curators
183
+
184
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
185
+
186
+ ### Licensing Information
187
+
188
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
189
+
190
+ ### Citation Information
191
+
192
+ If you use any of the datasets, models or code modules, please cite the following paper:
193
+
194
+ ```
195
+ @misc{https://doi.org/10.48550/arxiv.2204.08776,
196
+ doi = {10.48550/ARXIV.2204.08776},
197
+
198
+ url = {https://arxiv.org/abs/2304.13005},
199
+
200
+ author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
201
+
202
+ keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},
203
+
204
+ title = {IE-SemParse: Evaluating Multilingual Inference for Indian Languages},
205
+
206
+ publisher = {arXiv},
207
+
208
+ year = {2022},
209
+
210
+ copyright = {Creative Commons Attribution 4.0 International}
211
+ }
212
+ ```
213
+
214
+ <!-- ### Contributions -->
.history/README_20230707233808.md ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - machine-generated
6
+ language:
7
+ - as
8
+ - bn
9
+ - gu
10
+ - hi
11
+ - kn
12
+ - ml
13
+ - mr
14
+ - or
15
+ - pa
16
+ - ta
17
+ - te
18
+ license:
19
+ - cc0-1.0
20
+ multilinguality:
21
+ - multilingual
22
+ pretty_name: IE-SemParse
23
+ size_categories:
24
+ - 1M<n<10M
25
+ source_datasets:
26
+ - original
27
+ task_categories:
28
+ - text2text
29
+ task_ids:
30
+ - Semantic-Parsing
31
+ ---
32
+
33
+ # Dataset Card for "IE-SemParse"
34
+
35
+ ## Table of Contents
36
+
37
+ - [Dataset Card for "IE-SemParse"](#dataset-card-for-ie-semparse)
38
+ - [Table of Contents](#table-of-contents)
39
+ - [Dataset Description](#dataset-description)
40
+ - [Dataset Summary](#dataset-summary)
41
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
42
+ - [Languages](#languages)
43
+ - [Data Splits](#data-splits)
44
+ - [Dataset usage](#dataset-usage)
45
+ - [Dataset Creation](#dataset-creation)
46
+ - [Curation Rationale](#curation-rationale)
47
+ - [Source Data](#source-data)
48
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
49
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
50
+ - [Human Verification Process](#human-verification-process)
51
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
52
+ - [Social Impact of Dataset](#social-impact-of-dataset)
53
+ - [Discussion of Biases](#discussion-of-biases)
54
+ - [Other Known Limitations](#other-known-limitations)
55
+ - [Dataset Curators](#dataset-curators)
56
+ - [Licensing Information](#licensing-information)
57
+ - [Citation Information](#citation-information)
58
+
59
+ ## Dataset Description
60
+
61
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IE-SemParse>
62
+ - **Paper:** [Evaluating Inter-Bilingual Semantic Parsing for Indian Languages](https://arxiv.org/abs/2304.13005)
63
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
64
+
65
+ ### Dataset Summary
66
+
67
+ IE-SemParse is an InterBilingual Semantic Parsing Dataset for eleven major Indic languages that includes
68
+ Assamese (‘as’), Gujarat (‘gu’), Kannada (‘kn’),
69
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
70
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
71
+ (‘hi’), and Bengali (‘bn’).
72
+
73
+ ### Supported Tasks and Leaderboards
74
+
75
+ **Tasks:** Natural Language Inference
76
+
77
+ **Leaderboards:** Currently there is no Leaderboard for this dataset.
78
+
79
+ ### Languages
80
+
81
+ - `Assamese (as)`
82
+ - `Bengali (bn)`
83
+ - `Gujarati (gu)`
84
+ - `Kannada (kn)`
85
+ - `Hindi (hi)`
86
+ - `Malayalam (ml)`
87
+ - `Marathi (mr)`
88
+ - `Oriya (or)`
89
+ - `Punjabi (pa)`
90
+ - `Tamil (ta)`
91
+ - `Telugu (te)`
92
+
93
+ ...
94
+
95
+
96
+ ### Data Splits
97
+
98
+ <!-- Below is the dataset split given for `hi` dataset.
99
+
100
+ ```python
101
+ DatasetDict({
102
+ train: Dataset({
103
+ features: ['utterance', 'logical form', 'intent'],
104
+ num_rows: 36000
105
+ })
106
+ test: Dataset({
107
+ features: ['utterance', 'logical form', 'intent'],
108
+ num_rows: 5010
109
+ })
110
+ validation: Dataset({
111
+ features: ['utterance', 'logical form', 'intent'],
112
+ num_rows: 2490
113
+ })
114
+ })
115
+
116
+ ``` -->
117
+
118
+ Language | ISO 639-1 Code |Train | Dev | Test |
119
+ --------------|----------------|-------|-----|------|
120
+ Assamese | as | 392,702 | 5,010 | 2,490 |
121
+ Bengali | bn | 392,702 | 5,010 | 2,490 |
122
+ Gujarati | gu | 392,702 | 5,010 | 2,490 |
123
+ Hindi | hi | 392,702 | 5,010 | 2,490 |
124
+ Kannada | kn | 392,702 | 5,010 | 2,490 |
125
+ Malayalam | ml |392,702 | 5,010 | 2,490 |
126
+ Marathi | mr |392,702 | 5,010 | 2,490 |
127
+ Oriya | or | 392,702 | 5,010 | 2,490 |
128
+ Punjabi | pa | 392,702 | 5,010 | 2,490 |
129
+ Tamil | ta | 392,702 | 5,010 | 2,490 |
130
+ Telugu | te | 392,702 | 5,010 | 2,490 |
131
+
132
+ <!-- The dataset split remains same across all languages. -->
133
+
134
+ ## Dataset usage
135
+
136
+ Code snippet for using the dataset using datasets library.
137
+
138
+ ```python
139
+ from datasets import load_dataset
140
+
141
+ dataset = load_dataset("Divyanshu/indicxnli")
142
+ ```
143
+
144
+ ## Dataset Creation
145
+
146
+ Machine translation of XNLI english dataset to 11 listed Indic Languages.
147
+
148
+ ### Curation Rationale
149
+
150
+ [More information needed]
151
+
152
+ ### Source Data
153
+
154
+ [XNLI dataset](https://cims.nyu.edu/~sbowman/xnli/)
155
+
156
+ #### Initial Data Collection and Normalization
157
+
158
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
159
+
160
+ #### Who are the source language producers?
161
+
162
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
163
+
164
+ #### Human Verification Process
165
+
166
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
167
+
168
+ ## Considerations for Using the Data
169
+
170
+ ### Social Impact of Dataset
171
+
172
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
173
+
174
+ ### Discussion of Biases
175
+
176
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
177
+
178
+ ### Other Known Limitations
179
+
180
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
181
+
182
+ ### Dataset Curators
183
+
184
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
185
+
186
+ ### Licensing Information
187
+
188
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
189
+
190
+ ### Citation Information
191
+
192
+ If you use any of the datasets, models or code modules, please cite the following paper:
193
+
194
+ ```
195
+ @misc{https://doi.org/10.48550/arxiv.2204.08776,
196
+ doi = {10.48550/ARXIV.2204.08776},
197
+
198
+ url = {https://arxiv.org/abs/2304.13005},
199
+
200
+ author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
201
+
202
+ keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},
203
+
204
+ title = {IE-SemParse: Evaluating Multilingual Inference for Indian Languages},
205
+
206
+ publisher = {arXiv},
207
+
208
+ year = {2022},
209
+
210
+ copyright = {Creative Commons Attribution 4.0 International}
211
+ }
212
+ ```
213
+
214
+ <!-- ### Contributions -->
.history/README_20230707233812.md ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - machine-generated
6
+ language:
7
+ - as
8
+ - bn
9
+ - gu
10
+ - hi
11
+ - kn
12
+ - ml
13
+ - mr
14
+ - or
15
+ - pa
16
+ - ta
17
+ - te
18
+ license:
19
+ - cc0-1.0
20
+ multilinguality:
21
+ - multilingual
22
+ pretty_name: IE-SemParse
23
+ size_categories:
24
+ - 1M<n<10M
25
+ source_datasets:
26
+ - original
27
+ task_categories:
28
+ - text2text
29
+ task_ids:
30
+ - Semantic-Parsing
31
+ ---
32
+
33
+ # Dataset Card for "IE-SemParse"
34
+
35
+ ## Table of Contents
36
+
37
+ - [Dataset Card for "IE-SemParse"](#dataset-card-for-ie-semparse)
38
+ - [Table of Contents](#table-of-contents)
39
+ - [Dataset Description](#dataset-description)
40
+ - [Dataset Summary](#dataset-summary)
41
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
42
+ - [Languages](#languages)
43
+ - [Data Splits](#data-splits)
44
+ - [Dataset usage](#dataset-usage)
45
+ - [Dataset Creation](#dataset-creation)
46
+ - [Curation Rationale](#curation-rationale)
47
+ - [Source Data](#source-data)
48
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
49
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
50
+ - [Human Verification Process](#human-verification-process)
51
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
52
+ - [Social Impact of Dataset](#social-impact-of-dataset)
53
+ - [Discussion of Biases](#discussion-of-biases)
54
+ - [Other Known Limitations](#other-known-limitations)
55
+ - [Dataset Curators](#dataset-curators)
56
+ - [Licensing Information](#licensing-information)
57
+ - [Citation Information](#citation-information)
58
+
59
+ ## Dataset Description
60
+
61
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IE-SemParse>
62
+ - **Paper:** [Evaluating Inter-Bilingual Semantic Parsing for Indian Languages](https://arxiv.org/abs/2304.13005)
63
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
64
+
65
+ ### Dataset Summary
66
+
67
+ IE-SemParse is an InterBilingual Semantic Parsing Dataset for eleven major Indic languages that includes
68
+ Assamese (‘as’), Gujarat (‘gu’), Kannada (‘kn’),
69
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
70
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
71
+ (‘hi’), and Bengali (‘bn’).
72
+
73
+ ### Supported Tasks and Leaderboards
74
+
75
+ **Tasks:** Natural Language Inference
76
+
77
+ **Leaderboards:** Currently there is no Leaderboard for this dataset.
78
+
79
+ ### Languages
80
+
81
+ - `Assamese (as)`
82
+ - `Bengali (bn)`
83
+ - `Gujarati (gu)`
84
+ - `Kannada (kn)`
85
+ - `Hindi (hi)`
86
+ - `Malayalam (ml)`
87
+ - `Marathi (mr)`
88
+ - `Oriya (or)`
89
+ - `Punjabi (pa)`
90
+ - `Tamil (ta)`
91
+ - `Telugu (te)`
92
+
93
+ ...
94
+
95
+
96
+ ### Data Splits
97
+
98
+ <!-- Below is the dataset split given for `hi` dataset.
99
+
100
+ ```python
101
+ DatasetDict({
102
+ train: Dataset({
103
+ features: ['utterance', 'logical form', 'intent'],
104
+ num_rows: 36000
105
+ })
106
+ test: Dataset({
107
+ features: ['utterance', 'logical form', 'intent'],
108
+ num_rows: 3000
109
+ })
110
+ validation: Dataset({
111
+ features: ['utterance', 'logical form', 'intent'],
112
+ num_rows: 2490
113
+ })
114
+ })
115
+
116
+ ``` -->
117
+
118
+ Language | ISO 639-1 Code |Train | Dev | Test |
119
+ --------------|----------------|-------|-----|------|
120
+ Assamese | as | 392,702 | 5,010 | 2,490 |
121
+ Bengali | bn | 392,702 | 5,010 | 2,490 |
122
+ Gujarati | gu | 392,702 | 5,010 | 2,490 |
123
+ Hindi | hi | 392,702 | 5,010 | 2,490 |
124
+ Kannada | kn | 392,702 | 5,010 | 2,490 |
125
+ Malayalam | ml |392,702 | 5,010 | 2,490 |
126
+ Marathi | mr |392,702 | 5,010 | 2,490 |
127
+ Oriya | or | 392,702 | 5,010 | 2,490 |
128
+ Punjabi | pa | 392,702 | 5,010 | 2,490 |
129
+ Tamil | ta | 392,702 | 5,010 | 2,490 |
130
+ Telugu | te | 392,702 | 5,010 | 2,490 |
131
+
132
+ <!-- The dataset split remains same across all languages. -->
133
+
134
+ ## Dataset usage
135
+
136
+ Code snippet for using the dataset using datasets library.
137
+
138
+ ```python
139
+ from datasets import load_dataset
140
+
141
+ dataset = load_dataset("Divyanshu/indicxnli")
142
+ ```
143
+
144
+ ## Dataset Creation
145
+
146
+ Machine translation of XNLI english dataset to 11 listed Indic Languages.
147
+
148
+ ### Curation Rationale
149
+
150
+ [More information needed]
151
+
152
+ ### Source Data
153
+
154
+ [XNLI dataset](https://cims.nyu.edu/~sbowman/xnli/)
155
+
156
+ #### Initial Data Collection and Normalization
157
+
158
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
159
+
160
+ #### Who are the source language producers?
161
+
162
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
163
+
164
+ #### Human Verification Process
165
+
166
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
167
+
168
+ ## Considerations for Using the Data
169
+
170
+ ### Social Impact of Dataset
171
+
172
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
173
+
174
+ ### Discussion of Biases
175
+
176
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
177
+
178
+ ### Other Known Limitations
179
+
180
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
181
+
182
+ ### Dataset Curators
183
+
184
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
185
+
186
+ ### Licensing Information
187
+
188
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
189
+
190
+ ### Citation Information
191
+
192
+ If you use any of the datasets, models or code modules, please cite the following paper:
193
+
194
+ ```
+ @misc{aggarwal2023iesemparse,
+       doi = {10.48550/arXiv.2304.13005},
+       url = {https://arxiv.org/abs/2304.13005},
+       author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
+       keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences},
+       title = {Evaluating Inter-Bilingual Semantic Parsing for Indian Languages},
+       publisher = {arXiv},
+       year = {2023},
+       copyright = {Creative Commons Attribution 4.0 International}
+ }
+ ```
+
+ <!-- ### Contributions -->
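For the `Dataset usage` and `Data Splits` sections of the card above, a minimal sketch of loading one configuration and inspecting its structure; the `itop_hi` configuration name is an assumption for illustration, since actual names follow the `{dataset}_{language}` pattern used by the builder script:

```python
from datasets import load_dataset

# "itop_hi" is an illustrative, assumed config name; real names follow "{dataset}_{language}".
ds = load_dataset("Divyanshu/IE-SemParse", "itop_hi")

# Printing the DatasetDict shows the per-split structure sketched in the card,
# with 'utterance', 'logical form' and 'intent' columns.
print(ds)
```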
.history/README_20230707233815.md ADDED
.history/r_20230707233419 ADDED
File without changes
.history/r_20230707233422 ADDED
File without changes
IE-SemParse.py CHANGED
@@ -55,7 +55,7 @@ _DATASETS = (
  _URL = "https://huggingface.co/datasets/Divyanshu/IE-SemParse/resolve/main/"
 
 
- class IESemParseConfig(datasets.BuilderConfig):
+ class IE_SemParseConfig(datasets.BuilderConfig):
      """BuilderConfig for IE-SemParse."""
 
      def __init__(self, dataset: str, language: str, **kwargs):
@@ -65,7 +65,7 @@ class IESemParseConfig(datasets.BuilderConfig):
              language: One of hi, bn, mr, as, ta, te, or, ml, pa, gu, kn
              **kwargs: keyword arguments forwarded to super.
          """
-         super(IESemParseConfig, self).__init__(**kwargs)
+         super(IE_SemParseConfig, self).__init__(**kwargs)
 
          self.dataset = dataset
          self.language = language
@@ -76,13 +76,13 @@ class IESemParseConfig(datasets.BuilderConfig):
              _URL, "unfiltered_data", dataset, f"{language}.json")]
 
 
- class IESemParse(datasets.GeneratorBasedBuilder):
+ class IE_SemParse(datasets.GeneratorBasedBuilder):
      """IE-SemParse: Inter-Bilingual Semantic Parsing Dataset for Indic Languages. Version 1.0."""
 
      VERSION = datasets.Version("1.0.0", "")
-     BUILDER_CONFIG_CLASS = IESemParseConfig
+     BUILDER_CONFIG_CLASS = IE_SemParseConfig
      BUILDER_CONFIGS = [
-         IESemParseConfig(
+         IE_SemParseConfig(
              name=f"{dataset}_{language}",
              language=language,
              dataset=dataset,
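Since the diff above renames the builder config class but keeps the `f"{dataset}_{language}"` config naming, a hedged sketch of how those configurations can be discovered and loaded with the `datasets` library follows; the `itop_hi` name is illustrative, not guaranteed:

```python
from datasets import get_dataset_config_names, load_dataset

# Each BUILDER_CONFIGS entry above is registered under the name f"{dataset}_{language}",
# so the available configs can be listed instead of hard-coded.
configs = get_dataset_config_names("Divyanshu/IE-SemParse")
print(configs)

# Load one task/language pair; "itop_hi" is an illustrative choice from the listed configs.
data = load_dataset("Divyanshu/IE-SemParse", "itop_hi")
```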
README.md CHANGED
@@ -1,3 +1,214 @@
  ---
- license: mit
+ annotations_creators:
+ - machine-generated
+ language_creators:
+ - machine-generated
+ language:
+ - as
+ - bn
+ - gu
+ - hi
+ - kn
+ - ml
+ - mr
+ - or
+ - pa
+ - ta
+ - te
+ license:
+ - cc0-1.0
+ multilinguality:
+ - multilingual
+ pretty_name: IE-SemParse
+ size_categories:
+ - 1M<n<10M
+ source_datasets:
+ - original
+ task_categories:
+ - text2text
+ task_ids:
+ - Semantic-Parsing
  ---
+
+ # Dataset Card for "IE-SemParse"
+
+ ## Table of Contents
+
+ - [Dataset Card for "IE-SemParse"](#dataset-card-for-ie-semparse)
+ - [Table of Contents](#table-of-contents)
+ - [Dataset Description](#dataset-description)
+ - [Dataset Summary](#dataset-summary)
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
+ - [Languages](#languages)
+ - [Data Splits](#data-splits)
+ - [Dataset usage](#dataset-usage)
+ - [Dataset Creation](#dataset-creation)
+ - [Curation Rationale](#curation-rationale)
+ - [Source Data](#source-data)
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
+ - [Human Verification Process](#human-verification-process)
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
+ - [Social Impact of Dataset](#social-impact-of-dataset)
+ - [Discussion of Biases](#discussion-of-biases)
+ - [Other Known Limitations](#other-known-limitations)
+ - [Dataset Curators](#dataset-curators)
+ - [Licensing Information](#licensing-information)
+ - [Citation Information](#citation-information)
+
+ ## Dataset Description
+
+ - **Homepage:** <https://github.com/divyanshuaggarwal/IE-SemParse>
+ - **Paper:** [Evaluating Inter-Bilingual Semantic Parsing for Indian Languages](https://arxiv.org/abs/2304.13005)
+ - **Point of Contact:** [Divyanshu Aggarwal](mailto:[email protected])
+
+ ### Dataset Summary
+
+ IE-SemParse is an inter-bilingual semantic parsing dataset for eleven major Indic languages:
+ Assamese (‘as’), Gujarati (‘gu’), Kannada (‘kn’),
+ Malayalam (‘ml’), Marathi (‘mr’), Odia (‘or’),
+ Punjabi (‘pa’), Tamil (‘ta’), Telugu (‘te’), Hindi
+ (‘hi’), and Bengali (‘bn’).
+
+ ### Supported Tasks and Leaderboards
+
+ **Tasks:** Semantic Parsing (utterance-to-logical-form generation)
+
+ **Leaderboards:** Currently there is no leaderboard for this dataset.
+
+ ### Languages
+
+ - `Assamese (as)`
+ - `Bengali (bn)`
+ - `Gujarati (gu)`
+ - `Kannada (kn)`
+ - `Hindi (hi)`
+ - `Malayalam (ml)`
+ - `Marathi (mr)`
+ - `Oriya (or)`
+ - `Punjabi (pa)`
+ - `Tamil (ta)`
+ - `Telugu (te)`
+
+ ...
+
+
+ ### Data Splits
+
+ <!-- Below is the dataset split given for `hi` dataset.
+
+ ```python
+ DatasetDict({
+     train: Dataset({
+         features: ['utterance', 'logical form', 'intent'],
+         num_rows: 36000
+     })
+     test: Dataset({
+         features: ['utterance', 'logical form', 'intent'],
+         num_rows: 3000
+     })
+     validation: Dataset({
+         features: ['utterance', 'logical form', 'intent'],
+         num_rows: 1500
+     })
+ })
+
+ ``` -->
+
+ Language  | ISO 639-1 Code | Train   | Dev   | Test  |
+ ----------|----------------|---------|-------|-------|
+ Assamese  | as             | 392,702 | 5,010 | 2,490 |
+ Bengali   | bn             | 392,702 | 5,010 | 2,490 |
+ Gujarati  | gu             | 392,702 | 5,010 | 2,490 |
+ Hindi     | hi             | 392,702 | 5,010 | 2,490 |
+ Kannada   | kn             | 392,702 | 5,010 | 2,490 |
+ Malayalam | ml             | 392,702 | 5,010 | 2,490 |
+ Marathi   | mr             | 392,702 | 5,010 | 2,490 |
+ Oriya     | or             | 392,702 | 5,010 | 2,490 |
+ Punjabi   | pa             | 392,702 | 5,010 | 2,490 |
+ Tamil     | ta             | 392,702 | 5,010 | 2,490 |
+ Telugu    | te             | 392,702 | 5,010 | 2,490 |
+
+ <!-- The dataset split remains same across all languages. -->
+
+ ## Dataset usage
+
+ Code snippet for loading the dataset with the `datasets` library; each configuration is named `{dataset}_{language}`:
+
+ ```python
+ from datasets import load_dataset
+
+ dataset = load_dataset("Divyanshu/IE-SemParse", "<dataset>_<language>")
+ ```
+
+ ## Dataset Creation
+
+ Machine translation of the English utterances of the source semantic parsing datasets into the 11 listed Indic languages.
+
+ ### Curation Rationale
+
+ [More information needed]
+
+ ### Source Data
+
+ English semantic parsing datasets, [detailed in the paper](https://arxiv.org/abs/2304.13005)
+
+ #### Initial Data Collection and Normalization
+
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
+
+ #### Who are the source language producers?
+
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
+
+ #### Human Verification Process
+
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
+
+ ## Considerations for Using the Data
+
+ ### Social Impact of Dataset
+
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
+
+ ### Discussion of Biases
+
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
+
+ ### Other Known Limitations
+
+ [Detailed in the paper](https://arxiv.org/abs/2304.13005)
+
+ ### Dataset Curators
+
+ Divyanshu Aggarwal, Vivek Gupta, Anoop Kunchukuttan
+
+ ### Licensing Information
+
+ Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
+
+ ### Citation Information
+
+ If you use any of the datasets, models or code modules, please cite the following paper:
+
+ ```
+ @misc{aggarwal2023iesemparse,
+       doi = {10.48550/arXiv.2304.13005},
+       url = {https://arxiv.org/abs/2304.13005},
+       author = {Aggarwal, Divyanshu and Gupta, Vivek and Kunchukuttan, Anoop},
+       keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences},
+       title = {Evaluating Inter-Bilingual Semantic Parsing for Indian Languages},
+       publisher = {arXiv},
+       year = {2023},
+       copyright = {Creative Commons Attribution 4.0 International}
+ }
+ ```
+
+ <!-- ### Contributions -->
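To complement the usage snippet in the card above, a minimal sketch of iterating the splits and reading one record; the `itop_hi` configuration name is assumed for illustration, and the column names are those listed in the card's Data Splits section:

```python
from datasets import load_dataset

# Assumed, illustrative config name; actual names follow "{dataset}_{language}".
ds = load_dataset("Divyanshu/IE-SemParse", "itop_hi")

# Split sizes and columns, to compare against the table in the card.
for split_name, split in ds.items():
    print(split_name, split.num_rows, split.column_names)

# One training record: the natural-language request, its target parse, and the intent label.
example = ds["train"][0]
print(example["utterance"])
print(example["logical form"])
print(example["intent"])
```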