Modalities: Text
Formats: parquet
Languages: Catalan
Libraries: Datasets, pandas
AnnaSallesRius committed
Commit 7f435ff
1 Parent(s): a72eaa5

Upload folder using huggingface_hub
Files changed (6)
  1. OLD/dev.json +3 -0
  2. OLD/splitter.py +41 -0
  3. OLD/splitter_with_ids.py +42 -0
  4. OLD/teca.py +116 -0
  5. OLD/test.json +3 -0
  6. OLD/train.json +3 -0
OLD/dev.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c46b5888a4fd7eb14225dd0db7074e40f22d51c5832903b58f14c44d582072f7
+ size 513528
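dev.json, test.json, and train.json are stored as Git LFS pointer files: the repository holds only the spec version, the sha256 object id, and the byte size, while the actual JSON lives in LFS storage. A minimal sketch of fetching the resolved file with huggingface_hub (assumption: the split file is served from the repo root, as the _URL in teca.py below expects; the Hub resolves LFS pointers transparently):

from huggingface_hub import hf_hub_download

# Download the real JSON behind the pointer; returns a local cache path.
local_path = hf_hub_download(
    repo_id="projecte-aina/teca",
    repo_type="dataset",
    filename="dev.json",  # assumption: root-level file, matching _URL below
)
print(local_path)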
OLD/splitter.py ADDED
@@ -0,0 +1,41 @@
+ import json
+ import pandas as pd
+ from sklearn.model_selection import train_test_split
+
+ # both files downloaded from https://zenodo.org/record/4621378
+ path_to_teca1 = 'dataset_te1.json'
+ path_to_teca2 = 'dataset_te_vilaweb.json'
+
+ # load data to pandas dataframes
+ teca1 = pd.read_json(path_to_teca1)  # Shape: (14997, 4)
+ teca2 = pd.read_json(path_to_teca2)  # Shape: (6166, 4)
+ teca = pd.concat([teca1, teca2])  # Shape: (21163, 4)
+
+ # remove "id" column; remaining columns: ['premise', 'hypothesis', 'label']
+ teca.drop(['id'], axis=1, inplace=True)
+
+ # shuffle rows
+ teca = teca.sample(frac=1).reset_index(drop=True)
+
+ # stratified split with hardcoded percentages: 80% train, 10% dev, 10% test
+ train, dev_test = train_test_split(teca, test_size=0.2, random_state=42, stratify=teca['label'])
+ dev, test = train_test_split(dev_test, test_size=0.5, random_state=42, stratify=dev_test['label'])
+
+ # report some stats
+ print('### VALUE COUNTS TECA ###')
+ print(teca['label'].value_counts())
+ print('### VALUE COUNTS TRAIN ###')
+ print(train['label'].value_counts())
+ print('### VALUE COUNTS DEV ###')
+ print(dev['label'].value_counts())
+ print('### VALUE COUNTS TEST ###')
+ print(test['label'].value_counts())
+ print('train shape:', train.shape[0], ', dev shape:', dev.shape[0], ', test shape:', test.shape[0])
+
+ # save train/dev/test sets as json files
+ sets = {'train': train, 'dev': dev, 'test': test}
+ for key in sets:
+     set_dict = sets[key].to_dict('records')
+     json_content = {"version": '1.0.1', "data": set_dict}
+     with open(key + '.json', 'w') as f:
+         json.dump(json_content, f)
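The emitted split files wrap the records under a "data" key alongside a "version" field; a minimal sketch of reading one back (assuming splitter.py has been run in the current directory):

import json

with open('train.json') as f:
    train_set = json.load(f)

print(train_set['version'])         # '1.0.1'
print(len(train_set['data']))       # about 80% of the 21163 rows
print(train_set['data'][0].keys())  # dict_keys(['premise', 'hypothesis', 'label'])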
OLD/splitter_with_ids.py ADDED
@@ -0,0 +1,42 @@
+ import json
+ import pandas as pd
+ from sklearn.model_selection import train_test_split
+
+ # both files downloaded from https://zenodo.org/record/4621378
+ path_to_teca1 = 'dataset_te1.json'
+ path_to_teca2 = 'dataset_te_vilaweb.json'
+
+ teca1 = pd.read_json(path_to_teca1)  # Shape: (14997, 4)
+ teca2 = pd.read_json(path_to_teca2)  # Shape: (6166, 4)
+
+ teca1['id'] = 'te1_' + teca1['id'].astype(str)  # prefix ids with their source
+ teca2['id'] = 'vila_' + teca2['id'].astype(str)  # so they stay unique after concatenation
+
+ teca = pd.concat([teca1, teca2])  # Shape: (21163, 4)
+ # teca.drop(['id'], axis=1, inplace=True)  # would leave columns: ['premise', 'hypothesis', 'label']
+ teca = teca.sample(frac=1).reset_index(drop=True)  # shuffle rows
+
+ print('### VALUE COUNTS TECA ###')
+ print(teca['label'].value_counts())
+
+ # stratified split with hardcoded percentages: 80% train, 10% dev, 10% test
+ train, dev_test = train_test_split(teca, test_size=0.2, random_state=42, stratify=teca['label'])
+ dev, test = train_test_split(dev_test, test_size=0.5, random_state=42, stratify=dev_test['label'])
+
+ print('### VALUE COUNTS TRAIN ###')
+ print(train['label'].value_counts())
+ print('### VALUE COUNTS DEV ###')
+ print(dev['label'].value_counts())
+ print('### VALUE COUNTS TEST ###')
+ print(test['label'].value_counts())
+ print('train shape:', train.shape[0], ', dev shape:', dev.shape[0], ', test shape:', test.shape[0])
+
+ print(train.head())
+
+ sets = {'train': train, 'dev': dev, 'test': test, 'full': teca}
+
+ for key in sets:
+     set_dict = sets[key].to_dict('records')
+     json_content = {"version": '1.0.1', "data": set_dict}
+     with open(key + '.json', 'w') as f:
+         json.dump(json_content, f)
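This variant differs from splitter.py in keeping the "id" column, namespaced by source subset, and in writing an extra "full" export. A quick sketch of the uniqueness invariant the prefixes are meant to preserve (assuming the dataframes above are in scope):

# Every id carries its source prefix, so the two subsets cannot collide.
assert teca['id'].str.startswith(('te1_', 'vila_')).all()
# Holds as long as ids are unique within each source file.
assert teca['id'].is_unique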
OLD/teca.py ADDED
@@ -0,0 +1,116 @@
+ # Loading script for the TECA dataset.
+ import json
+ import datasets
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _CITATION = """
+ @inproceedings{armengol-estape-etal-2021-multilingual,
+     title = "Are Multilingual Models the Best Choice for Moderately Under-resourced Languages? {A} Comprehensive Assessment for {C}atalan",
+     author = "Armengol-Estap{\'e}, Jordi and
+       Carrino, Casimiro Pio and
+       Rodriguez-Penagos, Carlos and
+       de Gibert Bonet, Ona and
+       Armentano-Oller, Carme and
+       Gonzalez-Agirre, Aitor and
+       Melero, Maite and
+       Villegas, Marta",
+     booktitle = "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021",
+     month = aug,
+     year = "2021",
+     address = "Online",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/2021.findings-acl.437",
+     doi = "10.18653/v1/2021.findings-acl.437",
+     pages = "4933--4946",
+ }
+ """
+
+ _DESCRIPTION = """
+ TECA consists of two subsets of textual entailment in Catalan, *catalan_TE1* and *vilaweb_TE*, which contain 14997 and 6166 pairs of premises and hypotheses, annotated according to the inference relation they have (implication, contradiction or neutral). This dataset was developed by BSC TeMU as part of the AINA project and is intended as part of the Catalan Language Understanding Benchmark (CLUB).
+ """
+
+ _HOMEPAGE = """https://zenodo.org/record/4621378"""
+
+ # TODO: upload datasets to github
+ _URL = "https://huggingface.co/datasets/projecte-aina/teca/resolve/main/"
+ _TRAINING_FILE = "train.json"
+ _DEV_FILE = "dev.json"
+ _TEST_FILE = "test.json"
+
+
+ class tecaConfig(datasets.BuilderConfig):
+     """Builder config for the TECA dataset."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for TECA.
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(tecaConfig, self).__init__(**kwargs)
+
+
+ class teca(datasets.GeneratorBasedBuilder):
+     """TECA Dataset."""
+
+     BUILDER_CONFIGS = [
+         tecaConfig(
+             name="teca",
+             version=datasets.Version("1.0.1"),
+             description="teca dataset",
+         ),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "premise": datasets.Value("string"),
+                     "hypothesis": datasets.Value("string"),
+                     "label": datasets.features.ClassLabel(
+                         names=[
+                             "entailment",
+                             "neutral",
+                             "contradiction",
+                         ]
+                     ),
+                 }
+             ),
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         urls_to_download = {
+             "train": f"{_URL}{_TRAINING_FILE}",
+             "dev": f"{_URL}{_DEV_FILE}",
+             "test": f"{_URL}{_TEST_FILE}",
+         }
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
+         ]
+
+     def _generate_examples(self, filepath):
+         """This function returns the examples in the raw (text) form."""
+         logger.info("generating examples from = %s", filepath)
+         with open(filepath, encoding="utf-8") as f:
+             data_dict = json.load(f)
+             for id_, article in enumerate(data_dict["data"]):
+                 original_id = article["id"]
+                 premise = article["premise"]
+                 hypothesis = article["hypothesis"]
+                 label = article["label"]
+                 yield id_, {
+                     "id": original_id,
+                     "premise": premise,
+                     "hypothesis": hypothesis,
+                     "label": label,
+                 }
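With the loading script published, the dataset can be consumed through the datasets library; a minimal usage sketch (split names follow the SplitGenerators above; recent datasets versions may require trust_remote_code=True for script-based datasets):

from datasets import load_dataset

teca = load_dataset("projecte-aina/teca")  # add trust_remote_code=True if prompted
print(teca)                                   # DatasetDict: train / validation / test
print(teca["train"][0])                       # {'id': ..., 'premise': ..., 'hypothesis': ..., 'label': ...}
print(teca["train"].features["label"].names)  # ['entailment', 'neutral', 'contradiction']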
OLD/test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe100977ffa0bf228cc0a032f26872374e031c928e0fa4692ddf617690afc83b
+ size 509308
OLD/train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1977a676bb22fdada80241c01dd6a8a52313535be25c6f4ef387d25b8fa2829c
+ size 4100267
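A pointer can be cross-checked against the resolved file, since the oid is the sha256 digest of the content and the size is its byte count; a sketch using only the standard library (assuming the resolved train.json is on disk):

import hashlib
import os

path = 'train.json'  # the resolved file, not the LFS pointer
with open(path, 'rb') as f:
    digest = hashlib.sha256(f.read()).hexdigest()
print(digest)                 # should match the pointer's oid (1977a676...)
print(os.path.getsize(path))  # should match the pointer's size (4100267)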