Datasets:

Modalities:
Text
Formats:
parquet
Languages:
Catalan
ArXiv:
Libraries:
Datasets
pandas
License:
File size: 1,570 Bytes
c096dbe
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
import json
import pandas as pd
from sklearn.model_selection import train_test_split

# Reproducibility: one seed for the shuffle AND both stratified splits.
# (Previously the shuffle was unseeded, so reruns produced different splits
# despite the seeded train_test_split calls.)
SEED = 42

# both files downloaded from https://zenodo.org/record/4621378
path_to_teca1 = 'dataset_te1.json'
path_to_teca2 = 'dataset_te_vilaweb.json'

# load data to pandas dataframes
teca1 = pd.read_json(path_to_teca1) # Shape: (14997, 4)
teca2 = pd.read_json(path_to_teca2) # Shape: (6166, 4)
teca  = pd.concat([teca1, teca2])   # Shape: (21163, 4)

# remove "id" column, now columns are: ['premise', 'hypothesis', 'label']
teca.drop(['id'], axis=1, inplace=True)

# shuffle rows (seeded so the split is reproducible)
teca = teca.sample(frac=1, random_state=SEED).reset_index(drop=True)

# stratified split with hardcoded percentages: 80% train, 10% dev, 10% test
train, dev_test = train_test_split(teca, test_size=0.2, random_state=SEED, stratify=teca['label'])
dev, test = train_test_split(dev_test, test_size=0.5, random_state=SEED, stratify=dev_test['label'])

# report label distributions to confirm stratification worked
print('### VALUE COUNTS TECA ###')
print(teca['label'].value_counts())
print('### VALUE COUNTS TRAIN ###')
print(train['label'].value_counts())
print('###  VALUE COUNTS DEV  ###')
print(dev['label'].value_counts())
print('### VALUE COUNTS TEST ###')
print(test['label'].value_counts())
print('train shape:', train.shape[0], ', dev shape:', dev.shape[0], ', test shape:', test.shape[0])

# save train/dev/test sets as json files ({"version": ..., "data": [records]})
sets = {'train': train, 'dev': dev, 'test': test}
for name, split in sets.items():
    json_content = {"version": '1.0.1', "data": split.to_dict('records')}
    # ensure_ascii=False keeps Catalan accented characters readable
    # (semantically identical JSON, much smaller files)
    with open(name + '.json', 'w') as f:
        json.dump(json_content, f, ensure_ascii=False)