File size: 2,180 Bytes
ecad05e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8197968
ecad05e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8197968
ecad05e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
from __future__ import annotations

from pathlib import Path
import numpy as np

import datasets


# ISO 639-3 language code -> affix used in the HuggingFace dataset repo name
# "Paul/hatecheck-<affix>". English is the original HateCheck and lives at
# "Paul/hatecheck" with no affix, hence the empty string (the trailing "-" is
# stripped when building the path).
_HF_AFFIX = {
    "ara": "arabic",
    "cmn": "mandarin",
    "eng": "",
    "deu": "german",
    "fra": "french",
    "hin": "hindi",
    "ita": "italian",
    "nld": "dutch",
    "pol": "polish",
    "por": "portuguese",
    "spa": "spanish",
}

# Reverse lookup: repo affix -> language code. Relies on all affix values
# being unique (they are, including the single empty string for "eng").
_HF_AFFIX_REV = {v:k for k,v in _HF_AFFIX.items()}

# Language code -> pinned git commit SHA of the corresponding HuggingFace
# dataset repo, so downloads are reproducible even if the repos change.
_REVISION_DICT = {
    "ara": "65eb7455a05cb77b3ae0c69d444569a8eee54628",
    "cmn": "617d3e9fccd186277297cc305f6588af7384b008",
    "eng": "9d2ac89df04254e5c427bcc8d61b6d6c83a1f59b",
    "deu": "5229a5cc475f36c08d03ca52f0ccb005705e60d2",
    "fra": "5d3085f2129139abc10d2b58becd4d4f2978e5d5",
    "hin": "e9e68e1a4db04726b9278192377049d0f9693012",
    "ita": "21e3d5c827cb60619a89988b24979850a7af85a5",
    "nld": "d622427417d37a8d74e110e6289bc29af4ba4056",
    "pol": "28d7098e2e5a211c4810d0a4d8deccc5889e55b6",
    "por": "323bdf67e0fbd3d7f8086fad0971b5bd5a62524b",
    "spa": "a7ea759535bb9fad6361cca151cf94a46e88edf3",
}

def _transform(dataset):
    target_cols = ["test_case", "label_gold"]
    new_cols = ['text', 'is_hateful']
    rename_dict = dict(zip(target_cols, ["text", "is_hateful"]))
    dataset = dataset.rename_columns(rename_dict)
    keep_cols = new_cols + ["functionality"]
    remove_cols = [col for col in dataset["test"].column_names if col not in keep_cols]
    dataset = dataset.remove_columns(remove_cols)
    return dataset


def make_dataset():
    """Download every HateCheck language variant from the HuggingFace hub.

    For each language, loads the commit pinned in ``_REVISION_DICT``,
    normalizes columns via ``_transform`` and writes the test split to
    ``../<lang_code>/test.jsonl``.

    Returns:
        dict mapping ISO 639-3 language code -> transformed ``DatasetDict``.
    """
    ds = {}
    # Iterate code/affix pairs directly instead of walking the values and
    # reverse-looking-up each code through _HF_AFFIX_REV.
    for lcode, affix in _HF_AFFIX.items():
        # English has an empty affix; strip the resulting trailing hyphen
        # so the path becomes "Paul/hatecheck".
        path = f"Paul/hatecheck-{affix}".rstrip("-")
        dataset = datasets.load_dataset(
            path=path, revision=_REVISION_DICT[lcode]
        )
        dataset = _transform(dataset)
        out_path = Path("..") / lcode / "test.jsonl"
        # Create the per-language directory; to_json fails if it is missing.
        out_path.parent.mkdir(parents=True, exist_ok=True)
        dataset["test"].to_json(out_path)
        ds[lcode] = dataset
    return ds
    

if __name__ == '__main__':
    dataset = make_dataset()
    # Macro-average: mean text length per language, then averaged over
    # languages (each language weighted equally regardless of size).
    per_lang_avg = [
        np.mean([len(x['text']) for x in dataset[lang]['test']])
        for lang in _HF_AFFIX
    ]
    print(f'avg char: {np.mean(per_lang_avg)}')