Tasks: Text Classification
Modalities: Text
Formats: json
Sub-tasks: multi-class-classification
Languages: Catalan
Size: 10K - 100K
License:
# Loading script for the CaWikiTC dataset.
import json
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """
"""
_DESCRIPTION = """
Dataset automatically created from Catalan Wikipedia articles and their associated categories.
"""
_URL = "./"
_TRAINING_FILE = "train.json"
_DEV_FILE = "dev.json"
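# Note (added for clarity, not part of the original script): train.json and
# dev.json are expected to be JSON arrays of records shaped like
# {"text": "...", "label": "Administració"}, as read by _generate_examples
# below. Since _URL is "./", both files resolve relative to the script's own
# location.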
class ca_wiki_tcConfig(datasets.BuilderConfig):
    """Builder config for the CaWikiTC dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for CaWikiTC.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(ca_wiki_tcConfig, self).__init__(**kwargs)


class ca_wiki_tc(datasets.GeneratorBasedBuilder):
    """CaWikiTC dataset."""

    BUILDER_CONFIGS = [
        ca_wiki_tcConfig(
            name="ca-wiki-tc",
            version=datasets.Version("1.0.1"),
            description="CaWikiTC dataset",
        ),
    ]
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(
                        names=[
                            "Administració",
                            "Aeronàutica",
                            "Agricultura",
                            "Antropologia",
                            "Arqueologia",
                            "Arquitectura",
                            "Art",
                            "Astronomia",
                            "Astronàutica",
                            "Biblioteconomia",
                            "Biotecnologia",
                            "Catàstrofes",
                            "Circ",
                            "Ciència militar",
                            "Ciència-ficció",
                            "Ciències ambientals",
                            "Ciències de la salut",
                            "Ciències polítiques",
                            "Conflictes",
                            "Cronometria",
                            "Cultura popular",
                            "Dansa",
                            "Dret",
                            "Ecologia",
                            "Enginyeria",
                            "Epidèmies",
                            "Esoterisme",
                            "Estris",
                            "Festivals",
                            "Filologia",
                            "Filosofia",
                            "Fiscalitat",
                            "Física",
                            "Geografia",
                            "Geologia",
                            "Gestió",
                            "Heràldica",
                            "Història",
                            "Humor",
                            "Indumentària",
                            "Informàtica",
                            "Jaciments paleontològics",
                            "Jocs",
                            "Lingüística",
                            "Llengües",
                            "Llocs ficticis",
                            "Matemàtiques",
                            "Metodologia",
                            "Mitologia",
                            "Multimèdia",
                            "Museologia",
                            "Nàutica",
                            "Objectes astronòmics",
                            "Pedagogia",
                            "Periodisme",
                            "Protestes",
                            "Pseudociència",
                            "Psicologia",
                            "Química",
                            "Robòtica",
                            "Ràdio",
                            "Seguretat laboral",
                            "Sociologia",
                            "Telecomunicacions",
                            "Televisió",
                            "Teologia",
                            "Ètica",
                        ]
                    ),
                }
            ),
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
        ]
    def _generate_examples(self, filepath):
        """Yields examples in raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
            for id_, article in enumerate(data):
                # Each record carries a "text" field and a "label" field
                # matching one of the class names declared above.
                text = article["text"]
                label = article["label"]
                yield id_, {
                    "text": text,
                    "label": label,
                }
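For reference, a minimal loading sketch. The local path and the trust_remote_code flag are assumptions about how the script is laid out on disk and which version of the datasets library is installed, not part of the script itself:

import datasets

# Point load_dataset at the directory holding this script together with
# train.json and dev.json (hypothetical local path).
dataset = datasets.load_dataset("./ca_wiki_tc", trust_remote_code=True)

example = dataset["train"][0]
print(example["text"][:100])
# ClassLabel stores labels as integers; int2str recovers the category name.
print(dataset["train"].features["label"].int2str(example["label"]))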