"""Victorian."""

from typing import List

import datasets
import pandas


VERSION = datasets.Version("1.0.0")
_ORIGINAL_FEATURE_NAMES = [
    "text",
    "author"
]
_BASE_FEATURE_NAMES = [
    "text",
    "author"
]

DESCRIPTION = "Victorian dataset from the Gungor thesis."
_HOMEPAGE = "https://scholarworks.iupui.edu/server/api/core/bitstreams/708a9870-915e-4d59-b54d-938af563c196/content"
_URLS = ("https://scholarworks.iupui.edu/server/api/core/bitstreams/708a9870-915e-4d59-b54d-938af563c196/content",)
_CITATION = """
@phdthesis{gungor2018benchmarking,
  title={Benchmarking authorship attribution techniques using over a thousand books by fifty victorian era novelists},
  author={Gungor, Abdulmecit},
  year={2018},
  school={Purdue University}
}"""

# Dataset info
urls_per_split = {
    "train": "https://huggingface.co/datasets/mstz/victorian_authorship/resolve/main/train.csv",
}
features_types_per_config = {
    "authorship": {
        "text": datasets.Value("string"),
        "author": datasets.ClassLabel(num_classes=51)
    }
}
features_per_config = {k: datasets.Features(features_types_per_config[k]) for k in features_types_per_config}


class VictorianConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        super().__init__(version=VERSION, **kwargs)
        self.features = features_per_config[kwargs["name"]]


class Victorian(datasets.GeneratorBasedBuilder):
    # default configuration
    DEFAULT_CONFIG_NAME = "authorship"
    BUILDER_CONFIGS = [
        VictorianConfig(name="authorship",
                        description="authorship"),
    ]


    def _info(self):
        info = datasets.DatasetInfo(description=DESCRIPTION, citation=_CITATION, homepage=_HOMEPAGE,
                                    features=features_per_config[self.config.name])

        return info
    
    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        downloads = dl_manager.download_and_extract(urls_per_split)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloads["train"]})
        ]
    
    def _generate_examples(self, filepath: str):
        # the upstream CSV is latin-1 encoded
        data = pandas.read_csv(filepath, encoding="latin-1")

        for row_id, row in data.iterrows():
            data_row = dict(row)

            yield row_id, data_row
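

# Minimal usage sketch (assumption: this script backs the `mstz/victorian_authorship`
# repository, as the download URL above suggests). Newer releases of `datasets`
# require trust_remote_code=True to run script-based loaders like this one.
if __name__ == "__main__":
    ds = datasets.load_dataset("mstz/victorian_authorship", "authorship", trust_remote_code=True)
    print(ds["train"][0])  # {"text": ..., "author": ...}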