Datasets:

Modalities: Tabular, Text
Formats: parquet
Languages: English
Size: < 1K rows
ArXiv: 2005.11140
Libraries: Datasets, pandas
License: cc0-1.0
davanstrien (HF staff) committed on
Commit cbc4306
1 Parent(s): ee85adc

Delete loading script

Files changed (1)
  1. atypical_animacy.py +0 -123
atypical_animacy.py DELETED
@@ -1,123 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import csv
- import json
- import os
- import pandas as pd
- import datasets
-
- _CITATION = """@article{DBLP:journals/corr/abs-2005-11140,
-   author    = {Mariona Coll Ardanuy and
-                Federico Nanni and
-                Kaspar Beelen and
-                Kasra Hosseini and
-                Ruth Ahnert and
-                Jon Lawrence and
-                Katherine McDonough and
-                Giorgia Tolfo and
-                Daniel C. S. Wilson and
-                Barbara McGillivray},
-   title     = {Living Machines: {A} study of atypical animacy},
-   journal   = {CoRR},
-   volume    = {abs/2005.11140},
-   year      = {2020},
-   url       = {https://arxiv.org/abs/2005.11140},
-   eprinttype = {arXiv},
-   eprint    = {2005.11140},
-   timestamp = {Sat, 23 Jan 2021 01:12:25 +0100},
-   biburl    = {https://dblp.org/rec/journals/corr/abs-2005-11140.bib},
-   bibsource = {dblp computer science bibliography, https://dblp.org}
- }
- """
-
-
- _DESCRIPTION = """\
- Atypical animacy detection dataset, based on nineteenth-century sentences in English extracted from an open dataset of nineteenth-century books digitized by the British Library (available via https://doi.org/10.21250/db14, British Library Labs, 2014).
- This dataset contains 598 sentences containing mentions of machines. Each sentence has been annotated according to the animacy and humanness of the machine in the sentence.
- """
-
- _HOMEPAGE = "https://bl.iro.bl.uk/concern/datasets/323177af-6081-4e93-8aaf-7932ca4a390a?locale=en"
-
- _DATASETNAME = "atypical_animacy"
-
- _LICENSE = "CC0 1.0 Universal Public Domain"
-
- _URLS = {
-     _DATASETNAME: "https://bl.iro.bl.uk/downloads/59a8c52f-e0a5-4432-9897-0db8c067627c?locale=en",
- }
-
-
- class AtypicalAnimacy(datasets.GeneratorBasedBuilder):
-     """Living Machines: A study of atypical animacy. Each sentence has been annotated according to the animacy and humanness of the target in the sentence. Additionally, the context is also provided"""
-
-     VERSION = datasets.Version("1.1.0")
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "id": datasets.Value("string"),
-                 "sentence": datasets.Value("string"),
-                 "context": datasets.Value("string"),
-                 "target": datasets.Value("string"),
-                 "animacy": datasets.Value("float"),
-                 "humanness": datasets.Value("float"),
-                 "offsets": [datasets.Value("int32")],
-                 "date": datasets.Value("string"),
-             }
-         )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         urls = _URLS[_DATASETNAME]
-         data_dir = dl_manager.download_and_extract(urls)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "filepath": os.path.join(
-                         data_dir, "LwM-nlp-animacy-annotations-machines19thC.tsv"
-                     ),
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, filepath):
-         data = pd.read_csv(filepath, sep="\t", header=0)
-         for id, row in data.iterrows():
-             date = row.Date
-             sentence = row.Sentence.replace("***", "")
-             context = row.SentenceCtxt.replace("[SEP]", "").replace("***", "")
-             target = row.TargetExpression
-             animacy = float(row.animacy)
-             humanness = float(row.humanness)
-             target_start = row.Sentence.find("***")
-             offsets = [target_start, target_start + len(target)]
-             id = row.SentenceId
-             yield id, {
-                 "id": id,
-                 "sentence": sentence,
-                 "context": context,
-                 "target": target,
-                 "animacy": animacy,
-                 "humanness": humanness,
-                 "date": date,
-                 "offsets": offsets,
-             }
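
With the loading script deleted, the Hub serves this dataset from the Parquet files indicated in the metadata above, so it can be loaded directly with the `datasets` library instead of running `atypical_animacy.py`. Below is a minimal sketch; the repository id `biglam/atypical_animacy` is an assumption (the actual `<namespace>/<name>` path is not shown in this commit), so substitute the real one.

from datasets import load_dataset

# NOTE: hypothetical repo id based on the script's _DATASETNAME; replace with
# the dataset's actual "<namespace>/<name>" path on the Hugging Face Hub.
ds = load_dataset("biglam/atypical_animacy", split="train")

# Columns defined by the old script's features:
# id, sentence, context, target, animacy, humanness, offsets, date
print(ds.features)
print(ds[0]["sentence"])

# The same Parquet-backed data can also be inspected with pandas:
df = ds.to_pandas()
print(df[["target", "animacy", "humanness"]].head())

Because the data now ships as Parquet, no custom download-and-parse code runs on the user's machine; the TSV preprocessing that `_generate_examples` performed (stripping the `***` target markers, computing offsets) is already baked into the hosted files.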