Peixian Wang committed on
Commit
1826f48
·
1 Parent(s): de6b5ec

add rtGender loader

Browse files
Files changed (1) hide show
  1. rtGender.py +272 -0
rtGender.py ADDED
@@ -0,0 +1,272 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for loading data from the RtGender corpus."""

from __future__ import absolute_import, division, print_function

import csv
from enum import Enum
import os

import datasets

# BibTeX citation for the corpus (Voigt et al., LREC 2018).
_CITATION = """\
@inproceedings{voigt-etal-2018-rtgender,
title = "{R}t{G}ender: A Corpus for Studying Differential Responses to Gender",
author = "Voigt, Rob and
Jurgens, David and
Prabhakaran, Vinodkumar and
Jurafsky, Dan and
Tsvetkov, Yulia",
booktitle = "Proceedings of the Eleventh International Conference on Language Resources and Evaluation ({LREC} 2018)",
month = may,
year = "2018",
address = "Miyazaki, Japan",
publisher = "European Language Resources Association (ELRA)",
url = "https://www.aclweb.org/anthology/L18-1445",
}
"""

# Short description shown on the dataset hub page.
_DESCRIPTION = """\
RtGender is a corpus for studying responses to gender online, including posts and responses from Facebook, TED, Fitocracy, and Reddit where the gender of the source poster/speaker is known.
"""

_HOMEPAGE = "https://nlp.stanford.edu/robvoigt/rtgender/#contact"

_LICENSE = "Research Only"

# Single tarball containing all of the corpus CSV files.
_URL = "https://nlp.stanford.edu/robvoigt/rtgender/rtgender.tar.gz"
class Config(Enum):
    """Identifiers for the builder configurations and corpus sources.

    ANNOTATIONS, POSTS, and RESPONSES name the three builder
    configurations; their ``str()`` form (e.g. ``"Config.ANNOTATIONS"``)
    is what is used as the config name in ``BUILDER_CONFIGS``.
    NOTE(review): the remaining members (FB_POLI, FB_PUB, TED,
    FITOCRACY, REDDIT) are not referenced elsewhere in this file -
    presumably reserved for per-source configurations.
    """

    ANNOTATIONS = "annotations"
    POSTS = "posts"
    RESPONSES = "responses"
    FB_POLI = "fb_politicians"
    FB_PUB = "fb_public"
    TED = "ted"
    FITOCRACY = "fitocracy"
    REDDIT = "reddit"
class rtGender(datasets.GeneratorBasedBuilder):
    """Dataset builder for the RtGender corpus.

    Exposes three configurations (see ``Config``): sentiment/relevance
    annotations, the source posts, and the responses to those posts.
    All examples land in a single "train" split.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=str(Config.ANNOTATIONS),
            version=VERSION,
            description="Covers 30k annotations",
        ),
        datasets.BuilderConfig(
            name=str(Config.POSTS),
            version=VERSION,
            description="Source posts from Facebook, Fitocracy, and Reddit",
        ),
        datasets.BuilderConfig(
            name=str(Config.RESPONSES),
            version=VERSION,
            description="Responses to posts from Facebook, TED, Fitocracy, and Reddit",
        ),
    ]
    DEFAULT_CONFIG_NAME = str(Config.ANNOTATIONS)

    # Fields marked "only for ..." are set to None for the other sources.
    POSTS_FEATURES = {
        "source": datasets.Value("string"),
        "op_id": datasets.Value("string"),
        "op_gender": datasets.Value("string"),
        "post_id": datasets.Value("string"),
        "post_text": datasets.Value("string"),
        "post_type": datasets.Value("string"),  # only for fb
        "subreddit": datasets.Value("string"),  # only for reddit
        "op_gender_visible": datasets.Value("string"),  # only for reddit
    }

    RESPONSES_FEATURES = {
        "source": datasets.Value("string"),
        "op_id": datasets.Value("string"),
        "op_gender": datasets.Value("string"),
        "post_id": datasets.Value("string"),
        "responder_id": datasets.Value("string"),
        "response_text": datasets.Value("string"),
        "op_name": datasets.Value("string"),  # only for fb
        "op_category": datasets.Value("string"),  # only for fb
        "responder_gender": datasets.Value("string"),  # only for fitocracy and reddit
        "responder_gender_visible": datasets.Value("string"),  # only for reddit
        "subreddit": datasets.Value("string"),
    }

    ANNOTATION_FEATURES = {
        "source": datasets.Value("string"),
        "op_gender": datasets.Value("string"),
        "post_text": datasets.Value("string"),
        "response_text": datasets.Value("string"),
        "sentiment": datasets.Value("string"),
        "relevance": datasets.Value("string"),
    }

    def _info(self):
        """Return a DatasetInfo whose feature schema matches the selected config."""
        # BUG FIX: self.config.name is a string, so it must be compared with
        # str(Config.X). The original compared it against the enum member
        # itself, which never matched and silently selected the RESPONSES
        # schema for every configuration.
        if self.config.name == str(Config.ANNOTATIONS):
            features = datasets.Features(self.ANNOTATION_FEATURES)
        elif self.config.name == str(Config.POSTS):
            features = datasets.Features(self.POSTS_FEATURES)
        else:
            features = datasets.Features(self.RESPONSES_FEATURES)
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            # Features differ per configuration, hence the branch above.
            features=features,
            # No canonical (input, target) pair for as_supervised=True.
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the corpus tarball and return the single train split."""
        data_dir = dl_manager.download_and_extract(_URL)
        # BUG FIX: same string-vs-enum comparison as in _info.
        if self.config.name == str(Config.ANNOTATIONS):
            names = ["annotations.csv"]
        elif self.config.name == str(Config.POSTS):
            names = [
                "facebook_congress_posts.csv",
                "facebook_wiki_posts.csv",
                "fitocracy_posts.csv",
                "reddit_posts.csv",
            ]
        else:
            names = [
                "facebook_congress_responses.csv",
                "facebook_wiki_responses.csv",
                "fitocracy_responses.csv",
                "reddit_responses.csv",
                "ted_responses.csv",
            ]
        # BUG FIX: anchor file names to the extraction directory; the original
        # passed bare names, which only resolve if the CWD happens to be the
        # archive root. NOTE(review): assumes the CSVs sit at the top level of
        # the extracted tarball - confirm against the actual archive layout.
        files = [os.path.join(data_dir, name) for name in names]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs are passed to _generate_examples.
                gen_kwargs={
                    "filepaths": files,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(
        self,
        filepaths,
        split,  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    ):
        """Yields examples as (key, example) tuples.

        The `key` is a running integer, unique across all files of the
        configuration; it exists for legacy (tfds) reasons only.
        """
        id_ = 0
        for fp in filepaths:
            # "source" is the file stem (e.g. "reddit_posts"). The original
            # used fp.replace(".csv", ""), which leaked the full local cache
            # path into the emitted data.
            source = os.path.splitext(os.path.basename(fp))[0]
            # BUG FIX: `with` guarantees each handle is closed even if a row
            # raises; the original opened every file up front and only closed
            # them after a fully successful pass.
            with open(fp, encoding="utf-8") as f:
                reader = csv.reader(f)
                next(reader, None)  # skip the header row; tolerate an empty file
                for row in reader:
                    if self.config.name == str(Config.ANNOTATIONS):
                        yield id_, {
                            "source": row[0],
                            "op_gender": row[1],
                            "post_text": row[2],
                            "response_text": row[3],
                            "sentiment": row[4],
                            "relevance": row[5],
                        }
                    elif self.config.name == str(Config.POSTS):
                        example = {
                            "source": source,
                            "op_id": row[0],
                            "op_gender": row[1],
                            "post_id": row[2],
                            "post_text": row[3],
                            "post_type": None,
                            "subreddit": None,
                            "op_gender_visible": None,
                        }
                        # Source-specific trailing columns.
                        if "facebook" in source:
                            example["post_type"] = row[4]
                        elif "reddit" in source:
                            example["subreddit"] = row[4]
                            example["op_gender_visible"] = row[5]
                        yield id_, example
                    else:
                        example = {
                            "source": source,
                            "op_id": row[0],
                            "op_gender": row[1],
                            "post_id": row[2],
                            "responder_id": row[3],
                            "response_text": row[4],
                            "op_name": None,
                            "op_category": None,
                            "responder_gender": None,
                            "responder_gender_visible": None,
                            "subreddit": None,
                        }
                        # Source-specific trailing columns.
                        if "facebook" in source:
                            example["op_name"] = row[5]
                            example["op_category"] = row[6]
                        elif "fitocracy" in source:
                            example["responder_gender"] = row[5]
                        elif "reddit" in source:
                            example["subreddit"] = row[5]
                            example["responder_gender"] = row[6]
                            example["responder_gender_visible"] = row[7]
                        yield id_, example
                    id_ += 1