Datasets:

ArXiv:
License:
holylovenia committed on
Commit
0369c71
·
verified ·
1 Parent(s): 5853ace

Upload cc3m_35l.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. cc3m_35l.py +242 -0
cc3m_35l.py ADDED
@@ -0,0 +1,242 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import Dict, List, Tuple
3
+
4
+ import datasets
5
+ import jsonlines as jl
6
+ import pandas as pd
7
+
8
+ from seacrowd.utils import schemas
9
+ from seacrowd.utils.configs import SEACrowdConfig
10
+ from seacrowd.utils.constants import Licenses, Tasks
11
+
12
+ _CITATION = """\
13
+ @inproceedings{thapliyal-etal-2022-crossmodal,
14
+ title = "Crossmodal-3600: A Massively Multilingual Multimodal Evaluation Dataset",
15
+ author = "Thapliyal, Ashish V. and
16
+ Pont Tuset, Jordi and
17
+ Chen, Xi and
18
+ Soricut, Radu",
19
+ editor = "Goldberg, Yoav and
20
+ Kozareva, Zornitsa and
21
+ Zhang, Yue",
22
+ booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
23
+ month = dec,
24
+ year = "2022",
25
+ address = "Abu Dhabi, United Arab Emirates",
26
+ publisher = "Association for Computational Linguistics",
27
+ url = "https://aclanthology.org/2022.emnlp-main.45",
28
+ doi = "10.18653/v1/2022.emnlp-main.45",
29
+ pages = "715--729",
30
+ }
31
+ """
32
+
33
+ _DATASETNAME = "cc3m_35l"
34
+
35
+ _DESCRIPTION = """\
36
+ CC3M-35L is created by translating Conceptual Captions 3M (Sharma et al., 2018),
37
+ originally in English, to the other 34 languages using Google's machine translation API.
38
+ """
39
+
40
+ _HOMEPAGE = "https://google.github.io/crossmodal-3600/"
41
+
42
+ _LICENSE = Licenses.CC_BY_4_0.value
43
+
44
+ # the image URLs are contained in tsv file together with the original captions which can be downloaded locally using google account.
45
+ # those tsv file originally can be found and downloaded from this page https://ai.google.com/research/ConceptualCaptions/download
46
+ # there are no direct image folder ready, so it needs to be downloaded one by one
47
+ # some warnings may occur when downloading due to reasons such as security certificate and others
48
+ _URLS = {
49
+ "trans_train": "https://storage.googleapis.com/crossmodal-3600/cc3m_mt_train.jsonl.gz",
50
+ "trans_dev": "https://storage.googleapis.com/crossmodal-3600/cc3m_mt_dev.jsonl.gz",
51
+ }
52
+
53
+ _SUPPORTED_TASKS = [Tasks.IMAGE_CAPTIONING]
54
+
55
+ _SOURCE_VERSION = "1.0.0"
56
+
57
+ _SEACROWD_VERSION = "2024.06.20"
58
+
59
+ _LANGUAGES = ["fil", "ind", "tha", "vie"]
60
+
61
+ _LOCAL = True
62
+
63
+
64
class CC3M35L(datasets.GeneratorBasedBuilder):
    """
    CC3M-35L is created by translating Conceptual Captions 3M (Sharma et al., 2018),
    originally in English, to the other 34 languages using Google's machine translation API.

    Requires the original Conceptual Captions tsv files
    (``Train_GCC-training.tsv`` and ``Validation_GCC-1.1.0-Validation.tsv``)
    to be present in ``data_dir``; the translated-caption jsonl files are
    downloaded automatically.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{lang}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"{_DATASETNAME}_{lang} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}_{lang}",
        )
        for lang in _LANGUAGES
    ] + [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{lang}_seacrowd_imtext",
            version=datasets.Version(_SEACROWD_VERSION),
            description=f"{_DATASETNAME}_{lang} SEACrowd schema",
            schema="seacrowd_imtext",
            subset_id=f"{_DATASETNAME}_{lang}",
        )
        for lang in _LANGUAGES
    ]

    # FIX: was "cc3m_35l_id_source", which is never generated ("id" is not in
    # _LANGUAGES); Indonesian is registered under the ISO 639-3 code "ind".
    DEFAULT_CONFIG_NAME = "cc3m_35l_ind_source"

    def _info(self) -> datasets.DatasetInfo:
        """Build the DatasetInfo for the selected schema ("source" or "seacrowd_imtext")."""
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "image_paths": datasets.Value("string"),
                    "src_lang": datasets.Value("string"),
                    "caption_tokenized": datasets.Value("string"),
                    "trg_lang": datasets.Value("string"),
                    "translation_tokenized": datasets.Value("string"),
                    "backtranslation_tokenized": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_imtext":
            features = schemas.image_text_features()

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def fill_img_path(self, df: pd.DataFrame, line: dict):
        """Match one translated caption against *df* and attach translations/image paths.

        Args:
            df: Conceptual Captions frame with columns ``caption``, ``img_url``,
                ``img_path``, ``trans_caption``, ``backtrans_caption``.
            line: one record from the translated-captions jsonl
                (keys ``caption_tokenized``, ``translation_tokenized``,
                ``backtranslation_tokenized``).

        Returns:
            A tuple ``(matched_rows, exceptions)`` where ``matched_rows`` is a copy of
            the matching rows (to be merged back via ``DataFrame.update``) and
            ``exceptions`` lists the indices whose image download failed.
        """
        exceptions = []
        # query() returns a copy, so the writes below never touch *df* directly;
        # the caller merges the copy back with DataFrame.update().
        selected_row = df.query('caption==@line["caption_tokenized"]')
        # The same caption may occur several times; skip if there is no match at all.
        if not selected_row.empty:
            for idx in selected_row.index:
                # FIX: the original assigned into the Series yielded by iterrows(),
                # which is a per-row copy — none of the values ever reached the
                # frame passed to update(). Write through .loc instead.
                selected_row.loc[idx, "trans_caption"] = line["translation_tokenized"]
                selected_row.loc[idx, "backtrans_caption"] = line["backtranslation_tokenized"]
                # If the image cannot be downloaded for some reason, skip it.
                # This may cause the total number of examples to differ between runs.
                try:
                    selected_row.loc[idx, "img_path"] = datasets.DownloadManager().download(selected_row.at[idx, "img_url"])
                except Exception:  # FIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit
                    exceptions.append(idx)

        return selected_row, exceptions

    def is_target(self, line: dict, trg_lang: str):
        """Return *line* if its target language is *trg_lang*, else None (kept for compatibility)."""
        if line["trg_lang"] == trg_lang:
            return line

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        dev_path = dl_manager.download_and_extract(_URLS["trans_dev"])
        train_path = dl_manager.download_and_extract(_URLS["trans_train"])

        # The Conceptual Captions tsv files are gated behind a Google login, so they
        # cannot be fetched automatically and must be supplied locally.
        if self.config.data_dir is None:
            raise ValueError("This is a local dataset. Please pass the data_dir kwarg to load_dataset.")
        else:
            data_dir = self.config.data_dir

        # Read the tsv files from the local train and validation files.
        gcc_val = os.path.join(data_dir, "Validation_GCC-1.1.0-Validation.tsv")
        gcc_train = os.path.join(data_dir, "Train_GCC-training.tsv")

        # Load them into pandas dataframes (headerless, tab-separated: caption, image URL).
        colnames = ["caption", "img_url"]
        gcc_val_df = pd.read_csv(gcc_val, sep="\t", header=None, names=colnames)
        gcc_train_df = pd.read_csv(gcc_train, sep="\t", header=None, names=colnames)

        # Add columns for the downloaded image path and the (back)translated captions.
        for df in (gcc_val_df, gcc_train_df):
            df["img_path"] = None
            df["trans_caption"] = None
            df["backtrans_caption"] = None

        # Match the original captions in the translated set against the dataframe
        # captions and download the referenced images to use as filepaths.
        train_exceptions = []
        val_exceptions = []

        # subset_id is "cc3m_35l_{lang}", so the language code is the third token.
        current_lang = self.config.subset_id.split("_")[2]

        # Filter validation data down to the configured target language.
        with jl.open(os.path.join(dev_path), mode="r") as j:
            val_caption_targets = [line for line in j if line["trg_lang"] == current_lang]

        for line in val_caption_targets:
            # FIX: the original matched validation captions against gcc_train_df and
            # merged the result into gcc_val_df (and vice versa for train), so the
            # translations landed in the wrong frame; both sides now stay consistent.
            matched, failed = self.fill_img_path(gcc_val_df, line)
            val_exceptions.extend(failed)
            gcc_val_df.update(matched)

        # Release the parsed records early to reduce peak memory.
        val_caption_targets = []

        # Filter train data down to the configured target language.
        with jl.open(os.path.join(train_path), mode="r") as j:
            train_caption_targets = [line for line in j if line["trg_lang"] == current_lang]

        for line in train_caption_targets:
            matched, failed = self.fill_img_path(gcc_train_df, line)
            train_exceptions.extend(failed)
            gcc_train_df.update(matched)

        # Release the parsed records early to reduce peak memory.
        train_caption_targets = []

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": gcc_train_df,
                    "exceptions": train_exceptions,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": gcc_val_df,
                    "exceptions": val_exceptions,
                },
            ),
        ]

    def _generate_examples(self, filepath: pd.DataFrame, exceptions: list) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples.

        Args:
            filepath: the prepared DataFrame for this split (the annotation was
                previously ``dict``, but a DataFrame is what _split_generators passes).
            exceptions: row indices whose image download failed; these are skipped.
        """
        # FIX: membership tests against a list are O(n) per row; use a set.
        skipped = set(exceptions)
        for idx, row in filepath.iterrows():
            if idx in skipped:
                continue
            if self.config.schema == "source":
                yield idx, {
                    "id": str(idx),
                    "image_paths": row["img_path"],
                    "src_lang": "en",
                    "caption_tokenized": row["caption"],
                    "trg_lang": self.config.subset_id.split("_")[2],
                    "translation_tokenized": row["trans_caption"],
                    "backtranslation_tokenized": row["backtrans_caption"],
                }

            elif self.config.schema == "seacrowd_imtext":
                yield idx, {
                    "id": str(idx),
                    "image_paths": [row["img_path"]],
                    "texts": row["trans_caption"],
                    "metadata": {
                        "context": None,
                        "labels": None,
                    },
                }

            else:
                raise ValueError(f"Invalid config: {self.config.name}")