holylovenia committed · Commit 15a0ce2 · verified · 1 Parent(s): 11c833c

Upload uit_viic.py with huggingface_hub

Files changed (1):
  uit_viic.py (+150, -0)
uit_viic.py ADDED
@@ -0,0 +1,150 @@
# coding=utf-8
import json
import os.path

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_DATASETNAME = "uit_viic"
_CITATION = """\
@InProceedings{10.1007/978-3-030-63007-2_57,
author="Lam, Quan Hoang
and Le, Quang Duy
and Nguyen, Van Kiet
and Nguyen, Ngan Luu-Thuy",
editor="Nguyen, Ngoc Thanh
and Hoang, Bao Hung
and Huynh, Cong Phap
and Hwang, Dosam
and Trawi{\'{n}}ski, Bogdan
and Vossen, Gottfried",
title="UIT-ViIC: A Dataset for the First Evaluation on Vietnamese Image Captioning",
booktitle="Computational Collective Intelligence",
year="2020",
publisher="Springer International Publishing",
address="Cham",
pages="730--742",
abstract="Image Captioning (IC), the task of automatic generation of image captions, has attracted
attentions from researchers in many fields of computer science, being computer vision, natural language
processing and machine learning in recent years. This paper contributes to research on Image Captioning
task in terms of extending dataset to a different language - Vietnamese. So far, there has been no existed
Image Captioning dataset for Vietnamese language, so this is the foremost fundamental step for developing
Vietnamese Image Captioning. In this scope, we first built a dataset which contains manually written
captions for images from Microsoft COCO dataset relating to sports played with balls, we called this dataset
UIT-ViIC (University Of Information Technology - Vietnamese Image Captions). UIT-ViIC consists of 19,250
Vietnamese captions for 3,850 images. Following that, we evaluated our dataset on deep neural network models
and did comparisons with English dataset and two Vietnamese datasets built by different methods. UIT-ViIC
is published on our lab website (https://sites.google.com/uit.edu.vn/uit-nlp/) for research purposes.",
isbn="978-3-030-63007-2"
}
"""

_DESCRIPTION = """
UIT-ViIC contains manually written captions for images from the Microsoft COCO dataset relating to sports
played with a ball. UIT-ViIC consists of 19,250 Vietnamese captions for 3,850 images. For each image,
UIT-ViIC provides five Vietnamese captions annotated by five annotators.
"""

_HOMEPAGE = "https://drive.google.com/file/d/1YexKrE6o0UiJhFWpE8M5LKoe6-k3AiM4"
_PAPER_URL = "https://arxiv.org/abs/2002.00175"
_LICENSE = Licenses.UNKNOWN.value
_HF_URL = ""
_LANGUAGES = ["vi"]
_LOCAL = False
_SUPPORTED_TASKS = [Tasks.IMAGE_CAPTIONING]
_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"

_URLS = "https://drive.google.com/uc?export=download&id=1YexKrE6o0UiJhFWpE8M5LKoe6-k3AiM4"
_SPLIT_PATHS = {
    "train": "UIT-ViIC/uitviic_captions_train2017.json",
    "validation": "UIT-ViIC/uitviic_captions_val2017.json",
    "test": "UIT-ViIC/uitviic_captions_test2017.json",
}


class UITViICDataset(datasets.GeneratorBasedBuilder):
    """UIT-ViIC: 19,250 manually written Vietnamese captions for 3,850 ball-sport images from MS COCO."""

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=_DESCRIPTION,
            subset_id=f"{_DATASETNAME}",
            schema="source",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_imtext",
            version=datasets.Version(_SEACROWD_VERSION),
            description=_DESCRIPTION,
            subset_id=f"{_DATASETNAME}",
            schema="seacrowd_imtext",
        ),
    ]

    def _info(self):
        if self.config.schema == "source":
            # Mirror the COCO-style fields of the original annotation files.
            features = datasets.Features(
                {
                    "license": datasets.Value("int32"),
                    "file_name": datasets.Value("string"),
                    "coco_url": datasets.Value("string"),
                    "flickr_url": datasets.Value("string"),
                    "height": datasets.Value("int32"),
                    "width": datasets.Value("int32"),
                    "date_captured": datasets.Value("string"),
                    "image_id": datasets.Value("int32"),
                    "caption": datasets.Value("string"),
                    "cap_id": datasets.Value("int32"),
                }
            )
        elif self.config.schema == "seacrowd_imtext":
            features = schemas.image_text_features()
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            license=_LICENSE,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # The Google Drive archive extracts to a UIT-ViIC/ folder with one COCO-format JSON file per split.
        file_paths = dl_manager.download_and_extract(_URLS)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": os.path.join(file_paths, _SPLIT_PATHS["train"])},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": os.path.join(file_paths, _SPLIT_PATHS["validation"])},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": os.path.join(file_paths, _SPLIT_PATHS["test"])},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields one example per caption, joined with its image metadata."""
        with open(filepath, encoding="utf-8") as f:
            json_dict = json.load(f)
            images = {itm["id"]: itm for itm in json_dict["images"]}
            captns = json_dict["annotations"]

        for idx, capt in enumerate(captns):
            image_id = capt["image_id"]
            if self.config.schema == "source":
                yield idx, {
                    "license": images[image_id]["license"],
                    "file_name": images[image_id]["file_name"],
                    "coco_url": images[image_id]["coco_url"],
                    "flickr_url": images[image_id]["flickr_url"],
                    "height": images[image_id]["height"],
                    "width": images[image_id]["width"],
                    "date_captured": images[image_id]["date_captured"],
                    "image_id": capt["image_id"],
                    "caption": capt["caption"],
                    "cap_id": capt["id"],
                }
            elif self.config.schema == "seacrowd_imtext":
                yield idx, {
                    "id": capt["id"],
                    "image_paths": [images[image_id]["coco_url"], images[image_id]["flickr_url"]],
                    "texts": capt["caption"],
                    "metadata": {
                        "context": "",
                        "labels": ["Yes"],
                    },
                }
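
For context, a minimal usage sketch (not part of the committed file): assuming the seacrowd package is installed, the script is available locally as uit_viic.py, and a datasets version that still supports script-based loaders, the two configs defined above could be loaded roughly like this. The trust_remote_code flag only exists in newer datasets releases and may not be needed for a local script.

# Usage sketch, not part of the commit. Assumes: local uit_viic.py, seacrowd
# installed, and a `datasets` version that still supports loader scripts.
import datasets

# Source schema: COCO-style fields (file_name, coco_url, caption, ...).
source = datasets.load_dataset("uit_viic.py", name="uit_viic_source", trust_remote_code=True)

# SEACrowd imtext schema: id / image_paths / texts / metadata.
imtext = datasets.load_dataset("uit_viic.py", name="uit_viic_seacrowd_imtext", trust_remote_code=True)

print(source["train"][0]["caption"])
print(imtext["validation"][0]["texts"])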