# Copyright 2024 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import pandas as pd

import datasets


_CITATION = """\
@inproceedings{gautam2024kvasirvqa,
  title={Kvasir-VQA: A Text-Image Pair GI Tract Dataset},
  author={Gautam, Sushant and Storås, Andrea and Midoglu, Cise and Hicks, Steven A. and Thambawita, Vajira and Halvorsen, Pål and Riegler, Michael A.},
  booktitle={Proceedings of the First International Workshop on Vision-Language Models for Biomedical Applications (VLM4Bio '24)},
  year={2024},
  location={Melbourne, VIC, Australia},
  publisher={ACM},
  doi={10.1145/3689096.3689458}
}
"""

_DESCRIPTION = """\
The Kvasir-VQA dataset is an extended dataset derived from the HyperKvasir and Kvasir-Instrument datasets, augmented with question-and-answer annotations. This dataset is designed to facilitate advanced machine learning tasks in gastrointestinal (GI) diagnostics, including image captioning, Visual Question Answering (VQA), and text-based generation of synthetic medical images.
"""

_HOMEPAGE = "https://datasets.simula.no/kvasir-vqa/"

_LICENSE = "cc-by-nc-4.0"


class KvasirVQADataset(datasets.GeneratorBasedBuilder):
    """Kvasir-VQA: A Text-Image Pair GI Tract Dataset"""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="kvasir_vqa", version=VERSION, description="Kvasir-VQA dataset containing text-image pairs with question-and-answer annotations"),
    ]

    DEFAULT_CONFIG_NAME = "kvasir_vqa"

    def _info(self):
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "source": datasets.Value("string"),
                "question": datasets.Value("string"),
                "answer": datasets.Value("string"),
                "img_id": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # The metadata CSV and images are expected to live alongside this
        # script, so the data directory is simply the repository root.
        data_dir = "."
        return [
            datasets.SplitGenerator(
                name="raw_annotations",
                gen_kwargs={
                    "metadata_file": os.path.join(data_dir, "metadata.csv"),
                    "image_dir": data_dir,
                },
            )
        ]
    
    def _generate_examples(self, metadata_file, image_dir):
        """Yield (row index, example) pairs from the metadata CSV and image files."""
        image_cache = {}
        df = pd.read_csv(metadata_file, encoding="utf-8")
        # Shuffling is disabled; rows are yielded in the original metadata order.
        # shuffled_df = df.sample(frac=1, random_state=42).reset_index(drop=True)
        shuffled_df = df
        for idx, row in shuffled_df.iterrows():
            image_file = row["file_name"]
            image_path = os.path.join(image_dir, image_file)

            # Read each image once and reuse its bytes for repeated questions
            # about the same image.
            if image_file not in image_cache:
                if os.path.exists(image_path):
                    with open(image_path, "rb") as img_file:
                        image_cache[image_file] = img_file.read()
                else:
                    continue  # Skip rows whose image file does not exist

            yield idx, {
                "image": image_cache[image_file],
                "source": row["source"],
                "question": row["question"],
                "answer": row["answer"],
                "img_id": image_file.replace(".jpg", "").replace("images/", ""),
            }
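
# A minimal usage sketch (not part of the loading script): assuming metadata.csv
# and the images sit next to this file, the builder can be exercised locally
# with `datasets.load_dataset` before any upload.
#
#   from datasets import load_dataset
#   ds = load_dataset("./HuggingFaceDataset-Binary.py", trust_remote_code=True)
#   print(ds["raw_annotations"][0]["question"])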

# RUN: datasets-cli test HuggingFaceDataset-Binary.py --save_info --all_configs

## Upload to Hugging Face; it will be stored as Arrow:

# huggingface-cli upload SimulaMet-HOST/xxKvasir-VQA  . .  --repo-type dataset  xxx

## Then convert the Arrow files to Parquet:

# datasets-cli convert_to_parquet SimulaMet-HOST/xxKvasir-VQA


# The original file names were unwieldy, so I renamed them for readability,
# cloned the repo locally, and pushed it back to Hugging Face.
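
# A hedged sketch of downstream use: after the Parquet conversion above, the
# dataset should be loadable straight from the Hub. The repo id below reuses
# the placeholder name from the upload command and may need adjusting.
#
#   from datasets import load_dataset
#   ds = load_dataset("SimulaMet-HOST/xxKvasir-VQA", split="raw_annotations")
#   ds[0]["image"].show()  # PIL image decoded by the Image feature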