|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import json |
|
import os |
|
import csv |
|
from PIL import Image |
|
import pandas as pd |
|
|
|
import datasets |
|
|
|
|
|
# BibTeX entry for the accompanying paper (VLM4Bio '24, ACM); exposed via
# DatasetInfo.citation. Blank lines are part of the string value — keep as-is.
_CITATION = """\

@inproceedings{gautam2024kvasirvqa,

title={Kvasir-VQA: A Text-Image Pair GI Tract Dataset},

author={Gautam, Sushant and Storås, Andrea and Midoglu, Cise and Hicks, Steven A. and Thambawita, Vajira and Halvorsen, Pål and Riegler, Michael A.},

booktitle={Proceedings of the First International Workshop on Vision-Language Models for Biomedical Applications (VLM4Bio '24)},

year={2024},

location={Melbourne, VIC, Australia},

publisher={ACM},

doi={10.1145/3689096.3689458}

}

"""



# Human-readable summary surfaced on the dataset card via DatasetInfo.description.
_DESCRIPTION = """\

The Kvasir-VQA dataset is an extended dataset derived from the HyperKvasir and Kvasir-Instrument datasets, augmented with question-and-answer annotations. This dataset is designed to facilitate advanced machine learning tasks in gastrointestinal (GI) diagnostics, including image captioning, Visual Question Answering (VQA), and text-based generation of synthetic medical images.

"""



# Official dataset homepage.
_HOMEPAGE = "https://datasets.simula.no/kvasir-vqa/"



# SPDX-style identifier: Creative Commons Attribution-NonCommercial 4.0.
_LICENSE = "cc-by-nc-4.0"
|
|
|
|
|
class KvasirVQADataset(datasets.GeneratorBasedBuilder):
    """Kvasir-VQA: A Text-Image Pair GI Tract Dataset.

    Hugging Face ``datasets`` loading script. Reads ``metadata.csv`` from the
    repository root and yields one example per annotation row, pairing the raw
    image bytes with its source, question, answer, and a derived image id.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="kvasir_vqa",
            version=VERSION,
            description="Kvasir-VQA dataset containing text-image pairs with question-and-answer annotations",
        ),
    ]

    DEFAULT_CONFIG_NAME = "kvasir_vqa"

    def _info(self):
        """Return dataset metadata: feature schema, description, license, citation."""
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "source": datasets.Value("string"),
                "question": datasets.Value("string"),
                "answer": datasets.Value("string"),
                "img_id": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Declare the single split, backed by ``metadata.csv`` in the repo root.

        ``dl_manager`` is unused: the script assumes data files live alongside
        it (script-relative layout), so nothing is downloaded.
        """
        data_dir = "."
        return [
            datasets.SplitGenerator(
                name="raw_annotations",
                gen_kwargs={
                    "metadata_file": os.path.join(data_dir, "metadata.csv"),
                    "image_dir": data_dir,
                },
            )
        ]

    def _generate_examples(self, metadata_file, image_dir):
        """Yield ``(key, example)`` pairs, one per row of the metadata CSV.

        Args:
            metadata_file: Path to the CSV; must contain ``file_name``,
                ``source``, ``question`` and ``answer`` columns.
            image_dir: Directory that ``file_name`` paths are relative to.

        Rows whose image file is missing on disk are skipped (best-effort).
        Image bytes are cached so files referenced by multiple QA rows are
        read only once.
        NOTE(review): the cache is unbounded and holds every distinct image
        in memory for the duration of generation — acceptable for this
        dataset's size, but worth confirming before reuse on larger corpora.
        """
        image_cache = {}
        df = pd.read_csv(metadata_file, encoding="utf-8")

        # Removed a misleading `shuffled_df = df` alias: rows are emitted in
        # CSV order; no shuffling ever happened.
        for idx, row in df.iterrows():
            image_file = row["file_name"]
            image_path = os.path.join(image_dir, image_file)

            if image_file not in image_cache:
                if not os.path.exists(image_path):
                    # Best-effort: silently skip annotations whose image
                    # is absent rather than aborting the whole generation.
                    continue
                with open(image_path, "rb") as img_file:
                    image_cache[image_file] = img_file.read()

            yield idx, {
                "image": image_cache[image_file],
                "source": row["source"],
                "question": row["question"],
                "answer": row["answer"],
                # Strip the directory prefix and extension, e.g.
                # "images/abc123.jpg" -> "abc123".
                "img_id": image_file.replace(".jpg", "").replace("images/", ""),
            }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|