import os
import json
import datasets
import pandas as pd
_DESCRIPTION = """\
MedIAnomaly is a benchmark for evaluating anomaly detection methods on seven diverse medical imaging datasets:
RSNA, VinCXR, BrainTumor, LAG, ISIC2018_Task3, Camelyon16, and BraTS2021. It supports both image-level
classification and pixel-level segmentation tasks.
All datasets follow a consistent one-class learning protocol: the training set contains only normal (non-anomalous)
images, while the test set includes both normal and abnormal cases. This setting is designed to reflect real-world
scenarios where anomalous samples are rare or unavailable during training.
MedIAnomaly provides standardized preprocessing, train/test splits, and label formats to facilitate
fair comparison across methods.
"""
_HOMEPAGE = "https://github.com/caiyu6666/MedIAnomaly/tree/main"
_CITATION = """\
@article{cai2024medianomaly,
title={MedIAnomaly: A comparative study of anomaly detection in medical images},
author={Cai, Yu and Zhang, Weiwen and Chen, Hao and Cheng, Kwang-Ting},
journal={arXiv preprint arXiv:2404.04518},
year={2024}
}
"""
_BASE_URL = "https://huggingface.co/datasets/randall-lab/medianomaly/resolve/main"
_URLS = {
"rsna": f"{_BASE_URL}/rsna.tar",
"brats2021": f"{_BASE_URL}/brats2021.tar",
"braintumor": f"{_BASE_URL}/braintumor.tar",
"camelyon16": f"{_BASE_URL}/camelyon16.tar",
"isic2018_task3": f"{_BASE_URL}/isic2018.tar",
"lag": f"{_BASE_URL}/lag.tar",
"vincxr": f"{_BASE_URL}/vincxr.tar",
}
config_names = {"rsna": "RSNA", "vincxr": "VinCXR",
"brats2021": "BraTS2021", "braintumor": "BrainTumor",
"camelyon16": "Camelyon16", "isic2018_task3": "ISIC2018_Task3",
"lag": "LAG"}
class Medianomaly(datasets.GeneratorBasedBuilder):
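    """Builder for the seven MedIAnomaly configurations, one BuilderConfig per dataset."""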
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="rsna", version=datasets.Version("1.0.0"), description="RSNA Pneumonia dataset."),
datasets.BuilderConfig(name="brats2021", version=datasets.Version("1.0.0"), description="BraTS2021 brain tumor dataset."),
datasets.BuilderConfig(name="braintumor", version=datasets.Version("1.0.0"), description="BrainTumor MRI dataset."),
datasets.BuilderConfig(name="camelyon16", version=datasets.Version("1.0.0"), description="Camelyon16 histopathology dataset."),
datasets.BuilderConfig(name="isic2018_task3", version=datasets.Version("1.0.0"), description="ISIC 2018 melanoma classification dataset."),
datasets.BuilderConfig(name="lag", version=datasets.Version("1.0.0"), description="LAG (glaucoma detection) fundus dataset."),
datasets.BuilderConfig(name="vincxr", version=datasets.Version("1.0.0"), description="VinCXR chest X-ray dataset."),
]
def _info(self):
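        # Three feature schemas: image + binary label for the five purely image-level datasets,
        # an extra pixel-level "annotation" mask for brats2021, and additional per-lesion-type
        # labels for isic2018_task3.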
config_name = self.config.name.lower()
if config_name in ["rsna", "vincxr", "braintumor", "lag", "camelyon16"]:
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({
"image": datasets.Image(),
"label": datasets.ClassLabel(names=["normal", "abnormal"]),
}),
supervised_keys=("image", "label"),
homepage=_HOMEPAGE,
license="apache-2.0",
citation=_CITATION,
)
elif config_name == "brats2021":
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({
"image": datasets.Image(),
"label": datasets.ClassLabel(names=["normal", "abnormal"]),
"annotation": datasets.Image(),
}),
supervised_keys=("image", "label"),
homepage=_HOMEPAGE,
license="apache-2.0",
citation=_CITATION,
)
elif config_name == "isic2018_task3":
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({
"image": datasets.Image(),
"label": datasets.ClassLabel(names=["normal", "abnormal"]),
"labels": datasets.Sequence(datasets.Value("int32")),
"MEL": datasets.ClassLabel(names=["melanoma", "non-melanoma"]),
"NV": datasets.ClassLabel(names=["nevus", "non-nevus"]),
"BCC": datasets.ClassLabel(names=["basal cell carcinoma", "non-basal cell carcinoma"]),
"AKIEC": datasets.ClassLabel(names=["actinic keratosis", "non-actinic keratosis"]),
"BKL": datasets.ClassLabel(names=["benign keratosis", "non-benign keratosis"]),
"VASC": datasets.ClassLabel(names=["vascular lesion", "non-vascular lesion"]),
"DF": datasets.ClassLabel(names=["dermatofibroma", "non-dermatofibroma"]),
}),
supervised_keys=("image", "label"),
homepage=_HOMEPAGE,
license="apache-2.0",
citation=_CITATION,
)
else:
raise NotImplementedError(f"{config_name} is not implemented in Medianomaly.")
def _split_generators(self, dl_manager):
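        # Each config ships as a single tar on the Hub; download_and_extract returns the local
        # extraction root, and the data lives under a folder named config_names[config_name]
        # (e.g. "RSNA", "BraTS2021").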
config_name = self.config.name.lower()
if config_name not in _URLS:
raise NotImplementedError(f"{config_name} is not implemented in Medianomaly.")
archive_path = dl_manager.download_and_extract(_URLS[config_name])
if config_name in ["rsna", "vincxr", "braintumor", "lag"]:
data_dir = os.path.join(archive_path, config_names[config_name])
with open(os.path.join(data_dir, "data.json"), "r") as f:
metadata = json.load(f)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={
"samples": metadata["train"], "base_dir": data_dir, "config": config_name
}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={
"samples": metadata["test"], "base_dir": data_dir, "config": config_name
}),
]
elif config_name == "brats2021":
data_dir = os.path.join(archive_path, config_names[config_name])
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={
"samples": "train", "base_dir": data_dir, "config": config_name
}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={
"samples": "test", "base_dir": data_dir, "config": config_name
}),
]
elif config_name == "camelyon16":
data_dir = os.path.join(archive_path, config_names[config_name])
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={
"samples": "train", "base_dir": data_dir, "config": config_name
}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={
"samples": "test", "base_dir": data_dir, "config": config_name
}),
]
elif config_name == "isic2018_task3":
data_dir = os.path.join(archive_path, config_names[config_name])
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={
"samples": "train", "base_dir": data_dir, "config": config_name
}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={
"samples": "test", "base_dir": data_dir, "config": config_name
}),
]
def _generate_examples(self, samples, base_dir, config):
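        # `samples` is a dict of {"0"/"1": [relative image paths]} for the data.json-based
        # configs (rsna, vincxr, braintumor, lag) and just the split name ("train"/"test")
        # for the folder-based configs (brats2021, camelyon16, isic2018_task3).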
if config in ["rsna", "vincxr", "braintumor", "lag"]:
base_dir = os.path.join(base_dir, "images")
            idx = 0
            for label_str, items in samples.items():  # only "0" in train, "0" and "1" in test
                label = int(label_str)
                for item in items:
                    image_path = os.path.join(base_dir, item)
                    # a single running index keeps example keys unique across both label groups
                    yield idx, {
                        "image": image_path,
                        "label": label,
                    }
                    idx += 1
elif config == "brats2021":
if samples == "train":
base_dir = os.path.join(base_dir, "train")
for idx, item in enumerate(os.listdir(base_dir)):
image_path = os.path.join(base_dir, item)
yield idx, {
"image": image_path,
"label": 0, # All training images are normal
}
elif samples == "test":
image_dir_normal = os.path.join(base_dir, "test", "normal")
image_dir_tumor = os.path.join(base_dir, "test", "tumor")
annot_dir = os.path.join(base_dir, "test", "annotation")
idx = 0
for fname in os.listdir(image_dir_normal):
if fname.endswith(".png"):
image_path = os.path.join(image_dir_normal, fname)
yield idx, {
"image": image_path,
"label": 0,
"annotation": None,
}
idx += 1
for fname in os.listdir(image_dir_tumor):
if fname.endswith(".png"):
image_path = os.path.join(image_dir_tumor, fname)
annot_name = fname.replace("flair", "seg")
annot_path = os.path.join(annot_dir, annot_name)
yield idx, {
"image": image_path,
"label": 1,
"annotation": annot_path,
}
idx += 1
elif config == "camelyon16":
if samples == "train":
                base_dir = os.path.join(base_dir, "train", "good")
for idx, item in enumerate(os.listdir(base_dir)):
image_path = os.path.join(base_dir, item)
yield idx, {
"image": image_path,
"label": 0, # All training images are normal
}
elif samples == "test":
base_dir = os.path.join(base_dir, "test")
good_dir = os.path.join(base_dir, "good")
ungood_dir = os.path.join(base_dir, "Ungood")
idx = 0
for item in os.listdir(good_dir):
if item.endswith(".png"):
image_path = os.path.join(good_dir, item)
yield idx, {
"image": image_path,
"label": 0,
}
idx += 1
for item in os.listdir(ungood_dir):
if item.endswith(".png"):
image_path = os.path.join(ungood_dir, item)
yield idx, {
"image": image_path,
"label": 1,
}
idx += 1
elif config == "isic2018_task3":
if samples == "train":
img_dir = os.path.join(base_dir, "ISIC2018_Task3_Training_Input")
label_dir = os.path.join(base_dir, "ISIC2018_Task3_Training_GroundTruth")
label_file = os.path.join(label_dir, "ISIC2018_Task3_Training_GroundTruth.csv")
else:
img_dir = os.path.join(base_dir, "ISIC2018_Task3_Test_Input")
label_dir = os.path.join(base_dir, "ISIC2018_Task3_Test_GroundTruth")
label_file = os.path.join(label_dir, "ISIC2018_Task3_Test_GroundTruth.csv")
df = pd.read_csv(label_file)
for idx, row in df.iterrows():
image_id = row["image"]
image_path = os.path.join(img_dir, f"{image_id}.jpg")
if not os.path.exists(image_path):
continue
label_vector = row.iloc[1:].astype(int).tolist()
yield idx, {
"image": image_path,
"label": 0 if label_vector == [0, 1, 0, 0, 0, 0, 0] else 1,
"labels": label_vector,
"MEL": label_vector[0],
"NV": label_vector[1],
"BCC": label_vector[2],
"AKIEC": label_vector[3],
"BKL": label_vector[4],
"DF": label_vector[5],
"VASC": label_vector[6],
                }