Tasks: Image Classification
Sub-tasks: multi-label-image-classification
Languages: English
Size: 100B<n<1T
License:
Commit 0233002 (Parent(s): a6625d5)
YuxuanZhang888 committed: Changed the name of my dataset loading script.
Files changed: ColonCancerCTDataset.py (+158 -0)
ColonCancerCTDataset.py (new file, +158 lines):
import pydicom
from PIL import Image
import numpy as np
import io
import datasets
import gdown
import re
import s3fs
import random

example_manifest_url = "https://drive.google.com/uc?id=1JBkQTXeieyN9_6BGdTF_DDlFFyZrGyU6"
example_manifest_file = gdown.download(example_manifest_url, 'manifest_file.s5cmd', quiet=False)
full_manifest_url = "https://drive.google.com/uc?id=1KP6qxcQoPF4MJdEPNwW7J6BlL_sUJ17j"
full_manifest_file = gdown.download(full_manifest_url, 'full_manifest_file.s5cmd', quiet=False)
fs = s3fs.S3FileSystem(anon=True)
_DESCRIPTION = "Colon cancer CT series and per-slice DICOM metadata sourced from the NCI Imaging Data Commons (IDC)."
_HOMEPAGE = "https://imaging.datacommons.cancer.gov/"
_LICENSE = "https://fairsharing.org/FAIRsharing.0b5a1d"
_CITATION = "National Cancer Institute Imaging Data Commons (IDC) Collections was accessed on DATE from https://registry.opendata.aws/nci-imaging-data-commons"


class ColonCancerCTDataset(datasets.GeneratorBasedBuilder):
    """Colon cancer CT scans and DICOM metadata from the NCI Imaging Data Commons."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="example", version=VERSION, description="This is a subset of the full dataset for demonstration purposes"),
        datasets.BuilderConfig(name="full_data", version=VERSION, description="This is the complete dataset"),
    ]
    DEFAULT_CONFIG_NAME = "example"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "ImageType": datasets.Sequence(datasets.Value('string')),
                    "StudyDate": datasets.Value('string'),
                    "SeriesDate": datasets.Value('string'),
                    "Manufacturer": datasets.Value('string'),
                    "StudyDescription": datasets.Value('string'),
                    "SeriesDescription": datasets.Value('string'),
                    "PatientSex": datasets.Value('string'),
                    "PatientAge": datasets.Value('string'),
                    "PregnancyStatus": datasets.Value('string'),
                    "BodyPartExamined": datasets.Value('string'),
                }),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Download/extract the data and define the splits depending on the selected configuration.
        s3_series_paths = []
        s3_individual_paths = []
        if self.config.name == 'example':
            manifest_file = example_manifest_file
        else:
            manifest_file = full_manifest_file

        with open(manifest_file, 'r') as file:
            for line in file:
                match = re.search(r'cp (s3://[\S]+) .', line)
                if match:
                    s3_series_paths.append(match.group(1)[:-2])  # Drop the trailing '/*' so the series prefix can be listed
        for series in s3_series_paths:
            for content in fs.ls(series):
                s3_individual_paths.append(fs.info(content)['Key'])
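        # Illustrative example of the (assumed) s5cmd manifest format consumed above; each line
        # is expected to look roughly like:
        #   cp s3://<bucket>/<series-uuid>/* .
        # so match.group(1)[:-2] strips the trailing "/*", leaving a series prefix that fs.ls() can enumerate.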
        random.shuffle(s3_individual_paths)

        # Define the split sizes
        train_size = int(0.7 * len(s3_individual_paths))
        val_size = int(0.15 * len(s3_individual_paths))
        # Split the paths into train, validation, and test sets
        train_paths = s3_individual_paths[:train_size]
        val_paths = s3_individual_paths[train_size:train_size + val_size]
        test_paths = s3_individual_paths[train_size + val_size:]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "paths": train_paths,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "paths": val_paths,
                    "split": "dev",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "paths": test_paths,
                    "split": "test",
                },
            ),
        ]
    def _generate_examples(self, paths, split):
        """Yields examples."""
        # Yield one example (row) per DICOM file path.
        for path in paths:
            key = path
            with fs.open(path, 'rb') as f:
                dicom_data = pydicom.dcmread(f)
                pixel_array = dicom_data.pixel_array
                # Adjust for MONOCHROME1 to invert the grayscale values
                if dicom_data.PhotometricInterpretation == "MONOCHROME1":
                    pixel_array = np.max(pixel_array) - pixel_array
                # Normalize or scale 16-bit or other depth images to 8-bit
                if pixel_array.dtype != np.uint8:
                    pixel_array = (np.divide(pixel_array, np.max(pixel_array)) * 255).astype(np.uint8)
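                # Worked example of the scaling above, assuming 12-bit CT data stored as uint16:
                # if the slice maximum is 4095, then 4095 -> 255 and 2047 -> int(2047 / 4095 * 255) = 127.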
                # Convert to RGB if it is not already (e.g., for color images)
                if len(pixel_array.shape) == 2:
                    im = Image.fromarray(pixel_array, mode="L")  # L mode is for grayscale
                elif len(pixel_array.shape) == 3 and pixel_array.shape[2] in [3, 4]:
                    im = Image.fromarray(pixel_array[:, :, :3], mode="RGB")  # drop an alpha channel if present
                else:
                    raise ValueError("Unsupported DICOM image format")
                with io.BytesIO() as output:
                    im.save(output, format="PNG")
                    png_image = output.getvalue()
                # Extracting metadata
                ImageType = dicom_data.get("ImageType", "")
                StudyDate = dicom_data.get("StudyDate", "")
                SeriesDate = dicom_data.get("SeriesDate", "")
                Manufacturer = dicom_data.get("Manufacturer", "")
                StudyDescription = dicom_data.get("StudyDescription", "")
                SeriesDescription = dicom_data.get("SeriesDescription", "")
                PatientSex = dicom_data.get("PatientSex", "")
                PatientAge = dicom_data.get("PatientAge", "")
                PregnancyStatus = dicom_data.get("PregnancyStatus", None)
                # Collapse the raw tag to a simple flag: "None" when the tag is absent, "Yes" otherwise
                if PregnancyStatus is None:
                    PregnancyStatus = "None"
                else:
                    PregnancyStatus = "Yes"
                BodyPartExamined = dicom_data.get("BodyPartExamined", "")
                yield key, {
                    "image": png_image,
                    "ImageType": ImageType,
                    "StudyDate": StudyDate,
                    "SeriesDate": SeriesDate,
                    "Manufacturer": Manufacturer,
                    "StudyDescription": StudyDescription,
                    "SeriesDescription": SeriesDescription,
                    "PatientSex": PatientSex,
                    "PatientAge": PatientAge,
                    "PregnancyStatus": PregnancyStatus,
                    "BodyPartExamined": BodyPartExamined,
                }
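For reference, a minimal usage sketch of the loading script above. The repository id "YuxuanZhang888/ColonCancerCTDataset" is an assumption inferred from the committer and script names, and the trust_remote_code flag is only relevant on datasets versions that require opting in to script-based datasets.

# Hypothetical usage sketch; the repository id is assumed, not confirmed by this commit.
from datasets import load_dataset

# "example" is the default demo config declared in BUILDER_CONFIGS; pass "full_data" for the complete collection.
ds = load_dataset("YuxuanZhang888/ColonCancerCTDataset", "example", trust_remote_code=True)

sample = ds["train"][0]
print(sample["StudyDate"], sample["BodyPartExamined"], sample["image"].size)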