import tensorflow as tf
import numpy as np

import datasets

_DESCRIPTION = (
    "This dataset consists of 90k chest X-ray images from the MIMIC-CXR dataset. "
    "For each image, there is a concise report obtained from the PRO-CXR dataset. "
    "All images have a size of 512x512 pixels."
)

_BASE_URL = "https://drive.google.com/file/d/1u27GCgIIRqDz8a5-VTcMJ1pEFQbGv_QB/view?usp=sharing"

# Schema of each serialized tf.train.Example in the TFRecord file: the encoded
# image bytes and the corresponding report text.
FEATURE_DESCRIPTION_TFRECORD = {
    'report': tf.io.FixedLenFeature([], tf.string),
    'image': tf.io.FixedLenFeature([], tf.string),
}


def _parse_image_function(example_proto):
    """Parse a single serialized tf.train.Example into its report/image fields."""
    return tf.io.parse_single_example(example_proto, FEATURE_DESCRIPTION_TFRECORD)
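

# Hedged sketch (assumption, not part of the original builder): the TFRecord
# file is presumed to have been written with the schema above, roughly as in
# this illustrative, unused helper.
def _serialize_example(image_bytes, report):
    feature = {
        'report': tf.train.Feature(bytes_list=tf.train.BytesList(value=[report.encode('utf-8')])),
        'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_bytes])),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature)).SerializeToString()
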

class ReportsCXR(datasets.GeneratorBasedBuilder):
    """Chest X-ray images from MIMIC-CXR paired with concise reports."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                'image': datasets.Image(),
                'report': datasets.Value(dtype='string'),
            }),
        )

    def _get_drive_url(self, url):
        # Turn a Google Drive "view" link into a direct-download URL by
        # extracting the file id (the path segment after '/d/').
        base_url = 'https://drive.google.com/uc?id='
        split_url = url.split('/')
        return base_url + split_url[5]

    def _split_generators(self, dl_manager):
        # Download the single TFRecord archive and expose it as one 'full' split.
        archive_path = dl_manager.download(self._get_drive_url(_BASE_URL))
        return [
            datasets.SplitGenerator(name='full', gen_kwargs={'filepath': archive_path}),
        ]

    def _generate_examples(self, filepath):
        # Iterate over the TFRecord file eagerly and re-emit each record as a
        # (key, example) pair; the encoded image bytes are passed through and
        # decoded later by the datasets.Image() feature.
        raw_image_dataset = tf.data.TFRecordDataset(filepath)
        parsed_image_dataset = raw_image_dataset.map(_parse_image_function)
        for i, image_features in enumerate(parsed_image_dataset):
            image_raw = image_features['image'].numpy()
            str_report = image_features['report'].numpy().decode('utf-8')
            yield i, {'image': image_raw, 'report': str_report}
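

# A minimal usage sketch (assumptions: this script is saved as `reports_cxr.py`
# in the working directory and the single split is named 'full', as above).
if __name__ == "__main__":
    dataset = datasets.load_dataset("reports_cxr.py", split="full")
    sample = dataset[0]
    print(sample["report"])
    print(sample["image"].size)  # decoded into a PIL image by datasets.Image()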