{
    "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_20220324.json",
    "version": "0.5.4",
    "changelog": {
        "0.5.4": "fix the wrong GPU index issue of multi-node",
        "0.5.3": "remove error dollar symbol in readme",
        "0.5.2": "remove the CheckpointLoader from the train.json",
        "0.5.1": "add RAM warning",
        "0.5.0": "update TensorRT descriptions",
        "0.4.9": "update the model weights",
        "0.4.8": "update the TensorRT part in the README file",
        "0.4.7": "fix mgpu finalize issue",
        "0.4.6": "enable deterministic training",
        "0.4.5": "add the command of executing inference with TensorRT models",
        "0.4.4": "adapt to BundleWorkflow interface",
        "0.4.3": "update this bundle to support TensorRT convert",
        "0.4.2": "support monai 1.2 new FlexibleUNet",
        "0.4.1": "add name tag",
        "0.4.0": "add support for multi-GPU training and evaluation",
        "0.3.2": "restructure readme to match updated template",
        "0.3.1": "add figures of workflow and metrics, add invert transform",
        "0.3.0": "update dataset processing",
        "0.2.1": "update to use monai 1.0.1",
        "0.2.0": "update license files",
        "0.1.0": "complete the first version model package",
        "0.0.1": "initialize the model package structure"
    },
    "monai_version": "1.2.0",
    "pytorch_version": "1.13.1",
    "numpy_version": "1.22.2",
    "optional_packages_version": {
        "nibabel": "4.0.1",
        "pytorch-ignite": "0.4.9"
    },
    "name": "Endoscopic tool segmentation",
    "task": "Endoscopic tool segmentation",
    "description": "A pre-trained binary segmentation model for endoscopic tool segmentation",
    "authors": "NVIDIA DLMED team",
    "copyright": "Copyright (c) 2021-2022, NVIDIA CORPORATION",
    "data_source": "private dataset",
    "data_type": "RGB",
    "image_classes": "three channel data, intensity [0-255]",
    "label_classes": "single channel data, 1/255 is tool, 0 is background",
    "pred_classes": "2 channels OneHot data, channel 1 is tool, channel 0 is background",
    "eval_metrics": {
        "mean_iou": 0.86
    },
    "references": [
        "Tan, M. and Le, Q. V. Efficientnet: Rethinking model scaling for convolutional neural networks. ICML, 2019a. https://arxiv.org/pdf/1905.11946.pdf",
        "O. Ronneberger, P. Fischer, and T. Brox. U-net: Convolutional networks for biomedical image segmentation. In International Conference on Medical image computing and computer-assisted intervention, pages 234\u2013241. Springer, 2015. https://arxiv.org/pdf/1505.04597.pdf"
    ],
    "network_data_format": {
        "inputs": {
            "image": {
                "type": "magnitude",
                "format": "RGB",
                "modality": "regular",
                "num_channels": 3,
                "spatial_shape": [
                    736,
                    480
                ],
                "dtype": "float32",
                "value_range": [
                    0,
                    1
                ],
                "is_patch_data": false,
                "channel_def": {
                    "0": "R",
                    "1": "G",
                    "2": "B"
                }
            }
        },
        "outputs": {
            "pred": {
                "type": "image",
                "format": "segmentation",
                "num_channels": 2,
                "spatial_shape": [
                    736,
                    480
                ],
                "dtype": "float32",
                "value_range": [
                    0,
                    1
                ],
                "is_patch_data": false,
                "channel_def": {
                    "0": "background",
                    "1": "tools"
                }
            }
        }
    }
}