Citation

BibTeX format

@inproceedings{Wang:2019:10.1007/978-3-030-12029-0_21,
author = {Wang, C and MacGillivray, T and Macnaught, G and Yang, G and Newby, D},
doi = {10.1007/978-3-030-12029-0_21},
pages = {191--199},
title = {A Two-Stage U-Net Model for 3D Multi-class Segmentation on Full-Resolution Cardiac Data},
url = {http://dx.doi.org/10.1007/978-3-030-12029-0_21},
year = {2019}
}

RIS format (EndNote, RefMan)

TY  - CPAPER
AB  - Deep convolutional neural networks (CNNs) have achieved state-of-the-art performance for multi-class segmentation of medical images. However, a common problem when dealing with large, high-resolution 3D data is that the volumes input into the deep CNNs have to be either cropped or downsampled due to the limited memory capacity of computing devices. These operations can lead to loss of resolution and class imbalance in the input data batches, thus degrading the performance of segmentation algorithms. Inspired by the architecture of the image super-resolution CNN (SRCNN), we propose a two-stage modified U-Net framework that simultaneously learns to detect an ROI within the full volume and to classify voxels without losing the original resolution. Experiments on a variety of multi-modal 3D cardiac images have demonstrated that this framework achieves better segmentation performance than state-of-the-art deep CNNs trained with the same similarity metrics.
AU  - Wang, C
AU  - MacGillivray, T
AU  - Macnaught, G
AU  - Yang, G
AU  - Newby, D
DO  - 10.1007/978-3-030-12029-0_21
EP  - 199
PY  - 2019///
SN  - 0302-9743
SP  - 191
TI  - A Two-Stage U-Net Model for 3D Multi-class Segmentation on Full-Resolution Cardiac Data
UR  - http://dx.doi.org/10.1007/978-3-030-12029-0_21
ER  -
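
For readers who want a concrete picture of the method summarised in the abstract above, the snippet below is a minimal sketch of the two-stage idea: a first network locates an ROI on a downsampled copy of the volume, and a second network classifies voxels in the full-resolution crop inside that ROI. It is not the authors' implementation; the TinyUNet3D body, the ROI margin, the class counts, and the toy input size are all illustrative assumptions, with PyTorch standing in for whichever framework the paper used.

# A minimal sketch of the two-stage pipeline described in the abstract, not the
# authors' code: TinyUNet3D, the ROI margin, the class counts, and the toy
# input size are illustrative assumptions.
import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyUNet3D(nn.Module):
    """Stand-in for a 3D U-Net: one encoder level, one decoder level."""
    def __init__(self, in_ch, out_ch):
        super().__init__()
        self.enc = nn.Sequential(nn.Conv3d(in_ch, 8, 3, padding=1), nn.ReLU())
        self.down = nn.MaxPool3d(2)
        self.mid = nn.Sequential(nn.Conv3d(8, 16, 3, padding=1), nn.ReLU())
        self.dec = nn.Sequential(nn.Conv3d(16 + 8, 8, 3, padding=1), nn.ReLU())
        self.head = nn.Conv3d(8, out_ch, 1)

    def forward(self, x):
        e = self.enc(x)
        m = self.mid(self.down(e))
        # Upsample the bottleneck back to the encoder size and fuse (skip connection).
        u = F.interpolate(m, size=e.shape[2:], mode="trilinear", align_corners=False)
        return self.head(self.dec(torch.cat([u, e], dim=1)))

def roi_from_mask(mask, margin=4):
    """Bounding box of the foreground voxels, padded by a safety margin."""
    idx = mask.nonzero(as_tuple=False)
    if idx.numel() == 0:                  # no foreground found: keep the whole volume
        return tuple(slice(0, s) for s in mask.shape)
    lo = (idx.min(dim=0).values - margin).clamp(min=0)
    hi = idx.max(dim=0).values + margin + 1
    return tuple(slice(int(l), min(int(h), s))
                 for l, h, s in zip(lo, hi, mask.shape))

stage1 = TinyUNet3D(in_ch=1, out_ch=2)   # stage 1: coarse foreground/background
stage2 = TinyUNet3D(in_ch=1, out_ch=8)   # stage 2: multi-class labels (class count arbitrary here)

volume = torch.randn(1, 1, 96, 96, 96)   # toy full-resolution volume (N, C, D, H, W)

# Stage 1: detect the ROI on a downsampled copy that fits in memory.
coarse = F.interpolate(volume, scale_factor=0.5, mode="trilinear", align_corners=False)
mask = stage1(coarse).argmax(dim=1)[0]                          # (48, 48, 48) binary mask
roi = roi_from_mask(mask)
roi_full = tuple(slice(s.start * 2, s.stop * 2) for s in roi)   # back to full-res coordinates

# Stage 2: classify voxels in the full-resolution crop, so no resolution is
# lost inside the ROI.
crop = volume[:, :, roi_full[0], roi_full[1], roi_full[2]]
labels = stage2(crop).argmax(dim=1)

Because only the cropped region is processed at native resolution, the second stage sees far smaller tensors than a single-pass network would, which is what lets the framework avoid the crop-or-downsample trade-off described in the abstract.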