Imperial College London

DR BERNHARD KAINZ

Faculty of Engineering, Department of Computing

Reader in Medical Image Computing
 
 
 
//

Contact

 

+44 (0)20 7594 8349 · b.kainz · Website · CV

 
 
//

Location

 

372, Huxley Building, South Kensington Campus

//

Summary

 

Publications

Citation

BibTex format

@article{Zimmerer:2022:10.1109/TMI.2022.3170077,
  author  = {Zimmerer, D and Full, PM and Isensee, F and Jager, P and Adler, T and Petersen, J and Kohler, G and Ross, T and Reinke, A and Kascenas, A and Jensen, BS and O'Neil, AQ and Tan, J and Hou, B and Batten, J and Qiu, H and Kainz, B and Shvetsova, N and Fedulova, I and Dylov, DV and Yu, B and Zhai, J and Hu, J and Si, R and Zhou, S and Wang, S and Li, X and Chen, X and Zhao, Y and Marimont, SN and Tarroni, G and Saase, V and Maier-Hein, L and Maier-Hein, K},
  title   = {{MOOD} 2020: A public benchmark for out-of-distribution detection and localization on medical images},
  journal = {IEEE Transactions on Medical Imaging},
  volume  = {41},
  pages   = {2728--2738},
  year    = {2022},
  issn    = {0278-0062},
  doi     = {10.1109/TMI.2022.3170077},
  url     = {http://dx.doi.org/10.1109/TMI.2022.3170077}
}

RIS format (EndNote, RefMan)

TY  - JOUR
AB - Detecting Out-of-Distribution (OoD) data is one of the greatest challenges in safe and robust deployment of machine learning algorithms in medicine. When the algorithms encounter cases that deviate from the distribution of the training data, they often produce incorrect and over-confident predictions. OoD detection algorithms aim to catch erroneous predictions in advance by analysing the data distribution and detecting potential instances of failure. Moreover, flagging OoD cases may support human readers in identifying incidental findings. Due to the increased interest in OoD algorithms, benchmarks for different domains have recently been established. In the medical imaging domain, for which reliable predictions are often essential, an open benchmark has been missing. We introduce the Medical-Out-Of-Distribution-Analysis-Challenge (MOOD) as an open, fair, and unbiased benchmark for OoD methods in the medical imaging domain. The analysis of the submitted algorithms shows that performance has a strong positive correlation with the perceived difficulty, and that all algorithms show a high variance for different anomalies, making it yet hard to recommend them for clinical practice. We also see a strong correlation between challenge ranking and performance on a simple toy test set, indicating that this might be a valuable addition as a proxy dataset during anomaly detection algorithm development.
AU - Zimmerer,D
AU - Full,PM
AU - Isensee,F
AU - Jager,P
AU - Adler,T
AU - Petersen,J
AU - Kohler,G
AU - Ross,T
AU - Reinke,A
AU - Kascenas,A
AU - Jensen,BS
AU - O'Neil,AQ
AU - Tan,J
AU - Hou,B
AU - Batten,J
AU - Qiu,H
AU - Kainz,B
AU - Shvetsova,N
AU - Fedulova,I
AU - Dylov,DV
AU - Yu,B
AU - Zhai,J
AU - Hu,J
AU - Si,R
AU - Zhou,S
AU - Wang,S
AU - Li,X
AU - Chen,X
AU - Zhao,Y
AU - Marimont,SN
AU - Tarroni,G
AU - Saase,V
AU - Maier-Hein,L
AU - Maier-Hein,K
DO - 10.1109/TMI.2022.3170077
EP - 2738
PY - 2022///
SN - 0278-0062
SP - 2728
TI - MOOD 2020: A public benchmark for out-of-distribution detection and localization on medical images
T2 - IEEE Transactions on Medical Imaging
UR - http://dx.doi.org/10.1109/TMI.2022.3170077
UR - https://www.ncbi.nlm.nih.gov/pubmed/35468060
UR - https://ieeexplore.ieee.org/document/9762702
UR - http://hdl.handle.net/10044/1/96881
VL - 41
ER -