BibTeX format
@article{Naidoo:2026:10.1016/j.media.2026.104006,
  author  = {Naidoo, P. and Fernandes, P. and Dadashi Serej, N. and Stowell, C. C. and Manisty, C. H. and Francis, D. P. and Zolgharni, M.},
  title   = {Robust fine-grained echocardiographic view classification with supervised contrastive learning},
  journal = {Medical Image Analysis},
  volume  = {110},
  year    = {2026},
  doi     = {10.1016/j.media.2026.104006},
  url     = {https://doi.org/10.1016/j.media.2026.104006}
}
RIS format (EndNote, RefMan)
TY - JOUR
AB - Accurate classification of echocardiographic views is fundamental for automated cardiac analysis. However, clinical practice relies on a large, heterogeneous set of fine-grained acquisitions that introduce substantial inter-observer variability. Existing studies have primarily focused on limited view sets, often collapsing specialised views into broad categories, which limits their clinical relevance. We address this limitation by introducing TTE47, the first publicly available benchmark comprising 47 clinically meaningful views annotated independently by three experts. This dataset enables the rigorous quantification of inter-observer agreement and establishes a foundation for reproducible, clinically relevant evaluation. To tackle the dual challenges of subtle inter-class distinctions and structured label variability, we propose a novel supervised contrastive learning framework incorporating a tailored loss function. Our method outperforms cross-entropy and standard supervised contrastive baselines, achieving leading performance among evaluated methods on TTE47 and surpassing prior work on TMED-2 without dataset-specific pretraining, using a model pretrained on TTE47. Beyond accuracy, we introduce clustering-based metrics, Detection Rate and Label Recovery Precision, that measure semantic coherence and the model's ability to resist annotation variability. Results show that the learned feature space aligns more strongly with underlying anatomical structure than with any single annotator's style, enabling resilience to label shifts and maintaining robustness comparable to human-level disagreement. By integrating multi-expert evaluation, robust representation learning, and interpretable feature-space analysis, this work establishes a scalable and clinically relevant framework for fine-grained echo view classification. Our findings highlight the potential of contrastive pretraining to standardise interpretation, mitigate subjectivity, and enhance the reliability of AI.
AU - Naidoo, P
AU - Fernandes, P
AU - Dadashi Serej, N
AU - Stowell, CC
AU - Manisty, CH
AU - Francis, DP
AU - Zolgharni, M
DO - 10.1016/j.media.2026.104006
PY - 2026///
TI - Robust fine-grained echocardiographic view classification with supervised contrastive learning.
T2 - Med Image Anal
UR - http://dx.doi.org/10.1016/j.media.2026.104006
UR - https://www.ncbi.nlm.nih.gov/pubmed/41771198
VL - 110
ER -