Imperial College London

Dr Benny Lo

Faculty of Medicine, Department of Metabolism, Digestion and Reproduction

Visiting Reader
 
 
 
//

Contact

 

+44 (0)20 7594 0806 | benny.lo | Website

 
 
//

Location

 

Bessemer Building, South Kensington Campus

//

Summary

 

Publications

Citation

BibTeX format

@article{Gu:2021:10.1109/TNNLS.2020.3009448,
  author  = {Gu, X and Guo, Y and Deligianni, F and Lo, B and Yang, G-Z},
  title   = {Cross-subject and cross-modal transfer for generalized abnormal gait pattern recognition},
  journal = {IEEE Transactions on Neural Networks and Learning Systems},
  year    = {2021},
  volume  = {32},
  pages   = {546--560},
  issn    = {1045-9227},
  doi     = {10.1109/TNNLS.2020.3009448},
  url     = {http://dx.doi.org/10.1109/TNNLS.2020.3009448},
}

RIS format (EndNote, RefMan)

TY  - JOUR
AB - For abnormal gait recognition, pattern-specific features indicating abnormalities are interleaved with the subject-specific differences representing biometric traits. Deep representations are, therefore, prone to overfitting, and the models derived cannot generalize well to new subjects. Furthermore, there is limited availability of abnormal gait data obtained from precise Motion Capture (Mocap) systems because of regulatory issues and slow adaptation of new technologies in health care. On the other hand, data captured from markerless vision sensors or wearable sensors can be obtained in home environments, but noises from such devices may prevent the effective extraction of relevant features. To address these challenges, we propose a cascade of deep architectures that can encode cross-modal and cross-subject transfer for abnormal gait recognition. Cross-modal transfer maps noisy data obtained from RGBD and wearable sensors to accurate 4-D representations of the lower limb and joints obtained from the Mocap system. Subsequently, cross-subject transfer allows disentangling subject-specific from abnormal pattern-specific gait features based on a multiencoder autoencoder architecture. To validate the proposed methodology, we obtained multimodal gait data based on a multicamera motion capture system along with synchronized recordings of electromyography (EMG) data and 4-D skeleton data extracted from a single RGBD camera. Classification accuracy was improved significantly in both Mocap and noisy modalities.
AU - Gu,X
AU - Guo,Y
AU - Deligianni,F
AU - Lo,B
AU - Yang,G-Z
DO - 10.1109/TNNLS.2020.3009448
EP - 560
PY - 2021///
SN - 1045-9227
SP - 546
TI - Cross-subject and cross-modal transfer for generalized abnormal gait pattern recognition
T2 - IEEE Transactions on Neural Networks and Learning Systems
UR - http://dx.doi.org/10.1109/TNNLS.2020.3009448
UR - http://hdl.handle.net/10044/1/81388
VL - 32
ER -