Imperial College London

Prof. Dr. Tobias Reichenbach

Faculty of Engineering, Department of Bioengineering

Visiting Professor
 
 
 
//

Contact

 

+44 (0)20 7594 6370 · reichenbach [at Imperial — full address lost in extraction] · Website

 
 
//

Location

 

4.12, Royal School of Mines, South Kensington Campus

//

Summary

 

Publications

Citation

BibTeX format

@article{Etard:2019:10.1016/j.neuroimage.2019.06.029,
  author  = {Etard, O and Kegler, M and Braiman, C and Forte, AE and Reichenbach, T},
  title   = {Decoding of selective attention to continuous speech from the human auditory brainstem response},
  journal = {NeuroImage},
  volume  = {200},
  pages   = {1--11},
  year    = {2019},
  issn    = {1053-8119},
  doi     = {10.1016/j.neuroimage.2019.06.029},
  url     = {http://dx.doi.org/10.1016/j.neuroimage.2019.06.029},
}

RIS format (EndNote, RefMan)

TY  - JOUR
AB - Humans are highly skilled at analysing complex acoustic scenes. The segregation of different acoustic streams and the formation of corresponding neural representations is mostly attributed to the auditory cortex. Decoding of selective attention from neuroimaging has therefore focussed on cortical responses to sound. However, the auditory brainstem response to speech is modulated by selective attention as well, as recently shown through measuring the brainstem's response to running speech. Although the response of the auditory brainstem has a smaller magnitude than that of the auditory cortex, it occurs at much higher frequencies and therefore has a higher information rate. Here we develop statistical models for extracting the brainstem response from multi-channel scalp recordings and for analysing the attentional modulation according to the focus of attention. We demonstrate that the attentional modulation of the brainstem response to speech can be employed to decode the attentional focus of a listener from short measurements of 10s or less in duration. The decoding remains accurate when obtained from three EEG channels only. We further show how out-of-the-box decoding that employs subject-independent models, as well as decoding that is independent of the specific attended speaker is capable of achieving similar accuracy. These results open up new avenues for investigating the neural mechanisms for selective attention in the brainstem and for developing efficient auditory brain-computer interfaces.
AU - Etard,O
AU - Kegler,M
AU - Braiman,C
AU - Forte,AE
AU - Reichenbach,T
DO - 10.1016/j.neuroimage.2019.06.029
EP - 11
PY - 2019///
SN - 1053-8119
SP - 1
TI - Decoding of selective attention to continuous speech from the human auditory brainstem response
T2 - NeuroImage
UR - http://dx.doi.org/10.1016/j.neuroimage.2019.06.029
UR - https://www.sciencedirect.com/science/article/pii/S1053811919305208
UR - http://hdl.handle.net/10044/1/70831
VL - 200
ER -