BibTeX format

@article{evers2018acousticslam,
  author  = {Evers, C. and Naylor, P. A.},
  title   = {Acoustic {SLAM}},
  journal = {IEEE/ACM Transactions on Audio, Speech and Language Processing},
  volume  = {26},
  pages   = {1484--1498},
  year    = {2018},
  issn    = {2329-9290},
  doi     = {10.1109/TASLP.2018.2828321},
}

RIS format (EndNote, RefMan)

AB - An algorithm is presented that enables devices equipped with microphones, such as robots, to move within their environment in order to explore, adapt to and interact with sound sources of interest. Acoustic scene mapping creates a 3D representation of the positional information of sound sources across time and space. In practice, positional source information is only provided by Direction-of-Arrival (DoA) estimates of the source directions; the source-sensor range is typically difficult to obtain. DoA estimates are also adversely affected by reverberation, noise, and interference, leading to errors in source location estimation and consequent false DoA estimates. Moreover, many acoustic sources, such as human talkers, are not continuously active, such that periods of inactivity lead to missing DoA estimates. Withal, the DoA estimates are specified relative to the observer's sensor location and orientation. Accurate positional information about the observer therefore is crucial. This paper proposes Acoustic Simultaneous Localization and Mapping (aSLAM), which uses acoustic signals to simultaneously map the 3D positions of multiple sound sources whilst passively localizing the observer within the scene map. The performance of aSLAM is analyzed and evaluated using a series of realistic simulations. Results are presented to show the impact of the observer motion and sound source localization accuracy.
AU - Evers, C
AU - Naylor, PA
DO - 10.1109/TASLP.2018.2828321
EP - 1498
PY - 2018///
SN - 2329-9290
SP - 1484
TI - Acoustic SLAM
T2 - IEEE/ACM Transactions on Audio, Speech and Language Processing
UR -
UR -
UR -
VL - 26
ER -