BibTeX format
@inproceedings{Goodman:2010,
  author    = {Goodman, Dan F. M. and Brette, Romain},
  title     = {Learning to localise sounds with spiking neural networks},
  booktitle = {Advances in Neural Information Processing Systems 23},
  year      = {2010},
}
In this section
Duplicate of the Goodman:2010 entry above. Duplicate citation keys are an
error in BibTeX, so the leading @ has been removed; without it the entry is
plain text outside any @type{...} group and BibTeX ignores it entirely.
inproceedings{Goodman:2010,
author = {Goodman, DFM and Brette, R},
title = {Learning to localise sounds with spiking neural networks},
year = {2010}
}
TY - CPAPER
AB - To localise the source of a sound, we use location-specific properties of the signals received at the two ears caused by the asymmetric filtering of the original sound by our head and pinnae, the head-related transfer functions (HRTFs). These HRTFs change throughout an organism's lifetime, during development for example, and so the required neural circuitry cannot be entirely hardwired. Since HRTFs are not directly accessible from perceptual experience, they can only be inferred from filtered sounds. We present a spiking neural network model of sound localisation based on extracting location-specific synchrony patterns, and a simple supervised algorithm to learn the mapping between synchrony patterns and locations from a set of example sounds, with no previous knowledge of HRTFs. After learning, our model was able to accurately localise new sounds in both azimuth and elevation, including the difficult task of distinguishing sounds coming from the front and back.
AU - Goodman, DFM
AU - Brette, R
PY - 2010///
TI - Learning to localise sounds with spiking neural networks
ER -
For more information about the group, please contact:
Dr Dan Goodman
+44 (0)20 7594 6264
d.goodman@imperial.ac.uk