Citation

BibTeX format

@article{Picinali:2022:10.1109/MSP.2022.3182929,
  author  = {Picinali, L. and Katz, B. F. G. and Geronazzo, M. and Majdak, P. and Reyes-Lecuona, A. and Vinciarelli, A.},
  title   = {The {SONICOM} Project: Artificial Intelligence-Driven Immersive Audio, From Personalization to Modeling},
  journal = {IEEE Signal Processing Magazine},
  volume  = {39},
  pages   = {85--88},
  year    = {2022},
  issn    = {1053-5888},
  doi     = {10.1109/MSP.2022.3182929},
  url     = {https://doi.org/10.1109/MSP.2022.3182929}
}

RIS format (EndNote, RefMan)

TY  - JOUR
AB  - Every individual perceives spatial audio differently, due in large part to the unique and complex shape of ears and head. Therefore, high-quality, headphone-based spatial audio should be uniquely tailored to each listener in an effective and efficient manner. Artificial intelligence (AI) is a powerful tool that can be used to drive forward research in spatial audio personalization. The SONICOM project aims to employ a data-driven approach that links physiological characteristics of the ear to the individual acoustic filters, which allows us to localize sound sources and perceive them as being located around us. A small amount of data acquired from users could allow personalized audio experiences, and AI could facilitate this by offering a new perspective on the matter. A Bayesian approach to computational neuroscience and binaural sound reproduction will be linked to create a metric for AI-based algorithms that will predict realistic spatial audio quality. Being able to consistently and repeatedly evaluate and quantify the improvements brought by technological advancements, as well as the impact these have on complex interactions in virtual environments, will be key for the development of new techniques and for unlocking new approaches to understanding the mechanisms of human spatial hearing and communication.
AU  - Picinali, L.
AU  - Katz, B. F. G.
AU  - Geronazzo, M.
AU  - Majdak, P.
AU  - Reyes-Lecuona, A.
AU  - Vinciarelli, A.
DO  - 10.1109/MSP.2022.3182929
EP  - 88
PY  - 2022///
SN  - 1053-5888
SP  - 85
TI  - The SONICOM Project: artificial intelligence-driven immersive audio, from personalization to modeling
T2  - IEEE Signal Processing Magazine
UR  - https://doi.org/10.1109/MSP.2022.3182929
UR  - https://ieeexplore.ieee.org/document/9931551
UR  - http://hdl.handle.net/10044/1/100377
VL  - 39
ER  - 