BibTeX format
@inproceedings{Tavakoli:2019,
  author        = {Tavakoli, A. and Levdik, V. and Islam, R. and Smith, C. M. and Kormushev, Petar},
  title         = {Exploring Restart Distributions},
  booktitle     = {Reinforcement Learning and Decision Making ({RLDM})},
  year          = {2019},
  eprint        = {1811.11298},
  eprinttype    = {arXiv},
  url           = {https://arxiv.org/abs/1811.11298},
  publisher     = {arXiv},
  internal-note = {booktitle inferred from the companion PDF filename "Tavakoli_RLDM-2019.pdf" -- verify exact proceedings title; full given names for Tavakoli/Levdik/Islam/Smith not available in source},
}
In this section
Download a PDF with the full list of our publications: Robot-Intelligence-Lab-Publications-2021.pdf
A comprehensive list can also be found at Google Scholar, or by searching for the publications of author Kormushev, Petar.
NOTE(review): the entry below is a byte-identical duplicate of Tavakoli:2019
defined earlier in this file. A repeated key is an error for BibTeX/Biber, so
the leading "@" has been removed to disable the entry (text outside an
@...{...} construct is ignored). Delete once confirmed redundant.
inproceedings{Tavakoli:2019,
author = {Tavakoli, A and Levdik, V and Islam, R and Smith, CM and Kormushev, P},
publisher = {arXiv},
title = {Exploring Restart Distributions},
url = {https://arxiv.org/abs/1811.11298},
year = {2019}
}
TY - CPAPER
AB - We consider the generic approach of using an experience memory to help exploration by adapting a restart distribution. That is, given the capacity to reset the state with those corresponding to the agent's past observations, we help exploration by promoting faster state-space coverage via restarting the agent from a more diverse set of initial states, as well as allowing it to restart in states associated with significant past experiences. This approach is compatible with both on-policy and off-policy methods. However, a caveat is that altering the distribution of initial states could change the optimal policies when searching within a restricted class of policies. To reduce this unsought learning bias, we evaluate our approach in deep reinforcement learning which benefits from the high representational capacity of deep neural networks. We instantiate three variants of our approach, each inspired by an idea in the context of experience replay. Using these variants, we show that performance gains can be achieved, especially in hard exploration problems.
AU - Tavakoli, A
AU - Levdik, V
AU - Islam, R
AU - Smith, CM
AU - Kormushev, P
PB - arXiv
PY - 2019///
TI - Exploring Restart Distributions
UR - https://arxiv.org/abs/1811.11298
UR - http://kormushev.com/papers/Tavakoli_RLDM-2019.pdf
UR - http://hdl.handle.net/10044/1/89799
ER -