BibTeX format
@inproceedings{Tavakoli:2018,
  author = {Tavakoli, A and Pardo, F and Kormushev, P},
  booktitle = {Proceedings of the AAAI Conference on Artificial Intelligence},
  publisher = {AAAI},
  title = {Action branching architectures for deep reinforcement learning},
  url = {http://kormushev.com/papers/Tavakoli_AAAI-2018.pdf},
  year = {2018}
}
RIS format (EndNote, RefMan)
TY - CPAPER
AB - Discrete-action algorithms have been central to numerous recent successes of deep reinforcement learning. However, applying these algorithms to high-dimensional action tasks requires tackling the combinatorial increase of the number of possible actions with the number of action dimensions. This problem is further exacerbated for continuous-action tasks that require fine control of actions via discretization. In this paper, we propose a novel neural architecture featuring a shared decision module followed by several network branches, one for each action dimension. This approach achieves a linear increase of the number of network outputs with the number of degrees of freedom by allowing a level of independence for each individual action dimension. To illustrate the approach, we present a novel agent, called Branching Dueling Q-Network (BDQ), as a branching variant of the Dueling Double Deep Q-Network (Dueling DDQN). We evaluate the performance of our agent on a set of challenging continuous control tasks. The empirical results show that the proposed agent scales gracefully to environments with increasing action dimensionality and indicate the significance of the shared decision module in coordination of the distributed action branches. Furthermore, we show that the proposed agent performs competitively against a state-of-the-art continuous control algorithm, Deep Deterministic Policy Gradient (DDPG).
AU - Tavakoli, A
AU - Pardo, F
AU - Kormushev, P
PB - AAAI
PY - 2018///
TI - Action branching architectures for deep reinforcement learning
UR - http://kormushev.com/papers/Tavakoli_AAAI-2018.pdf
UR - http://hdl.handle.net/10044/1/60671
ER -
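Illustrative sketch (not part of the citation record)

The abstract above describes the architecture at a high level: a shared decision module feeds one network branch per action dimension, so the number of outputs grows linearly with the degrees of freedom rather than combinatorially. Below is a minimal PyTorch sketch of that idea. It is not the authors' implementation; the class name, layer sizes, and the mean-advantage dueling aggregation are assumptions chosen for illustration.

import torch
import torch.nn as nn


class BranchingDuelingQNet(nn.Module):
    """Sketch of a branching dueling Q-network (hypothetical layout)."""

    def __init__(self, state_dim: int, num_branches: int,
                 actions_per_branch: int, hidden: int = 128):
        super().__init__()
        # Shared decision module: one trunk common to all action dimensions.
        self.trunk = nn.Sequential(
            nn.Linear(state_dim, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
        )
        # Single state-value head, shared across branches (dueling part).
        self.value = nn.Linear(hidden, 1)
        # One advantage branch per action dimension: total output size is
        # num_branches * actions_per_branch, i.e. linear, not combinatorial.
        self.branches = nn.ModuleList(
            nn.Linear(hidden, actions_per_branch) for _ in range(num_branches)
        )

    def forward(self, state: torch.Tensor) -> torch.Tensor:
        h = self.trunk(state)
        v = self.value(h)  # (batch, 1)
        qs = []
        for branch in self.branches:
            a = branch(h)  # (batch, actions_per_branch)
            # One possible dueling aggregation per branch (an assumption here):
            # Q_d(s, a_d) = V(s) + A_d(s, a_d) - mean_{a'_d} A_d(s, a'_d)
            qs.append(v + a - a.mean(dim=-1, keepdim=True))
        return torch.stack(qs, dim=1)  # (batch, num_branches, actions_per_branch)


# Usage: greedy action selection picks the argmax independently in each branch,
# yielding one discrete sub-action per action dimension.
net = BranchingDuelingQNet(state_dim=8, num_branches=4, actions_per_branch=7)
q = net(torch.randn(2, 8))
actions = q.argmax(dim=-1)  # (batch, num_branches)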