BibTeX format
@inproceedings{Kamthe:2017,
  author = {Kamthe, S and Deisenroth, MP},
  title = {Data-Efficient Reinforcement Learning with Probabilistic Model Predictive Control},
  url = {http://arxiv.org/abs/1706.06491v1},
  year = {2017}
}
RIS format
TY - CPAPER
AB - Trial-and-error based reinforcement learning (RL) has seen rapid advancements in recent times, especially with the advent of deep neural networks. However, the majority of autonomous RL algorithms either rely on engineered features or a large number of interactions with the environment. Such a large number of interactions may be impractical in many real-world applications. For example, robots are subject to wear and tear and, hence, millions of interactions may change or damage the system. Moreover, practical systems have limitations in the form of the maximum torque that can be safely applied. To reduce the number of system interactions while naturally handling constraints, we propose a model-based RL framework based on Model Predictive Control (MPC). In particular, we propose to learn a probabilistic transition model using Gaussian Processes (GPs) to incorporate model uncertainties into long-term predictions, thereby, reducing the impact of model errors. We then use MPC to find a control sequence that minimises the expected long-term cost. We provide theoretical guarantees for the first-order optimality in the GP-based transition models with deterministic approximate inference for long-term planning. The proposed framework demonstrates superior data efficiency and learning rates compared to the current state of the art.
AU - Kamthe,S
AU - Deisenroth,MP
PY - 2017///
TI - Data-Efficient Reinforcement Learning with Probabilistic Model Predictive Control
UR - http://arxiv.org/abs/1706.06491v1
UR - http://hdl.handle.net/10044/1/52898
ER -
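
The abstract above outlines the core loop of the method: fit a probabilistic (GP) transition model from observed transitions, then use MPC to choose the control sequence that minimises the expected long-term cost under that model. The following self-contained Python sketch is a rough illustration only, not the authors' implementation (the paper uses deterministic approximate inference to propagate uncertainty, whereas this sketch rolls out the GP posterior mean and penalises its variance). The toy 1-D dynamics, the random-shooting MPC, the cost function, and all names are assumptions made for the example.

import numpy as np

def rbf_kernel(X1, X2, lengthscale=1.0, variance=1.0):
    # Squared-exponential kernel between the rows of X1 (n, d) and X2 (m, d).
    d2 = np.sum(X1**2, 1)[:, None] + np.sum(X2**2, 1)[None, :] - 2.0 * X1 @ X2.T
    return variance * np.exp(-0.5 * d2 / lengthscale**2)

class GPModel:
    # Minimal GP regression for one-step dynamics: (state, action) -> next state.
    def __init__(self, X, y, noise=1e-2):
        self.X, self.y = X, y
        K = rbf_kernel(X, X) + noise * np.eye(len(X))
        self.K_inv = np.linalg.inv(K)

    def predict(self, x):
        # Posterior mean and variance at a single input x of shape (d,).
        k = rbf_kernel(self.X, x[None, :])                     # shape (n, 1)
        mean = (k.T @ self.K_inv @ self.y).item()
        var = (rbf_kernel(x[None, :], x[None, :]) - k.T @ self.K_inv @ k).item()
        return mean, max(var, 0.0)

def mpc_action(model, state, horizon=5, n_candidates=100, rng=None):
    # Random-shooting MPC: sample bounded action sequences, roll the GP mean
    # forward over the horizon, and return the first action of the lowest-cost
    # sequence. The quadratic state cost plus a variance penalty is a stand-in
    # for the paper's expected long-term cost (an assumption of this sketch).
    rng = rng or np.random.default_rng()
    best_cost, best_first_action = np.inf, 0.0
    for _ in range(n_candidates):
        actions = rng.uniform(-1.0, 1.0, size=horizon)         # torque-style bound
        s, cost = state, 0.0
        for a in actions:
            s, var = model.predict(np.array([s, a]))
            cost += s**2 + 0.1 * var
        if cost < best_cost:
            best_cost, best_first_action = cost, actions[0]
    return best_first_action

rng = np.random.default_rng(0)
# Training transitions from a hypothetical linear system s' = 0.9 s + 0.5 a + noise.
SA = rng.uniform(-2.0, 2.0, size=(30, 2))                      # columns: state, action
y = 0.9 * SA[:, 0] + 0.5 * SA[:, 1] + 0.01 * rng.standard_normal(30)
model = GPModel(SA, y)
print("first MPC action from s = 1.5:", mpc_action(model, 1.5, rng=rng))

Note how the bounded action samples mirror the paper's motivation that practical systems impose limits such as a maximum safe torque: MPC handles such constraints naturally by only ever considering admissible action sequences.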