BibTeX format

@inproceedings{kamthe2018dataefficient,
  author    = {Kamthe, S. and Deisenroth, M. P.},
  title     = {Data-efficient Reinforcement Learning with Probabilistic Model Predictive Control},
  pages     = {1701--1710},
  publisher = {PMLR},
  year      = {2018},
}
@comment{NOTE(review): @inproceedings requires a booktitle (the PMLR proceedings title) — missing from the export; confirm and add.}

RIS format (EndNote, RefMan)

TY  - CONF
AB  - Trial-and-error based reinforcement learning (RL) has seen rapid advancements in recent times, especially with the advent of deep neural networks. However, the majority of autonomous RL algorithms require a large number of interactions with the environment. A large number of interactions may be impractical in many real-world applications, such as robotics, and many practical systems have to obey limitations in the form of state space or control constraints. To reduce the number of system interactions while simultaneously handling constraints, we propose a model-based RL framework based on probabilistic Model Predictive Control (MPC). In particular, we propose to learn a probabilistic transition model using Gaussian Processes (GPs) to incorporate model uncertainty into long-term predictions, thereby, reducing the impact of model errors. We then use MPC to find a control sequence that minimises the expected long-term cost. We provide theoretical guarantees for first-order optimality in the GP-based transition models with deterministic approximate inference for long-term planning. We demonstrate that our approach does not only achieve state-of-the-art data efficiency, but also is a principled way for RL in constrained environments.
AU  - Kamthe, S.
AU  - Deisenroth, M. P.
EP  - 1710
PY  - 2018///
SP  - 1701
TI  - Data-efficient reinforcement learning with probabilistic model predictive control
ER  -