Citation

BibTeX format

@article{Arulkumaran:2017:10.1109/MSP.2017.2743240,
author = {Arulkumaran, K. and Deisenroth, M. P. and Brundage, M. and Bharath, A. A.},
doi = {10.1109/MSP.2017.2743240},
journal = {IEEE Signal Processing Magazine},
pages = {26--38},
title = {A brief survey of deep reinforcement learning},
url = {http://dx.doi.org/10.1109/MSP.2017.2743240},
volume = {34},
year = {2017}
}
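
A minimal usage sketch for the BibTeX entry above, assuming it is saved to a file named refs.bib (the file name and the IEEEtran bibliography style are illustrative choices, not part of the export):

\documentclass{article}
\begin{document}
Deep reinforcement learning is surveyed in \cite{Arulkumaran:2017:10.1109/MSP.2017.2743240}.
% The IEEEtran bibliography style is assumed to be installed; a standard style such as plain also works.
\bibliographystyle{IEEEtran}
% refs.bib is assumed to contain the entry shown above.
\bibliography{refs}
\end{document}

Running latex, then bibtex, then latex twice on this file resolves the citation.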

RIS format (EndNote, RefMan)

TY  - JOUR
AB  - Deep reinforcement learning (DRL) is poised to revolutionize the field of artificial intelligence (AI) and represents a step toward building autonomous systems with a higher-level understanding of the visual world. Currently, deep learning is enabling reinforcement learning (RL) to scale to problems that were previously intractable, such as learning to play video games directly from pixels. DRL algorithms are also applied to robotics, allowing control policies for robots to be learned directly from camera inputs in the real world. In this survey, we begin with an introduction to the general field of RL, then progress to the main streams of value-based and policy-based methods. Our survey will cover central algorithms in deep RL, including the deep Q-network (DQN), trust region policy optimization (TRPO), and asynchronous advantage actor critic. In parallel, we highlight the unique advantages of deep neural networks, focusing on visual understanding via RL. To conclude, we describe several current areas of research within the field.
AU  - Arulkumaran, K.
AU  - Deisenroth, M. P.
AU  - Brundage, M.
AU  - Bharath, A. A.
DO  - 10.1109/MSP.2017.2743240
EP  - 38
PY  - 2017///
SN  - 1053-5888
SP  - 26
TI  - A brief survey of deep reinforcement learning
T2  - IEEE Signal Processing Magazine
UR  - http://dx.doi.org/10.1109/MSP.2017.2743240
UR  - https://arxiv.org/pdf/1708.05866.pdf
UR  - http://hdl.handle.net/10044/1/53340
VL  - 34
ER  -