@inproceedings{ce2c2a301efb45ff96aeabc2e526c9b2,
  author    = {Lin, Yixuan and Zhang, Kaiqing and Yang, Zhuoran and Wang, Zhaoran and Ba{\c{s}}ar, Tamer and Sandhu, Romeil and Liu, Ji},
  title     = {A Communication-Efficient Multi-Agent Actor-Critic Algorithm for Distributed Reinforcement Learning},
  abstract  = {This paper considers a distributed reinforcement learning problem in which a network of multiple agents aim to cooperatively maximize the globally averaged return through communication with only local neighbors. A randomized communication-efficient multi-agent actor-critic algorithm is proposed for possibly unidirectional communication relationships depicted by a directed graph. It is shown that the algorithm can solve the problem for strongly connected graphs by allowing each agent to transmit only two scalar-valued variables at one time.},
  booktitle = {2019 IEEE 58th Conference on Decision and Control, CDC 2019},
  series    = {Proceedings of the IEEE Conference on Decision and Control},
  pages     = {5562--5567},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  address   = {United States},
  year      = {2019},
  month     = dec,
  doi       = {10.1109/CDC40024.2019.9029257},
  language  = {English (US)},
  note      = {Funding Information: The research of Zhang and Ba{\c{s}}ar was supported in part by the US Army Research Laboratory (ARL) Cooperative Agreement W911NF-17-2-0196. The research of Sandhu was supported by the U.S. Air Force Office of Scientific Research (AFOSR) grant FA9550-18-1-0130 and National Science Foundation (NSF) grant ECCS-1749937. Funding Information: The research of Zhang and Ba{\c{s}}ar was supported in part by the US Army Research Laboratory (ARL) Cooperative Agreement W911NF-17-2-0196 Publisher Copyright: {\textcopyright} 2019 IEEE.; 58th IEEE Conference on Decision and Control, CDC 2019 ; Conference date: 11-12-2019 Through 13-12-2019},
  internal-note = {NOTE(review): address holds a country, not the publisher city (expected e.g. Piscataway, NJ) -- auto-export artifact; confirm before relying on it},
}