BibTeX format
@inproceedings{Joulani:2016,
  author        = {Joulani, P. and Gy{\"o}rgy, A. and Szepesv{\'a}ri, C.},
  title         = {A Unified Modular Analysis of Online and Stochastic Optimization: Adaptivity, Optimism, Non-Convexity},
  url           = {http://hdl.handle.net/10044/1/49032},
  year          = {2016},
  internal-note = {NOTE(review): booktitle (required for @inproceedings) is missing -- confirm the venue; author diacritics and full given names should be verified against the paper (initials kept as exported); prefer full first names once confirmed},
}
RIS format (EndNote, RefMan)
TY - CPAPER
AB - We present a simple unified analysis of adaptive Mirror Descent (MD) and Follow-the-Regularized-Leader (FTRL) algorithms for online and stochastic optimization in (possibly infinite-dimensional) Hilbert spaces. The analysis is modular in the sense that it completely decouples the effect of possible assumptions on the loss functions (such as smoothness, strong convexity, and non-convexity) and on the optimization regularizers (such as strong convexity, non-smooth penalties in composite-objective learning, and non-monotone step-size sequences). We demonstrate the power of this decoupling by obtaining generalized algorithms and improved regret bounds for the so-called “adaptive optimistic online learning” setting. In addition, we simplify and extend a large body of previous work, including several variants of AdaGrad formulations, composite-objective and implicit-update algorithms. In all cases, the results follow as simple corollaries within few lines of algebra. Finally, the decomposition enables us to obtain preliminary global guarantees for limited classes of non-convex problems.
AU - Joulani, P
AU - Gyorgy, A
AU - Szepesvari, C
PY - 2016///
TI - A unified modular analysis of online and stochastic optimization: adaptivity, optimism, non-convexity
UR - http://hdl.handle.net/10044/1/49032
ER -