@InProceedings{Supelec885,
author = {Bilal Piot and Matthieu Geist and Olivier Pietquin},
title = {{Boosted Bellman Residual Minimization Handling Expert Demonstrations}},
year = {2014},
booktitle = {{European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases (ECML/PKDD)}},
publisher = {Springer},
note = {(to appear)},
url = {http://www.metz.supelec.fr//metz/personnel/geist_mat/pdfs/supelec885.pdf},
abstract = {This paper addresses the problem of batch Reinforcement Learning
with Expert Demonstrations (RLED). In RLED, the goal is to find
an optimal policy of a Markov Decision Process (MDP), using a
fixed data set of sampled transitions of the MDP as well as a
fixed data set of expert demonstrations. This is slightly
different from the batch Reinforcement Learning (RL) framework
where only fixed sampled transitions of the MDP are available.
Thus, the aim of this article is to propose algorithms that
leverage these expert data. The idea proposed here differs from
Approximate Dynamic Programming methods in that we
minimize the Optimal Bellman Residual (OBR), where the
minimization is guided by constraints defined by the expert
demonstrations. This choice is motivated by the fact that
controlling the OBR implies controlling the distance between the
estimated and optimal quality functions. However, this method
presents some difficulties as the criterion to minimize is
non-convex, non-differentiable and biased. Those difficulties are
overcome via the embedding of distributions in a Reproducing
Kernel Hilbert Space (RKHS) and a boosting technique that yields
non-parametric algorithms. Finally, our algorithms are
compared to the only state-of-the-art algorithm, Approximate
Policy Iteration with Demonstrations (APID), in
different experimental settings.}
}
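% For reference, a minimal LaTeX sketch of the criterion the abstract
% describes, under standard MDP notation; the symbols (T^*, Q, mu, gamma)
% and the large-margin form of the expert constraints are assumptions on
% my part, not taken verbatim from the paper. The Optimal Bellman Residual
% (OBR) of a quality function Q, measured under a sampling distribution mu:
%
%   \[ J(Q) = \left\| T^* Q - Q \right\|_{1,\mu}, \quad
%      (T^* Q)(s,a) = r(s,a)
%        + \gamma \, \mathbb{E}_{s' \sim P(\cdot \mid s,a)}
%          \left[ \max_{a'} Q(s',a') \right]. \]
%
% The expert demonstrations then guide the minimization through
% constraints, e.g. large-margin constraints of the kind used by APID:
%
%   \[ Q(s_E, a_E) \geq \max_{a \neq a_E} Q(s_E, a) + \xi
%      \quad \text{for expert pairs } (s_E, a_E), \; \xi \geq 0. \]
%
% The exact formulation in the paper may differ; see the PDF linked above.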