@InCollection{Supelec726,
author = {Edouard Klein and Matthieu Geist and Olivier Pietquin},
title = {{Batch, Off-policy and Model-free Apprenticeship Learning}},
year = {2011},
booktitle = {{Proceedings of the European Workshop on Reinforcement Learning (EWRL 2011)}},
publisher = {Springer-Verlag Berlin Heidelberg},
note = {12 pages},
month = sep,
series = {Lecture Notes in Computer Science (LNCS)},
address = {Athens (Greece)},
url = {http://www.metz.supelec.fr//metz/personnel/geist_mat/pdfs/supelec726.pdf},
abstract = {This paper addresses the problem of apprenticeship learning, that is,
learning control policies from demonstrations by an expert. An efficient framework
for this is inverse reinforcement learning (IRL). Based on the assumption that the
expert maximizes a utility function, IRL aims at learning the underlying reward
from example trajectories. Many IRL algorithms assume that the reward function is
linearly parameterized and rely on the computation of some associated feature
expectations, which is done through Monte Carlo simulation. However, this assumes
access to full trajectories for the expert policy as well as at least a generative
model for intermediate policies. In this paper, we introduce a temporal difference
method, namely LSTD-$\mu$, to compute these feature expectations. This allows
extending apprenticeship learning to a batch and off-policy setting.}
}
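
A minimal NumPy sketch of the idea summarized in the abstract: replacing the Monte
Carlo estimate of a policy's feature expectations with an LSTD-style solve over a
batch of off-policy transitions. The state-action formulation, the names psi, phi,
and policy, and the ridge term are illustrative assumptions, not the paper's exact
algorithm.

    import numpy as np

    def lstd_mu(transitions, policy, psi, phi, gamma=0.99, reg=1e-6):
        """Estimate the feature expectation of `policy` from a batch of
        off-policy transitions by solving one LSTD system per component
        of the reward features phi (the LSTD-mu idea, sketched).

        transitions : list of (s, a, s_next) from any behaviour policy
        policy      : maps a state to the evaluated policy's action
        psi         : basis features, psi(s, a) -> (k,) array
        phi         : reward features, phi(s, a) -> (p,) array
        gamma       : discount factor
        """
        s0, a0, _ = transitions[0]
        k, p = psi(s0, a0).shape[0], phi(s0, a0).shape[0]
        A = reg * np.eye(k)          # small ridge term keeps A invertible
        b = np.zeros((k, p))
        for s, a, s_next in transitions:
            x = psi(s, a)
            # Bootstrap on the evaluated policy, not the behaviour policy,
            # which is what makes the estimate off-policy.
            x_next = psi(s_next, policy(s_next))
            A += np.outer(x, x - gamma * x_next)
            b += np.outer(x, phi(s, a))
        Xi = np.linalg.solve(A, b)   # (k, p): one weight vector per feature
        # Estimated feature expectation of (s, a) under `policy`.
        return lambda s, a: Xi.T @ psi(s, a)

Because the estimate only consumes stored transitions, it fits the batch,
model-free setting the abstract describes: no generative model and no fresh
rollouts of intermediate policies are needed.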