@InCollection{Supelec423,
author = {Matthieu Geist and Olivier Pietquin and Gabriel Fricout},
title = {{Bayesian Reward Filtering}},
year = {2008},
booktitle = {{Recent Advances in Reinforcement Learning}},
publisher = {Springer-Verlag},
volume = {5323},
pages = {96--109},
month = {June},
note = {Revised and selected papers of EWRL 2008},
editor = {S. Girgin and others},
series = {Lecture Notes in Computer Science (LNCS)},
url = {http://www.metz.supelec.fr/metz/recherche/publis_pdf/Supelec423.pdf},
doi = {10.1007/978-3-540-89722-4_8},
abstract = {A wide variety of function approximation schemes have been applied to reinforcement learning. However, Bayesian filtering approaches, which have been shown to be efficient in other fields such as neural network training, have been little studied. We propose a general Bayesian filtering framework for reinforcement learning, as well as a specific implementation based on sigma-point Kalman filtering and kernel machines. This allows us to derive an efficient off-policy, model-free, approximate temporal-difference algorithm, which is demonstrated on two simple benchmarks.}
}