@InProceedings{Supelec854,
author = {Edouard Klein and Bilal Piot and Matthieu Geist and Olivier Pietquin},
title = {{A cascaded supervised learning approach to inverse reinforcement learning}},
year = {2013},
booktitle = {{Proceedings of the European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases (ECML/PKDD 2013)}},
publisher = {Springer},
volume = {8188},
pages = {1--16},
month = {September},
editor = {Blockeel, Hendrik and Kersting, Kristian and Nijssen, Siegfried and Zelezny, Filip},
series = {Lecture Notes in Computer Science},
address = {Prague, Czech Republic},
url = {http://www.ecmlpkdd2013.org/wp-content/uploads/2013/07/327.pdf},
isbn = {978-3-642-40987-5},
doi = {10.1007/978-3-642-40988-2_1},
abstract = {This paper considers the Inverse Reinforcement Learning (IRL) problem, that is, inferring a reward function for which a demonstrated expert policy is optimal. We propose to break the IRL problem down into two generic Supervised Learning steps: this is the Cascaded Supervised IRL (CSI) approach. A classification step that defines a score function is followed by a regression step providing a reward function. A theoretical analysis shows that the demonstrated expert policy is near-optimal for the computed reward function. Not needing to repeatedly solve a Markov Decision Process (MDP) and the ability to leverage existing techniques for classification and regression are two important advantages of the CSI approach. It is furthermore empirically demonstrated to compare positively to state-of-the-art approaches when using only transitions sampled according to the expert policy, up to the use of some heuristics. This is exemplified on two classical benchmarks (the mountain car problem and a highway driving simulator).}
}