@InProceedings{Supelec761,
author = {Edouard Klein and Matthieu Geist and Olivier Pietquin},
title = {{Reducing the dimensionality of the reward space in the Inverse Reinforcement Learning problem}},
year = {2011},
booktitle = {{Proceedings of the IEEE Workshop on Machine Learning Algorithms, Systems and Applications (MLASA 2011)}},
note = {4 pages},
month = {December},
address = {Honolulu (USA)},
url = {http://www.metz.supelec.fr//metz/personnel/geist_mat/pdfs/supelec761.pdf},
abstract = {This paper deals with the Inverse Reinforcement Learning (IRL) framework, whose purpose is to learn control policies from expert demonstrations. This method infers from demonstrations a utility function that the expert is allegedly maximizing. In this paper, we map the reward space into a subset of smaller dimensionality without loss of generality for all Markov Decision Processes (MDPs). We then present three experimental results showing both the promise of applying this result to existing IRL methods and its shortcomings. We conclude with considerations for future research.}
}