% Piot, Pietquin & Geist, InterSpeech 2014.
% Review notes: author names normalized to "Last, First" form; title entered in
% Title Case with outer double braces removed so the style controls casing;
% only the {InterSpeech} acronym is brace-protected in booktitle; doubled
% slash in URL fixed; non-ASCII apostrophe in abstract replaced for classic
% BibTeX compatibility. Citation key left unchanged for existing \cite users.
@inproceedings{Supelec887,
  author    = {Piot, Bilal and Pietquin, Olivier and Geist, Matthieu},
  title     = {Predicting When to Laugh with Structured Classification},
  booktitle = {Annual Conference of the International Speech Communication
               Association ({InterSpeech})},
  year      = {2014},
  url       = {http://www.metz.supelec.fr/metz/personnel/geist_mat/pdfs/supelec887.pdf},
  abstract  = {Today, Embodied Conversational Agents (ECAs) are emerging as
               natural media to interact with machines. Applications are
               numerous and ECAs can reduce the technological gap between
               people by providing user-friendly interfaces. Yet, ECAs are
               still unable to produce social signals appropriately during
               their interaction with humans, which tends to make the
               interaction less instinctive. Especially, very little attention
               has been paid to the use of laughter in human-avatar
               interactions despite the crucial role played by laughter in
               human-human interaction. In this paper, a method for predicting
               the most appropriate moment for laughing for an ECA is
               proposed. Imitation learning via a structured classification
               algorithm is used in this purpose and is shown to produce a
               behavior similar to humans' on a practical application: the
               yes/no game.},
}