Citation

BibTeX format

@inproceedings{Hussain:2012,
  author        = {Hussain, M. S. and Monkaresi, H. and Calvo, R. A.},
  title         = {Combining classifiers in multimodal affect detection},
  pages         = {103--108},
  year          = {2012},
  issn          = {1445-1336},
  internal-note = {booktitle (proceedings title) is missing -- required for @inproceedings; confirm the venue before use},
}

RIS format (EndNote, RefMan)

TY  - CPAPER
AB  - Affect detection where users' mental states are automatically recognized from facial expressions, speech, physiology and other modalities, requires accurate machine learning and classification techniques. This paper investigates how combined classifiers, and their base classifiers, can be used in affect detection using features from facial video and multichannel physiology. The base classifiers evaluated include function, lazy and decision trees; and the combined where implemented as vote classifiers. Results indicate that the accuracy of affect detection can be improved using the combined classifiers especially by fusing the multimodal features. The base classifiers that are more useful for certain modalities have been identified. Vote classifiers also performed best for most of the individuals compared to the base classifiers.
AU  - Hussain, MS
AU  - Monkaresi, H
AU  - Calvo, RA
EP - 108
PY - 2012///
SN - 1445-1336
SP - 103
TI - Combining classifiers in multimodal affect detection
ER -