@inproceedings{aa73b535a9e1416d93df2edf2bca003c,
  title     = {Improving Valence Prediction in Dimensional Speech Emotion Recognition Using Linguistic Information},
  abstract  = {In dimensional emotion recognition, a model called valence, arousal, and dominance is widely used. The current research in dimensional speech emotion recognition has shown a problem that the performance of valence prediction is lower than arousal and dominance. This paper presents an approach to tackle this problem: Improving the low score of valence prediction by utilizing linguistic information. Our approach fuses acoustic features with linguistic features, which is a conversion from words to vectors. The results doubled the performance of valence prediction on both single-task learning single-output (predicting valence only) and multitask learning multi-output (predicting valence, arousal, and dominance). Using a proper combination of acoustic and linguistic features not only improved valence prediction, but also improved arousal and dominance predictions in multitask learning.},
  keywords  = {affective computing, dimensional emotion, linguistic feature, speech emotion recognition, valence prediction},
  author    = {Atmaja, Bagus Tris and Akagi, Masato},
  note      = {Publisher Copyright: {\textcopyright} 2020 IEEE.; 23rd Conference of the Oriental COCOSDA International Committee for the Co-Ordination and Standardisation of Speech Databases and Assessment Techniques, O-COCOSDA 2020 ; Conference date: 05-11-2020 Through 07-11-2020},
  year      = {2020},
  month     = nov,
  day       = {5},
  doi       = {10.1109/O-COCOSDA50338.2020.9295032},
  language  = {English},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  pages     = {166--171},
  booktitle = {Proceedings of 2020 23rd Conference of the Oriental COCOSDA International Committee for the Co-Ordination and Standardisation of Speech Databases and Assessment Techniques, O-COCOSDA 2020},
  address   = {United States},
}