%
% GENERATED FROM https://www.coli.uni-saarland.de
% by : anonymous
% IP : coli2006.lst.uni-saarland.de
% at : Mon, 05 Feb 2024 15:43:00 +0100 GMT
%
% Selection : Author: Shi-Min_Hu
%

@InProceedings{Albrecht_et_al:2002,
  AUTHOR    = {Albrecht, Irene and Haber, Jörg and Kähler, Kolja and Schröder, Marc and Seidel, Hans-Peter},
  TITLE     = {May I talk to you? :-) - Facial Animation from Text},
  YEAR      = {2002},
  BOOKTITLE = {Proceedings of the 10th Pacific Conference on Computer Graphics and Applications (Pacific Graphics 2002), October 9-11},
  EDITOR    = {Coquillart, Sabine and Shum, Heung-Yeung and Hu, Shi-Min},
  ADDRESS   = {Tsinghua University, Beijing},
  URL       = {http://www.mpi-sb.mpg.de/resources/FAM/publ/pg2002.pdf},
  ABSTRACT  = {We introduce a facial animation system that produces real-time animation sequences including speech synchronization and non-verbal speech-related facial expressions from plain text input. A state-of-the-art text-to-speech synthesis component performs linguistic analysis of the text input and creates a speech signal from phonetic and intonation information. The phonetic transcription is additionally used to drive a speech synchronization method for the physically based facial animation. Further high-level information from the linguistic analysis such as different types of accents or pauses as well as the type of the sentence is used to generate non-verbal speech-related facial expressions such as movement of head, eyes, and eyebrows or voluntary eye blinks. Moreover, emoticons are translated into XML markup that triggers emotional facial expressions.},
  ANNOTE    = {COLIURL : Albrecht:2002:FAT.pdf}
}