@inproceedings{d3f7bd8d888345548e4422a924238e5e,
  title     = {Cross-Modal Perceptual Learning in Learning a Tonal Language},
  author    = {Wang, Xin and Li, Luan and McMurray, Bob},
  editor    = {Culbertson, Jennifer and Perfors, Andrew and Rabagliati, Hugh and Ramenzoni, Veronica},
  booktitle = {Proceedings of the 44th Annual Meeting of the Cognitive Science Society ({CogSci} 2022)},
  series    = {Proceedings of the Annual Meeting of the Cognitive Science Society},
  publisher = {Cognitive Science Society},
  pages     = {934--939},
  year      = {2022},
  language  = {English},
  keywords  = {lexical tones, cross-modal perception, second language acquisition, cross-modal learning, L2 tone learning},
  abstract  = {Limited evidence shows that visual input can facilitate learning novel sound-to-meaning mappings that are crucial to learning a second language. However, the mechanisms by which visual information influences auditory learning are still unclear. Here, we investigate to what extent visual input can lead to effective learning in another domain. We trained atonal speakers with Mandarin tones in 4 conditions: Auditory Only (AO) where only auditory tones were given as input; Animated Contour (AC) where moving visual pitch contours indicating the dynamic changes of tones were given in addition to auditory tones; Static Contour (SC) where static visual pitch contours were given in addition to auditory tones; Incongruent Contour (IC) where mismatched pitch contours were given in addition to auditory tones. The results show the advantage of AC and SC over AO in learning tonal categories and that IC inhibits learning, suggesting that extracting {\textquoteleft}compatible{\textquoteright} properties cross modalities benefits learning most.},
  note      = {Copyright the Author(s) 2022. Version archived for private and non-commercial use with the permission of the author/s and according to publisher conditions. For further rights please contact the publisher.; Annual Meeting of the Cognitive Science Society (44th : 2022), CogSci 2022 ; Conference date: 27-07-2022 Through 30-07-2022},
}