BibTeX

@inproceedings{Arora2022espnet,
  title={{ESPnet-SLU: Advancing Spoken Language Understanding through ESPnet}},
  author = {Arora, Siddhant and Dalmia, Siddharth and Denisov, Pavel and Chang, Xuankai and Ueda, Yushi and Peng, Yifan and Zhang, Yuekai and Kumar, Sujay and Ganesan, Karthik and Yan, Brian and Vu, Ngoc Thang and Black, Alan W and Watanabe, Shinji},
  booktitle={2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  organization={IEEE},
  year={2022}
}

@InProceedings{neumann:2019b,
  author = {Bao, Fang and Neumann, Michael and Vu, Ngoc Thang},
  title     = {CycleGAN-based Emotion Style Transfer as Data Augmentation for Speech Emotion Recognition},
  booktitle = {Proceedings of Interspeech, Graz},
  year      = {2019}
}

@Article{batliner22:ethical,
  author = 	 {Anton Batliner and Michael Neumann and Felix Burkhardt and Alice Baird and Sarina Meyer and Ngoc Thang Vu and Bj{\"o}rn W. Schuller},
  title = 	 {Ethical Awareness in Paralinguistics: A Taxonomy of Applications},
  journal = 	 {International Journal of Human-Computer Interaction},
  year = 	 2022,
  volume = 	 0,
  number = 	 0,
  pages = 	 {1-18},
  publisher = {Taylor \& Francis},
  doi =       {10.1080/10447318.2022.2140385},
  url =       {https://doi.org/10.1080/10447318.2022.2140385}
}

@InProceedings{blohm-etal-2018-comparing,
    title = "Comparing Attention-Based Convolutional and Recurrent Neural Networks: Success and Limitations in Machine Reading Comprehension",
  author = "Blohm, Matthias  and
      Jagfeld, Glorianna  and
      Sood, Ekta  and
      Yu, Xiang  and
      Vu, Ngoc Thang",
    booktitle = "Proceedings of the 22nd Conference on Computational Natural Language Learning",
    month = oct,
    year = "2018",
    address = "Brussels, Belgium",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/K18-1011",
    doi = "10.18653/v1/K18-1011",
    pages = "108-118",
    abstract = "We propose a machine reading comprehension model based on the compare-aggregate framework with two-staged attention that achieves state-of-the-art results on the MovieQA question answering dataset. To investigate the limitations of our model as well as the behavioral difference between convolutional and recurrent neural networks, we generate adversarial examples to confuse the model and compare to human performance. Furthermore, we assess the generalizability of our model by analyzing its differences to human inference, drawing upon insights from cognitive science.",
}

@InProceedings{ccetinouglu-schulz-vu:2016:W16-58,
  author = {\c{C}etino\u{g}lu, \"{O}zlem  and  Schulz, Sarah  and  Vu, Ngoc Thang},
  title     = {Challenges of Computational Processing of Code-Switching},
  booktitle = {Proceedings of the Second Workshop on Computational Approaches to Code Switching},
  month     = {November},
  year      = {2016},
  address   = {Austin, Texas},
  publisher = {Association for Computational Linguistics},
  pages     = {1-11},
  url       = {https://www.aclweb.org/anthology/W16-5801}
}

@inproceedings{Denisov2021ims,
  title={{IMS}{'} Systems for the {IWSLT} 2021 Low-Resource Speech Translation Task},
  author = {Denisov, Pavel and Mager, Manuel and Vu, Ngoc Thang},
  booktitle={Proceedings of the 18th International Conference on Spoken Language Translation (IWSLT 2021)},
  pages={175-181},
  year={2021}
}

@inproceedings{Denisov2019end,
  title={{End-to-End Multi-Speaker Speech Recognition Using Speaker Embeddings and Transfer Learning}},
  author = {Denisov, Pavel and Vu, Ngoc Thang},
  booktitle={Proceedings of Interspeech 2019},
  pages={4425-4429},
  year={2019}
}

@inproceedings{Denisov2019ims,
  title={{IMS-speech: A speech to text tool}},
  author = {Denisov, Pavel and Vu, Ngoc Thang},
  booktitle={Studientexte zur Sprachkommunikation: Elektronische Sprachsignalverarbeitung 2019},
  pages={170-177},
  year={2019},
  publisher={TUDpress, Dresden}
}

@inproceedings{Denisov2020pretrained,
  title={{Pretrained Semantic Speech Embeddings for End-to-End Spoken Language Understanding via Cross-Modal Teacher-Student Learning}},
  author = {Denisov, Pavel and Vu, Ngoc Thang},
  booktitle={Proceedings of Interspeech 2020},
  pages={881-885},
  year={2020}
}

@InProceedings{Denisov2018unsupervised,
  title={Unsupervised domain adaptation by adversarial learning for robust speech recognition},
  author = {Denisov, Pavel and Vu, Ngoc Thang and Font, Marc Ferras},
  booktitle={Speech Communication; 13th ITG-Symposium},
  year={2018},
}

@article{Hamed2022investigations,
  title={{Investigations on speech recognition systems for low-resource dialectal Arabic-English code-switching speech}},
  author = {Hamed, Injy and Denisov, Pavel and Li, Chia-Yu and Elmahdy, Mohamed and Abdennadher, Slim and Vu, Ngoc Thang},
  journal = {Computer Speech \& Language},
  volume = {72},
  year={2022},
  publisher={Elsevier}
}

@InProceedings{Jagfeld2017a,
  author = {Glorianna Jagfeld and Ngoc Thang Vu},
  title = {{Encoding Word Confusion Networks with Recurrent Neural Networks for Dialog State Tracking}},
  booktitle = {Proceedings of the Speech-Centric Natural Language Processing Workshop},
  year = {2017}
}

@InProceedings{Jenne2019a,
  author = {Sabrina Jenne and Antje Schweitzer and Sabine Zerbian and Ngoc Thang Vu},
  title = 	 {{Phonological Error Detection for Pronunciation Training Using Neural Spectrogram Recognition}},
  booktitle = {Proceedings of International Congress of Phonetic Sciences (ICPhS)},
  year = 	 {2019},
  address = 	 {Melbourne}}

@InProceedings{Jenne2019b,
  author = {Sabrina Jenne and Ngoc Thang Vu},
  title={{Multimodal Articulation-Based Pronunciation Error Detection with Spectrogram and Acoustic Features}},
  year={2019},
  booktitle={Proc. Interspeech 2019},
  pages={3549-3553},
  address ={Graz}
}

@inproceedings{Li2020adviser,
  title={{ADVISER: A Toolkit for Developing Multi-modal, Multi-domain and Socially-engaged Conversational Agents}},
  author = {Li, Chia-Yu and Ortega, Daniel and V{\"a}th, Dirk and Lux, Florian and Vanderlyn, Lindsey and Schmidt, Maximilian and Neumann, Michael and V{\"o}lkel, Moritz and Denisov, Pavel and Jenne, Sabrina and Kacarevic, Zorica and Vu, Ngoc Thang},
  booktitle={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: System Demonstrations},
  pages={279-286},
  publisher={Association for Computational Linguistics},
  year={2020}
}

@INPROCEEDINGS{10022897,
  author={Lux, Florian and Chen, Ching-Yi and Vu, Ngoc Thang},
  booktitle={2022 IEEE Spoken Language Technology Workshop (SLT)}, 
  title={Combining Contrastive and Non-Contrastive Losses for Fine-Tuning Pretrained Models in Speech Analysis}, 
  year={2023},
  pages={876-883},
  doi={10.1109/SLT54892.2023.10022897}}

@InProceedings{LuxBlizzard2023,
  author = {Florian Lux and Julia Koch and Sarina Meyer and Thomas Bott and Nadja Schauffler and Pavel Denisov and Antje Schweitzer and Ngoc Thang Vu},
  title = {The IMS Toucan system for the Blizzard Challenge 2023},
  booktitle = {Blizzard Challenge 2023},
  year = {2023}
}

@inproceedings{lux2022low,
  title={Low-Resource Multilingual and Zero-Shot Multispeaker TTS},
  author={Lux, Florian and Koch, Julia and Vu, Ngoc Thang},
  booktitle={Proceedings of the 2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing},
  pages={741-751},
  year={2022}
}

@INPROCEEDINGS{10022433,
  author={Lux, Florian and Koch, Julia and Vu, Ngoc Thang},
  booktitle={2022 IEEE Spoken Language Technology Workshop (SLT)}, 
  title={Exact Prosody Cloning in Zero-Shot Multispeaker Text-to-Speech}, 
  year={2023},
  pages={962-969},
  doi={10.1109/SLT54892.2023.10022433}}

@inproceedings{lux23_interspeech,
  author={Florian Lux and Pascal Tilli and Sarina Meyer and Ngoc Thang Vu},
  title={{Controllable Generation of Artificial Speaker Embeddings through Discovery of Principal Directions}},
  year=2023,
  booktitle={Proc. INTERSPEECH 2023},
  pages={4788-4792},
  doi={10.21437/Interspeech.2023-858}
}

@INPROCEEDINGS{9414298,
  author={Lux, Florian and Vu, Ngoc Thang},
  booktitle={IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)}, 
  title={Meta-Learning for Improving Rare Word Recognition in End-to-End ASR}, 
  year={2021},
  pages={5974-5978},
  doi={10.1109/ICASSP39728.2021.9414298}}

@InProceedings{meyer22:speaker,
  author = 	 {Sarina Meyer and Florian Lux and Pavel Denisov and Julia Koch and Pascal Tilli and Ngoc Thang Vu},
  title = 	 {Speaker Anonymization with Phonetic Intermediate Representations},
  booktitle = {Proc. Interspeech 2022},
  year = 	 2022,
  pages = 	 {4925-4929},
  address = 	 {Incheon, Korea},
  url =       {https://www.isca-speech.org/archive/interspeech_2022/meyer22b_interspeech}
}

@INPROCEEDINGS{10096607,
  author={Meyer, Sarina and Lux, Florian and Koch, Julia and Denisov, Pavel and Tilli, Pascal and Vu, Ngoc Thang},
  booktitle={ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)}, 
  title={Prosody Is Not Identity: A Speaker Anonymization Approach Using Prosody Cloning}, 
  year=2023,
  pages={1-5},
  doi={10.1109/ICASSP49357.2023.10096607}}

@InProceedings{meyer23:anonymizing,
  author = 	 {Sarina Meyer and Pascal Tilli and Pavel Denisov and Florian Lux and Julia Koch and Ngoc Thang Vu},
  title = 	 {Anonymizing Speech with Generative Adversarial Networks to Preserve Speaker Privacy},
  booktitle = {Proc. IEEE Spoken Language Technology Workshop (SLT) 2022},
  year = 	 2023,
  pages = 	 {912-919},
  address = 	 {Doha, Qatar},
  url =       {https://arxiv.org/abs/2210.07002}
}

@InProceedings{meyer22:cascade,
  author = 	 {Sarina Meyer and Pascal Tilli and Florian Lux and Pavel Denisov and Julia Koch and Ngoc Thang Vu},
  title = 	 {Cascade of Phonetic Speech Recognition, Speaker Embeddings GAN and Multispeaker Speech Synthesis for the VoicePrivacy 2022 Challenge},
  booktitle = {Proc. 2nd Symposium on Security and Privacy in Speech Communication},
  year = 	 2022,
  address = 	 {Incheon, Korea},
  url =       {https://www.isca-speech.org/archive/spsc_2022/meyer22_spsc}
}

@InProceedings{neumann:2017,
  author = {Neumann, Michael and Vu, Ngoc Thang},
  title     = {Attentive Convolutional Neural Network based Speech Emotion Recognition:
               {A} Study on the Impact of Input Features, Signal Length, and Acted
               Speech},
  booktitle = {Proceedings of Interspeech 2017, Stockholm},
  year      = {2017}
}

@InProceedings{neumann:2018,
  author = {Neumann, Michael and Vu, Ngoc Thang},
  title     = {Cross-lingual and multilingual speech emotion recognition on English and French},
  booktitle = {Proceedings of the International Conference on Acoustics, Speech and Signal Processing (ICASSP), Calgary},
  year      = {2018}
}

@InProceedings{neumann:2019a,
  author = {Neumann, Michael and Vu, Ngoc Thang},
  title     = {Improving Speech Emotion Recognition with Unsupervised Representation Learning on Unlabeled Speech},
  booktitle = {Proceedings of the International Conference on Acoustics, Speech and Signal Processing (ICASSP), Brighton},
  year      = {2019}
}

@InProceedings{NguyenEtAl:17b,
  author = {Nguyen, Kim Anh  and  Köper, Maximilian  and  {Schulte im Walde}, Sabine  and  Vu, Ngoc Thang},
  title     = {{Hierarchical Embeddings for Hypernymy Detection and Directionality}},
  booktitle = {Proceedings of the Conference on Empirical Methods in Natural Language Processing},
  year      = {2017},
  address   = {Copenhagen, Denmark},
  pages     = {233-243},
}

@InProceedings{NguyenEtAl:17,
  author = {Nguyen, Kim Anh  and  {Schulte im Walde}, Sabine  and  Vu, Ngoc Thang},
  title     = {{Distinguishing Antonyms and Synonyms in a Pattern-based Neural Network}},
  booktitle = {Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics},
  year      = {2017},
  address   = {Valencia, Spain},
  pages     = {76-85},
}

@InProceedings{NguyenEtAl:18,
  author = {Nguyen, Kim Anh  and  {Schulte im Walde}, Sabine  and  Vu, Ngoc Thang},
  title     = {{Introducing Two Vietnamese Datasets for Evaluating Semantic Models of (Dis-)Similarity and Relatedness}},
  booktitle = {Proceedings of the 16th Annual Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies},
  year      = {2018},
  address   = {New Orleans, LA, USA},
  pages     = {199-205},
  url       = {https://www.aclweb.org/anthology/N18-2032},
}

@InProceedings{ortega2019context,
  title={Context-aware neural-based dialog act classification on automatically generated transcriptions},
  author={Ortega, Daniel and Li, Chia-Yu and Vallejo, Gisela and Denisov, Pavel and Vu, Ngoc Thang},
  booktitle={2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  pages={7265-7269},
  year={2019},
  organization={IEEE}
}

@InProceedings{papay18:_addres_low_resour_scenar_charac_embed,
  author = {Sean Papay and Sebastian Padó and {Ngoc Thang} Vu},
  title =        {Addressing Low-Resource Scenarios with Character-aware Embeddings},
  booktitle = {Proceedings of the NAACL Workshop on Subword and Character Level Models},
  url= {https://aclweb.org/anthology/W18-1204.pdf},
  abstract = {Most modern approaches to computing word embeddings assume the availability of text corpora with billions of words. In this paper, we explore a setup where only corpora with millions of words are available, and many words in any new text are out of vocabulary. This setup is both of practical interest – modeling the situation for specific domains and low-resource languages – and of psycholinguistic interest, since it corresponds much more closely to the actual experiences and challenges of human language learning and use. We evaluate skip-gram word embeddings and two types of character-based embeddings on word relatedness prediction. On large corpora, performance of both model types is equal for frequent words, but character awareness already helps for infrequent words. Consistently, on small corpora, the character-based models perform overall better than skip-grams. The concatenation of different embeddings performs best on small corpora and robustly on large corpora.},
  keywords =  {workshop myown},
  year =         2018,
  address =      {New Orleans, LA}}

@InProceedings{roesiger/etal:2017,
  author = {Rösiger, Ina and Stehwien, Sabrina and Riester, Arndt and Vu, Ngoc Thang},
  title = 	 {Improving Coreference Resolution with Automatically Predicted Prosodic Information},
  booktitle = {{Proceedings of the First Workshop on Speech-Centric Natural Language Processing}},
  year = 	 2017,
  pages = 	 {78-83},
  address = 	 {Copenhagen},
  publisher = {Association for Computational Linguistics},
  url = {https://www.aclweb.org/anthology/W17-4610}
}

@InProceedings{Schweitzer/Vu:2016,
  author = {Antje Schweitzer and Ngoc Thang Vu},
  title = {Cross-Gender and Cross-Dialect Tone Recognition for Vietnamese},
  year = {2016},
  booktitle = {Interspeech 2016},
  doi = {10.21437/Interspeech.2016-405},
  url = {https://dx.doi.org/10.21437/Interspeech.2016-405},
  pages = {1064-1068}
}

@article{Stehwien/Schweitzer/Vu:2020,
  author = {Sabrina Stehwien and Antje Schweitzer and Ngoc Thang Vu},
  title = {Acoustic and temporal representations in convolutional neural network models of prosodic events},
  journal = {Speech Communication},
  volume = {125},
  pages = {128-141},
  year = {2020},
  doi = {10.1016/j.specom.2020.10.005}
}

@InProceedings{stehwien-vu:2016,
  author = {Sabrina Stehwien and Ngoc Thang Vu},
  title = 	 {Exploring the Correlation of Pitch Accents and Semantic Slots for Spoken Language Understanding},
  booktitle = {Proceedings of Interspeech},
  year = 	 2016,
  pages = 	 {730-735},
  address = 	 {San Francisco, USA}}

@InProceedings{stehwien-vu:2017-1,
  author = {Sabrina Stehwien and Ngoc Thang Vu},
  title = 	 {First step towards enhancing word embeddings with pitch accent features for DNN-based slot filling on recognized text},
  booktitle = {Tagungsband der 28. Konferenz Elektronische Sprachsignalverarbeitung (ESSV 2017)},
  year = 	 2017,
  pages = 	 {194-201},
  address = 	 {Saarbr{\"u}cken, Germany}}

@InProceedings{stehwien-vu:2017-2,
  author = {Sabrina Stehwien and Ngoc Thang Vu},
  title = 	 { Prosodic Event Recognition using Convolutional Neural Networks with Context information},
  booktitle = {Proceedings of Interspeech},
  year = 	 2017,
  address = 	 {Stockholm, Sweden}}

@InProceedings{Stehwien/Vu/Schweitzer:2018,
  title = {Effects of Word Embeddings on Neural Network-based Pitch Accent Detection},
  author = {Sabrina Stehwien and Ngoc Thang Vu and Antje Schweitzer},
  booktitle = {Proceedings of Speech Prosody 2018, Pozna{\'n}},
  year = {2018}
  }

@InProceedings{StiefelVu:2017,
  author = {Moritz Stiefel and Ngoc Thang Vu},
  title     = {Enriching {ASR} Lattices with {POS} Tags for Dependency Parsing},
  booktitle = {Proceedings of the Speech-Centric Natural Language Processing Workshop ({SCNLP})},
  year      = {2017}
}

@InProceedings{yu-etal-2019-imsurreal,
    title = {{{IMS}ur{R}eal: {IMS} at the Surface Realization Shared Task 2019}},
  author = "Yu, Xiang  and
      Falenska, Agnieszka  and
      Haid, Marina  and
      Vu, Ngoc Thang  and
      Kuhn, Jonas",
    booktitle = "Proceedings of the 2nd Workshop on Multilingual Surface Realisation (MSR 2019)",
    month = nov,
    year = "2019",
    address = "Hong Kong, China",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/D19-6306",
    doi = "10.18653/v1/D19-6306",
    pages = "50-58",
}

@InProceedings{yu-falenska-vu:2017:SCLeM,
  author = {Yu, Xiang  and  Falenska, Agnieszka  and  Vu, Ngoc Thang},
  title     = {A General-Purpose Tagger with Convolutional Neural Networks},
  booktitle = {Proceedings of the First Workshop on Subword and Character Level Models in NLP},
  month     = {September},
  year      = 2017,
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {124-129},
  abstract  = {We present a general-purpose tagger based on convolutional neural networks
	(CNN), used for both composing word vectors and encoding context information.
	The CNN tagger is robust across different tagging tasks: without task-specific
	tuning of hyper-parameters, it achieves state-of-the-art results in
	part-of-speech tagging, morphological tagging and supertagging. The CNN tagger
	is also robust against the out-of-vocabulary problem; it performs well on
	artificially unnormalized texts.},
  url       = {https://www.aclweb.org/anthology/W17-4118}
}

@InProceedings{yu-etal-2019-head,
    title = {{Head-First Linearization with Tree-Structured Representation}},
  author = "Yu, Xiang  and
      Falenska, Agnieszka  and
      Vu, Ngoc Thang  and
      Kuhn, Jonas",
    booktitle = "Proceedings of the 12th International Conference on Natural Language Generation",
    month = oct # " - " # nov,
    year = "2019",
    address = "Tokyo, Japan",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W19-8636",
    pages = "279-289",
}

@InProceedings{yu-etal-2020-fast,
    title = "Fast and Accurate Non-Projective Dependency Tree Linearization",
  author = "Yu, Xiang  and
      Tannert, Simon  and
      Vu, Ngoc Thang  and
      Kuhn, Jonas",
    booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.acl-main.134",
    doi = "10.18653/v1/2020.acl-main.134",
    pages = "1451-1462",
    abstract = "We propose a graph-based method to tackle the dependency tree linearization task. We formulate the task as a Traveling Salesman Problem (TSP), and use a biaffine attention model to calculate the edge costs. We facilitate the decoding by solving the TSP for each subtree and combining the solution into a projective tree. We then design a transition system as post-processing, inspired by non-projective transition-based parsing, to obtain non-projective sentences. Our proposed method outperforms the state-of-the-art linearizer while being 10 times faster in training and decoding.",
}

@InProceedings{yu-vu:2017:ACL,
  title={Character Composition Model with Convolutional Neural Networks for Dependency Parsing on Morphologically Rich Languages},
  author = {Yu, Xiang and Vu, Ngoc Thang},
  booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
  volume={2},
  pages={672-678},
  year={2017}
}

@InProceedings{yu-etal-2018-approximate,
    title = "Approximate Dynamic Oracle for Dependency Parsing with Reinforcement Learning",
  author = "Yu, Xiang  and
      Vu, Ngoc Thang  and
      Kuhn, Jonas",
    booktitle = "Proceedings of the Second Workshop on Universal Dependencies ({UDW} 2018)",
    month = nov,
    year = "2018",
    address = "Brussels, Belgium",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6021",
    doi = "10.18653/v1/W18-6021",
    pages = "183-191",
    abstract = "We present a general approach with reinforcement learning (RL) to approximate dynamic oracles for transition systems where exact dynamic oracles are difficult to derive. We treat oracle parsing as a reinforcement learning problem, design the reward function inspired by the classical dynamic oracle, and use Deep Q-Learning (DQN) techniques to train the oracle with gold trees as features. The combination of a priori knowledge and data-driven methods enables an efficient dynamic oracle, which improves the parser performance over static oracles in several transition systems.",
}

@InProceedings{yu-etal-2019-learning,
    title = "Learning the {D}yck Language with Attention-based {S}eq2{S}eq Models",
  author = "Yu, Xiang  and
      Vu, Ngoc Thang  and
      Kuhn, Jonas",
    booktitle = "Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP",
    month = aug,
    year = "2019",
    address = "Florence, Italy",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W19-4815",
    doi = "10.18653/v1/W19-4815",
    pages = "138-146",
}

@InProceedings{yu-etal-2020-ensemble,
    title = "Ensemble Self-Training for Low-Resource Languages: Grapheme-to-Phoneme Conversion and Morphological Inflection",
  author = "Yu, Xiang  and
      Vu, Ngoc Thang  and
      Kuhn, Jonas",
    booktitle = "Proceedings of the 17th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology",
    month = jul,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.sigmorphon-1.5",
    doi = "10.18653/v1/2020.sigmorphon-1.5",
    pages = "70-78",
    abstract = "We present an iterative data augmentation framework, which trains and searches for an optimal ensemble and simultaneously annotates new training data in a self-training style. We apply this framework on two SIGMORPHON 2020 shared tasks: grapheme-to-phoneme conversion and morphological inflection. With very simple base models in the ensemble, we rank the first and the fourth in these two tasks. We show in the analysis that our system works especially well on low-resource languages.",
}