BibTeX

@inproceedings{Khlyzova2022,
    title = "On the Complementarity of Images and Text for the Expression of Emotions in Social Media",
    author = "Khlyzova, Anna  and
      Silberer, Carina  and
      Klinger, Roman",
    booktitle = "Proceedings of the 12th Workshop on Computational Approaches to Subjectivity, Sentiment {&} Social Media Analysis",
    month = may,
    year = "2022",
    address = "Dublin, Ireland",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.wassa-1.1",
    pages = "1-15",
}

@inproceedings{shen23:_vgsi,
    title = "Combining Tradition with Modernness: Exploring Event Representations in Vision-and-Language Models for Visual Goal-Step Inference",
    author = "Shen, Chong  and
      Silberer, Carina",
    editor = "Padmakumar, Vishakh  and
      Vallejo, Gisela  and
      Fu, Yao",
    booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 4: Student Research Workshop)",
    month = jul,
    year = "2023",
    address = "Toronto, Canada",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.acl-srw.36",
    doi = "10.18653/v1/2023.acl-srw.36",
    pages = "254--265",
	abstract = "Procedural knowledge understanding (PKU) underlies the ability to infer goal-step relations. The task of Visual Goal{--}Step Inference addresses this ability in the multimodal domain. It requires to identify images that represent the steps towards achieving a textually expressed goal. The best existing methods encode texts and images either with independent encoders, or with object-level multimodal encoders using blackbox transformers. This stands in contrast to early, linguistically inspired methods for event representations, which focus on capturing the most crucial information, namely actions and the participants, to learn stereotypical event sequences and hence procedural knowledge. In this work, we study various methods and their effects on PKU of injecting the early shallow event representations to nowadays multimodal deep learning-based models. We find that the early, linguistically inspired methods for representing event knowledge does contribute to understand procedures in combination with modern vision-and-language models. In the future, we are going to explore more complex structure of events and study how to exploit it on top of large language models.",
}
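
To cite these papers, the entries above can be saved to a bibliography file and referenced by their keys. Below is a minimal LaTeX sketch, assuming the entries are stored in a file named references.bib (the filename and surrounding document are illustrative; the citation keys are taken from the entries above).

% minimal sketch; assumes the entries above are saved as references.bib
\documentclass{article}
\begin{document}
Emotions in multimodal social media posts are studied by \cite{Khlyzova2022},
and event representations for visual goal-step inference by \cite{shen23:_vgsi}.
\bibliographystyle{plain}  % any BibTeX style works here
\bibliography{references}  % refers to references.bib
\end{document}

Note that the key shen23:_vgsi contains a colon and an underscore; both are legal in BibTeX keys and work with the standard \cite command, though some downstream tools are stricter, so renaming the key may be preferable in larger projects.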