BibTeX
@InProceedings{dayanik20:_maskin_actor_infor_leads_fairer,
abstract = {A central concern in Computational Social Sciences (CSS) is fairness: where the role of NLP is to scale up text analysis to large corpora, the quality of automatic analyses should be as independent as possible of textual properties. We analyze the performance of a state-of-the-art neural model on the task of political claims detection (i.e., the identification of forward-looking statements made by political actors) and identify a strong frequency bias: claims made by frequent actors are recognized better. We propose two simple debiasing methods which mask proper names and pronouns during training of the model, thus removing personal information bias. We find that (a) these methods significantly decrease frequency bias while keeping the overall performance stable; and (b) the resulting models improve when evaluated in an out-of-domain setting.},
address = {Online},
author = {Dayanik, Erenay and Padó, Sebastian},
booktitle = {Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
pages = {4385--4391},
title = {Masking Actor Information Leads to Fairer Political Claims Detection},
url = {https://www.aclweb.org/anthology/2020.acl-main.404/},
year = 2020
}
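The debiasing idea summarized in the abstract, masking proper names and pronouns in the training data, can be sketched roughly as follows. This is a minimal illustration only, assuming spaCy for part-of-speech and entity tagging; the model name, mask tokens, and function name are assumptions for the sketch, not taken from the paper.

import spacy

# Sketch of actor-information masking: replace proper names and pronouns
# with placeholder tokens before training a claims-detection model.
# "en_core_web_sm", "[NAME]", and "[PRONOUN]" are illustrative choices.
nlp = spacy.load("en_core_web_sm")

def mask_actor_information(text: str) -> str:
    doc = nlp(text)
    tokens = []
    for tok in doc:
        if tok.ent_type_ == "PERSON" or tok.pos_ == "PROPN":
            tokens.append("[NAME]")      # mask proper names
        elif tok.pos_ == "PRON":
            tokens.append("[PRONOUN]")   # mask pronouns
        else:
            tokens.append(tok.text)
    return " ".join(tokens)

print(mask_actor_information("Angela Merkel said she will raise the minimum wage."))
# -> "[NAME] [NAME] said [PRONOUN] will raise the minimum wage ."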