@comment{BibTeX entry exported from PUMA}
@inproceedings{nikolaev23:_universe,
abstract = {It has been argued that BERT ``rediscovers the traditional NLP
pipeline'', with lower layers extracting morphosyntactic features and
higher layers creating holistic sentence-level representations.
In this paper, we critically examine this assumption through a
principal-component-guided analysis, extracting sets of inputs that
correspond to specific activation patterns in BERT sentence representations.
We find that even in higher layers, the model mostly picks up on a
variegated bunch of low-level features, many related to sentence
complexity, that presumably arise from its specific pre-training
objectives.},
added-at = {2023-04-26T18:18:19.000+0200},
address = {Nancy, France},
author = {Nikolaev, Dmitry and Pad{\'o}, Sebastian},
biburl = {https://puma.ub.uni-stuttgart.de/bibtex/2995882df8792875cb3a67bbea378a90a/sp},
booktitle = {Proceedings of IWCS},
interhash = {9dabc394be5ffe8d8b9818192791f99a},
intrahash = {995882df8792875cb3a67bbea378a90a},
keywords = {conference myown},
timestamp = {2023-07-04T19:36:51.000+0200},
title = {The Universe of Utterances According to {BERT}},
url = {https://iwcs.pimoid.fr/60.pdf},
year = 2023
}