BibTeX

@inproceedings{nikolaev23:_inves_trans,
  abstract = { The question of what kinds of linguistic information are encoded in different layers
    of Transformer-based language models is of considerable interest for the NLP community.
    Existing work, however, has overwhelmingly focused on word-level representations and
    encoder-only language models with the masked-token training objective.
    In this paper, we present experiments with semantic structural probing,
    a method for studying sentence-level representations
    by finding a subspace of the embedding space that provides
    suitable task-specific pairwise distances between data points.
    We apply our method to language models from different families (encoder-only, decoder-only,
    encoder-decoder) and of different sizes in the context of two tasks, semantic textual similarity
    and natural-language inference. We find that model families differ substantially in their
    performance and layer dynamics, but that the results are largely model-size invariant.},
  address = {Singapore},
  author = {Nikolaev, Dmitry and Pad{\'o}, Sebastian},
  biburl = {https://puma.ub.uni-stuttgart.de/bibtex/2412d5acd02f1f1261544a2208b8b24eb/sp},
  booktitle = {Proceedings of the BlackboxNLP Workshop},
  keywords = {myown workshop},
  note = {To appear},
  title = {Investigating semantic subspaces of Transformer sentence embeddings through linear structural probing},
  url = {https://arxiv.org/abs/2310.11923},
  year = 2023
}
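
The abstract describes structural probing as learning a linear map into a subspace in which pairwise distances between sentence embeddings track task-specific distances. Below is a minimal, self-contained sketch of that general idea, not the authors' implementation: the embedding dimension, probe rank, random stand-in data, and squared-error objective are all illustrative assumptions.

# Sketch of a linear structural probe: learn a projection B so that distances
# between projected sentence embeddings approximate gold task distances
# (e.g., 1 - STS similarity). Data and sizes below are synthetic placeholders.
import torch

torch.manual_seed(0)

n_pairs, dim, rank = 256, 768, 64          # assumed sizes, for illustration only
emb_a = torch.randn(n_pairs, dim)          # stand-ins for sentence embeddings
emb_b = torch.randn(n_pairs, dim)
gold_dist = torch.rand(n_pairs)            # stand-in for task distances in [0, 1]

B = torch.nn.Parameter(torch.randn(rank, dim) * 0.01)  # the linear probe
opt = torch.optim.Adam([B], lr=1e-3)

for step in range(200):
    opt.zero_grad()
    # distance between each sentence pair measured in the probed subspace
    diff = (emb_a - emb_b) @ B.T
    pred_dist = diff.norm(dim=-1)
    loss = torch.nn.functional.mse_loss(pred_dist, gold_dist)
    loss.backward()
    opt.step()

print(f"final probe loss: {loss.item():.4f}")

With real data, emb_a and emb_b would come from a chosen layer of a language model and gold_dist from an annotated dataset such as an STS benchmark; the probe's performance per layer is then what the paper compares across model families and sizes.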