BibTeX

@inproceedings{RiedlElsafty18,
  author    = {Elsafty, Ahmed and Riedl, Martin and Biemann, Chris},
  title     = {{Document-based Recommender System for Job Postings using Dense Representations}},
  booktitle = {Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies},
  year      = {2018},
  address   = {New Orleans, Louisiana},
}

@inproceedings{riedl19:_clust_based_artic_ident_histor_newsp,
  author    = {Riedl, Martin and Betz, Daniela and Pad{\'o}, Sebastian},
  title     = {Clustering-Based Article Identification in Historical Newspapers},
  booktitle = {Proceedings of the NAACL LaTeCH-CLfL Workshop},
  year      = {2019},
  address   = {Minneapolis, MN},
  url       = {https://aclweb.org/anthology/papers/W/W19/W19-2502/},
  keywords  = {workshop myown},
}

@article{riedl18:tokenization,
  author  = {Riedl, Martin and Biemann, Chris},
  title   = {Using Semantics for Granularities of Tokenization},
  journal = {Computational Linguistics},
  year    = {2018},
  volume  = {44},
  number  = {3},
}

@inproceedings{riedl-pado-2018-named,
  author    = {Riedl, Martin and Pad{\'o}, Sebastian},
  title     = {A Named Entity Recognition Shootout for {G}erman},
  booktitle = {Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
  month     = jul,
  year      = {2018},
  address   = {Melbourne, Australia},
  publisher = {Association for Computational Linguistics},
  url       = {https://www.aclweb.org/anthology/P18-2020},
  pages     = {120--125},
    abstract = "We ask how to practically build a model for German named entity recognition (NER) that performs at the state of the art for both contemporary and historical texts, i.e., a big-data and a small-data scenario. The two best-performing model families are pitted against each other (linear-chain CRFs and BiLSTM) to observe the trade-off between expressiveness and data requirements. BiLSTM outperforms the CRF when large datasets are available and performs inferior for the smallest dataset. BiLSTMs profit substantially from transfer learning, which enables them to be trained on multiple corpora, resulting in a new state-of-the-art model for German NER on two contemporary German corpora (CoNLL 2003 and GermEval 2014) and two historic corpora.",
}