@inproceedings{kovatchev-etal-2022-longhorns,
    title = "longhorns at {DADC} 2022: How many linguists does it take to fool a Question Answering model? A systematic approach to adversarial attacks.",
    author = "Kovatchev, Venelin and
      Chatterjee, Trina and
      Govindarajan, Venkata S and
      Chen, Jifan and
      Choi, Eunsol and
      Chronis, Gabriella and
      Das, Anubrata and
      Erk, Katrin and
      Lease, Matthew and
      Li, Junyi Jessy and
      Wu, Yating and
      Mahowald, Kyle",
    booktitle = "Proceedings of the First Workshop on Dynamic Adversarial Data Collection",
    month = jul,
    year = "2022",
    address = "Seattle, WA",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.dadc-1.5",
    doi = "10.18653/v1/2022.dadc-1.5",
    pages = "41--52",
    abstract = "Developing methods to adversarially challenge NLP systems is a promising avenue for improving both model performance and interpretability. Here, we describe the approach of the team {``}longhorns{''} on Task 1 of the First Workshop on Dynamic Adversarial Data Collection (DADC), which asked teams to manually fool a model on an Extractive Question Answering task. Our team finished first (pending validation), with a model error rate of 62{\%}. We advocate for a systematic, linguistically informed approach to formulating adversarial questions, and we describe the results of our pilot experiments, as well as our official submission.",
}