BibTeX format

@inproceedings{gaskell2022logically,
  author    = {Gaskell, A and Miao, Y and Toni, F and Specia, L},
  title     = {Logically consistent adversarial attacks for soft theorem provers},
  publisher = {International Joint Conferences on Artificial Intelligence},
  year      = {2022},
}

RIS format (EndNote, RefMan)

AB - Recent efforts within the AI community have yielded impressive results towards “soft theorem proving” over natural language sentences using language models. We propose a novel, generative adversarial framework for probing and improving these models’ reasoning capabilities. Adversarial attacks in this domain suffer from the logical inconsistency problem, whereby perturbations to the input may alter the label. Our Logically consistent AdVersarial Attacker, LAVA, addresses this by combining a structured generative process with a symbolic solver, guaranteeing logical consistency. Our framework successfully generates adversarial attacks and identifies global weaknesses common across multiple target models. Our analyses reveal naive heuristics and vulnerabilities in these models’ reasoning capabilities, exposing an incomplete grasp of logical deduction under logic programs. Finally, in addition to effective probing of these models, we show that training on the generated samples improves the target model’s performance.
AU - Gaskell, A
AU - Miao, Y
AU - Toni, F
AU - Specia, L
PB - International Joint Conferences on Artificial Intelligence
PY - 2022///
TI - Logically consistent adversarial attacks for soft theorem provers
UR -
ER -