@inproceedings{cf579920110b48c0bf952471ebb66867,
title = "Considerations for applying logical reasoning to explain neural network outputs",
abstract = "We discuss the impact of presenting explanations to people for Artificial Intelligence (AI) decisions powered by Neural Networks, according to three types of logical reasoning (inductive, deductive, and abductive). We start from examples in the existing literature on explaining artificial neural networks. We see that abductive reasoning is (unintentionally) the most commonly used as default in user testing for comparing the quality of explanation techniques. We discuss whether this may be because this reasoning type balances the technical challenges of generating the explanations, and the effectiveness of the explanations. Also, by illustrating how the original (abductive) explanation can be converted into the remaining two reasoning types we are able to identify considerations needed to support these kinds of transformations.",
keywords = "Explainable User Interfaces, Reasoning, XAI",
author = "Cau, {Federico Maria} and Spano, {Lucio Davide} and Nava Tintarev",
note = "Publisher Copyright: {\textcopyright} 2020 CEUR-WS. All rights reserved.; 2020 Italian Workshop on Explainable Artificial Intelligence, XAI.it 2020 ; Conference date: 25-11-2020 Through 26-11-2020",
year = "2020",
month = jan,
day = "1",
language = "English",
volume = "2742",
series = "CEUR Workshop Proceedings",
publisher = "Rheinisch-Westfaelische Technische Hochschule Aachen * Lehrstuhl Informatik V",
pages = "96--103",
booktitle = "Proceedings of the Italian Workshop on Explainable Artificial Intelligence",
}