@incollection{323ecbe06f44436897c5b6740a13d26a,
  title     = {Neurosurgery, Explainable {AI}, and Legal Liability},
  abstract  = {One of the challenges of AI technologies is its ``black box'' nature, or the lack of explainability and interpretability of these technologies. This chapter explores whether AI systems in healthcare generally, and in neurosurgery specifically, should be explainable, for what purposes, and whether the current XAI (``explainable AI'') approaches and techniques are able to achieve these purposes. The chapter concludes that XAI techniques, at least currently, are not the only and not necessarily the best way to achieve trust in AI and ensure patient autonomy or improved clinical decision, and they are of limited significance in determining liability. Instead, we argue, we need more transparency around AI systems, their training and validation, as this information is likely to better achieve these goals.},
  keywords  = {AI, Explainability, Law, Neurosurgery},
  author    = {Matulionyte, Rita and {Suero Molina}, Eric and {Di Ieva}, Antonio},
  year      = {2024},
  doi       = {10.1007/978-3-031-64892-2_34},
  language  = {English},
  isbn      = {9783031648915},
  series    = {Advances in Experimental Medicine and Biology},
  publisher = {Springer},
  pages     = {543--553},
  editor    = {{Di Ieva}, Antonio and {Suero Molina}, Eric and Liu, Sidong and Russo, Carlo},
  booktitle = {Computational Neurosurgery},
}