@inbook{ea10e9e848154314a0990c0a93e00843,
title = "Conclusions",
abstract = "Past works on reasoning about inconsistency in AI have suffered from multiple flaws: (i) they apply to one logic at a time and are often invented for one logic after another. (ii) They assume that the AI researcher will legislate how applications resolve inconsistency even though the AI researcher may often know nothing about a specific application which may be built in a completely different time frame and geography than the AI researcher{\textquoteright}s work – in the real world, users are often stuck with the consequences of their decisions and would often like to decide what they want to do with their data (including what data to consider and what not to consider when there are inconsistencies). An AI system for reasoning about inconsistent information must support the user in his/her needs rather than forcing something down their throats. (iii) Most existing frameworks use some form or the other of maximal consistent subsets.",
author = "Martinez, {Maria Vanina} and Cristian Molinaro and Subrahmanian, {V. S.} and Leila Amgoud",
note = "Publisher Copyright: {\textcopyright} 2013, The Author(s).",
year = "2013",
doi = "10.1007/978-1-4614-6750-2_6",
language = "English (US)",
series = "SpringerBriefs in Computer Science",
publisher = "Springer",
number = "9781461467496",
pages = "41--42",
booktitle = "SpringerBriefs in Computer Science",
edition = "9781461467496",
}