On 2025-09-06 at 19:19:01 +0200, Andrey Konovalov wrote: >On Mon, Aug 25, 2025 at 10:30 PM Maciej Wieczor-Retman ><maciej.wieczor-retman@xxxxxxxxx> wrote: >> >> Inline KASAN on x86 does tag mismatch reports by passing the faulty >> address and metadata through the INT3 instruction - scheme that's setup >> in the LLVM's compiler code (specifically HWAddressSanitizer.cpp). >> >> Add a kasan hook to the INT3 handling function. >> >> Disable KASAN in an INT3 core kernel selftest function since it can raise >> a false tag mismatch report and potentially panic the kernel. >> >> Make part of that hook - which decides whether to die or recover from a >> tag mismatch - arch independent to avoid duplicating a long comment on >> both x86 and arm64 architectures. >> >> Signed-off-by: Maciej Wieczor-Retman <maciej.wieczor-retman@xxxxxxxxx> >> --- >> Changelog v5: >> - Add die to argument list of kasan_inline_recover() in >> arch/arm64/kernel/traps.c. >> >> Changelog v4: >> - Make kasan_handler() a stub in a header file. Remove #ifdef from >> traps.c. >> - Consolidate the "recover" comment into one place. >> - Make small changes to the patch message. 
>> >> MAINTAINERS | 2 +- >> arch/x86/include/asm/kasan.h | 26 ++++++++++++++++++++++++++ >> arch/x86/kernel/alternative.c | 4 +++- >> arch/x86/kernel/traps.c | 4 ++++ >> arch/x86/mm/Makefile | 2 ++ >> arch/x86/mm/kasan_inline.c | 23 +++++++++++++++++++++++ >> include/linux/kasan.h | 24 ++++++++++++++++++++++++ >> 7 files changed, 83 insertions(+), 2 deletions(-) >> create mode 100644 arch/x86/mm/kasan_inline.c >> >> diff --git a/MAINTAINERS b/MAINTAINERS >> index 788532771832..f5b1ce242002 100644 >> --- a/MAINTAINERS >> +++ b/MAINTAINERS >> @@ -13177,7 +13177,7 @@ S: Maintained >> B: https://bugzilla.kernel.org/buglist.cgi?component=Sanitizers&product=Memory%20Management >> F: Documentation/dev-tools/kasan.rst >> F: arch/*/include/asm/*kasan*.h >> -F: arch/*/mm/kasan_init* >> +F: arch/*/mm/kasan_* >> F: include/linux/kasan*.h >> F: lib/Kconfig.kasan >> F: mm/kasan/ >> diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h >> index 1963eb2fcff3..5bf38bb836e1 100644 >> --- a/arch/x86/include/asm/kasan.h >> +++ b/arch/x86/include/asm/kasan.h >> @@ -6,7 +6,28 @@ >> #include <linux/kasan-tags.h> >> #include <linux/types.h> >> #define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL) >> +#ifdef CONFIG_KASAN_SW_TAGS >> + >> +/* >> + * LLVM ABI for reporting tag mismatches in inline KASAN mode. >> + * On x86 the INT3 instruction is used to carry metadata in RAX >> + * to the KASAN report. >> + * >> + * SIZE refers to how many bytes the faulty memory access >> + * requested. >> + * WRITE bit, when set, indicates the access was a write, otherwise >> + * it was a read. >> + * RECOVER bit, when set, should allow the kernel to carry on after >> + * a tag mismatch. Otherwise die() is called. 
>> + */ >> +#define KASAN_RAX_RECOVER 0x20 >> +#define KASAN_RAX_WRITE 0x10 >> +#define KASAN_RAX_SIZE_MASK 0x0f >> +#define KASAN_RAX_SIZE(rax) (1 << ((rax) & KASAN_RAX_SIZE_MASK)) >> + >> +#else >> #define KASAN_SHADOW_SCALE_SHIFT 3 > >Putting this under else in this patch looks odd, we can move this part >to "x86: Make software tag-based kasan available". Sure, will do! > >> +#endif >> >> /* >> * Compiler uses shadow offset assuming that addresses start >> @@ -35,10 +56,15 @@ >> #define __tag_shifted(tag) FIELD_PREP(GENMASK_ULL(60, 57), tag) >> #define __tag_reset(addr) (sign_extend64((u64)(addr), 56)) >> #define __tag_get(addr) ((u8)FIELD_GET(GENMASK_ULL(60, 57), (u64)addr)) >> +bool kasan_inline_handler(struct pt_regs *regs); >> #else >> #define __tag_shifted(tag) 0UL >> #define __tag_reset(addr) (addr) >> #define __tag_get(addr) 0 >> +static inline bool kasan_inline_handler(struct pt_regs *regs) >> +{ >> + return false; >> +} >> #endif /* CONFIG_KASAN_SW_TAGS */ >> >> static inline void *__tag_set(const void *__addr, u8 tag) >> diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c >> index 2a330566e62b..4cb085daad31 100644 >> --- a/arch/x86/kernel/alternative.c >> +++ b/arch/x86/kernel/alternative.c >> @@ -2228,7 +2228,7 @@ int3_exception_notify(struct notifier_block *self, unsigned long val, void *data >> } >> >> /* Must be noinline to ensure uniqueness of int3_selftest_ip. 
*/ >> -static noinline void __init int3_selftest(void) >> +static noinline __no_sanitize_address void __init int3_selftest(void) >> { >> static __initdata struct notifier_block int3_exception_nb = { >> .notifier_call = int3_exception_notify, >> @@ -2236,6 +2236,7 @@ static noinline void __init int3_selftest(void) >> }; >> unsigned int val = 0; >> >> + kasan_disable_current(); >> BUG_ON(register_die_notifier(&int3_exception_nb)); >> >> /* >> @@ -2253,6 +2254,7 @@ static noinline void __init int3_selftest(void) >> >> BUG_ON(val != 1); >> >> + kasan_enable_current(); >> unregister_die_notifier(&int3_exception_nb); >> } >> >> diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c >> index 0f6f187b1a9e..2a119279980f 100644 >> --- a/arch/x86/kernel/traps.c >> +++ b/arch/x86/kernel/traps.c >> @@ -912,6 +912,10 @@ static bool do_int3(struct pt_regs *regs) >> if (kprobe_int3_handler(regs)) >> return true; >> #endif >> + >> + if (kasan_inline_handler(regs)) >> + return true; >> + >> res = notify_die(DIE_INT3, "int3", regs, 0, X86_TRAP_BP, SIGTRAP); >> >> return res == NOTIFY_STOP; >> diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile >> index 5b9908f13dcf..1dc18090cbe7 100644 >> --- a/arch/x86/mm/Makefile >> +++ b/arch/x86/mm/Makefile >> @@ -36,7 +36,9 @@ obj-$(CONFIG_PTDUMP) += dump_pagetables.o >> obj-$(CONFIG_PTDUMP_DEBUGFS) += debug_pagetables.o >> >> KASAN_SANITIZE_kasan_init_$(BITS).o := n >> +KASAN_SANITIZE_kasan_inline.o := n >> obj-$(CONFIG_KASAN) += kasan_init_$(BITS).o >> +obj-$(CONFIG_KASAN_SW_TAGS) += kasan_inline.o >> >> KMSAN_SANITIZE_kmsan_shadow.o := n >> obj-$(CONFIG_KMSAN) += kmsan_shadow.o >> diff --git a/arch/x86/mm/kasan_inline.c b/arch/x86/mm/kasan_inline.c >> new file mode 100644 >> index 000000000000..9f85dfd1c38b >> --- /dev/null >> +++ b/arch/x86/mm/kasan_inline.c >> @@ -0,0 +1,23 @@ >> +// SPDX-License-Identifier: GPL-2.0 >> +#include <linux/kasan.h> >> +#include <linux/kdebug.h> >> + >> +bool kasan_inline_handler(struct pt_regs 
*regs) >> +{ >> + int metadata = regs->ax; >> + u64 addr = regs->di; >> + u64 pc = regs->ip; >> + bool recover = metadata & KASAN_RAX_RECOVER; >> + bool write = metadata & KASAN_RAX_WRITE; >> + size_t size = KASAN_RAX_SIZE(metadata); >> + >> + if (user_mode(regs)) >> + return false; >> + >> + if (!kasan_report((void *)addr, size, write, pc)) >> + return false; > >Hm, this part is different than on arm64: there, we don't check the >return value. > >Do I understand correctly that the return value from this function >controls whether we skip over the int3 instruction and continue the >execution? If so, we should return the same value regardless of >whether the report is suppressed or not. And then you should not need >to explicitly check for KASAN_BIT_MULTI_SHOT in the latter patch. I recall there were some corner cases where this code path got called in outline mode, didn't have a mismatch but still died due to the die() below. But I'll recheck and either apply what you wrote above or add a better explanation to the patch message. > >> + >> + kasan_inline_recover(recover, "Oops - KASAN", regs, metadata, die); > >Maybe name this as kasan_die_unless_recover()? Sure, sounds good > > >> + >> + return true; >> +} >> diff --git a/include/linux/kasan.h b/include/linux/kasan.h >> index 54481f8c30c5..8691ad870f3b 100644 >> --- a/include/linux/kasan.h >> +++ b/include/linux/kasan.h >> @@ -663,4 +663,28 @@ void kasan_non_canonical_hook(unsigned long addr); >> static inline void kasan_non_canonical_hook(unsigned long addr) { } >> #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ >> >> +#ifdef CONFIG_KASAN_SW_TAGS >> +/* >> + * The instrumentation allows to control whether we can proceed after >> + * a crash was detected. This is done by passing the -recover flag to >> + * the compiler. Disabling recovery allows to generate more compact >> + * code. >> + * >> + * Unfortunately disabling recovery doesn't work for the kernel right >> + * now. 
KASAN reporting is disabled in some contexts (for example when >> + * the allocator accesses slab object metadata; this is controlled by >> + * current->kasan_depth). All these accesses are detected by the tool, >> + * even though the reports for them are not printed. >> + * >> + * This is something that might be fixed at some point in the future. >> + */ >> +static inline void kasan_inline_recover( >> + bool recover, char *msg, struct pt_regs *regs, unsigned long err, >> + void die_fn(const char *str, struct pt_regs *regs, long err)) >> +{ >> + if (!recover) >> + die_fn(msg, regs, err); >> +} >> +#endif >> + >> #endif /* LINUX_KASAN_H */ >> -- >> 2.50.1 >> -- Kind regards Maciej Wieczór-Retman