On 8/5/25 4:26 PM, Sabyrzhan Tasbolatov wrote:
> LoongArch needs deferred KASAN initialization as it has a custom
> kasan_arch_is_ready() implementation that tracks shadow memory
> readiness via the kasan_early_stage flag.
>
> Select ARCH_DEFER_KASAN to enable the unified static key mechanism
> for runtime KASAN control. Call kasan_init_generic() which handles
> Generic KASAN initialization and enables the static key.
>
> Replace kasan_arch_is_ready() with kasan_enabled() and delete the
> flag kasan_early_stage in favor of the unified kasan_enabled()
> interface.
>
> Note that init_task.kasan_depth = 0 is called after kasan_init_generic(),
> which is different than in other arch kasan_init(). This is left
> unchanged as it cannot be tested.
>
> Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217049
> Signed-off-by: Sabyrzhan Tasbolatov <snovitoll@xxxxxxxxx>
> ---
> Changes in v4:
> - Replaced !kasan_enabled() with !kasan_shadow_initialized() in
>   loongarch which selects ARCH_DEFER_KASAN (Andrey Ryabinin)
> ---
>  arch/loongarch/Kconfig             | 1 +
>  arch/loongarch/include/asm/kasan.h | 7 -------
>  arch/loongarch/mm/kasan_init.c     | 8 ++------
>  3 files changed, 3 insertions(+), 13 deletions(-)
>
> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
> index f0abc38c40a..f6304c073ec 100644
> --- a/arch/loongarch/Kconfig
> +++ b/arch/loongarch/Kconfig
> @@ -9,6 +9,7 @@ config LOONGARCH
>  	select ACPI_PPTT if ACPI
>  	select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
>  	select ARCH_BINFMT_ELF_STATE
> +	select ARCH_DEFER_KASAN
>  	select ARCH_DISABLE_KASAN_INLINE
>  	select ARCH_ENABLE_MEMORY_HOTPLUG
>  	select ARCH_ENABLE_MEMORY_HOTREMOVE
> diff --git a/arch/loongarch/include/asm/kasan.h b/arch/loongarch/include/asm/kasan.h
> index 62f139a9c87..0e50e5b5e05 100644
> --- a/arch/loongarch/include/asm/kasan.h
> +++ b/arch/loongarch/include/asm/kasan.h
> @@ -66,7 +66,6 @@
>  #define XKPRANGE_WC_SHADOW_OFFSET	(KASAN_SHADOW_START + XKPRANGE_WC_KASAN_OFFSET)
>  #define XKVRANGE_VC_SHADOW_OFFSET	(KASAN_SHADOW_START + XKVRANGE_VC_KASAN_OFFSET)
>
> -extern bool kasan_early_stage;
>  extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
>
>  #define kasan_mem_to_shadow kasan_mem_to_shadow
> @@ -75,12 +74,6 @@ void *kasan_mem_to_shadow(const void *addr);
>  #define kasan_shadow_to_mem kasan_shadow_to_mem
>  const void *kasan_shadow_to_mem(const void *shadow_addr);
>
> -#define kasan_arch_is_ready kasan_arch_is_ready
> -static __always_inline bool kasan_arch_is_ready(void)
> -{
> -	return !kasan_early_stage;
> -}
> -
>  #define addr_has_metadata addr_has_metadata
>  static __always_inline bool addr_has_metadata(const void *addr)
>  {
> diff --git a/arch/loongarch/mm/kasan_init.c b/arch/loongarch/mm/kasan_init.c
> index d2681272d8f..57fb6e98376 100644
> --- a/arch/loongarch/mm/kasan_init.c
> +++ b/arch/loongarch/mm/kasan_init.c
> @@ -40,11 +40,9 @@ static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
>  #define __pte_none(early, pte) (early ? pte_none(pte) : \
>  ((pte_val(pte) & _PFN_MASK) == (unsigned long)__pa(kasan_early_shadow_page)))
>
> -bool kasan_early_stage = true;
> -
>  void *kasan_mem_to_shadow(const void *addr)
>  {
> -	if (!kasan_arch_is_ready()) {
> +	if (!kasan_shadow_initialized()) {
>  		return (void *)(kasan_early_shadow_page);
>  	} else {
>  		unsigned long maddr = (unsigned long)addr;
> @@ -298,8 +296,6 @@ void __init kasan_init(void)
>  	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
>  				    kasan_mem_to_shadow((void *)KFENCE_AREA_END));
>
> -	kasan_early_stage = false;
> -

There is a reason for this line to be here. Your patch will change the
result of the follow-up kasan_mem_to_shadow() calls and feed the wrong
address to kasan_map_populate(); see the sketch after the quoted diff.

>  	/* Populate the linear mapping */
>  	for_each_mem_range(i, &pa_start, &pa_end) {
>  		void *start = (void *)phys_to_virt(pa_start);
> @@ -329,5 +325,5 @@ void __init kasan_init(void)
>
>  	/* At this point kasan is fully initialized. Enable error messages */
>  	init_task.kasan_depth = 0;
> -	pr_info("KernelAddressSanitizer initialized.\n");
> +	kasan_init_generic();
>  }
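
To spell the ordering out, here is a minimal sketch of the flow after your
patch. It is not the exact upstream code: the unrelated parts of kasan_init()
are elided, and the (start, end, node) signature of kasan_map_populate() with
a NUMA_NO_NODE argument is my assumption about the surrounding code.

	/*
	 * Sketch only. With ARCH_DEFER_KASAN, kasan_shadow_initialized()
	 * is backed by a static key that only kasan_init_generic() enables.
	 */
	void *kasan_mem_to_shadow(const void *addr)
	{
		if (!kasan_shadow_initialized())
			/* Early path: every address maps to the dummy page. */
			return (void *)kasan_early_shadow_page;
		/* ... real address-to-shadow translation ... */
	}

	void __init kasan_init(void)
	{
		/* ... early shadow population ... */

		/*
		 * "kasan_early_stage = false;" used to flip the mode right
		 * here, so the translations below already took the real
		 * path. With only the static key, kasan_shadow_initialized()
		 * is still false at this point, and both arguments below
		 * collapse to kasan_early_shadow_page.
		 */
		for_each_mem_range(i, &pa_start, &pa_end) {
			void *start = (void *)phys_to_virt(pa_start);
			void *end = (void *)phys_to_virt(pa_end);

			kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
					   (unsigned long)kasan_mem_to_shadow(end),
					   NUMA_NO_NODE);	/* node arg assumed */
		}

		/* ... */
		init_task.kasan_depth = 0;
		kasan_init_generic();	/* the static key flips only here */
	}

In other words, whatever replaces kasan_early_stage has to switch
kasan_mem_to_shadow() over to the real translation before the linear
mapping is populated, not at the very end of kasan_init().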