On Tue, May 13, 2025 at 03:17:00PM -0700, Shakeel Butt wrote:
> > IIRC Power64 has issues here, 'funnily' their local_t is NMI safe.
> > Perhaps we could do the same for their this_cpu_*(), but ideally someone
> > with actual power hardware should do this ;-)
> >
>
> Is CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS the right config to
> differentiate between such archs? I see Power64 does not have that
> enabled.
> > There is no config symbol for this presently.
>
> Hmm what about CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS?

Hmm, I didn't know about that one, and it escaped my grep yesterday.
Anyway, PPC is fixable, just not sure it's worth it for them.

> > > - (if the above leaves any 64bit arch) its 64bit atomics implementation is safe
> >
> > True, only because HPPA does not in fact have NMIs.
>
> What is HPPA?

arch/parisc, the worst 64bit arch ever. They saw sparc32-smp and thought
that was a great idea, or something along those lines. Both are quite
terrible. Sparc64 realized the mistake and fixed it -- it has cmpxchg.

Nick, is this something that's useful for you guys?

---
diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h
index ecf5ac70cfae..aa188db68ef5 100644
--- a/arch/powerpc/include/asm/percpu.h
+++ b/arch/powerpc/include/asm/percpu.h
@@ -25,6 +25,11 @@ DECLARE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
 #define percpu_first_chunk_is_paged	false
 #endif
 
+#ifdef CONFIG_PPC_BOOK3S_64
+#define __pcpu_local_irq_save(f)	powerpc_local_irq_pmu_save(f)
+#define __pcpu_local_irq_restore(f)	powerpc_local_irq_pmu_restore(f)
+#endif
+
 #include <asm-generic/percpu.h>
 
 #include <asm/paca.h>
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 02aeca21479a..5c8376588dfb 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -75,6 +75,11 @@ extern void setup_per_cpu_areas(void);
 #define PER_CPU_ATTRIBUTES
 #endif
 
+#ifndef __pcpu_local_irq_save
+#define __pcpu_local_irq_save(x)	raw_local_irq_save(x)
+#define __pcpu_local_irq_restore(x)	raw_local_irq_restore(x)
+#endif
+
 #define raw_cpu_generic_read(pcp)					\
 ({									\
 	*raw_cpu_ptr(&(pcp));						\
@@ -146,9 +151,9 @@ do {									\
 ({									\
 	TYPEOF_UNQUAL(pcp) ___ret;					\
 	unsigned long ___flags;						\
-	raw_local_irq_save(___flags);					\
+	__pcpu_local_irq_save(___flags);				\
 	___ret = raw_cpu_generic_read(pcp);				\
-	raw_local_irq_restore(___flags);				\
+	__pcpu_local_irq_restore(___flags);				\
 	___ret;								\
 })
 
@@ -165,9 +170,9 @@ do {									\
 #define this_cpu_generic_to_op(pcp, val, op)				\
 do {									\
 	unsigned long __flags;						\
-	raw_local_irq_save(__flags);					\
+	__pcpu_local_irq_save(__flags);					\
 	raw_cpu_generic_to_op(pcp, val, op);				\
-	raw_local_irq_restore(__flags);					\
+	__pcpu_local_irq_restore(__flags);				\
 } while (0)
 
@@ -175,9 +180,9 @@ do {									\
 ({									\
 	TYPEOF_UNQUAL(pcp) __ret;					\
 	unsigned long __flags;						\
-	raw_local_irq_save(__flags);					\
+	__pcpu_local_irq_save(__flags);					\
 	__ret = raw_cpu_generic_add_return(pcp, val);			\
-	raw_local_irq_restore(__flags);					\
+	__pcpu_local_irq_restore(__flags);				\
 	__ret;								\
 })
 
@@ -185,9 +190,9 @@ do {									\
 ({									\
 	TYPEOF_UNQUAL(pcp) __ret;					\
 	unsigned long __flags;						\
-	raw_local_irq_save(__flags);					\
+	__pcpu_local_irq_save(__flags);					\
 	__ret = raw_cpu_generic_xchg(pcp, nval);			\
-	raw_local_irq_restore(__flags);					\
+	__pcpu_local_irq_restore(__flags);				\
 	__ret;								\
 })
 
@@ -195,9 +200,9 @@ do {									\
 ({									\
 	bool __ret;							\
 	unsigned long __flags;						\
-	raw_local_irq_save(__flags);					\
+	__pcpu_local_irq_save(__flags);					\
 	__ret = raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval);		\
-	raw_local_irq_restore(__flags);					\
+	__pcpu_local_irq_restore(__flags);				\
 	__ret;								\
 })
 
@@ -205,9 +210,9 @@ do {									\
 ({									\
 	TYPEOF_UNQUAL(pcp) __ret;					\
 	unsigned long __flags;						\
-	raw_local_irq_save(__flags);					\
+	__pcpu_local_irq_save(__flags);					\
 	__ret = raw_cpu_generic_cmpxchg(pcp, oval, nval);		\
-	raw_local_irq_restore(__flags);					\
+	__pcpu_local_irq_restore(__flags);				\
 	__ret;								\
 })
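
For readers without the powerpc background: the generic this_cpu_*()
fallbacks above protect a plain read-modify-write with an irq save/restore
pair. On Book3S 64, a regular irq save does not mask the PMU interrupt (the
NMI-like interrupt perf uses), so a PMI can still land inside that window;
powerpc_local_irq_pmu_save() additionally soft-masks PMIs, which is how the
arch's local_t manages to be NMI safe today. Below is a minimal userspace
sketch of that shape -- the mock_* helpers, the two mask flags, and
pcpu_counter are invented stand-ins for raw_local_irq_save(),
powerpc_local_irq_pmu_save(), and a per-cpu variable, not kernel APIs:

/*
 * Userspace mock, not kernel code.  It mirrors the structure of
 * this_cpu_generic_add_return() from the hunks above: the whole
 * read-modify-write sits inside a save/restore pair, so whichever
 * interrupt sources the pair masks cannot observe a torn update.
 */
#include <stdio.h>

static int irqs_masked;		/* regular interrupts */
static int pmis_masked;		/* PMU interrupts (NMI-like on Book3S 64) */

/* stand-in for raw_local_irq_save()/raw_local_irq_restore() */
static void mock_irq_save(unsigned long *flags)
{
	*flags = irqs_masked;
	irqs_masked = 1;	/* a PMI may still fire in this window */
}

static void mock_irq_restore(unsigned long flags)
{
	irqs_masked = flags;
}

/* stand-in for powerpc_local_irq_pmu_save()/_restore() */
static void mock_irq_pmu_save(unsigned long *flags)
{
	*flags = irqs_masked | (unsigned long)pmis_masked << 1;
	irqs_masked = 1;
	pmis_masked = 1;	/* nothing can interrupt the RMW now */
}

static void mock_irq_pmu_restore(unsigned long flags)
{
	irqs_masked = flags & 1;
	pmis_masked = (flags >> 1) & 1;
}

static long pcpu_counter;	/* stands in for a per-cpu variable */

/* same shape as this_cpu_generic_add_return() after the patch */
static long mock_this_cpu_add_return(long val)
{
	unsigned long flags;
	long ret;

	mock_irq_pmu_save(&flags);	/* __pcpu_local_irq_save() */
	pcpu_counter += val;		/* the read-modify-write */
	ret = pcpu_counter;
	mock_irq_pmu_restore(flags);	/* __pcpu_local_irq_restore() */
	return ret;
}

int main(void)
{
	unsigned long flags;

	/* plain masking: on PPC a PMI could still interrupt this RMW */
	mock_irq_save(&flags);
	pcpu_counter += 1;
	mock_irq_restore(flags);

	printf("counter = %ld\n", mock_this_cpu_add_return(5));
	return 0;
}

The #ifndef fallback added to asm-generic is exactly what makes this
substitution possible: an arch that can cheaply mask its NMI-like sources
overrides the one pair of macros, and the generic this_cpu_*() ops become
NMI safe without any per-op arch code.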