Continue spreading the notion of affinity to the percpu interrupt
request code by updating the call sites that use request_percpu_nmi()
(all two of them) to take an affinity pointer. This pointer is firmly
NULL for now.

Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx>
---
 arch/arm64/kernel/smp.c   |  2 +-
 drivers/perf/arm_pmu.c    |  4 +++-
 include/linux/interrupt.h |  4 ++--
 kernel/irq/manage.c       | 11 +++++++----
 4 files changed, 13 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 68cea3a4a35ca..6fb838eee2e7d 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -1094,7 +1094,7 @@ static void ipi_setup_sgi(int ipi)
 	irq = ipi_irq_base + ipi;
 
 	if (ipi_should_be_nmi(ipi)) {
-		err = request_percpu_nmi(irq, ipi_handler, "IPI", &irq_stat);
+		err = request_percpu_nmi(irq, ipi_handler, "IPI", NULL, &irq_stat);
 		WARN(err, "Could not request IRQ %d as NMI, err=%d\n", irq, err);
 	} else {
 		err = request_percpu_irq(irq, ipi_handler, "IPI", &irq_stat);
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 6e3c72e9b41cd..d1f738eb77f1e 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -678,7 +678,9 @@ int armpmu_request_irq(int irq, int cpu)
 			irq_ops = &pmunmi_ops;
 		}
 	} else if (armpmu_count_irq_users(cpu, irq) == 0) {
-		err = request_percpu_nmi(irq, handler, "arm-pmu", &cpu_armpmu);
+		err = request_percpu_nmi(irq, handler, "arm-pmu",
+					 NULL,
+					 &cpu_armpmu);
 
 		/* If cannot get an NMI, get a normal interrupt */
 		if (err) {
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 52147d5f432b3..2134e7c08c169 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -197,8 +197,8 @@ request_percpu_irq(unsigned int irq, irq_handler_t handler,
 }
 
 extern int __must_check
-request_percpu_nmi(unsigned int irq, irq_handler_t handler,
-		   const char *devname, void __percpu *dev);
+request_percpu_nmi(unsigned int irq, irq_handler_t handler, const char *devname,
+		   const struct cpumask *affinity, void __percpu *dev_id);
 
 extern const void *free_irq(unsigned int, void *);
 extern void free_percpu_irq(unsigned int, void __percpu *);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index ac394f0b422b8..a17ac522622e7 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -2530,6 +2530,7 @@ EXPORT_SYMBOL_GPL(__request_percpu_irq);
  * @irq: Interrupt line to allocate
  * @handler: Function to be called when the IRQ occurs.
  * @name: An ascii name for the claiming device
+ * @affinity: A cpumask describing the target CPUs for this interrupt
  * @dev_id: A percpu cookie passed back to the handler function
  *
  * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
@@ -2547,7 +2548,8 @@ EXPORT_SYMBOL_GPL(__request_percpu_irq);
  * will fail returning a negative value.
  */
 int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
-		       const char *name, void __percpu *dev_id)
+		       const char *name,
+		       const struct cpumask *affinity, void __percpu *dev_id)
 {
 	struct irqaction *action;
 	struct irq_desc *desc;
@@ -2564,13 +2566,14 @@ int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
 	    !irq_supports_nmi(desc))
 		return -EINVAL;
 
-	/* The line cannot already be NMI */
-	if (irq_is_nmi(desc))
+	/* The line cannot be NMI already if the new request covers all CPUs */
+	if (irq_is_nmi(desc) &&
+	    (!affinity || cpumask_equal(affinity, cpu_possible_mask)))
 		return -EINVAL;
 
 	action = create_percpu_irqaction(handler,
 					 IRQF_NO_THREAD | IRQF_NOBALANCING,
-					 name, affinity, dev_id);
+					 name, affinity, dev_id);
 	if (!action)
 		return -ENOMEM;
 
-- 
2.39.2
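
For illustration, here is a minimal sketch of what a caller could look
like once a non-NULL affinity is actually wired up (both call sites
above still pass NULL at this point in the series). The handler, device
name, and per-CPU cookie below are made up for the example; only the
new request_percpu_nmi() signature comes from this patch:

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU cookie, mirroring the &irq_stat/&cpu_armpmu usage above */
static DEFINE_PER_CPU(unsigned int, example_nmi_count);

static irqreturn_t example_nmi_handler(int irq, void *dev_id)
{
	/* NMI context: keep the work minimal and NMI-safe */
	this_cpu_inc(example_nmi_count);
	return IRQ_HANDLED;
}

static int example_request_partial_nmi(unsigned int irq,
				       const struct cpumask *subset)
{
	/*
	 * With the check introduced above, this request can succeed even
	 * if the line is already an NMI, provided @subset is non-NULL and
	 * does not cover cpu_possible_mask.
	 */
	return request_percpu_nmi(irq, example_nmi_handler, "example-nmi",
				  subset, &example_nmi_count);
}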