After a lazy switch occurs, RPAL locks the receiver to the current CPU by
modifying its cpumask. If the receiver then forks, the kernel copies the
modified cpumask to the new task, leaving the new task permanently locked
to the current CPU.

This patch addresses the issue by detecting, during fork, whether the
original task has been locked to the current CPU by RPAL. If it has, the
cpumask that existed before the lazy switch is assigned to the new task,
so the new task is not locked to the current CPU.

Signed-off-by: Bo Li <libo.gcs85@xxxxxxxxxxxxx>
---
 arch/x86/kernel/process.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c1d2dac72b9c..be8845e2ca4d 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -29,6 +29,7 @@
 #include <trace/events/power.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/entry-common.h>
+#include <linux/rpal.h>
 #include <asm/cpu.h>
 #include <asm/cpuid/api.h>
 #include <asm/apic.h>
@@ -88,6 +89,19 @@ EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);
 DEFINE_PER_CPU(bool, __tss_limit_invalid);
 EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
 
+#ifdef CONFIG_RPAL
+static void rpal_fix_task_dump(struct task_struct *dst,
+			       struct task_struct *src)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&src->pi_lock, flags);
+	if (rpal_test_task_thread_flag(src, RPAL_CPU_LOCKED_BIT))
+		cpumask_copy(&dst->cpus_mask, &src->rpal_cd->old_mask);
+	raw_spin_unlock_irqrestore(&src->pi_lock, flags);
+}
+#endif
+
 /*
  * this gets called so that we can store lazy state into memory and copy the
  * current task into the new thread.
@@ -100,6 +114,10 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 #ifdef CONFIG_VM86
 	dst->thread.vm86 = NULL;
 #endif
+#ifdef CONFIG_RPAL
+	if (src->rpal_rs)
+		rpal_fix_task_dump(dst, src);
+#endif
 	return 0;
 }
--
2.20.1
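
For context only (not part of the patch): the fix restores src->rpal_cd->old_mask,
which implies the lazy-switch path records the pre-switch affinity before pinning
the receiver. A minimal sketch of what that locking side might look like is below.
rpal_cd, old_mask and RPAL_CPU_LOCKED_BIT come from this series; the helper name
rpal_lock_receiver_to_this_cpu() and rpal_set_task_thread_flag() are hypothetical
and only illustrate the assumed bookkeeping.

static void rpal_lock_receiver_to_this_cpu(struct task_struct *p)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	/* Remember the affinity that existed before the lazy switch ... */
	cpumask_copy(&p->rpal_cd->old_mask, &p->cpus_mask);
	/* ... then pin the receiver to the CPU it was lazily switched on. */
	cpumask_copy(&p->cpus_mask, cpumask_of(smp_processor_id()));
	/* Hypothetical setter paired with rpal_test_task_thread_flag(). */
	rpal_set_task_thread_flag(p, RPAL_CPU_LOCKED_BIT);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

Under that assumption, copying old_mask (rather than the parent's current
cpus_mask) in arch_dup_task_struct() gives the child the affinity the parent
had before the lazy switch, which is what this patch relies on.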