[PATCH] mm/memcontrol: fix invalid combined __percpu declaration in mem_cgroup_alloc()

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@xxxxxxxxxx>
---
 mm/memcontrol.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2464a58fbf17..40fcc2259e5f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3697,7 +3697,8 @@ static void mem_cgroup_free(struct mem_cgroup *memcg)

 static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
 {
-	struct memcg_vmstats_percpu *statc, __percpu *pstatc_pcpu;
+	struct memcg_vmstats_percpu *statc;
+	struct memcg_vmstats_percpu __percpu *pstatc_pcpu;
 	struct mem_cgroup *memcg;
 	int node, cpu;
 	int __maybe_unused i;
--
2.49.0


I have duplicated this again at the end of this mail for easy application.

Could we get this fix in or drop the series so the build is fixed for
mm-new? Thanks!


On Wed, May 14, 2025 at 11:41:52AM -0700, Shakeel Butt wrote:
> The function memcg_rstat_updated() is used to track the memcg stats
> updates for optimizing the flushes. At the moment, it is not re-entrant
> safe and the callers disable irqs before calling. However, to achieve
> the goal of updating memcg stats without disabling irqs,
> memcg_rstat_updated() needs to be re-entrant safe against irqs.
>
> This patch makes memcg_rstat_updated() re-entrant safe using this_cpu_*
> ops. On archs with CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS, this patch
> also makes memcg_rstat_updated() NMI-safe.
>
> Signed-off-by: Shakeel Butt <shakeel.butt@xxxxxxxxx>
> Reviewed-by: Vlastimil Babka <vbabka@xxxxxxx>
> ---
>  mm/memcontrol.c | 28 +++++++++++++++++-----------
>  1 file changed, 17 insertions(+), 11 deletions(-)
>
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 89476a71a18d..2464a58fbf17 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -505,8 +505,8 @@ struct memcg_vmstats_percpu {
>  	unsigned int			stats_updates;
>
>  	/* Cached pointers for fast iteration in memcg_rstat_updated() */
> -	struct memcg_vmstats_percpu	*parent;
> -	struct memcg_vmstats		*vmstats;
> +	struct memcg_vmstats_percpu __percpu	*parent_pcpu;
> +	struct memcg_vmstats			*vmstats;
>
>  	/* The above should fit a single cacheline for memcg_rstat_updated() */
>
> @@ -588,16 +588,21 @@ static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
>
>  static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
>  {
> +	struct memcg_vmstats_percpu __percpu *statc_pcpu;
>  	struct memcg_vmstats_percpu *statc;
> -	int cpu = smp_processor_id();
> +	int cpu;
>  	unsigned int stats_updates;
>
>  	if (!val)
>  		return;
>
> +	/* Don't assume callers have preemption disabled. */
> +	cpu = get_cpu();
> +
>  	cgroup_rstat_updated(memcg->css.cgroup, cpu);
> -	statc = this_cpu_ptr(memcg->vmstats_percpu);
> -	for (; statc; statc = statc->parent) {
> +	statc_pcpu = memcg->vmstats_percpu;
> +	for (; statc_pcpu; statc_pcpu = statc->parent_pcpu) {
> +		statc = this_cpu_ptr(statc_pcpu);
>  		/*
>  		 * If @memcg is already flushable then all its ancestors are
>  		 * flushable as well and also there is no need to increase
> @@ -606,14 +611,15 @@ static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
>  		if (memcg_vmstats_needs_flush(statc->vmstats))
>  			break;
>
> -		stats_updates = READ_ONCE(statc->stats_updates) + abs(val);
> -		WRITE_ONCE(statc->stats_updates, stats_updates);
> +		stats_updates = this_cpu_add_return(statc_pcpu->stats_updates,
> +						    abs(val));
>  		if (stats_updates < MEMCG_CHARGE_BATCH)
>  			continue;
>
> +		stats_updates = this_cpu_xchg(statc_pcpu->stats_updates, 0);
>  		atomic64_add(stats_updates, &statc->vmstats->stats_updates);
> -		WRITE_ONCE(statc->stats_updates, 0);
>  	}
> +	put_cpu();
>  }
>
>  static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
> @@ -3691,7 +3697,7 @@ static void mem_cgroup_free(struct mem_cgroup *memcg)
>
>  static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
>  {
> -	struct memcg_vmstats_percpu *statc, *pstatc;
> +	struct memcg_vmstats_percpu *statc, __percpu *pstatc_pcpu;
>  	struct mem_cgroup *memcg;
>  	int node, cpu;
>  	int __maybe_unused i;
> @@ -3722,9 +3728,9 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
>
>  	for_each_possible_cpu(cpu) {
>  		if (parent)
> -			pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
> +			pstatc_pcpu = parent->vmstats_percpu;
>  		statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
> -		statc->parent = parent ? pstatc : NULL;
> +		statc->parent_pcpu = parent ? pstatc_pcpu : NULL;
>  		statc->vmstats = memcg->vmstats;
>  	}
>
> --
> 2.47.1
>
>

----8<----

[Index of Archives]     [Linux Samsung SoC]     [Linux Rockchip SoC]     [Linux Actions SoC]     [Linux for Synopsys ARC Processors]     [Linux NFS]     [Linux NILFS]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]


  Powered by Linux