* Qianfeng Rong <rongqianfeng@xxxxxxxx> [250812 09:52]:
> Commit 16f5dfbc851b ("gfp: include __GFP_NOWARN in GFP_NOWAIT") made
> GFP_NOWAIT implicitly include __GFP_NOWARN.
>
> Therefore, explicit __GFP_NOWARN combined with GFP_NOWAIT (e.g.,
> `GFP_NOWAIT | __GFP_NOWARN`) is now redundant. Let's clean up these
> redundant flags across subsystems.
>
> No functional changes.
>
> Reviewed-by: Harry Yoo <harry.yoo@xxxxxxxxxx>
> Signed-off-by: Qianfeng Rong <rongqianfeng@xxxxxxxx>

Reviewed-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>

> ---
> v1->v2:
> - Added a modification to remove redundant __GFP_NOWARN in
>   mm/damon/ops-common.c
> ---
>  mm/damon/ops-common.c | 2 +-
>  mm/filemap.c          | 2 +-
>  mm/mmu_gather.c       | 4 ++--
>  mm/rmap.c             | 2 +-
>  mm/vmalloc.c          | 2 +-
>  5 files changed, 6 insertions(+), 6 deletions(-)
>
> diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
> index 99321ff5cb92..b43595730f08 100644
> --- a/mm/damon/ops-common.c
> +++ b/mm/damon/ops-common.c
> @@ -303,7 +303,7 @@ static unsigned int __damon_migrate_folio_list(
>  		 * instead of migrated.
>  		 */
>  		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
> -			__GFP_NOWARN | __GFP_NOMEMALLOC | GFP_NOWAIT,
> +			__GFP_NOMEMALLOC | GFP_NOWAIT,
>  		.nid = target_nid,
>  	};
>
> diff --git a/mm/filemap.c b/mm/filemap.c
> index 4e5c9544fee4..c21e98657e0b 100644
> --- a/mm/filemap.c
> +++ b/mm/filemap.c
> @@ -1961,7 +1961,7 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
>  			gfp &= ~__GFP_FS;
>  		if (fgp_flags & FGP_NOWAIT) {
>  			gfp &= ~GFP_KERNEL;
> -			gfp |= GFP_NOWAIT | __GFP_NOWARN;
> +			gfp |= GFP_NOWAIT;
>  		}
>  		if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
>  			fgp_flags |= FGP_LOCK;
> diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
> index b49cc6385f1f..374aa6f021c6 100644
> --- a/mm/mmu_gather.c
> +++ b/mm/mmu_gather.c
> @@ -32,7 +32,7 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
>  	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
>  		return false;
>
> -	batch = (void *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
> +	batch = (void *)__get_free_page(GFP_NOWAIT);
>  	if (!batch)
>  		return false;
>
> @@ -364,7 +364,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
>  	struct mmu_table_batch **batch = &tlb->batch;
>
>  	if (*batch == NULL) {
> -		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
> +		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT);
>  		if (*batch == NULL) {
>  			tlb_table_invalidate(tlb);
>  			tlb_remove_table_one(table);
> diff --git a/mm/rmap.c b/mm/rmap.c
> index 568198e9efc2..7baa7385e1ce 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -285,7 +285,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
>  	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
>  		struct anon_vma *anon_vma;
>
> -		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
> +		avc = anon_vma_chain_alloc(GFP_NOWAIT);
>  		if (unlikely(!avc)) {
>  			unlock_anon_vma_root(root);
>  			root = NULL;
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 6dbcdceecae1..90c3de1a0417 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -5177,7 +5177,7 @@ static void vmap_init_nodes(void)
>  	int n = clamp_t(unsigned int, num_possible_cpus(), 1, 128);
>
>  	if (n > 1) {
> -		vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT | __GFP_NOWARN);
> +		vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT);
>  		if (vn) {
>  			/* Node partition is 16 pages. */
>  			vmap_zone_size = (1 << 4) * PAGE_SIZE;
> --
> 2.34.1
>
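
For anyone wondering why this is safe: the redundancy follows directly
from the flag definition. A minimal sketch of the relevant line,
assuming the post-16f5dfbc851b definition in include/linux/gfp_types.h
(quoted from memory, not part of this patch):

	/* include/linux/gfp_types.h, after commit 16f5dfbc851b */
	#define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM | __GFP_NOWARN)

Since __GFP_NOWARN is already set in GFP_NOWAIT, OR-ing it in again
sets no additional bits, so every hunk above leaves the resulting
gfp_mask bit-identical and the "No functional changes" claim holds.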