On 5/15/2025 7:41 AM, Ackerley Tng wrote:
Previously, gbl_chg was passed from alloc_hugetlb_folio() into
dequeue_hugetlb_folio_vma(), leaking the concept of gbl_chg into
dequeue_hugetlb_folio_vma().
This patch consolidates the interpretation of gbl_chg into
alloc_hugetlb_folio(), also renaming dequeue_hugetlb_folio_vma() to
dequeue_hugetlb_folio() so dequeue_hugetlb_folio() can just focus on
dequeuing a folio.
Change-Id: I31bf48af2400b6e13b44d03c8be22ce1a9092a9c
Signed-off-by: Ackerley Tng <ackerleytng@xxxxxxxxxx>
---
mm/hugetlb.c | 28 +++++++++++-----------------
1 file changed, 11 insertions(+), 17 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6ea1be71aa42..b843e869496f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1364,9 +1364,9 @@ static unsigned long available_huge_pages(struct hstate *h)
return h->free_huge_pages - h->resv_huge_pages;
}
-static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
- struct vm_area_struct *vma,
- unsigned long address, long gbl_chg)
+static struct folio *dequeue_hugetlb_folio(struct hstate *h,
+ struct vm_area_struct *vma,
+ unsigned long address)
The rename seems unnecessary in this patch, since the function still takes a
vma and uses it. It may be better to move the rename to a later patch.
{
struct folio *folio = NULL;
struct mempolicy *mpol;
@@ -1374,13 +1374,6 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
nodemask_t *nodemask;
int nid;
- /*
- * gbl_chg==1 means the allocation requires a new page that was not
- * reserved before. Making sure there's at least one free page.
- */
- if (gbl_chg && !available_huge_pages(h))
- goto err;
-
gfp_mask = htlb_alloc_mask(h);
nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
@@ -1398,9 +1391,6 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
mpol_cond_put(mpol);
return folio;
-
-err:
- return NULL;
}
/*
@@ -3074,12 +3064,16 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
goto out_uncharge_cgroup_reservation;
spin_lock_irq(&hugetlb_lock);
+
/*
- * glb_chg is passed to indicate whether or not a page must be taken
- * from the global free pool (global change). gbl_chg == 0 indicates
- * a reservation exists for the allocation.
+ * gbl_chg == 0 indicates a reservation exists for the allocation - so
+ * try dequeuing a page. If there are available_huge_pages(), try using
+ * them!
*/
- folio = dequeue_hugetlb_folio_vma(h, vma, addr, gbl_chg);
+ folio = NULL;
+ if (!gbl_chg || available_huge_pages(h))
+ folio = dequeue_hugetlb_folio(h, vma, addr);
+
if (!folio) {
spin_unlock_irq(&hugetlb_lock);
folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);