[RFC PATCH v2 21/51] mm: hugetlb: Inline huge_node() into callers

huge_node()'s role was to read the struct mempolicy (mpol) from the vma
and then interpret that mpol to get a node id and nodemask.

Inline huge_node() into its callers, since two of the three callers
will be refactored in later patches to take and interpret an mpol
directly, without reading it from the vma.
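
For reference, the open-coded equivalent at a call site (taken from the
hunks below; all identifiers come from the existing hugetlb/mempolicy
code) looks like:

	pgoff_t ilx;

	/* Previously: nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask); */
	mpol = get_vma_policy(vma, addr, h->order, &ilx);
	nid = policy_node_nodemask(mpol, gfp_mask, ilx, &nodemask);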

Signed-off-by: Ackerley Tng <ackerleytng@xxxxxxxxxx>

Change-Id: Ic94b2ed916fd4f89b7d2755288a3a2f6a56051f7
---
 include/linux/mempolicy.h | 12 ------------
 mm/hugetlb.c              | 13 ++++++++++---
 mm/mempolicy.c            | 21 ---------------------
 3 files changed, 10 insertions(+), 36 deletions(-)

diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 840c576abcfd..41fc53605ef0 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -140,9 +140,6 @@ extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
 
 extern int policy_node_nodemask(struct mempolicy *mpol, gfp_t gfp_flags,
 				pgoff_t ilx, nodemask_t **nodemask);
-extern int huge_node(struct vm_area_struct *vma,
-				unsigned long addr, gfp_t gfp_flags,
-				struct mempolicy **mpol, nodemask_t **nodemask);
 extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
 extern bool mempolicy_in_oom_domain(struct task_struct *tsk,
 				const nodemask_t *mask);
@@ -260,15 +257,6 @@ static inline int policy_node_nodemask(struct mempolicy *mpol, gfp_t gfp_flags,
 	return 0;
 }
 
-static inline int huge_node(struct vm_area_struct *vma,
-				unsigned long addr, gfp_t gfp_flags,
-				struct mempolicy **mpol, nodemask_t **nodemask)
-{
-	*mpol = NULL;
-	*nodemask = NULL;
-	return 0;
-}
-
 static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
 {
 	return false;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b822b204e9b3..5cc261b90e39 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1372,10 +1372,12 @@ static struct folio *dequeue_hugetlb_folio(struct hstate *h,
 	struct mempolicy *mpol;
 	gfp_t gfp_mask;
 	nodemask_t *nodemask;
+	pgoff_t ilx;
 	int nid;
 
 	gfp_mask = htlb_alloc_mask(h);
-	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
+	mpol = get_vma_policy(vma, address, h->order, &ilx);
+	nid = policy_node_nodemask(mpol, gfp_mask, ilx, &nodemask);
 
 	if (mpol_is_preferred_many(mpol)) {
 		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
@@ -2321,8 +2323,11 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
 	gfp_t gfp_mask = htlb_alloc_mask(h);
 	int nid;
 	nodemask_t *nodemask;
+	pgoff_t ilx;
+
+	mpol = get_vma_policy(vma, addr, h->order, &ilx);
+	nid = policy_node_nodemask(mpol, gfp_mask, ilx, &nodemask);
 
-	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
 	if (mpol_is_preferred_many(mpol)) {
 		gfp_t gfp = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
 
@@ -6829,10 +6834,12 @@ static struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
 	nodemask_t *nodemask;
 	struct folio *folio;
 	gfp_t gfp_mask;
+	pgoff_t ilx;
 	int node;
 
 	gfp_mask = htlb_alloc_mask(h);
-	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
+	mpol = get_vma_policy(vma, address, h->order, &ilx);
+	node = policy_node_nodemask(mpol, gfp_mask, ilx, &nodemask);
 	/*
 	 * This is used to allocate a temporary hugetlb to hold the copied
 	 * content, which will then be copied again to the final hugetlb
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 7837158ee5a8..39d0abc407dc 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2145,27 +2145,6 @@ int policy_node_nodemask(struct mempolicy *mpol, gfp_t gfp_flags,
 }
 
 #ifdef CONFIG_HUGETLBFS
-/*
- * huge_node(@vma, @addr, @gfp_flags, @mpol)
- * @vma: virtual memory area whose policy is sought
- * @addr: address in @vma for shared policy lookup and interleave policy
- * @gfp_flags: for requested zone
- * @mpol: pointer to mempolicy pointer for reference counted mempolicy
- * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
- *
- * Returns a nid suitable for a huge page allocation and a pointer
- * to the struct mempolicy for conditional unref after allocation.
- * If the effective policy is 'bind' or 'prefer-many', returns a pointer
- * to the mempolicy's @nodemask for filtering the zonelist.
- */
-int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
-		struct mempolicy **mpol, nodemask_t **nodemask)
-{
-	pgoff_t ilx;
-
-	*mpol = get_vma_policy(vma, addr, hstate_vma(vma)->order, &ilx);
-	return policy_node_nodemask(*mpol, gfp_flags, ilx, nodemask);
-}
 
 /*
  * init_nodemask_of_mempolicy
-- 
2.49.0.1045.g170613ef41-goog




