[PATCH] vfio/type1: Remove Fine Grained Superpages detection

VFIO is looking to enable an optimization where it can rely on the
unmap operation not splitting a larger IOPTE, and instead returning its
full size.

However, since commits:
  d50651636fb ("iommu/io-pgtable-arm-v7s: Remove split on unmap behavior")
  33729a5fc0ca ("iommu/io-pgtable-arm: Remove split on unmap behavior")

there are no iommu drivers that do split on unmap anymore. Instead, all
iommu drivers are expected to unmap the whole contiguous page and return
its size.
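
As a sketch of the contract drivers now honour (iommu_unmap(), PAGE_SIZE
and SZ_2M are real kernel names, the 2M IOPTE is illustrative):

	/* iova points into a 2M IOPTE; ask to unmap a single page */
	size_t unmapped = iommu_unmap(domain, iova, PAGE_SIZE);

	/* No driver splits anymore: the whole IOPTE is unmapped and
	 * unmapped may legitimately come back as SZ_2M, not PAGE_SIZE.
	 */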

Thus, there is no purpose in vfio_test_domain_fgsp(), as it is only
checking whether the iommu supports 2*PAGE_SIZE as a contiguous page.
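
Each set bit in a domain's pgsize_bitmap advertises one supported IOPTE
size, so that probe reduces to a single bitmask test. A minimal sketch,
with an illustrative bitmap:

	/* e.g. an IOMMU advertising only 4K, 2M and 1G IOPTEs */
	unsigned long pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G;

	/* 8K (2 * PAGE_SIZE with 4K pages) is not advertised, so the
	 * per-page contiguous scan is still needed on such an IOMMU
	 */
	bool scan_for_contig = !(pgsize_bitmap & (2 * PAGE_SIZE));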

Currently only AMD v1 supports such a page size, so all of this logic
only activates on AMD v1.

Remove vfio_test_domain_fgsp() and just rely on a direct 2*PAGE_SIZE
check instead, so there is no behavior change.

Maybe the iommu_iova_to_phys() scan should always be activated; it
shouldn't have a performance downside now that split is gone.

Signed-off-by: Jason Gunthorpe <jgg@xxxxxxxxxx>
---
 drivers/vfio/vfio_iommu_type1.c | 71 +++++++++------------------------
 1 file changed, 19 insertions(+), 52 deletions(-)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 0ac56072af9f23..529561bbbef98a 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -80,7 +80,6 @@ struct vfio_domain {
 	struct iommu_domain	*domain;
 	struct list_head	next;
 	struct list_head	group_list;
-	bool			fgsp : 1;	/* Fine-grained super pages */
 	bool			enforce_cache_coherency : 1;
 };
 
@@ -1056,6 +1055,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
 	LIST_HEAD(unmapped_region_list);
 	struct iommu_iotlb_gather iotlb_gather;
 	int unmapped_region_cnt = 0;
+	bool scan_for_contig;
 	long unlocked = 0;
 
 	if (!dma->size)
@@ -1079,9 +1079,15 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
 		cond_resched();
 	}
 
+	/*
+	 * For historical reasons this has only triggered on AMDv1 page tables,
+	 * though these days it should work everywhere.
+	 */
+	scan_for_contig = !(domain->domain->pgsize_bitmap & (2 * PAGE_SIZE));
 	iommu_iotlb_gather_init(&iotlb_gather);
 	while (iova < end) {
-		size_t unmapped, len;
+		size_t len = PAGE_SIZE;
+		size_t unmapped;
 		phys_addr_t phys, next;
 
 		phys = iommu_iova_to_phys(domain->domain, iova);
@@ -1094,12 +1100,18 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
 		 * To optimize for fewer iommu_unmap() calls, each of which
 		 * may require hardware cache flushing, try to find the
 		 * largest contiguous physical memory chunk to unmap.
+		 *
+		 * If the iova is part of a contiguous page > PAGE_SIZE then
+		 * unmap will unmap the whole contiguous page and return its
+		 * size.
 		 */
-		for (len = PAGE_SIZE;
-		     !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
-			next = iommu_iova_to_phys(domain->domain, iova + len);
-			if (next != phys + len)
-				break;
+		if (scan_for_contig) {
+			for (; iova + len < end; len += PAGE_SIZE) {
+				next = iommu_iova_to_phys(domain->domain,
+							  iova + len);
+				if (next != phys + len)
+					break;
+			}
 		}
 
 		/*
@@ -1833,49 +1845,6 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
 	return ret;
 }
 
-/*
- * We change our unmap behavior slightly depending on whether the IOMMU
- * supports fine-grained superpages.  IOMMUs like AMD-Vi will use a superpage
- * for practically any contiguous power-of-two mapping we give it.  This means
- * we don't need to look for contiguous chunks ourselves to make unmapping
- * more efficient.  On IOMMUs with coarse-grained super pages, like Intel VT-d
- * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
- * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
- * hugetlbfs is in use.
- */
-static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *regions)
-{
-	int ret, order = get_order(PAGE_SIZE * 2);
-	struct vfio_iova *region;
-	struct page *pages;
-	dma_addr_t start;
-
-	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
-	if (!pages)
-		return;
-
-	list_for_each_entry(region, regions, list) {
-		start = ALIGN(region->start, PAGE_SIZE * 2);
-		if (start >= region->end || (region->end - start < PAGE_SIZE * 2))
-			continue;
-
-		ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2,
-				IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE,
-				GFP_KERNEL_ACCOUNT);
-		if (!ret) {
-			size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE);
-
-			if (unmapped == PAGE_SIZE)
-				iommu_unmap(domain->domain, start + PAGE_SIZE, PAGE_SIZE);
-			else
-				domain->fgsp = true;
-		}
-		break;
-	}
-
-	__free_pages(pages, order);
-}
-
 static struct vfio_iommu_group *find_iommu_group(struct vfio_domain *domain,
 						 struct iommu_group *iommu_group)
 {
@@ -2314,8 +2283,6 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 		}
 	}
 
-	vfio_test_domain_fgsp(domain, &iova_copy);
-
 	/* replay mappings on new domains */
 	ret = vfio_iommu_replay(iommu, domain);
 	if (ret)

base-commit: 5a7ff05a5717e2ac4f4f83bcdd9033f246e9946b
-- 
2.43.0