[RFC PATCH 6/9] vfio-pci-core: support the new vfio ops

Use the new maple tree to create the mmap offset entries when userspace
calls get_region_info, and return the vmmap range offset. When the user
later mmaps the same region range, the vmmap entry is available, so the
BAR can be identified from bar_index and mmap attributes such as WC can
be changed later. On top of that, since the offset now comes from the
maple tree range, the legacy VFIO_PCI_INDEX_TO_OFFSET will eventually
no longer be needed, which allows a more dynamic offset calculation and
removes the limitations of the legacy scheme.

To avoid duplicating the mmap and get_region_info functions, introduce
common helpers that use mmap_mt/vmmap when they are non-NULL; for
ioctl2, only VFIO_DEVICE_GET_REGION_INFO is overridden with the new
function.

Signed-off-by: Mahmoud Adam <mngyadam@xxxxxxxxx>
---
This follows the same temporary "2" suffix. It is only meant for the
migration period; the old functions will eventually be dropped and
replaced. A rough sketch of how a driver could wire up the new entry
points follows below.
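
The sketch below is not part of this patch. It assumes the core
vfio_device_ops gained ioctl2/mmap2 callbacks with signatures matching
the exported helpers added here, and that the core passes the per-device
maple tree / vfio_mmap entry into them; the callback names, the driver
name and the includes are placeholders, not something this patch
defines:

#include <linux/vfio.h>
#include <linux/vfio_pci_core.h>
#include <linux/maple_tree.h>

static long my_vfio_pci_ioctl2(struct vfio_device *core_vdev, unsigned int cmd,
                               unsigned long arg, struct maple_tree *mmap_mt)
{
        /* Region offsets are allocated from the per-device maple tree. */
        return vfio_pci_core_ioctl2(core_vdev, cmd, arg, mmap_mt);
}

static int my_vfio_pci_mmap2(struct vfio_device *core_vdev,
                             struct vm_area_struct *vma,
                             struct vfio_mmap *core_vmmap)
{
        /*
         * core_vmmap is the entry looked up for vma->vm_pgoff; the core
         * takes the BAR from its bar_index instead of decoding the
         * legacy VFIO_PCI_OFFSET_SHIFT encoding.
         */
        return vfio_pci_core_mmap2(core_vdev, vma, core_vmmap);
}

static const struct vfio_device_ops my_vfio_pci_ops = {
        .name   = "my-vfio-pci",
        /* ... existing callbacks (open_device, read, write, ...) ... */
        .ioctl2 = my_vfio_pci_ioctl2,   /* assumed callback name */
        .mmap2  = my_vfio_pci_mmap2,    /* assumed callback name */
};
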
 drivers/vfio/pci/vfio_pci_core.c | 72 +++++++++++++++++++++++++++++---
 include/linux/vfio_pci_core.h    |  4 ++
 2 files changed, 71 insertions(+), 5 deletions(-)

diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 7a431a03bd850..8418d98ac66ce 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -1041,8 +1041,10 @@ static int vfio_pci_ioctl_get_info(struct vfio_pci_core_device *vdev,
 	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
 }
 
-static int vfio_pci_ioctl_get_region_info(struct vfio_pci_core_device *vdev,
-					  struct vfio_region_info __user *arg)
+
+static int _vfio_pci_ioctl_get_region_info(struct vfio_pci_core_device *vdev,
+					   struct maple_tree *mmap_mt,
+					   struct vfio_region_info __user *arg)
 {
 	unsigned long minsz = offsetofend(struct vfio_region_info, offset);
 	struct pci_dev *pdev = vdev->pdev;
@@ -1170,10 +1172,32 @@ static int vfio_pci_ioctl_get_region_info(struct vfio_pci_core_device *vdev,
 		kfree(caps.buf);
 	}
 
-	info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+	if (mmap_mt) {
+		ret = vfio_pci_mmap_alloc(vdev, mmap_mt,
+					  info.flags, info.size, info.index,
+					  (unsigned long *) &info.offset);
+		if (ret)
+			return ret;
+	} else {
+		info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+	}
+
 	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
 }
 
+static int vfio_pci_ioctl_get_region_info(struct vfio_pci_core_device *vdev,
+					   struct vfio_region_info __user *arg)
+{
+	return _vfio_pci_ioctl_get_region_info(vdev, NULL, arg);
+}
+
+static int vfio_pci_ioctl_get_region_info2(struct vfio_pci_core_device *vdev,
+					   struct maple_tree *mmap_mt,
+					   struct vfio_region_info __user *arg)
+{
+	return _vfio_pci_ioctl_get_region_info(vdev, mmap_mt, arg);
+}
+
 static int vfio_pci_ioctl_get_irq_info(struct vfio_pci_core_device *vdev,
 				       struct vfio_irq_info __user *arg)
 {
@@ -1514,6 +1538,23 @@ long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
 }
 EXPORT_SYMBOL_GPL(vfio_pci_core_ioctl);
 
+
+long vfio_pci_core_ioctl2(struct vfio_device *core_vdev, unsigned int cmd,
+			  unsigned long arg, struct maple_tree *mmap_mt)
+{
+	struct vfio_pci_core_device *vdev =
+		container_of(core_vdev, struct vfio_pci_core_device, vdev);
+	void __user *uarg = (void __user *)arg;
+
+	switch (cmd) {
+	case VFIO_DEVICE_GET_REGION_INFO:
+		return vfio_pci_ioctl_get_region_info2(vdev, mmap_mt, uarg);
+	default:
+		return vfio_pci_core_ioctl(core_vdev, cmd, arg);
+	}
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_ioctl2);
+
 static int vfio_pci_core_feature_token(struct vfio_device *device, u32 flags,
 				       uuid_t __user *arg, size_t argsz)
 {
@@ -1748,16 +1789,24 @@ static const struct vm_operations_struct vfio_pci_vm_ops = {
 #endif
 };
 
-int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
+static int _vfio_pci_core_mmap(struct vfio_device *core_vdev,
+			       struct vm_area_struct *vma,
+			       struct vfio_mmap *core_vmmap)
 {
 	struct vfio_pci_core_device *vdev =
 		container_of(core_vdev, struct vfio_pci_core_device, vdev);
+	struct vfio_pci_mmap *vmmap = NULL;
 	struct pci_dev *pdev = vdev->pdev;
 	unsigned int index;
 	u64 phys_len, req_len, pgoff, req_start;
 	int ret;
 
-	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+	if (core_vmmap) {
+		vmmap = container_of(core_vmmap, struct vfio_pci_mmap, core);
+		index = vmmap->bar_index;
+	} else {
+		index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+	}
 
 	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
 		return -EINVAL;
@@ -1836,8 +1885,21 @@ int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma
 
 	return 0;
 }
+
+int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
+{
+	return _vfio_pci_core_mmap(core_vdev, vma, NULL);
+}
 EXPORT_SYMBOL_GPL(vfio_pci_core_mmap);
 
+int vfio_pci_core_mmap2(struct vfio_device *core_vdev,
+			struct vm_area_struct *vma,
+			struct vfio_mmap *core_vmmap)
+{
+	return _vfio_pci_core_mmap(core_vdev, vma, core_vmmap);
+}
+EXPORT_SYMBOL_GPL(vfio_pci_core_mmap2);
+
 void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count)
 {
 	struct vfio_pci_core_device *vdev =
diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h
index 532d2914a9c2e..cb52b92340451 100644
--- a/include/linux/vfio_pci_core.h
+++ b/include/linux/vfio_pci_core.h
@@ -118,6 +118,8 @@ int vfio_pci_core_sriov_configure(struct vfio_pci_core_device *vdev,
 				  int nr_virtfn);
 long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
 		unsigned long arg);
+long vfio_pci_core_ioctl2(struct vfio_device *core_vdev, unsigned int cmd,
+			unsigned long arg, struct maple_tree *mmap_attrs_mt);
 int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags,
 				void __user *arg, size_t argsz);
 ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
@@ -125,6 +127,8 @@ ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
 ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *buf,
 		size_t count, loff_t *ppos);
 int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma);
+int vfio_pci_core_mmap2(struct vfio_device *core_vdev, struct vm_area_struct *vma,
+		struct vfio_mmap *core_vmmap);
 void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count);
 int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf);
 int vfio_pci_core_enable(struct vfio_pci_core_device *vdev);
-- 
2.47.3



