[RFC v2 04/14] vfio/nvidia-vgpu: allocate vGPU channels when creating vGPUs

Creating a vGPU requires carving a portion of channels out of the
reserved channel pool.

Allocate the channels from the reserved channel pool when creating a vGPU
and release them when the vGPU is destroyed. If the PF driver does not
expose a channel ID allocator, fall back to a bitmap allocator owned by
the vGPU manager.

Cc: Aniket Agashe <aniketa@xxxxxxxxxx>
Signed-off-by: Zhi Wang <zhiw@xxxxxxxxxx>
---
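
A note on the sizing rule used below: get_alloc_chids_num() splits the
reserved channel pool evenly across the type's maximum instance count and
rounds the result down to a power of two. A minimal standalone sketch of
that arithmetic (the numbers are purely illustrative, not taken from any
real vGPU type):

#include <stdint.h>
#include <stdio.h>

/* Round down to the previous power of two, mirroring prev_pow2(). */
static uint32_t prev_pow2(uint32_t x)
{
	while (x & (x - 1))
		x &= x - 1;	/* clear the lowest set bit */
	return x;
}

int main(void)
{
	uint32_t total_avail_chids = 2048;	/* illustrative */
	uint32_t max_instance = 6;		/* illustrative */

	/* 2048 / 6 = 341 -> prev_pow2(341) = 256 channel IDs per vGPU */
	printf("%u\n", prev_pow2(total_avail_chids / max_instance));
	return 0;
}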
 drivers/vfio/pci/nvidia-vgpu/pf.h       | 10 ++++
 drivers/vfio/pci/nvidia-vgpu/vgpu.c     | 76 +++++++++++++++++++++++++
 drivers/vfio/pci/nvidia-vgpu/vgpu_mgr.c | 33 ++++++++++-
 drivers/vfio/pci/nvidia-vgpu/vgpu_mgr.h | 21 +++++++
 4 files changed, 138 insertions(+), 2 deletions(-)
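
When the PF driver does not implement alloc_chids()/free_chids(),
setup_chids() falls back to a bitmap owned by the vGPU manager. A rough
standalone model of that fallback, using a single 64-bit word in place of
the kernel bitmap API (sizes and helper bodies are illustrative only):

#include <stdint.h>
#include <stdio.h>

static uint64_t chid_bitmap;	/* stand-in for vgpu_mgr->chid_alloc_bitmap */

/* Find `size` consecutive clear bits, mark them used, return the offset. */
static int alloc_chids(uint32_t size, uint32_t *offset)
{
	uint64_t mask = (size >= 64) ? ~0ULL : (1ULL << size) - 1;

	for (uint32_t off = 0; off + size <= 64; off++) {
		if (!(chid_bitmap & (mask << off))) {
			chid_bitmap |= mask << off;
			*offset = off;
			return 0;
		}
	}
	return -1;	/* maps to -ENOSPC in setup_chids() */
}

/* Return a previously allocated range, as clean_chids() does. */
static void free_chids(uint32_t offset, uint32_t size)
{
	uint64_t mask = (size >= 64) ? ~0ULL : (1ULL << size) - 1;

	chid_bitmap &= ~(mask << offset);
}

int main(void)
{
	uint32_t off;

	if (!alloc_chids(16, &off)) {
		printf("allocated 16 chids at offset %u\n", off);
		free_chids(off, 16);
	}
	return 0;
}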

diff --git a/drivers/vfio/pci/nvidia-vgpu/pf.h b/drivers/vfio/pci/nvidia-vgpu/pf.h
index 19f0aca56d12..b8008d8ee434 100644
--- a/drivers/vfio/pci/nvidia-vgpu/pf.h
+++ b/drivers/vfio/pci/nvidia-vgpu/pf.h
@@ -85,4 +85,14 @@ static inline int nvidia_vgpu_mgr_init_handle(struct pci_dev *pdev,
 #define nvidia_vgpu_mgr_rm_ctrl_done(m, g, c) \
 	((m)->handle.ops->rm_ctrl_done(g, c))
 
+#define nvidia_vgpu_mgr_alloc_chids(m, o, s) ({ \
+	typeof(m) __m = (m); \
+	__m->handle.ops->alloc_chids(__m->handle.pf_drvdata, o, s); \
+})
+
+#define nvidia_vgpu_mgr_free_chids(m, o, s) ({ \
+	typeof(m) __m = (m); \
+	__m->handle.ops->free_chids(__m->handle.pf_drvdata, o, s); \
+})
+
 #endif
diff --git a/drivers/vfio/pci/nvidia-vgpu/vgpu.c b/drivers/vfio/pci/nvidia-vgpu/vgpu.c
index cbb51b939f0b..52b946469043 100644
--- a/drivers/vfio/pci/nvidia-vgpu/vgpu.c
+++ b/drivers/vfio/pci/nvidia-vgpu/vgpu.c
@@ -3,6 +3,8 @@
  * Copyright © 2025 NVIDIA Corporation
  */
 
+#include <linux/log2.h>
+
 #include "debug.h"
 #include "vgpu_mgr.h"
 
@@ -43,6 +45,70 @@ static int register_vgpu(struct nvidia_vgpu *vgpu)
 	return 0;
 }
 
+static void clean_chids(struct nvidia_vgpu *vgpu)
+{
+	struct nvidia_vgpu_mgr *vgpu_mgr = vgpu->vgpu_mgr;
+	struct nvidia_vgpu_chid *chid = &vgpu->chid;
+
+	vgpu_debug(vgpu, "free guest channel offset %u size %u\n", chid->chid_offset,
+		   chid->num_chid);
+
+	if (vgpu_mgr->use_chid_alloc_bitmap)
+		bitmap_clear(vgpu_mgr->chid_alloc_bitmap, chid->chid_offset, chid->num_chid);
+	else
+		nvidia_vgpu_mgr_free_chids(vgpu_mgr, chid->chid_offset, chid->num_chid);
+}
+
+static inline u32 prev_pow2(const u32 x)
+{
+	return x ? 1U << ilog2(x) : 0;
+}
+
+static void get_alloc_chids_num(struct nvidia_vgpu *vgpu, u32 *size)
+{
+	struct nvidia_vgpu_mgr *vgpu_mgr = vgpu->vgpu_mgr;
+	struct nvidia_vgpu_info *info = &vgpu->info;
+	struct nvidia_vgpu_type *type = info->vgpu_type;
+	u32 v;
+
+	/* Split the reserved CHID pool evenly across the type's max instances. */
+	v = (vgpu_mgr->total_avail_chids) / type->max_instance;
+	*size = prev_pow2(v);
+}
+
+static int setup_chids(struct nvidia_vgpu *vgpu)
+{
+	struct nvidia_vgpu_mgr *vgpu_mgr = vgpu->vgpu_mgr;
+	struct nvidia_vgpu_chid *chid = &vgpu->chid;
+	u32 size, offset;
+	int ret;
+
+	get_alloc_chids_num(vgpu, &size);
+
+	if (vgpu_mgr->use_chid_alloc_bitmap) {
+		offset = bitmap_find_next_zero_area(vgpu_mgr->chid_alloc_bitmap,
+						    vgpu_mgr->total_avail_chids, 0, size, 0);
+
+		if (offset + size > vgpu_mgr->total_avail_chids)
+			return -ENOSPC;
+
+		bitmap_set(vgpu_mgr->chid_alloc_bitmap, offset, size);
+	} else {
+		ret = nvidia_vgpu_mgr_alloc_chids(vgpu_mgr, &offset, size);
+		if (ret)
+			return ret;
+	}
+
+	chid->chid_offset = offset;
+	chid->num_chid = size;
+	chid->num_plugin_channels = 1;
+
+	vgpu_debug(vgpu, "alloc guest channel offset %u size %u\n", chid->chid_offset,
+		   chid->num_chid);
+
+	return 0;
+}
+
 /**
  * nvidia_vgpu_mgr_destroy_vgpu - destroy a vGPU instance
  * @vgpu: the vGPU instance going to be destroyed.
@@ -54,6 +120,7 @@ int nvidia_vgpu_mgr_destroy_vgpu(struct nvidia_vgpu *vgpu)
 	if (!atomic_cmpxchg(&vgpu->status, 1, 0))
 		return -ENODEV;
 
+	clean_chids(vgpu);
 	unregister_vgpu(vgpu);
 
 	vgpu_debug(vgpu, "destroyed\n");
@@ -93,10 +160,19 @@ int nvidia_vgpu_mgr_create_vgpu(struct nvidia_vgpu *vgpu)
 	if (ret)
 		return ret;
 
+	ret = setup_chids(vgpu);
+	if (ret)
+		goto err_setup_chids;
+
 	atomic_set(&vgpu->status, 1);
 
 	vgpu_debug(vgpu, "created\n");
 
 	return 0;
+
+err_setup_chids:
+	unregister_vgpu(vgpu);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(nvidia_vgpu_mgr_create_vgpu);
diff --git a/drivers/vfio/pci/nvidia-vgpu/vgpu_mgr.c b/drivers/vfio/pci/nvidia-vgpu/vgpu_mgr.c
index a7f8a00f96bf..8565bb881fda 100644
--- a/drivers/vfio/pci/nvidia-vgpu/vgpu_mgr.c
+++ b/drivers/vfio/pci/nvidia-vgpu/vgpu_mgr.c
@@ -6,6 +6,14 @@
 #include "debug.h"
 #include "vgpu_mgr.h"
 
+static void clean_vgpu_mgr(struct nvidia_vgpu_mgr *vgpu_mgr)
+{
+	if (vgpu_mgr->use_chid_alloc_bitmap) {
+		bitmap_free(vgpu_mgr->chid_alloc_bitmap);
+		vgpu_mgr->chid_alloc_bitmap = NULL;
+	}
+}
+
 static void vgpu_mgr_release(struct kref *kref)
 {
 	struct nvidia_vgpu_mgr *vgpu_mgr =
@@ -17,6 +25,7 @@ static void vgpu_mgr_release(struct kref *kref)
 		return;
 
 	nvidia_vgpu_mgr_clean_metadata(vgpu_mgr);
+	clean_vgpu_mgr(vgpu_mgr);
 	nvidia_vgpu_mgr_free_gsp_client(vgpu_mgr, &vgpu_mgr->gsp_client);
 	kvfree(vgpu_mgr);
 }
@@ -95,6 +104,20 @@ static void attach_vgpu_mgr(struct nvidia_vgpu_mgr *vgpu_mgr,
 	handle_data->vfio.pf_detach_handle_fn = pf_detach_handle_fn;
 }
 
+static int setup_chid_alloc_bitmap(struct nvidia_vgpu_mgr *vgpu_mgr)
+{
+	if (WARN_ON(!vgpu_mgr->use_chid_alloc_bitmap))
+		return 0;
+
+	vgpu_mgr->chid_alloc_bitmap = bitmap_alloc(vgpu_mgr->total_avail_chids, GFP_KERNEL);
+	if (!vgpu_mgr->chid_alloc_bitmap)
+		return -ENOMEM;
+	bitmap_zero(vgpu_mgr->chid_alloc_bitmap, vgpu_mgr->total_avail_chids);
+
+	vgpu_mgr_debug(vgpu_mgr, "using chid allocation bitmap.\n");
+	return 0;
+}
+
 static int init_vgpu_mgr(struct nvidia_vgpu_mgr *vgpu_mgr)
 {
 	vgpu_mgr->total_avail_chids = nvidia_vgpu_mgr_get_avail_chids(vgpu_mgr);
@@ -103,12 +126,17 @@ static int init_vgpu_mgr(struct nvidia_vgpu_mgr *vgpu_mgr)
 	vgpu_mgr_debug(vgpu_mgr, "total avail chids %u\n", vgpu_mgr->total_avail_chids);
 	vgpu_mgr_debug(vgpu_mgr, "total fbmem size 0x%llx\n", vgpu_mgr->total_fbmem_size);
 
-	return 0;
+	return vgpu_mgr->use_chid_alloc_bitmap ? setup_chid_alloc_bitmap(vgpu_mgr) : 0;
 }
 
 static int setup_pf_driver_caps(struct nvidia_vgpu_mgr *vgpu_mgr, unsigned long *caps)
 {
-	/* more to come */
+#define HAS_CAP(cap) \
+	test_bit(NVIDIA_VGPU_PF_DRIVER_CAP_HAS_##cap, caps)
+
+	vgpu_mgr->use_chid_alloc_bitmap = !HAS_CAP(CHID_ALLOC);
+
+#undef HAS_CAP
 	return 0;
 }
 
@@ -169,6 +197,7 @@ static int pf_attach_handle_fn(void *handle, struct nvidia_vgpu_vfio_handle_data
 	detach_vgpu_mgr(handle_data);
 	nvidia_vgpu_mgr_clean_metadata(vgpu_mgr);
 fail_setup_metadata:
+	clean_vgpu_mgr(vgpu_mgr);
 fail_init_vgpu_mgr:
 	nvidia_vgpu_mgr_free_gsp_client(vgpu_mgr, &vgpu_mgr->gsp_client);
 fail_alloc_gsp_client:
diff --git a/drivers/vfio/pci/nvidia-vgpu/vgpu_mgr.h b/drivers/vfio/pci/nvidia-vgpu/vgpu_mgr.h
index 0519b595378f..5a7a6103a677 100644
--- a/drivers/vfio/pci/nvidia-vgpu/vgpu_mgr.h
+++ b/drivers/vfio/pci/nvidia-vgpu/vgpu_mgr.h
@@ -36,6 +36,19 @@ struct nvidia_vgpu_info {
 	struct nvidia_vgpu_type *vgpu_type;
 };
 
+/**
+ * struct nvidia_vgpu_chid - per-vGPU channel IDs
+ *
+ * @chid_offset: beginning offset of channel IDs
+ * @num_chid: number of allocated channel IDs
+ * @num_plugin_channels: number of channels reserved for the vGPU manager plugin
+ */
+struct nvidia_vgpu_chid {
+	u32 chid_offset;
+	u32 num_chid;
+	u32 num_plugin_channels;
+};
+
 /**
  * struct nvidia_vgpu - per-vGPU state
  *
@@ -45,6 +58,7 @@ struct nvidia_vgpu_info {
  * @vgpu_list: list node to the vGPU list
  * @info: vGPU info
  * @vgpu_mgr: pointer to vGPU manager
+ * @chid: vGPU channel IDs
  */
 struct nvidia_vgpu {
 	/* Per-vGPU lock */
@@ -55,6 +69,8 @@ struct nvidia_vgpu {
 
 	struct nvidia_vgpu_info info;
 	struct nvidia_vgpu_mgr *vgpu_mgr;
+
+	struct nvidia_vgpu_chid chid;
 };
 
 /**
@@ -72,6 +88,8 @@ struct nvidia_vgpu {
  * @gsp_client: the GSP client
  * @vgpu_types: installed vGPU types
  * @num_vgpu_types: number of installed vGPU types
+ * @use_chid_alloc_bitmap: use the chid allocation bitmap as the PF driver lacks chid allocation
+ * @chid_alloc_bitmap: chid allocator bitmap
  */
 struct nvidia_vgpu_mgr {
 	struct kref refcount;
@@ -92,6 +110,9 @@ struct nvidia_vgpu_mgr {
 	struct nvidia_vgpu_gsp_client gsp_client;
 	struct nvidia_vgpu_type *vgpu_types;
 	unsigned int num_vgpu_types;
+
+	bool use_chid_alloc_bitmap;
+	unsigned long *chid_alloc_bitmap;
 };
 
 #define nvidia_vgpu_mgr_for_each_vgpu(vgpu, vgpu_mgr) \
-- 
2.34.1




