On 5/15/2025 7:41 AM, Ackerley Tng wrote:
[...]
+
+static int kvm_gmem_convert_range(struct file *file, pgoff_t start,
+ size_t nr_pages, bool shared,
+ pgoff_t *error_index)
+{
+ struct conversion_work *work, *tmp, *rollback_stop_item;
+ LIST_HEAD(work_list);
+ struct inode *inode;
+ enum shareability m;
+ int ret;
+
+ inode = file_inode(file);
+
+ filemap_invalidate_lock(inode->i_mapping);
+
+ m = shared ? SHAREABILITY_ALL : SHAREABILITY_GUEST;
+ ret = kvm_gmem_convert_compute_work(inode, start, nr_pages, m, &work_list);
+ if (ret || list_empty(&work_list))
+ goto out;
+
+ list_for_each_entry(work, &work_list, list)
+ kvm_gmem_convert_invalidate_begin(inode, work);
+
+ list_for_each_entry(work, &work_list, list) {
+ ret = kvm_gmem_convert_should_proceed(inode, work, shared,
+ error_index);
Since kvm_gmem_invalidate_begin() now also handles shared memory,
kvm_gmem_convert_invalidate_begin() will zap the page tables.
That means the shared mappings may already have been zapped by
kvm_gmem_convert_invalidate_begin() even when
kvm_gmem_convert_should_proceed() returns an error.
This ordering is a bit confusing to me, at least as of this patch.
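Just to illustrate what I mean: if kvm_gmem_convert_should_proceed() does
not actually depend on the invalidation having already started, the checks
could run before any zapping, roughly like the (untested) sketch below,
reusing the helpers from this patch:

	ret = kvm_gmem_convert_compute_work(inode, start, nr_pages, m, &work_list);
	if (ret || list_empty(&work_list))
		goto out;

	/* Check every range first; nothing has been zapped yet. */
	list_for_each_entry(work, &work_list, list) {
		ret = kvm_gmem_convert_should_proceed(inode, work, shared,
						      error_index);
		if (ret)
			goto out;
	}

	/* Only begin invalidation once all checks have passed. */
	list_for_each_entry(work, &work_list, list)
		kvm_gmem_convert_invalidate_begin(inode, work);

That would keep the shared mappings intact whenever the conversion is
rejected, but it does assume the should-proceed check can be done without
the invalidation having begun.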
+ if (ret)
+ goto invalidate_end;
+ }
+
+ list_for_each_entry(work, &work_list, list) {
+ rollback_stop_item = work;
+ ret = kvm_gmem_shareability_apply(inode, work, m);
+ if (ret)
+ break;
+ }
+
+ if (ret) {
+ m = shared ? SHAREABILITY_GUEST : SHAREABILITY_ALL;
+ list_for_each_entry(work, &work_list, list) {
+ if (work == rollback_stop_item)
+ break;
+
+ WARN_ON(kvm_gmem_shareability_apply(inode, work, m));
+ }
+ }
+
+invalidate_end:
+ list_for_each_entry(work, &work_list, list)
+ kvm_gmem_convert_invalidate_end(inode, work);
+out:
+ filemap_invalidate_unlock(inode->i_mapping);
+
+ list_for_each_entry_safe(work, tmp, &work_list, list) {
+ list_del(&work->list);
+ kfree(work);
+ }
+
+ return ret;
+}
+
[...]
@@ -186,15 +490,26 @@ static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
unsigned long index;
xa_for_each_range(&gmem->bindings, index, slot, start, end - 1) {
+ enum kvm_gfn_range_filter filter;
pgoff_t pgoff = slot->gmem.pgoff;
+ filter = KVM_FILTER_PRIVATE;
+ if (kvm_gmem_memslot_supports_shared(slot)) {
+ /*
+ * Unmapping would also cause invalidation, but cannot
+ * rely on mmu_notifiers to do invalidation via
+ * unmapping, since memory may not be mapped to
+ * userspace.
+ */
+ filter |= KVM_FILTER_SHARED;
+ }
+
struct kvm_gfn_range gfn_range = {
.start = slot->base_gfn + max(pgoff, start) - pgoff,
.end = slot->base_gfn + min(pgoff + slot->npages, end) - pgoff,
.slot = slot,
.may_block = true,
- /* guest memfd is relevant to only private mappings. */
- .attr_filter = KVM_FILTER_PRIVATE,
+ .attr_filter = filter,
};
if (!found_memslot) {
@@ -484,11 +799,49 @@ EXPORT_SYMBOL_GPL(kvm_gmem_memslot_supports_shared);
#define kvm_gmem_mmap NULL
#endif /* CONFIG_KVM_GMEM_SHARED_MEM */
[...]