On 5/15/2025 7:42 AM, Ackerley Tng wrote:
[...]
+
+static inline int kvm_gmem_try_split_folio_in_filemap(struct inode *inode,
+ struct folio *folio)
+{
+ size_t to_nr_pages;
+ void *priv;
+
+ if (!kvm_gmem_has_custom_allocator(inode))
+ return 0;
+
+ priv = kvm_gmem_allocator_private(inode);
+ to_nr_pages = kvm_gmem_allocator_ops(inode)->nr_pages_in_page(priv);
+
+ if (kvm_gmem_has_some_shared(inode, folio->index, to_nr_pages))
What happens when the attribute of the whole huge page is shared? See the
sketch after this function.
+ return kvm_gmem_split_folio_in_filemap(inode, folio);
+
+ return 0;
+}
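To spell out the question: if the shareability of the entire huge folio is
shared, kvm_gmem_has_some_shared() presumably still returns true, so the
folio gets split even though there is no private/shared boundary inside it.
If that split is not intended, something along the lines below could skip
it; if it is intended, a comment saying why would help. (Sketch only:
kvm_gmem_all_shared() is a hypothetical helper, not part of this series.)

	/*
	 * Hypothetical helper, not in this series: returns true iff every
	 * page in [folio->index, folio->index + to_nr_pages) is shared.
	 */
	if (kvm_gmem_all_shared(inode, folio->index, to_nr_pages))
		return 0;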
+
[...]
static int kvm_gmem_shareability_setup(struct maple_tree *mt, loff_t size, u64 flags)
@@ -563,11 +1005,16 @@ static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
return folio;
if (kvm_gmem_has_custom_allocator(inode)) {
- void *p = kvm_gmem_allocator_private(inode);
+ size_t nr_pages;
+ void *p;
+ p = kvm_gmem_allocator_private(inode);
folio = kvm_gmem_allocator_ops(inode)->alloc_folio(p);
if (IS_ERR(folio))
return folio;
+
+ nr_pages = kvm_gmem_allocator_ops(inode)->nr_pages_in_folio(p);
+ index_floor = round_down(index, nr_pages);
} else {
gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
@@ -580,10 +1027,11 @@ static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
folio_put(folio);
return ERR_PTR(ret);
}
+
+ index_floor = index;
}
allocated_size = folio_size(folio);
- index_floor = round_down(index, folio_nr_pages(folio));
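Not a problem, just noting the rounding for clarity: with the custom
allocator, index_floor is rounded down to the start of the huge folio,
while the page-allocator path now uses index directly (presumably because
those folios are order-0). A quick illustration, assuming 4K base pages
and a 2M custom folio:

	/* Illustration only; 2M folio made of 4K pages. */
	size_t nr_pages = 512;		/* nr_pages_in_folio() */
	pgoff_t index = 1000;		/* faulting index inside the folio */
	pgoff_t index_floor = round_down(index, nr_pages);	/* == 512 */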
ret = kvm_gmem_filemap_add_folio(inode->i_mapping, folio, index_floor);
if (ret) {
folio_put(folio);
@@ -600,6 +1048,13 @@ static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
return ERR_PTR(ret);
}
+ /* Leave just filemap's refcounts on folio. */
+ folio_put(folio);
+
+ ret = kvm_gmem_try_split_folio_in_filemap(inode, folio);
When !CONFIG_KVM_GMEM_SHARED_MEM, kvm_gmem_try_split_folio_in_filemap() is
undefined, so this call won't build.
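If the helper is only meant to exist when CONFIG_KVM_GMEM_SHARED_MEM is
enabled, the usual fix is a no-op stub for the other configuration,
roughly (sketch only, assuming the real definition lives under the
existing #ifdef CONFIG_KVM_GMEM_SHARED_MEM block):

	#ifdef CONFIG_KVM_GMEM_SHARED_MEM
	/* ... existing kvm_gmem_try_split_folio_in_filemap() ... */
	#else
	static inline int kvm_gmem_try_split_folio_in_filemap(struct inode *inode,
							       struct folio *folio)
	{
		return 0;
	}
	#endif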
+ if (ret)
+ goto err;
+
spin_lock(&inode->i_lock);
inode->i_blocks += allocated_size / 512;
spin_unlock(&inode->i_lock);
[...]