Binbin Wu <binbin.wu@xxxxxxxxxxxxxxx> writes: > On 5/15/2025 7:42 AM, Ackerley Tng wrote: > [...] >> + >> +static inline int kvm_gmem_try_split_folio_in_filemap(struct inode *inode, >> + struct folio *folio) >> +{ >> + size_t to_nr_pages; >> + void *priv; >> + >> + if (!kvm_gmem_has_custom_allocator(inode)) >> + return 0; >> + >> + priv = kvm_gmem_allocator_private(inode); >> + to_nr_pages = kvm_gmem_allocator_ops(inode)->nr_pages_in_page(priv); >> + >> + if (kvm_gmem_has_some_shared(inode, folio->index, to_nr_pages)) > > What if a huge page whose attribute is shared? > This checks if there are any shared pages in the range [folio->index, folio->index + to_nr_pages), so if the entire huge page is shared, kvm_gmem_has_some_shared() should also return true. folio->index is the start of the merged huge page, and to_nr_pages is the number of pages in the merged huge page, so this should be querying exactly the entire huge page. Note to self: rename kvm_gmem_has_some_shared() to kvm_gmem_has_any_shared() in the next revision. Hope that answers your question — let me know if I've misunderstood it. >> + return kvm_gmem_split_folio_in_filemap(inode, folio); >> + >> + return 0; >> +} >> + > [...] 
>> >> static int kvm_gmem_shareability_setup(struct maple_tree *mt, loff_t size, u64 flags) >> @@ -563,11 +1005,16 @@ static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index) >> return folio; >> >> if (kvm_gmem_has_custom_allocator(inode)) { >> - void *p = kvm_gmem_allocator_private(inode); >> + size_t nr_pages; >> + void *p; >> >> + p = kvm_gmem_allocator_private(inode); >> folio = kvm_gmem_allocator_ops(inode)->alloc_folio(p); >> if (IS_ERR(folio)) >> return folio; >> + >> + nr_pages = kvm_gmem_allocator_ops(inode)->nr_pages_in_folio(p); >> + index_floor = round_down(index, nr_pages); >> } else { >> gfp_t gfp = mapping_gfp_mask(inode->i_mapping); >> >> @@ -580,10 +1027,11 @@ static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index) >> folio_put(folio); >> return ERR_PTR(ret); >> } >> + >> + index_floor = index; >> } >> allocated_size = folio_size(folio); >> >> - index_floor = round_down(index, folio_nr_pages(folio)); >> ret = kvm_gmem_filemap_add_folio(inode->i_mapping, folio, index_floor); >> if (ret) { >> folio_put(folio); >> @@ -600,6 +1048,13 @@ static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index) >> return ERR_PTR(ret); >> } >> >> + /* Leave just filemap's refcounts on folio. */ >> + folio_put(folio); >> + >> + ret = kvm_gmem_try_split_folio_in_filemap(inode, folio); > > When !CONFIG_KVM_GMEM_SHARED_MEM, kvm_gmem_try_split_folio_in_filemap() is > undefined. > Will fix this in the next revision. Thanks! >> + if (ret) >> + goto err; >> + >> spin_lock(&inode->i_lock); >> inode->i_blocks += allocated_size / 512; >> spin_unlock(&inode->i_lock); >> > [...]