* Vitaly Wool <vitaly.wool@xxxxxxxxxxx> [250715 09:58]:
> Reimplement vrealloc() to be able to set node and alignment should
> a user need to do so. Rename the function to vrealloc_node_align()
> to better match what it actually does now and introduce macros for
> vrealloc() and friends for backward compatibility.
>
> With that change we also provide the ability for the Rust part of
> the kernel to set node and alignment in its allocations.
>
> Signed-off-by: Vitaly Wool <vitaly.wool@xxxxxxxxxxx>
> Reviewed-by: Uladzislau Rezki (Sony) <urezki@xxxxxxxxx>
> Reviewed-by: Vlastimil Babka <vbabka@xxxxxxx>

Reviewed-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>

> ---
> include/linux/vmalloc.h | 12 +++++++++---
> mm/nommu.c | 3 ++-
> mm/vmalloc.c | 29 ++++++++++++++++++++++++-----
> 3 files changed, 35 insertions(+), 9 deletions(-)
>
> diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
> index fdc9aeb74a44..68791f7cb3ba 100644
> --- a/include/linux/vmalloc.h
> +++ b/include/linux/vmalloc.h
> @@ -197,9 +197,15 @@ extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1
> extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2);
> #define vcalloc(...) alloc_hooks(vcalloc_noprof(__VA_ARGS__))
>
> -void * __must_check vrealloc_noprof(const void *p, size_t size, gfp_t flags)
> - __realloc_size(2);
> -#define vrealloc(...) alloc_hooks(vrealloc_noprof(__VA_ARGS__))
> +void *__must_check vrealloc_node_align_noprof(const void *p, size_t size,
> + unsigned long align, gfp_t flags, int nid) __realloc_size(2);
> +#define vrealloc_node_noprof(_p, _s, _f, _nid) \
> + vrealloc_node_align_noprof(_p, _s, 1, _f, _nid)
> +#define vrealloc_noprof(_p, _s, _f) \
> + vrealloc_node_align_noprof(_p, _s, 1, _f, NUMA_NO_NODE)
> +#define vrealloc_node_align(...) alloc_hooks(vrealloc_node_align_noprof(__VA_ARGS__))
> +#define vrealloc_node(...) alloc_hooks(vrealloc_node_noprof(__VA_ARGS__))
> +#define vrealloc(...) alloc_hooks(vrealloc_noprof(__VA_ARGS__))
>
> extern void vfree(const void *addr);
> extern void vfree_atomic(const void *addr);
> diff --git a/mm/nommu.c b/mm/nommu.c
> index b624acec6d2e..afde6c626b07 100644
> --- a/mm/nommu.c
> +++ b/mm/nommu.c
> @@ -119,7 +119,8 @@ void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
> }
> EXPORT_SYMBOL(__vmalloc_noprof);
>
> -void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
> +void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align,
> + gfp_t flags, int node)
> {
> return krealloc_noprof(p, size, (flags | __GFP_COMP) & ~__GFP_HIGHMEM);
> }
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index ab986dd09b6a..e0a593651d96 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -4081,19 +4081,29 @@ void *vzalloc_node_noprof(unsigned long size, int node)
> EXPORT_SYMBOL(vzalloc_node_noprof);
>
> /**
> - * vrealloc - reallocate virtually contiguous memory; contents remain unchanged
> + * vrealloc_node_align_noprof - reallocate virtually contiguous memory; contents
> + * remain unchanged
> * @p: object to reallocate memory for
> * @size: the size to reallocate
> + * @align: requested alignment
> * @flags: the flags for the page level allocator
> + * @nid: node number of the target node
> + *
> + * If @p is %NULL, vrealloc_XXX() behaves exactly like vmalloc_XXX(). If @size
> + * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
> *
> - * If @p is %NULL, vrealloc() behaves exactly like vmalloc(). If @size is 0 and
> - * @p is not a %NULL pointer, the object pointed to is freed.
> + * If the caller wants the new memory to be on specific node *only*,
> + * __GFP_THISNODE flag should be set, otherwise the function will try to avoid
> + * reallocation and possibly disregard the specified @nid.
> *
> * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
> * initial memory allocation, every subsequent call to this API for the same
> * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
> * __GFP_ZERO is not fully honored by this API.
> *
> + * Requesting an alignment that is bigger than the alignment of the existing
> + * allocation will fail.
> + *
> * In any case, the contents of the object pointed to are preserved up to the
> * lesser of the new and old sizes.
> *
> @@ -4103,7 +4113,8 @@ EXPORT_SYMBOL(vzalloc_node_noprof);
> * Return: pointer to the allocated memory; %NULL if @size is zero or in case of
> * failure
> */
> -void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
> +void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align,
> + gfp_t flags, int nid)
> {
> struct vm_struct *vm = NULL;
> size_t alloced_size = 0;
> @@ -4127,6 +4138,12 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
> if (WARN(alloced_size < old_size,
> "vrealloc() has mismatched area vs requested sizes (%p)\n", p))
> return NULL;
> + if (WARN(!IS_ALIGNED((unsigned long)p, align),
> + "will not reallocate with a bigger alignment (0x%lx)\n", align))
> + return NULL;
> + if (unlikely(flags & __GFP_THISNODE) && nid != NUMA_NO_NODE &&
> + nid != page_to_nid(vmalloc_to_page(p)))
> + goto need_realloc;
> }
>
> /*
> @@ -4157,8 +4174,10 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
> return (void *)p;
> }
>
> +need_realloc:
> /* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
> - n = __vmalloc_noprof(size, flags);
> + n = __vmalloc_node_noprof(size, align, flags, nid, __builtin_return_address(0));
> +
> if (!n)
> return NULL;
>
> --
> 2.39.2
>