From: Changyuan Lyu <changyuanl@xxxxxxxxxx>

Allow users of KHO to cancel a previous preservation by adding the
necessary interfaces to unpreserve a folio or a physically contiguous
range. Both operations are only allowed while the KHO state has not yet
been finalized; afterwards they fail with -EBUSY.

Signed-off-by: Changyuan Lyu <changyuanl@xxxxxxxxxx>
Co-developed-by: Pasha Tatashin <pasha.tatashin@xxxxxxxxxx>
Signed-off-by: Pasha Tatashin <pasha.tatashin@xxxxxxxxxx>
---
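Not part of the patch: a minimal usage sketch for reviewers. The
example_* caller, its order-0 GFP_KERNEL allocation, and the error
handling are illustrative assumptions; only the kho_preserve_folio()
and kho_unpreserve_folio() calls come from this series.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kexec_handover.h>
#include <linux/mm.h>

/* Hypothetical per-driver state, for illustration only. */
static struct folio *example_folio;

static int example_preserve(void)
{
	int err;

	example_folio = folio_alloc(GFP_KERNEL, 0);
	if (!example_folio)
		return -ENOMEM;

	/* Ask KHO to carry this folio across the next kexec. */
	err = kho_preserve_folio(example_folio);
	if (err) {
		folio_put(example_folio);
		example_folio = NULL;
	}
	return err;
}

static void example_cancel(void)
{
	/*
	 * Cancel the preservation again; this fails with -EBUSY once the
	 * KHO state has been finalized.
	 */
	if (example_folio && !kho_unpreserve_folio(example_folio)) {
		folio_put(example_folio);
		example_folio = NULL;
	}
}

kho_unpreserve_phys() is the analogous cancel operation for ranges
preserved with kho_preserve_phys(); it fails with -EBUSY after
finalization in the same way and additionally requires @phys and @size
to be page-aligned.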
 include/linux/kexec_handover.h | 12 +++++
 kernel/kexec_handover.c        | 84 ++++++++++++++++++++++++++++------
 2 files changed, 83 insertions(+), 13 deletions(-)

diff --git a/include/linux/kexec_handover.h b/include/linux/kexec_handover.h
index f98565def593..3d209f9e9d3a 100644
--- a/include/linux/kexec_handover.h
+++ b/include/linux/kexec_handover.h
@@ -42,7 +42,9 @@ struct kho_serialization;
 bool kho_is_enabled(void);
 
 int kho_preserve_folio(struct folio *folio);
+int kho_unpreserve_folio(struct folio *folio);
 int kho_preserve_phys(phys_addr_t phys, size_t size);
+int kho_unpreserve_phys(phys_addr_t phys, size_t size);
 struct folio *kho_restore_folio(phys_addr_t phys);
 int kho_add_subtree(struct kho_serialization *ser, const char *name, void *fdt);
 int kho_retrieve_subtree(const char *name, phys_addr_t *phys);
@@ -69,11 +71,21 @@ static inline int kho_preserve_folio(struct folio *folio)
 	return -EOPNOTSUPP;
 }
 
+static inline int kho_unpreserve_folio(struct folio *folio)
+{
+	return -EOPNOTSUPP;
+}
+
 static inline int kho_preserve_phys(phys_addr_t phys, size_t size)
 {
 	return -EOPNOTSUPP;
 }
 
+static inline int kho_unpreserve_phys(phys_addr_t phys, size_t size)
+{
+	return -EOPNOTSUPP;
+}
+
 static inline struct folio *kho_restore_folio(phys_addr_t phys)
 {
 	return NULL;
diff --git a/kernel/kexec_handover.c b/kernel/kexec_handover.c
index 8ff561e36a87..eb305e7e6129 100644
--- a/kernel/kexec_handover.c
+++ b/kernel/kexec_handover.c
@@ -101,26 +101,33 @@ static void *xa_load_or_alloc(struct xarray *xa, unsigned long index, size_t sz)
 	return elm;
 }
 
-static void __kho_unpreserve(struct kho_mem_track *track, unsigned long pfn,
-			     unsigned long end_pfn)
+static void __kho_unpreserve_order(struct kho_mem_track *track, unsigned long pfn,
+				   unsigned int order)
 {
 	struct kho_mem_phys_bits *bits;
 	struct kho_mem_phys *physxa;
+	const unsigned long pfn_high = pfn >> order;
 
-	while (pfn < end_pfn) {
-		const unsigned int order =
-			min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
-		const unsigned long pfn_high = pfn >> order;
+	physxa = xa_load(&track->orders, order);
+	if (!physxa)
+		return;
 
-		physxa = xa_load(&track->orders, order);
-		if (!physxa)
-			continue;
+	bits = xa_load(&physxa->phys_bits, pfn_high / PRESERVE_BITS);
+	if (!bits)
+		return;
 
-		bits = xa_load(&physxa->phys_bits, pfn_high / PRESERVE_BITS);
-		if (!bits)
-			continue;
+	clear_bit(pfn_high % PRESERVE_BITS, bits->preserve);
+}
 
-		clear_bit(pfn_high % PRESERVE_BITS, bits->preserve);
+static void __kho_unpreserve(struct kho_mem_track *track, unsigned long pfn,
+			     unsigned long end_pfn)
+{
+	unsigned int order;
+
+	while (pfn < end_pfn) {
+		order = min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
+
+		__kho_unpreserve_order(track, pfn, order);
 
 		pfn += 1 << order;
 	}
@@ -607,6 +614,29 @@ int kho_preserve_folio(struct folio *folio)
 }
 EXPORT_SYMBOL_GPL(kho_preserve_folio);
 
+/**
+ * kho_unpreserve_folio - unpreserve a folio.
+ * @folio: folio to unpreserve.
+ *
+ * Instructs KHO to unpreserve a folio that was preserved by
+ * kho_preserve_folio() before.
+ *
+ * Return: 0 on success, error code on failure
+ */
+int kho_unpreserve_folio(struct folio *folio)
+{
+	const unsigned long pfn = folio_pfn(folio);
+	const unsigned int order = folio_order(folio);
+	struct kho_mem_track *track = &kho_out.ser.track;
+
+	if (kho_out.finalized)
+		return -EBUSY;
+
+	__kho_unpreserve_order(track, pfn, order);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kho_unpreserve_folio);
+
 /**
  * kho_preserve_phys - preserve a physically contiguous range across kexec.
  * @phys: physical address of the range.
@@ -652,6 +682,34 @@ int kho_preserve_phys(phys_addr_t phys, size_t size)
 }
 EXPORT_SYMBOL_GPL(kho_preserve_phys);
 
+/**
+ * kho_unpreserve_phys - unpreserve a physically contiguous range across kexec.
+ * @phys: physical address of the range.
+ * @size: size of the range.
+ *
+ * Instructs KHO to unpreserve the memory range from @phys to @phys + @size
+ * across kexec.
+ *
+ * Return: 0 on success, error code on failure
+ */
+int kho_unpreserve_phys(phys_addr_t phys, size_t size)
+{
+	struct kho_mem_track *track = &kho_out.ser.track;
+	unsigned long pfn = PHYS_PFN(phys);
+	unsigned long end_pfn = PHYS_PFN(phys + size);
+
+	if (kho_out.finalized)
+		return -EBUSY;
+
+	if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
+		return -EINVAL;
+
+	__kho_unpreserve(track, pfn, end_pfn);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kho_unpreserve_phys);
+
 int __kho_abort(void)
 {
 	int err;
-- 
2.49.0.1101.gccaa498523-goog