> for_each_possible_cpu(cpu) { > data = per_cpu(runtime_data, cpu); > @@ -1066,6 +1069,14 @@ int __init sev_es_efi_map_ghcbs(pgd_t *pgd) > > if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags)) > return 1; > + > + address = per_cpu(svsm_caa_pa, cpu); > + if (!address) > + return 1; > + > + pfn = address >> PAGE_SHIFT; > + if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags_enc)) > + return 1; > } The kernel allocates the CAA page(s) only when running under an SVSM — see alloc_runtime_data() — so this is not correct: returning 1 here would fail the mapping on every non-SVSM SEV-ES/SNP guest, where svsm_caa_pa is always 0. I think we either have to return to the original behavior of only doing the mapping when the address is not NULL, or wrap the CAA code block in an 'if (snp_vmpl) { ... }', following what alloc_runtime_data() does. take care, Gerd