In the process of decoupling KVM's view of the FGT bits from the
wider architectural state, use KVM's own FGT tables to build a
synthetic view of what is actually known.

This allows for some checking along the way.

Reviewed-by: Joey Gouly <joey.gouly@xxxxxxx>
Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx>
---
 arch/arm64/include/asm/kvm_host.h |  14 ++++
 arch/arm64/kvm/emulate-nested.c   | 106 ++++++++++++++++++++++++++++++
 2 files changed, 120 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 7a1ef5be7efb2..95fedd27f4bb8 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -607,6 +607,20 @@ struct kvm_sysreg_masks {
 	} mask[NR_SYS_REGS - __SANITISED_REG_START__];
 };
 
+struct fgt_masks {
+	const char *str;
+	u64 mask;
+	u64 nmask;
+	u64 res0;
+};
+
+extern struct fgt_masks hfgrtr_masks;
+extern struct fgt_masks hfgwtr_masks;
+extern struct fgt_masks hfgitr_masks;
+extern struct fgt_masks hdfgrtr_masks;
+extern struct fgt_masks hdfgwtr_masks;
+extern struct fgt_masks hafgrtr_masks;
+
 struct kvm_cpu_context {
 	struct user_pt_regs regs;	/* sp = sp_el0 */
 
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index 52a2d63a667c9..528b33fcfcfd6 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -2033,6 +2033,105 @@ static u32 encoding_next(u32 encoding)
 	return sys_reg(op0 + 1, 0, 0, 0, 0);
 }
 
+#define FGT_MASKS(__n, __m)						\
+	struct fgt_masks __n = { .str = #__m, .res0 = __m, }
+
+FGT_MASKS(hfgrtr_masks, HFGRTR_EL2_RES0);
+FGT_MASKS(hfgwtr_masks, HFGWTR_EL2_RES0);
+FGT_MASKS(hfgitr_masks, HFGITR_EL2_RES0);
+FGT_MASKS(hdfgrtr_masks, HDFGRTR_EL2_RES0);
+FGT_MASKS(hdfgwtr_masks, HDFGWTR_EL2_RES0);
+FGT_MASKS(hafgrtr_masks, HAFGRTR_EL2_RES0);
+
+static __init bool aggregate_fgt(union trap_config tc)
+{
+	struct fgt_masks *rmasks, *wmasks;
+
+	switch (tc.fgt) {
+	case HFGRTR_GROUP:
+		rmasks = &hfgrtr_masks;
+		wmasks = &hfgwtr_masks;
+		break;
+	case HDFGRTR_GROUP:
+		rmasks = &hdfgrtr_masks;
+		wmasks = &hdfgwtr_masks;
+		break;
+	case HAFGRTR_GROUP:
+		rmasks = &hafgrtr_masks;
+		wmasks = NULL;
+		break;
+	case HFGITR_GROUP:
+		rmasks = &hfgitr_masks;
+		wmasks = NULL;
+		break;
+	}
+
+	/*
+	 * A bit can be reserved in either the R or W register, but
+	 * not both.
+	 */
+	if ((BIT(tc.bit) & rmasks->res0) &&
+	    (!wmasks || (BIT(tc.bit) & wmasks->res0)))
+		return false;
+
+	if (tc.pol)
+		rmasks->mask |= BIT(tc.bit) & ~rmasks->res0;
+	else
+		rmasks->nmask |= BIT(tc.bit) & ~rmasks->res0;
+
+	if (wmasks) {
+		if (tc.pol)
+			wmasks->mask |= BIT(tc.bit) & ~wmasks->res0;
+		else
+			wmasks->nmask |= BIT(tc.bit) & ~wmasks->res0;
+	}
+
+	return true;
+}
+
+static __init int check_fgt_masks(struct fgt_masks *masks)
+{
+	unsigned long duplicate = masks->mask & masks->nmask;
+	u64 res0 = masks->res0;
+	int ret = 0;
+
+	if (duplicate) {
+		int i;
+
+		for_each_set_bit(i, &duplicate, 64) {
+			kvm_err("%s[%d] bit has both polarities\n",
+				masks->str, i);
+		}
+
+		ret = -EINVAL;
+	}
+
+	masks->res0 = ~(masks->mask | masks->nmask);
+	if (masks->res0 != res0)
+		kvm_info("Implicit %s = %016llx, expecting %016llx\n",
+			 masks->str, masks->res0, res0);
+
+	return ret;
+}
+
+static __init int check_all_fgt_masks(int ret)
+{
+	static struct fgt_masks * const masks[] __initconst = {
+		&hfgrtr_masks,
+		&hfgwtr_masks,
+		&hfgitr_masks,
+		&hdfgrtr_masks,
+		&hdfgwtr_masks,
+		&hafgrtr_masks,
+	};
+	int err = 0;
+
+	for (int i = 0; i < ARRAY_SIZE(masks); i++)
+		err |= check_fgt_masks(masks[i]);
+
+	return ret ?: err;
+}
+
 int __init populate_nv_trap_config(void)
 {
 	int ret = 0;
@@ -2097,8 +2196,15 @@ int __init populate_nv_trap_config(void)
 			ret = xa_err(prev);
 			print_nv_trap_error(fgt, "Failed FGT insertion", ret);
 		}
+
+		if (!aggregate_fgt(tc)) {
+			ret = -EINVAL;
+			print_nv_trap_error(fgt, "FGT bit is reserved", ret);
+		}
 	}
 
+	ret = check_all_fgt_masks(ret);
+
 	kvm_info("nv: %ld fine grained trap handlers\n",
 		 ARRAY_SIZE(encoding_to_fgt));
 
-- 
2.39.2