It is well-known to be possible to abuse the eBPF JIT to construct gadgets for code re-use attacks. To hinder this, constant blinding was added in "bpf: add generic constant blinding for use in jits". This mitigation has one weakness, though: it ignores jump instructions, since their correct offsets are not known at the time constant blinding is applied. This can be abused to construct "jump chains" with crafted offsets so that certain desirable instructions are generated by the JIT compiler. E.g. two consecutive BPF_JMP | BPF_JA codes with an appropriate offset might generate the following jumps: ... 0xffffffffc000f822: jmp 0xffffffffc00108df 0xffffffffc000f827: jmp 0xffffffffc0010861 ... If those are hit unaligned, we can get two consecutive useful instructions: ... 0xffffffffc000f823: mov $0xe9000010,%eax 0xffffffffc000f828: xor $0xe9000010,%eax ... This patch adds a mitigation that prevents said chains from being generated, by re-writing any instructions which are not reachable anyway. By preventing consecutive jumps, only a single instruction can be encoded, which is believed to be insufficient to be useful. No functional changes for a benign filter program are intended.
Fixes: 4f3446bb809f ("bpf: add generic constant blinding for use in jits")
Signed-off-by: Lion <nnamrec@xxxxxxxxx>
---
 net/core/filter.c | 68 ++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 65 insertions(+), 3 deletions(-)

diff --git a/net/core/filter.c b/net/core/filter.c
index bc6828761a47..b8eb2fa309c6 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -970,6 +970,65 @@ static int check_load_and_stores(const struct sock_filter *filter, int flen)
 	return ret;
 }
 
+/* Security:
+ *
+ * As it is possible to abuse the JIT compiler to produce instructions for
+ * code re-use, rewrite anything which is not reachable in a benign program
+ * anyway.
+ *
+ * Must only be called after bpf_check_classic() has validated all jump
+ * offsets, so every set_bit() target below is guaranteed in-bounds.
+ */
+static int remove_dead_code(struct sock_filter *filter, int flen)
+{
+	int pc;
+	unsigned long *live;
+
+	if (flen == 0)
+		return 0;
+
+	live = bitmap_zalloc(flen, GFP_KERNEL);
+	if (!live)
+		return -ENOMEM;
+
+	/* No back jumps and no loops, can do a single forward pass here.
+	 * The bitmap is local to this function, so the non-atomic
+	 * __set_bit() suffices; test_bit() is a plain read already.
+	 */
+	__set_bit(0, live);
+	for (pc = 0; pc < flen; pc++) {
+		if (!test_bit(pc, live)) {
+			/* Dead: overwrite with a harmless "load immediate 0". */
+			filter[pc].code = BPF_LD | BPF_IMM;
+			filter[pc].k = 0;
+			filter[pc].jt = 0;
+			filter[pc].jf = 0;
+			continue;
+		}
+
+		switch (filter[pc].code) {
+		case BPF_RET | BPF_K:
+		case BPF_RET | BPF_A:
+			/* Terminates the program; nothing falls through. */
+			break;
+		case BPF_JMP | BPF_JA:
+			/* Unconditional jump: only the target is live. */
+			__set_bit(pc + 1 + filter[pc].k, live);
+			break;
+		case BPF_JMP | BPF_JEQ | BPF_K:
+		case BPF_JMP | BPF_JEQ | BPF_X:
+		case BPF_JMP | BPF_JGE | BPF_K:
+		case BPF_JMP | BPF_JGE | BPF_X:
+		case BPF_JMP | BPF_JGT | BPF_K:
+		case BPF_JMP | BPF_JGT | BPF_X:
+		case BPF_JMP | BPF_JSET | BPF_K:
+		case BPF_JMP | BPF_JSET | BPF_X:
+			/* Conditional jump: both branch targets are live. */
+			__set_bit(pc + 1 + filter[pc].jt, live);
+			__set_bit(pc + 1 + filter[pc].jf, live);
+			break;
+		default:
+			/* Continue to next instruction */
+			__set_bit(pc + 1, live);
+			break;
+		}
+	}
+
+	/* Pair bitmap_zalloc() with bitmap_free(), not bare kfree(). */
+	bitmap_free(live);
+	return 0;
+}
+
 static bool chk_code_allowed(u16 code_to_probe)
 {
 	static const bool codes[] = {
@@ -1061,11 +1120,11 @@ static bool bpf_check_basics_ok(const struct sock_filter *filter,
  *
  * Returns 0 if the rule set is legal or -EINVAL if not.
  */
-static int bpf_check_classic(const struct sock_filter *filter,
+static int bpf_check_classic(struct sock_filter *filter,
 			     unsigned int flen)
 {
 	bool anc_found;
-	int pc;
+	int pc, ret;
 
 	/* Check the filter code now */
 	for (pc = 0; pc < flen; pc++) {
@@ -1133,7 +1192,10 @@ static int bpf_check_classic(const struct sock_filter *filter,
 	switch (filter[flen - 1].code) {
 	case BPF_RET | BPF_K:
 	case BPF_RET | BPF_A:
-		return check_load_and_stores(filter, flen);
+		ret = check_load_and_stores(filter, flen);
+		if (ret)
+			return ret;
+		return remove_dead_code(filter, flen);
 	}
 
 	return -EINVAL;
-- 
2.49.0