bpf: fix net.core.bpf_jit_enable race
The global bpf_jit_enable variable is tested multiple times in the JITs, the constant-blinding code, and the verifier core. A malicious root user can try to toggle it while programs are being loaded. This race condition was accounted for and should cause no issues, but it is safer to avoid it entirely: snapshot the sysctl value into a per-program bpf_prog->jit_requested flag at allocation time and test that flag everywhere afterwards.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
commit 60b58afc96
parent 1ea47e01ad
10 changed files with 13 additions and 11 deletions
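As a rough illustration of the pattern this commit applies, the sketch below shows a sysctl-like global being read exactly once at program allocation and cached in the program object, so a concurrent toggle no longer changes decisions made later in the load path. This is a minimal user-space sketch, not kernel code; the names fake_sysctl_jit_enable, prog_alloc and jit_compile are hypothetical stand-ins for the real sysctl, bpf_prog_alloc() and bpf_int_jit_compile().

/* Minimal sketch of the "snapshot the racy global once" pattern.
 * All names here are illustrative, not the kernel's real API.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for net.core.bpf_jit_enable, which another thread
 * (e.g. a malicious root) could flip at any time. */
static volatile int fake_sysctl_jit_enable = 1;

struct prog {
	bool jit_requested;	/* snapshot of the sysctl, taken once at alloc time */
};

static struct prog *prog_alloc(void)
{
	struct prog *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	/* Read the racy global exactly once; every later stage consults
	 * this cached bit instead of re-reading the global. */
	p->jit_requested = fake_sysctl_jit_enable != 0;
	return p;
}

static void jit_compile(struct prog *p)
{
	/* Before the fix this would test the global again, so a toggle
	 * between alloc and compile could yield inconsistent decisions. */
	if (!p->jit_requested) {
		printf("falling back to the interpreter\n");
		return;
	}
	printf("JIT compiling\n");
}

int main(void)
{
	struct prog *p = prog_alloc();

	if (!p)
		return 1;
	/* Even if the sysctl flips here, the in-flight program keeps the
	 * decision it captured at allocation time. */
	fake_sysctl_jit_enable = 0;

	jit_compile(p);		/* still prints "JIT compiling" */
	free(p);
	return 0;
}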
@@ -1824,7 +1824,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	/* If BPF JIT was not enabled then we must fall back to
 	 * the interpreter.
 	 */
-	if (!bpf_jit_enable)
+	if (!prog->jit_requested)
 		return orig_prog;
 
 	/* If constant blinding was enabled and we failed during blinding
@@ -844,7 +844,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	int image_size;
 	u8 *image_ptr;
 
-	if (!bpf_jit_enable)
+	if (!prog->jit_requested)
 		return orig_prog;
 
 	tmp = bpf_jit_blind_constants(prog);
@@ -1869,7 +1869,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	unsigned int image_size;
 	u8 *image_ptr;
 
-	if (!bpf_jit_enable || !cpu_has_mips64r2)
+	if (!prog->jit_requested || !cpu_has_mips64r2)
 		return prog;
 
 	tmp = bpf_jit_blind_constants(prog);
@@ -993,7 +993,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	struct bpf_prog *tmp_fp;
 	bool bpf_blinded = false;
 
-	if (!bpf_jit_enable)
+	if (!fp->jit_requested)
 		return org_fp;
 
 	tmp_fp = bpf_jit_blind_constants(org_fp);
@@ -1300,7 +1300,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	struct bpf_jit jit;
 	int pass;
 
-	if (!bpf_jit_enable)
+	if (!fp->jit_requested)
 		return orig_fp;
 
 	tmp = bpf_jit_blind_constants(fp);
@@ -1517,7 +1517,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	u8 *image_ptr;
 	int pass;
 
-	if (!bpf_jit_enable)
+	if (!prog->jit_requested)
 		return orig_prog;
 
 	tmp = bpf_jit_blind_constants(prog);
@@ -1121,7 +1121,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	int pass;
 	int i;
 
-	if (!bpf_jit_enable)
+	if (!prog->jit_requested)
 		return orig_prog;
 
 	tmp = bpf_jit_blind_constants(prog);
@@ -458,6 +458,7 @@ struct bpf_binary_header {
 struct bpf_prog {
 	u16			pages;		/* Number of allocated pages */
 	u16			jited:1,	/* Is our filter JIT'ed? */
+				jit_requested:1,/* archs need to JIT the prog */
 				locked:1,	/* Program image locked? */
 				gpl_compatible:1, /* Is filter GPL compatible? */
 				cb_access:1,	/* Is control block accessed? */
@@ -804,7 +805,7 @@ static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
 	return fp->jited && bpf_jit_is_ebpf();
 }
 
-static inline bool bpf_jit_blinding_enabled(void)
+static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
 {
 	/* These are the prerequisites, should someone ever have the
 	 * idea to call blinding outside of them, we make sure to
@@ -812,7 +813,7 @@ static inline bool bpf_jit_blinding_enabled(void)
 	 */
 	if (!bpf_jit_is_ebpf())
 		return false;
-	if (!bpf_jit_enable)
+	if (!prog->jit_requested)
 		return false;
 	if (!bpf_jit_harden)
 		return false;
@@ -94,6 +94,7 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
 	fp->pages = size / PAGE_SIZE;
 	fp->aux = aux;
 	fp->aux->prog = fp;
+	fp->jit_requested = ebpf_jit_enabled();
 
 	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
 
@@ -721,7 +722,7 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
 	struct bpf_insn *insn;
 	int i, rewritten;
 
-	if (!bpf_jit_blinding_enabled())
+	if (!bpf_jit_blinding_enabled(prog))
 		return prog;
 
 	clone = bpf_prog_clone_create(prog, GFP_USER);
@@ -5080,7 +5080,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
 		 * handlers are currently limited to 64 bit only.
 		 */
-		if (ebpf_jit_enabled() && BITS_PER_LONG == 64 &&
+		if (prog->jit_requested && BITS_PER_LONG == 64 &&
 		    insn->imm == BPF_FUNC_map_lookup_elem) {
 			map_ptr = env->insn_aux_data[i + delta].map_ptr;
 			if (map_ptr == BPF_MAP_PTR_POISON ||