1
0
Fork 0
mirror of synced 2025-03-06 20:59:54 +01:00
linux/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c
Alexei Starovoitov 6fcd486b3a bpf: Refactor RCU enforcement in the verifier.
bpf_rcu_read_lock/unlock() are only available in clang compiled kernels. Lack
of such key mechanism makes it impossible for sleepable bpf programs to use RCU
pointers.

Allow bpf_rcu_read_lock/unlock() in GCC compiled kernels (though GCC doesn't
support btf_type_tag yet) and allowlist certain field dereferences in important
data structures like task_struct, cgroup, socket that are used by sleepable
programs either as RCU pointer or full trusted pointer (which is valid outside
of RCU CS). Use BTF_TYPE_SAFE_RCU and BTF_TYPE_SAFE_TRUSTED macros for such
tagging. They will be removed once GCC supports btf_type_tag.

With that refactor check_ptr_to_btf_access(). Make it strict in enforcing
PTR_TRUSTED and PTR_UNTRUSTED while deprecating old PTR_TO_BTF_ID without
modifier flags. There is a chance that this strict enforcement might break
existing programs (especially on GCC compiled kernels), but this cleanup has to
start sooner than later. Note PTR_TO_CTX access still yields old deprecated
PTR_TO_BTF_ID. Once it's converted to strict PTR_TRUSTED or PTR_UNTRUSTED the
kfuncs and helpers will be able to default to KF_TRUSTED_ARGS. KF_RCU will
remain as a weaker version of KF_TRUSTED_ARGS where obj refcnt could be 0.

Adjust rcu_read_lock selftest to run on gcc and clang compiled kernels.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: David Vernet <void@manifault.com>
Link: https://lore.kernel.org/bpf/20230303041446.3630-7-alexei.starovoitov@gmail.com
2023-03-03 17:42:20 +01:00

148 lines
3.6 KiB
C

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates.*/
#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <test_progs.h>
#include <bpf/btf.h>
#include "rcu_read_lock.skel.h"
#include "cgroup_helpers.h"
static unsigned long long cgroup_id;
/* Positive test: enable every program expected to pass the verifier,
 * load and attach them, trigger via getpgid(), and check the values
 * the programs wrote into the skeleton's .bss.
 */
static void test_success(void)
{
	struct rcu_read_lock *skel;
	int ret, i;

	skel = rcu_read_lock__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	/* Programs filter on this pid so only this thread's syscalls count. */
	skel->bss->target_pid = syscall(SYS_gettid);

	{
		struct bpf_program *enabled[] = {
			skel->progs.get_cgroup_id,
			skel->progs.task_succ,
			skel->progs.two_regions,
			skel->progs.non_sleepable_1,
			skel->progs.non_sleepable_2,
			skel->progs.task_trusted_non_rcuptr,
		};

		for (i = 0; i < ARRAY_SIZE(enabled); i++)
			bpf_program__set_autoload(enabled[i], true);
	}

	ret = rcu_read_lock__load(skel);
	if (!ASSERT_OK(ret, "skel_load"))
		goto out;

	ret = rcu_read_lock__attach(skel);
	if (!ASSERT_OK(ret, "skel_attach"))
		goto out;

	/* Fire the attached fentry programs. */
	syscall(SYS_getpgid);

	ASSERT_EQ(skel->bss->task_storage_val, 2, "task_storage_val");
	ASSERT_EQ(skel->bss->cgroup_id, cgroup_id, "cgroup_id");
out:
	rcu_read_lock__destroy(skel);
}
/* Positive test: the task_acquire program (acquiring a reference on an
 * RCU-protected task pointer) must load and attach successfully.
 */
static void test_rcuptr_acquire(void)
{
	struct rcu_read_lock *skel;
	int ret;

	skel = rcu_read_lock__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	/* Program filters on this pid. */
	skel->bss->target_pid = syscall(SYS_gettid);

	bpf_program__set_autoload(skel->progs.task_acquire, true);

	ret = rcu_read_lock__load(skel);
	if (ASSERT_OK(ret, "skel_load")) {
		ret = rcu_read_lock__attach(skel);
		ASSERT_OK(ret, "skel_attach");
	}
	rcu_read_lock__destroy(skel);
}
/* Names of BPF programs that misuse the bpf_rcu_read_lock/unlock()
 * critical section and must therefore be rejected by the verifier.
 */
static const char * const inproper_region_tests[] = {
"miss_lock",
"no_lock",
"miss_unlock",
"non_sleepable_rcu_mismatch",
"inproper_sleepable_helper",
"inproper_sleepable_kfunc",
"nested_rcu_region",
};
/* Negative test: each program in inproper_region_tests[] must FAIL to
 * load (the verifier rejects the malformed RCU critical section).
 */
static void test_inproper_region(void)
{
	struct rcu_read_lock *skel;
	struct bpf_program *prog;
	int i;

	for (i = 0; i < ARRAY_SIZE(inproper_region_tests); i++) {
		skel = rcu_read_lock__open();
		if (!ASSERT_OK_PTR(skel, "skel_open"))
			return;
		prog = bpf_object__find_program_by_name(skel->obj,
							inproper_region_tests[i]);
		if (ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) {
			bpf_program__set_autoload(prog, true);
			/* Rejection by the verifier is the expected outcome. */
			ASSERT_ERR(rcu_read_lock__load(skel), "skel_load");
		}
		rcu_read_lock__destroy(skel);
	}
}
/* Names of BPF programs that misuse an RCU pointer itself (rather than
 * the lock region) and must be rejected by the verifier.
 */
static const char * const rcuptr_misuse_tests[] = {
"task_untrusted_rcuptr",
"cross_rcu_region",
};
/* Negative test: each program in rcuptr_misuse_tests[] must FAIL to
 * load (the verifier rejects the RCU-pointer misuse).
 */
static void test_rcuptr_misuse(void)
{
	struct rcu_read_lock *skel;
	struct bpf_program *prog;
	int i;

	for (i = 0; i < ARRAY_SIZE(rcuptr_misuse_tests); i++) {
		skel = rcu_read_lock__open();
		if (!ASSERT_OK_PTR(skel, "skel_open"))
			return;
		prog = bpf_object__find_program_by_name(skel->obj,
							rcuptr_misuse_tests[i]);
		if (ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) {
			bpf_program__set_autoload(prog, true);
			/* Rejection by the verifier is the expected outcome. */
			ASSERT_ERR(rcu_read_lock__load(skel), "skel_load");
		}
		rcu_read_lock__destroy(skel);
	}
}
/* Test entry point: join a dedicated cgroup (so get_cgroup_id has a
 * known value to report), then run each subtest.
 */
void test_rcu_read_lock(void)
{
	int cgroup_fd = test__join_cgroup("/rcu_read_lock");

	if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /rcu_read_lock"))
		return;

	cgroup_id = get_cgroup_id("/rcu_read_lock");

	if (test__start_subtest("success"))
		test_success();
	if (test__start_subtest("rcuptr_acquire"))
		test_rcuptr_acquire();
	if (test__start_subtest("negative_tests_inproper_region"))
		test_inproper_region();
	if (test__start_subtest("negative_tests_rcuptr_misuse"))
		test_rcuptr_misuse();

	close(cgroup_fd);
}