This patch introduces non-owning reference semantics to the verifier,
specifically linked_list API kfunc handling. release_on_unlock logic for
refs is refactored - with small functional changes - to implement these
semantics, and bpf_list_push_{front,back} are migrated to use them.

When a list node is pushed to a list, the program still has a pointer to
the node:

  n = bpf_obj_new(typeof(*n));

  bpf_spin_lock(&l);
  bpf_list_push_back(&l, n);
  /* n still points to the just-added node */
  bpf_spin_unlock(&l);

What the verifier considers n to be after the push, and thus what can be
done with n, are changed by this patch.

Common properties both before/after this patch:
  * After push, n is only a valid reference to the node until end of
    critical section
  * After push, n cannot be pushed to any list
  * After push, the program can read the node's fields using n

Before:
  * After push, n retains the ref_obj_id which it received on
    bpf_obj_new, but the associated bpf_reference_state's
    release_on_unlock field is set to true
    * release_on_unlock field and associated logic is used to implement
      "n is only a valid ref until end of critical section"
  * After push, n cannot be written to, the node must be removed from
    the list before writing to its fields
  * After push, n is marked PTR_UNTRUSTED

After:
  * After push, n's ref is released and ref_obj_id set to 0. NON_OWN_REF
    type flag is added to reg's type, indicating that it's a non-owning
    reference.
    * NON_OWN_REF flag and logic is used to implement "n is only a
      valid ref until end of critical section"
  * n can be written to (except for special fields e.g. bpf_list_node,
    timer, ...)

Summary of specific implementation changes to achieve the above:

  * release_on_unlock field, ref_set_release_on_unlock helper, and logic
    to "release on unlock" based on that field are removed

  * The anonymous active_lock struct used by bpf_verifier_state is
    pulled out into a named struct bpf_active_lock.

  * NON_OWN_REF type flag is introduced along with verifier logic
    changes to handle non-owning refs

  * Helpers are added to use NON_OWN_REF flag to implement non-owning
    ref semantics as described above
    * invalidate_non_owning_refs - helper to clobber all non-owning refs
      matching a particular bpf_active_lock identity. Replaces
      release_on_unlock logic in process_spin_lock.
    * ref_set_non_owning - set NON_OWN_REF type flag after doing some
      sanity checking
    * ref_convert_owning_non_owning - convert owning reference w/
      specified ref_obj_id to non-owning references. Set NON_OWN_REF
      flag for each reg with that ref_obj_id and 0-out its ref_obj_id

  * Update linked_list selftests to account for minor semantic
    differences introduced by this patch
    * Writes to a release_on_unlock node ref are not allowed, while
      writes to non-owning reference pointees are. As a result the
      linked_list "write after push" failure tests are no longer
      scenarios that should fail.
    * The test##missing_lock##op and test##incorrect_lock##op
      macro-generated failure tests need to have a valid node argument
      in order to have the same error output as before. Otherwise
      verification will fail early and the expected error output won't
      be seen.

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
Link: https://lore.kernel.org/r/20230212092715.1422619-2-davemarchevsky@fb.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
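To make the semantic change concrete, the minimal sketch below is accepted
after this patch but was rejected before it, because the post-push write
goes through what is now a non-owning reference rather than a
PTR_UNTRUSTED pointer. It is illustrative only and not part of this patch:
it assumes struct foo (with a plain data field and a bpf_list_node node
field) plus the glock/ghead globals from the selftest's linked_list.h.

  SEC("tc")
  int non_own_ref_example(void *ctx)
  {
  	struct foo *f;

  	f = bpf_obj_new(typeof(*f));
  	if (!f)
  		return 1;

  	bpf_spin_lock(&glock);
  	bpf_list_push_back(&ghead, &f->node);
  	/* f is now a non-owning reference: its ref was released on push,
  	 * but normal (non-special) fields may still be read and written
  	 * until the critical section ends.
  	 */
  	f->data = 42;
  	bpf_spin_unlock(&glock);
  	/* After unlock, f is invalidated; it can no longer be
  	 * dereferenced or pushed. The node stays on ghead.
  	 */
  	return 0;
  }

The selftest source below is the existing linked_list.c success-path
coverage that these semantics apply to.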
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_experimental.h"

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#include "linked_list.h"

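/* Single-node scenarios: pops from an empty list must return NULL, and a
 * value written before a push must be observed after the matching pop
 * from either end of the list.
 */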
static __always_inline
int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
{
	struct bpf_list_node *n;
	struct foo *f;

	f = bpf_obj_new(typeof(*f));
	if (!f)
		return 2;

	bpf_spin_lock(lock);
	n = bpf_list_pop_front(head);
	bpf_spin_unlock(lock);
	if (n) {
		bpf_obj_drop(container_of(n, struct foo, node));
		bpf_obj_drop(f);
		return 3;
	}

	bpf_spin_lock(lock);
	n = bpf_list_pop_back(head);
	bpf_spin_unlock(lock);
	if (n) {
		bpf_obj_drop(container_of(n, struct foo, node));
		bpf_obj_drop(f);
		return 4;
	}

	bpf_spin_lock(lock);
	f->data = 42;
	bpf_list_push_front(head, &f->node);
	bpf_spin_unlock(lock);
	if (leave_in_map)
		return 0;
	bpf_spin_lock(lock);
	n = bpf_list_pop_back(head);
	bpf_spin_unlock(lock);
	if (!n)
		return 5;
	f = container_of(n, struct foo, node);
	if (f->data != 42) {
		bpf_obj_drop(f);
		return 6;
	}

	bpf_spin_lock(lock);
	f->data = 13;
	bpf_list_push_front(head, &f->node);
	bpf_spin_unlock(lock);
	bpf_spin_lock(lock);
	n = bpf_list_pop_front(head);
	bpf_spin_unlock(lock);
	if (!n)
		return 7;
	f = container_of(n, struct foo, node);
	if (f->data != 13) {
		bpf_obj_drop(f);
		return 8;
	}
	bpf_obj_drop(f);

	bpf_spin_lock(lock);
	n = bpf_list_pop_front(head);
	bpf_spin_unlock(lock);
	if (n) {
		bpf_obj_drop(container_of(n, struct foo, node));
		return 9;
	}

	bpf_spin_lock(lock);
	n = bpf_list_pop_back(head);
	bpf_spin_unlock(lock);
	if (n) {
		bpf_obj_drop(container_of(n, struct foo, node));
		return 10;
	}
	return 0;
}

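/* Multi-node scenario: push several nodes (two per critical section),
 * rotate them through the list, then pop and verify ordering.
 */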
static __always_inline
int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
{
	struct bpf_list_node *n;
	struct foo *f[8], *pf;
	int i;

	/* Loop following this check adds nodes 2-at-a-time in order to
	 * validate that multiple refs are handled correctly per critical
	 * section (formerly release_on_unlock, now owning -> non-owning
	 * ref conversion on push)
	 */
	if (ARRAY_SIZE(f) % 2)
		return 10;

	for (i = 0; i < ARRAY_SIZE(f); i += 2) {
		f[i] = bpf_obj_new(typeof(**f));
		if (!f[i])
			return 2;
		f[i]->data = i;

		f[i + 1] = bpf_obj_new(typeof(**f));
		if (!f[i + 1]) {
			bpf_obj_drop(f[i]);
			return 9;
		}
		f[i + 1]->data = i + 1;

		bpf_spin_lock(lock);
		bpf_list_push_front(head, &f[i]->node);
		bpf_list_push_front(head, &f[i + 1]->node);
		bpf_spin_unlock(lock);
	}

	for (i = 0; i < ARRAY_SIZE(f); i++) {
		bpf_spin_lock(lock);
		n = bpf_list_pop_front(head);
		bpf_spin_unlock(lock);
		if (!n)
			return 3;
		pf = container_of(n, struct foo, node);
		if (pf->data != (ARRAY_SIZE(f) - i - 1)) {
			bpf_obj_drop(pf);
			return 4;
		}
		bpf_spin_lock(lock);
		bpf_list_push_back(head, &pf->node);
		bpf_spin_unlock(lock);
	}

	if (leave_in_map)
		return 0;

	for (i = 0; i < ARRAY_SIZE(f); i++) {
		bpf_spin_lock(lock);
		n = bpf_list_pop_back(head);
		bpf_spin_unlock(lock);
		if (!n)
			return 5;
		pf = container_of(n, struct foo, node);
		if (pf->data != i) {
			bpf_obj_drop(pf);
			return 6;
		}
		bpf_obj_drop(pf);
	}
	bpf_spin_lock(lock);
	n = bpf_list_pop_back(head);
	bpf_spin_unlock(lock);
	if (n) {
		bpf_obj_drop(container_of(n, struct foo, node));
		return 7;
	}

	bpf_spin_lock(lock);
	n = bpf_list_pop_front(head);
	bpf_spin_unlock(lock);
	if (n) {
		bpf_obj_drop(container_of(n, struct foo, node));
		return 8;
	}
	return 0;
}

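/* Nested lists: each bar node is pushed onto a list owned by a foo node,
 * and that foo node is in turn pushed onto the outer list.
 */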
static __always_inline
int list_in_list(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
{
	struct bpf_list_node *n;
	struct bar *ba[8], *b;
	struct foo *f;
	int i;

	f = bpf_obj_new(typeof(*f));
	if (!f)
		return 2;
	for (i = 0; i < ARRAY_SIZE(ba); i++) {
		b = bpf_obj_new(typeof(*b));
		if (!b) {
			bpf_obj_drop(f);
			return 3;
		}
		b->data = i;
		bpf_spin_lock(&f->lock);
		bpf_list_push_back(&f->head, &b->node);
		bpf_spin_unlock(&f->lock);
	}

	bpf_spin_lock(lock);
	f->data = 42;
	bpf_list_push_front(head, &f->node);
	bpf_spin_unlock(lock);

	if (leave_in_map)
		return 0;

	bpf_spin_lock(lock);
	n = bpf_list_pop_front(head);
	bpf_spin_unlock(lock);
	if (!n)
		return 4;
	f = container_of(n, struct foo, node);
	if (f->data != 42) {
		bpf_obj_drop(f);
		return 5;
	}

	for (i = 0; i < ARRAY_SIZE(ba); i++) {
		bpf_spin_lock(&f->lock);
		n = bpf_list_pop_front(&f->head);
		bpf_spin_unlock(&f->lock);
		if (!n) {
			bpf_obj_drop(f);
			return 6;
		}
		b = container_of(n, struct bar, node);
		if (b->data != i) {
			bpf_obj_drop(f);
			bpf_obj_drop(b);
			return 7;
		}
		bpf_obj_drop(b);
	}
	bpf_spin_lock(&f->lock);
	n = bpf_list_pop_front(&f->head);
	bpf_spin_unlock(&f->lock);
	if (n) {
		bpf_obj_drop(f);
		bpf_obj_drop(container_of(n, struct bar, node));
		return 8;
	}
	bpf_obj_drop(f);
	return 0;
}

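/* Each scenario runs twice: once draining the list, and once leaving the
 * nodes in place (leave_in_map) to exercise cleanup of non-empty lists
 * when the list_head's owner is destroyed.
 */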
static __always_inline
int test_list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head)
{
	int ret;

	ret = list_push_pop(lock, head, false);
	if (ret)
		return ret;
	return list_push_pop(lock, head, true);
}

static __always_inline
int test_list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *head)
{
	int ret;

	ret = list_push_pop_multiple(lock, head, false);
	if (ret)
		return ret;
	return list_push_pop_multiple(lock, head, true);
}

static __always_inline
int test_list_in_list(struct bpf_spin_lock *lock, struct bpf_list_head *head)
{
	int ret;

	ret = list_in_list(lock, head, false);
	if (ret)
		return ret;
	return list_in_list(lock, head, true);
}

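/* Entry points below exercise each scenario against three list_head
 * locations: an array map value, an inner map's value, and the global
 * (bss) glock/ghead pair.
 */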
SEC("tc")
|
|
int map_list_push_pop(void *ctx)
|
|
{
|
|
struct map_value *v;
|
|
|
|
v = bpf_map_lookup_elem(&array_map, &(int){0});
|
|
if (!v)
|
|
return 1;
|
|
return test_list_push_pop(&v->lock, &v->head);
|
|
}
|
|
|
|
SEC("tc")
|
|
int inner_map_list_push_pop(void *ctx)
|
|
{
|
|
struct map_value *v;
|
|
void *map;
|
|
|
|
map = bpf_map_lookup_elem(&map_of_maps, &(int){0});
|
|
if (!map)
|
|
return 1;
|
|
v = bpf_map_lookup_elem(map, &(int){0});
|
|
if (!v)
|
|
return 1;
|
|
return test_list_push_pop(&v->lock, &v->head);
|
|
}
|
|
|
|
SEC("tc")
|
|
int global_list_push_pop(void *ctx)
|
|
{
|
|
return test_list_push_pop(&glock, &ghead);
|
|
}
|
|
|
|
SEC("tc")
|
|
int map_list_push_pop_multiple(void *ctx)
|
|
{
|
|
struct map_value *v;
|
|
int ret;
|
|
|
|
v = bpf_map_lookup_elem(&array_map, &(int){0});
|
|
if (!v)
|
|
return 1;
|
|
return test_list_push_pop_multiple(&v->lock, &v->head);
|
|
}
|
|
|
|
SEC("tc")
|
|
int inner_map_list_push_pop_multiple(void *ctx)
|
|
{
|
|
struct map_value *v;
|
|
void *map;
|
|
int ret;
|
|
|
|
map = bpf_map_lookup_elem(&map_of_maps, &(int){0});
|
|
if (!map)
|
|
return 1;
|
|
v = bpf_map_lookup_elem(map, &(int){0});
|
|
if (!v)
|
|
return 1;
|
|
return test_list_push_pop_multiple(&v->lock, &v->head);
|
|
}
|
|
|
|
SEC("tc")
|
|
int global_list_push_pop_multiple(void *ctx)
|
|
{
|
|
int ret;
|
|
|
|
ret = list_push_pop_multiple(&glock, &ghead, false);
|
|
if (ret)
|
|
return ret;
|
|
return list_push_pop_multiple(&glock, &ghead, true);
|
|
}
|
|
|
|
SEC("tc")
|
|
int map_list_in_list(void *ctx)
|
|
{
|
|
struct map_value *v;
|
|
int ret;
|
|
|
|
v = bpf_map_lookup_elem(&array_map, &(int){0});
|
|
if (!v)
|
|
return 1;
|
|
return test_list_in_list(&v->lock, &v->head);
|
|
}
|
|
|
|
SEC("tc")
|
|
int inner_map_list_in_list(void *ctx)
|
|
{
|
|
struct map_value *v;
|
|
void *map;
|
|
int ret;
|
|
|
|
map = bpf_map_lookup_elem(&map_of_maps, &(int){0});
|
|
if (!map)
|
|
return 1;
|
|
v = bpf_map_lookup_elem(map, &(int){0});
|
|
if (!v)
|
|
return 1;
|
|
return test_list_in_list(&v->lock, &v->head);
|
|
}
|
|
|
|
SEC("tc")
|
|
int global_list_in_list(void *ctx)
|
|
{
|
|
return test_list_in_list(&glock, &ghead);
|
|
}
|
|
|
|
char _license[] SEC("license") = "GPL";
|