This patch cleans up a few things:

* dynptr_fail.c: There is no sys_nanosleep tracepoint. dynptr_fail only tests that the prog load fails, so just SEC("?raw_tp") suffices here.

* test_bpf_cookie: There is no sys_nanosleep kprobe. The prog is attached from userspace through bpf_program__attach_kprobe_opts(), passing in SYS_NANOSLEEP_KPROBE_NAME (see the sketch below), so just SEC("k{ret}probe") suffices here.

* test_helper_restricted: There is no sys_nanosleep kprobe. test_helper_restricted only tests that the prog load fails, so just SEC("?kprobe") suffices here.

There are no functional changes.

Suggested-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20220805171405.2272103-1-joannelkoong@gmail.com
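A minimal userspace sketch of the test_bpf_cookie point above, assuming libbpf 1.x error conventions: the kprobe target is passed to bpf_program__attach_kprobe_opts() at attach time rather than encoded in the SEC() name. The object file name "test_bpf_cookie.bpf.o", the program name "handle_kprobe", and the "__x64_sys_nanosleep" symbol (the x86-64 spelling behind the selftests' SYS_NANOSLEEP_KPROBE_NAME) are illustrative stand-ins, not taken from the patch.

/* Sketch: attach a SEC("kprobe") program to a target chosen at runtime. */
#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	LIBBPF_OPTS(bpf_kprobe_opts, opts, .retprobe = false);
	struct bpf_object *obj;
	struct bpf_program *prog;
	struct bpf_link *link;
	int err = 1;

	/* Object and program names are illustrative. */
	obj = bpf_object__open_file("test_bpf_cookie.bpf.o", NULL);
	if (!obj)
		return 1;
	if (bpf_object__load(obj))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "handle_kprobe");
	if (!prog)
		goto out;

	/* The attach target comes from this call, not from the SEC() name,
	 * so a bare SEC("kprobe") on the BPF side is enough.
	 * "__x64_sys_nanosleep" stands in for SYS_NANOSLEEP_KPROBE_NAME;
	 * other architectures use a different symbol prefix.
	 */
	link = bpf_program__attach_kprobe_opts(prog, "__x64_sys_nanosleep", &opts);
	if (!link) {
		fprintf(stderr, "attach failed\n");
		goto out;
	}

	err = 0;
	bpf_link__destroy(link);
out:
	bpf_object__close(obj);
	return err;
}

Because the target symbol is resolved here, the same BPF object can be attached to any kprobe-able symbol without changing or recompiling the BPF side.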
// SPDX-License-Identifier: GPL-2.0-only
#include <time.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

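/* test_helper_restricted: every program in this object calls a helper
 * (bpf_timer_* or bpf_spin_lock) that its program type is not allowed
 * to use; the selftest only checks that loading each program fails.
 */
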
struct timer {
	struct bpf_timer t;
};

struct lock {
	struct bpf_spin_lock l;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct timer);
} timers SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct lock);
} locks SEC(".maps");

static int timer_cb(void *map, int *key, struct timer *timer)
{
	return 0;
}

static void timer_work(void)
{
	struct timer *timer;
	const int key = 0;

	timer = bpf_map_lookup_elem(&timers, &key);
	if (timer) {
		bpf_timer_init(&timer->t, &timers, CLOCK_MONOTONIC);
		bpf_timer_set_callback(&timer->t, timer_cb);
		bpf_timer_start(&timer->t, 10E9, 0);
		bpf_timer_cancel(&timer->t);
	}
}

static void spin_lock_work(void)
{
	const int key = 0;
	struct lock *lock;

	lock = bpf_map_lookup_elem(&locks, &key);
	if (lock) {
		bpf_spin_lock(&lock->l);
		bpf_spin_unlock(&lock->l);
	}
}

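/* The timer and spin_lock sequences above, repeated once per tracing
 * program type under test. The "?" SEC() prefix marks the programs as
 * non-autoloaded, so userspace can load them one at a time and expect
 * the load to be rejected.
 */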
SEC("?raw_tp/sys_enter")
|
|
int raw_tp_timer(void *ctx)
|
|
{
|
|
timer_work();
|
|
|
|
return 0;
|
|
}
|
|
|
|
SEC("?tp/syscalls/sys_enter_nanosleep")
|
|
int tp_timer(void *ctx)
|
|
{
|
|
timer_work();
|
|
|
|
return 0;
|
|
}
|
|
|
|
SEC("?kprobe")
|
|
int kprobe_timer(void *ctx)
|
|
{
|
|
timer_work();
|
|
|
|
return 0;
|
|
}
|
|
|
|
SEC("?perf_event")
|
|
int perf_event_timer(void *ctx)
|
|
{
|
|
timer_work();
|
|
|
|
return 0;
|
|
}
|
|
|
|
SEC("?raw_tp/sys_enter")
|
|
int raw_tp_spin_lock(void *ctx)
|
|
{
|
|
spin_lock_work();
|
|
|
|
return 0;
|
|
}
|
|
|
|
SEC("?tp/syscalls/sys_enter_nanosleep")
|
|
int tp_spin_lock(void *ctx)
|
|
{
|
|
spin_lock_work();
|
|
|
|
return 0;
|
|
}
|
|
|
|
SEC("?kprobe")
|
|
int kprobe_spin_lock(void *ctx)
|
|
{
|
|
spin_lock_work();
|
|
|
|
return 0;
|
|
}
|
|
|
|
SEC("?perf_event")
|
|
int perf_event_spin_lock(void *ctx)
|
|
{
|
|
spin_lock_work();
|
|
|
|
return 0;
|
|
}
|
|
|
|
const char LICENSE[] SEC("license") = "GPL";
|