A recent change to use tp/syscalls/sys_enter_nanosleep for the perf_buffer
selftest causes it to fail on the 4.9 kernel in libbpf CI ([0]):

libbpf: prog 'handle_sys_enter': failed to attach to perf_event FD 6: Invalid argument
libbpf: prog 'handle_sys_enter': failed to attach to tracepoint 'syscalls/sys_enter_nanosleep': Invalid argument
It's not entirely clear why, because the perf_event itself is created
successfully for this tracepoint, but I can't even compile a 4.9 kernel
locally, so it's hard to investigate. If anyone has better luck and would
like to help look into this, I'd really appreciate it.
For now, unblock CI by switching back to raw_syscalls/sys_enter, but reduce
the amount of unnecessary samples emitted by filtering on process ID. Use an
explicit ARRAY map for that so it works on 4.9 as well, because global data
isn't yet supported there.
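
The BPF half of the test isn't shown on this page, but the scheme just
described would look roughly like the following sketch. The map and program
names (my_pid_map, perf_buf_map, handle_sys_enter) match what the userspace
test below expects; the body is a reconstruction, not the verbatim
progs/test_perf_buffer.c:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Explicit single-slot ARRAY map holding the test's PID, since global data
 * isn't supported on 4.9.
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} my_pid_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} perf_buf_map SEC(".maps");

SEC("tp/raw_syscalls/sys_enter")
int handle_sys_enter(void *ctx)
{
	int zero = 0, cpu = bpf_get_smp_processor_id();
	int *my_pid;

	my_pid = bpf_map_lookup_elem(&my_pid_map, &zero);
	if (!my_pid || (int)(bpf_get_current_pid_tgid() >> 32) != *my_pid)
		return 0; /* not the test process, drop the sample */

	/* emit the CPU number; userspace checks it matches the ring buffer
	 * the sample arrived on
	 */
	bpf_perf_event_output(ctx, &perf_buf_map, BPF_F_CURRENT_CPU,
			      &cpu, sizeof(cpu));
	return 0;
}

char _license[] SEC("license") = "GPL";

A single-slot ARRAY map keyed by zero is the usual stand-in for global data
on old kernels: userspace writes its PID into slot 0, which is exactly what
the bpf_map_update_elem() call on skel->maps.my_pid_map does in the test
below.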
Fixes: aa274f98b2 ("selftests/bpf: Fix possible/online index mismatch in perf_buffer test")
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20211022201342.3490692-1-andrii@kernel.org
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <sys/socket.h>
#include <test_progs.h>
#include "test_perf_buffer.skel.h"
#include "bpf/libbpf_internal.h"

static int duration;

/* AddressSanitizer sometimes crashes due to data dereference below, due to
 * this being mmap()'ed memory. Disable instrumentation with
 * no_sanitize_address attribute
 */
__attribute__((no_sanitize_address))
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	int cpu_data = *(int *)data, duration = 0;
	cpu_set_t *cpu_seen = ctx;

	if (cpu_data != cpu)
		CHECK(cpu_data != cpu, "check_cpu_data",
		      "cpu_data %d != cpu %d\n", cpu_data, cpu);

	CPU_SET(cpu, cpu_seen);
}

int trigger_on_cpu(int cpu)
{
	cpu_set_t cpu_set;
	int err;

	CPU_ZERO(&cpu_set);
	CPU_SET(cpu, &cpu_set);

	err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
	if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n", cpu, err))
		return err;

	usleep(1);

	return 0;
}

void serial_test_perf_buffer(void)
{
	int err, on_len, nr_on_cpus = 0, nr_cpus, i, j;
	int zero = 0, my_pid = getpid();
	struct perf_buffer_opts pb_opts = {};
	struct test_perf_buffer *skel;
	cpu_set_t cpu_seen;
	struct perf_buffer *pb;
	int last_fd = -1, fd;
	bool *online;

	nr_cpus = libbpf_num_possible_cpus();
	if (CHECK(nr_cpus < 0, "nr_cpus", "err %d\n", nr_cpus))
		return;

	err = parse_cpu_mask_file("/sys/devices/system/cpu/online",
				  &online, &on_len);
	if (CHECK(err, "nr_on_cpus", "err %d\n", err))
		return;

	for (i = 0; i < on_len; i++)
		if (online[i])
			nr_on_cpus++;

	/* load program */
	skel = test_perf_buffer__open_and_load();
	if (CHECK(!skel, "skel_load", "skeleton open/load failed\n"))
		goto out_close;

	err = bpf_map_update_elem(bpf_map__fd(skel->maps.my_pid_map), &zero, &my_pid, 0);
	if (!ASSERT_OK(err, "my_pid_update"))
		goto out_close;

	/* attach probe */
	err = test_perf_buffer__attach(skel);
	if (CHECK(err, "attach_kprobe", "err %d\n", err))
		goto out_close;

	/* set up perf buffer */
	pb_opts.sample_cb = on_sample;
	pb_opts.ctx = &cpu_seen;
	pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1, &pb_opts);
	if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
		goto out_close;

	CHECK(perf_buffer__epoll_fd(pb) < 0, "epoll_fd",
	      "bad fd: %d\n", perf_buffer__epoll_fd(pb));

	/* trigger kprobe on every CPU */
	CPU_ZERO(&cpu_seen);
	for (i = 0; i < nr_cpus; i++) {
		if (i >= on_len || !online[i]) {
			printf("skipping offline CPU #%d\n", i);
			continue;
		}

		if (trigger_on_cpu(i))
			goto out_close;
	}

	/* read perf buffer */
	err = perf_buffer__poll(pb, 100);
	if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err))
		goto out_free_pb;

	if (CHECK(CPU_COUNT(&cpu_seen) != nr_on_cpus, "seen_cpu_cnt",
		  "expect %d, seen %d\n", nr_on_cpus, CPU_COUNT(&cpu_seen)))
		goto out_free_pb;

	if (CHECK(perf_buffer__buffer_cnt(pb) != nr_on_cpus, "buf_cnt",
		  "got %zu, expected %d\n", perf_buffer__buffer_cnt(pb), nr_on_cpus))
		goto out_close;

	for (i = 0, j = 0; i < nr_cpus; i++) {
		if (i >= on_len || !online[i])
			continue;

		fd = perf_buffer__buffer_fd(pb, j);
		CHECK(fd < 0 || last_fd == fd, "fd_check", "last fd %d == fd %d\n", last_fd, fd);
		last_fd = fd;

		err = perf_buffer__consume_buffer(pb, j);
		if (CHECK(err, "drain_buf", "cpu %d, err %d\n", i, err))
			goto out_close;

		CPU_CLR(i, &cpu_seen);
		if (trigger_on_cpu(i))
			goto out_close;

		err = perf_buffer__consume_buffer(pb, j);
		if (CHECK(err, "consume_buf", "cpu %d, err %d\n", j, err))
			goto out_close;

		if (CHECK(!CPU_ISSET(i, &cpu_seen), "cpu_seen", "cpu %d not seen\n", i))
			goto out_close;
		j++;
	}

out_free_pb:
	perf_buffer__free(pb);
out_close:
	test_perf_buffer__destroy(skel);
	free(online);
}