Merge branch 'bpf-skip-non-exist-keys-in-generic_map_lookup_batch'
Yan Zhai says: ==================== bpf: skip non exist keys in generic_map_lookup_batch The generic_map_lookup_batch currently returns EINTR if it fails with ENOENT and retries several times on bpf_map_copy_value. The next batch would start from the same location, presuming it's a transient issue. This is incorrect if a map can actually have "holes", i.e. "get_next_key" can return a key that does not point to a valid value. At least the array of maps type may contain such holes legitimately. Right now, when these holes show up, generic batch lookup cannot proceed any more. It will always fail with EINTR errors. This patch fixes this behavior by skipping the non-existing key, and does not return EINTR any more. V2->V3: deleted an unused macro V1->V2: split the fix and selftests; fixed a few selftests issues. V2: https://lore.kernel.org/bpf/cover.1738905497.git.yan@cloudflare.com/ V1: https://lore.kernel.org/bpf/Z6OYbS4WqQnmzi2z@debian.debian/ ==================== Link: https://patch.msgid.link/cover.1739171594.git.yan@cloudflare.com Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
commit
dbf7cc5600
2 changed files with 49 additions and 31 deletions
|
@ -1977,8 +1977,6 @@ int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
|
|||
return err;
|
||||
}
|
||||
|
||||
#define MAP_LOOKUP_RETRIES 3
|
||||
|
||||
int generic_map_lookup_batch(struct bpf_map *map,
|
||||
const union bpf_attr *attr,
|
||||
union bpf_attr __user *uattr)
|
||||
|
@ -1988,8 +1986,8 @@ int generic_map_lookup_batch(struct bpf_map *map,
|
|||
void __user *values = u64_to_user_ptr(attr->batch.values);
|
||||
void __user *keys = u64_to_user_ptr(attr->batch.keys);
|
||||
void *buf, *buf_prevkey, *prev_key, *key, *value;
|
||||
int err, retry = MAP_LOOKUP_RETRIES;
|
||||
u32 value_size, cp, max_count;
|
||||
int err;
|
||||
|
||||
if (attr->batch.elem_flags & ~BPF_F_LOCK)
|
||||
return -EINVAL;
|
||||
|
@ -2035,14 +2033,8 @@ int generic_map_lookup_batch(struct bpf_map *map,
|
|||
err = bpf_map_copy_value(map, key, value,
|
||||
attr->batch.elem_flags);
|
||||
|
||||
if (err == -ENOENT) {
|
||||
if (retry) {
|
||||
retry--;
|
||||
continue;
|
||||
}
|
||||
err = -EINTR;
|
||||
break;
|
||||
}
|
||||
if (err == -ENOENT)
|
||||
goto next_key;
|
||||
|
||||
if (err)
|
||||
goto free_buf;
|
||||
|
@ -2057,12 +2049,12 @@ int generic_map_lookup_batch(struct bpf_map *map,
|
|||
goto free_buf;
|
||||
}
|
||||
|
||||
cp++;
|
||||
next_key:
|
||||
if (!prev_key)
|
||||
prev_key = buf_prevkey;
|
||||
|
||||
swap(prev_key, key);
|
||||
retry = MAP_LOOKUP_RETRIES;
|
||||
cp++;
|
||||
cond_resched();
|
||||
}
|
||||
|
||||
|
|
|
@ -120,11 +120,12 @@ static void validate_fetch_results(int outer_map_fd,
|
|||
|
||||
static void fetch_and_validate(int outer_map_fd,
|
||||
struct bpf_map_batch_opts *opts,
|
||||
__u32 batch_size, bool delete_entries)
|
||||
__u32 batch_size, bool delete_entries,
|
||||
bool has_holes)
|
||||
{
|
||||
__u32 *fetched_keys, *fetched_values, total_fetched = 0;
|
||||
__u32 batch_key = 0, fetch_count, step_size;
|
||||
int err, max_entries = OUTER_MAP_ENTRIES;
|
||||
int err, max_entries = OUTER_MAP_ENTRIES - !!has_holes;
|
||||
__u32 *fetched_keys, *fetched_values, total_fetched = 0, i;
|
||||
__u32 batch_key = 0, fetch_count, step_size = batch_size;
|
||||
__u32 value_size = sizeof(__u32);
|
||||
|
||||
/* Total entries needs to be fetched */
|
||||
|
@ -134,9 +135,8 @@ static void fetch_and_validate(int outer_map_fd,
|
|||
"Memory allocation failed for fetched_keys or fetched_values",
|
||||
"error=%s\n", strerror(errno));
|
||||
|
||||
for (step_size = batch_size;
|
||||
step_size <= max_entries;
|
||||
step_size += batch_size) {
|
||||
/* hash map may not always return full batch */
|
||||
for (i = 0; i < OUTER_MAP_ENTRIES; i++) {
|
||||
fetch_count = step_size;
|
||||
err = delete_entries
|
||||
? bpf_map_lookup_and_delete_batch(outer_map_fd,
|
||||
|
@ -155,6 +155,7 @@ static void fetch_and_validate(int outer_map_fd,
|
|||
if (err && errno == ENOSPC) {
|
||||
/* Fetch again with higher batch size */
|
||||
total_fetched = 0;
|
||||
step_size += batch_size;
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -184,18 +185,19 @@ static void fetch_and_validate(int outer_map_fd,
|
|||
}
|
||||
|
||||
static void _map_in_map_batch_ops(enum bpf_map_type outer_map_type,
|
||||
enum bpf_map_type inner_map_type)
|
||||
enum bpf_map_type inner_map_type,
|
||||
bool has_holes)
|
||||
{
|
||||
__u32 max_entries = OUTER_MAP_ENTRIES - !!has_holes;
|
||||
__u32 *outer_map_keys, *inner_map_fds;
|
||||
__u32 max_entries = OUTER_MAP_ENTRIES;
|
||||
LIBBPF_OPTS(bpf_map_batch_opts, opts);
|
||||
__u32 value_size = sizeof(__u32);
|
||||
int batch_size[2] = {5, 10};
|
||||
__u32 map_index, op_index;
|
||||
int outer_map_fd, ret;
|
||||
|
||||
outer_map_keys = calloc(max_entries, value_size);
|
||||
inner_map_fds = calloc(max_entries, value_size);
|
||||
outer_map_keys = calloc(OUTER_MAP_ENTRIES, value_size);
|
||||
inner_map_fds = calloc(OUTER_MAP_ENTRIES, value_size);
|
||||
CHECK((!outer_map_keys || !inner_map_fds),
|
||||
"Memory allocation failed for outer_map_keys or inner_map_fds",
|
||||
"error=%s\n", strerror(errno));
|
||||
|
@ -209,6 +211,24 @@ static void _map_in_map_batch_ops(enum bpf_map_type outer_map_type,
|
|||
((outer_map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
|
||||
? 9 : 1000) - map_index;
|
||||
|
||||
/* This condition is only meaningful for array of maps.
|
||||
*
|
||||
* max_entries == OUTER_MAP_ENTRIES - 1 if it is true. Say
|
||||
* max_entries is short for n, then outer_map_keys looks like:
|
||||
*
|
||||
* [n, n-1, ... 2, 1]
|
||||
*
|
||||
* We change it to
|
||||
*
|
||||
* [n, n-1, ... 2, 0]
|
||||
*
|
||||
* So it will leave key 1 as a hole. It will serve to test the
|
||||
* correctness when batch on an array: a "non-exist" key might be
|
||||
* actually allocated and returned from key iteration.
|
||||
*/
|
||||
if (has_holes)
|
||||
outer_map_keys[max_entries - 1]--;
|
||||
|
||||
/* batch operation - map_update */
|
||||
ret = bpf_map_update_batch(outer_map_fd, outer_map_keys,
|
||||
inner_map_fds, &max_entries, &opts);
|
||||
|
@ -219,15 +239,17 @@ static void _map_in_map_batch_ops(enum bpf_map_type outer_map_type,
|
|||
/* batch operation - map_lookup */
|
||||
for (op_index = 0; op_index < 2; ++op_index)
|
||||
fetch_and_validate(outer_map_fd, &opts,
|
||||
batch_size[op_index], false);
|
||||
batch_size[op_index], false,
|
||||
has_holes);
|
||||
|
||||
/* batch operation - map_lookup_delete */
|
||||
if (outer_map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
|
||||
fetch_and_validate(outer_map_fd, &opts,
|
||||
max_entries, true /*delete*/);
|
||||
max_entries, true /*delete*/,
|
||||
has_holes);
|
||||
|
||||
/* close all map fds */
|
||||
for (map_index = 0; map_index < max_entries; map_index++)
|
||||
for (map_index = 0; map_index < OUTER_MAP_ENTRIES; map_index++)
|
||||
close(inner_map_fds[map_index]);
|
||||
close(outer_map_fd);
|
||||
|
||||
|
@ -237,16 +259,20 @@ static void _map_in_map_batch_ops(enum bpf_map_type outer_map_type,
|
|||
|
||||
void test_map_in_map_batch_ops_array(void)
|
||||
{
|
||||
_map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_ARRAY);
|
||||
_map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_ARRAY, false);
|
||||
printf("%s:PASS with inner ARRAY map\n", __func__);
|
||||
_map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_HASH);
|
||||
_map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_HASH, false);
|
||||
printf("%s:PASS with inner HASH map\n", __func__);
|
||||
_map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_ARRAY, true);
|
||||
printf("%s:PASS with inner ARRAY map with holes\n", __func__);
|
||||
_map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_HASH, true);
|
||||
printf("%s:PASS with inner HASH map with holes\n", __func__);
|
||||
}
|
||||
|
||||
void test_map_in_map_batch_ops_hash(void)
|
||||
{
|
||||
_map_in_map_batch_ops(BPF_MAP_TYPE_HASH_OF_MAPS, BPF_MAP_TYPE_ARRAY);
|
||||
_map_in_map_batch_ops(BPF_MAP_TYPE_HASH_OF_MAPS, BPF_MAP_TYPE_ARRAY, false);
|
||||
printf("%s:PASS with inner ARRAY map\n", __func__);
|
||||
_map_in_map_batch_ops(BPF_MAP_TYPE_HASH_OF_MAPS, BPF_MAP_TYPE_HASH);
|
||||
_map_in_map_batch_ops(BPF_MAP_TYPE_HASH_OF_MAPS, BPF_MAP_TYPE_HASH, false);
|
||||
printf("%s:PASS with inner HASH map\n", __func__);
|
||||
}
|
||||
|
|
Loading…
Add table
Reference in a new issue