
flow_dissector: use RCU protection to fetch dev_net()

__skb_flow_dissect() can be called from arbitrary contexts.

It must extend its RCU-protected section to cover the call to
dev_net(), which can then become dev_net_rcu().

This makes sure the net structure cannot disappear under us.

Fixes: 9b52e3f267 ("flow_dissector: handle no-skb use case")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Link: https://patch.msgid.link/20250205155120.1676781-10-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Author: Eric Dumazet
Date: 2025-02-05 15:51:17 +00:00
Committed by: Jakub Kicinski
parent 4b8474a095
commit afec62cd0a
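
The locking rule the patch enforces is that the pointer returned by dev_net_rcu() is only guaranteed to stay alive while the RCU read-side critical section that produced it is still open. Below is a minimal userspace sketch of that rule using liburcu; the struct net, struct net_device, and dev_net_rcu() names merely mimic the kernel's, and the build line is an assumption, so treat this as an illustration of the pattern rather than kernel code.

/*
 * Minimal liburcu sketch of the RCU read-side pattern
 * (assumed build: gcc rcu_netns_demo.c -o rcu_netns_demo -lurcu).
 */
#include <urcu.h>	/* rcu_read_lock(), rcu_dereference(), ... */
#include <stdio.h>
#include <stdlib.h>

struct net        { int id; };			/* stand-in for the kernel's struct net */
struct net_device { struct net *nd_net; };	/* stand-in for struct net_device */

/* Mimics the kernel helper: only valid under rcu_read_lock(). */
static struct net *dev_net_rcu(struct net_device *dev)
{
	return rcu_dereference(dev->nd_net);
}

static void dissect(struct net_device *dev)
{
	rcu_read_lock();			/* open the section before the fetch */
	struct net *net = dev_net_rcu(dev);	/* 'net' cannot be freed while we hold it */

	printf("dissecting in netns %d\n", net->id);
	rcu_read_unlock();			/* 'net' must not be touched past this point */
}

int main(void)
{
	struct net *ns = malloc(sizeof(*ns));
	struct net_device dev = { .nd_net = ns };

	ns->id = 1;
	rcu_register_thread();			/* liburcu reader threads must register */
	dissect(&dev);
	synchronize_rcu();			/* a writer would wait out readers before freeing */
	rcu_unregister_thread();
	free(ns);
	return 0;
}

The bug class the patch closes is exactly the inverse ordering: fetching the net pointer before rcu_read_lock() would let the namespace be freed between the fetch and the use.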


@@ -1108,10 +1108,12 @@ bool __skb_flow_dissect(const struct net *net,
 					      FLOW_DISSECTOR_KEY_BASIC,
 					      target_container);
 
+	rcu_read_lock();
+
 	if (skb) {
 		if (!net) {
 			if (skb->dev)
-				net = dev_net(skb->dev);
+				net = dev_net_rcu(skb->dev);
 			else if (skb->sk)
 				net = sock_net(skb->sk);
 		}
@@ -1122,7 +1124,6 @@ bool __skb_flow_dissect(const struct net *net,
 		enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
 		struct bpf_prog_array *run_array;
 
-		rcu_read_lock();
 		run_array = rcu_dereference(init_net.bpf.run_array[type]);
 		if (!run_array)
 			run_array = rcu_dereference(net->bpf.run_array[type]);
@@ -1150,17 +1151,17 @@ bool __skb_flow_dissect(const struct net *net,
 			prog = READ_ONCE(run_array->items[0].prog);
 			result = bpf_flow_dissect(prog, &ctx, n_proto, nhoff,
 						  hlen, flags);
-			if (result == BPF_FLOW_DISSECTOR_CONTINUE)
-				goto dissect_continue;
-			__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
-						 target_container);
-			rcu_read_unlock();
-			return result == BPF_OK;
+			if (result != BPF_FLOW_DISSECTOR_CONTINUE) {
+				__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
+							 target_container);
+				rcu_read_unlock();
+				return result == BPF_OK;
+			}
 		}
-dissect_continue:
-		rcu_read_unlock();
 	}
 
+	rcu_read_unlock();
+
 	if (dissector_uses_key(flow_dissector,
 			       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
 		struct ethhdr *eth = eth_hdr(skb);
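
Two details of the hunks above are worth noting. The rcu_read_lock()/rcu_read_unlock() pair that used to be private to the BPF flow-dissector branch now brackets the whole lookup, so it also covers the new dev_net_rcu() call (and the sock_net() fallback). And inverting the BPF_FLOW_DISSECTOR_CONTINUE test lets the continue case fall through to the single rcu_read_unlock() placed after the if (skb) block, which is what allows the dissect_continue label and its dedicated unlock to be deleted.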