Merge branch 'net-second-round-to-use-dev_net_rcu'

Eric Dumazet says:

====================
net: second round to use dev_net_rcu()

dev_net(dev) should either be protected by RTNL or RCU.

There is no LOCKDEP support yet for this helper.

Adding it would trigger too many splats.

This second series fixes some of them.
====================

Link: https://patch.msgid.link/20250207135841.1948589-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Committed by Jakub Kicinski on 2025-02-10 18:09:11 -08:00
commit 9dfedb8dc7
6 changed files with 50 additions and 35 deletions
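
The conversion applied across the six files below follows one pattern: a dev_net(dev) dereference that may run without RTNL is moved inside an explicit RCU read-side critical section and switched to dev_net_rcu(). A minimal before/after sketch of that pattern (illustrative only; example_notify() and do_something() are hypothetical helpers, not code from this commit):

	/* Before: dev_net() called with neither RTNL nor RCU held. */
	static void example_notify(struct net_device *dev)
	{
		struct net *net = dev_net(dev);

		do_something(net);
	}

	/* After: hold rcu_read_lock() and read the netns pointer with
	 * dev_net_rcu(), keeping it valid for the whole critical section.
	 */
	static void example_notify(struct net_device *dev)
	{
		struct net *net;

		rcu_read_lock();
		net = dev_net_rcu(dev);
		do_something(net);
		rcu_read_unlock();
	}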


@@ -198,10 +198,12 @@ struct sk_buff *l3mdev_l3_out(struct sock *sk, struct sk_buff *skb, u16 proto)
 	if (netif_is_l3_slave(dev)) {
 		struct net_device *master;
 
+		rcu_read_lock();
 		master = netdev_master_upper_dev_get_rcu(dev);
 		if (master && master->l3mdev_ops->l3mdev_l3_out)
 			skb = master->l3mdev_ops->l3mdev_l3_out(master, sk,
								skb, proto);
+		rcu_read_unlock();
 	}
 
 	return skb;


@@ -3447,10 +3447,12 @@ static const struct seq_operations neigh_stat_seq_ops = {
 static void __neigh_notify(struct neighbour *n, int type, int flags,
 			   u32 pid)
 {
-	struct net *net = dev_net(n->dev);
 	struct sk_buff *skb;
 	int err = -ENOBUFS;
+	struct net *net;
 
+	rcu_read_lock();
+	net = dev_net_rcu(n->dev);
 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
 	if (skb == NULL)
 		goto errout;
@@ -3463,9 +3465,11 @@ static void __neigh_notify(struct neighbour *n, int type, int flags,
 		goto errout;
 	}
 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
-	return;
+	goto out;
 errout:
 	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
+out:
+	rcu_read_unlock();
 }
 
 void neigh_app_ns(struct neighbour *n)


@@ -659,10 +659,12 @@ static int arp_xmit_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
  */
 void arp_xmit(struct sk_buff *skb)
 {
+	rcu_read_lock();
 	/* Send it off, maybe filter it using firewalling first. */
 	NF_HOOK(NFPROTO_ARP, NF_ARP_OUT,
-		dev_net(skb->dev), NULL, skb, NULL, skb->dev,
+		dev_net_rcu(skb->dev), NULL, skb, NULL, skb->dev,
 		arp_xmit_finish);
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL(arp_xmit);


@@ -2165,21 +2165,21 @@ static void mld_send_cr(struct inet6_dev *idev)
 static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
 {
-	struct net *net = dev_net(dev);
-	struct sock *sk = net->ipv6.igmp_sk;
+	const struct in6_addr *snd_addr, *saddr;
+	int err, len, payload_len, full_len;
+	struct in6_addr addr_buf;
 	struct inet6_dev *idev;
 	struct sk_buff *skb;
 	struct mld_msg *hdr;
-	const struct in6_addr *snd_addr, *saddr;
-	struct in6_addr addr_buf;
 	int hlen = LL_RESERVED_SPACE(dev);
 	int tlen = dev->needed_tailroom;
-	int err, len, payload_len, full_len;
 	u8 ra[8] = { IPPROTO_ICMPV6, 0,
		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
		     IPV6_TLV_PADN, 0 };
-	struct flowi6 fl6;
 	struct dst_entry *dst;
+	struct flowi6 fl6;
+	struct net *net;
+	struct sock *sk;
 
 	if (type == ICMPV6_MGM_REDUCTION)
 		snd_addr = &in6addr_linklocal_allrouters;
 	else
@@ -2190,19 +2190,21 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
 	payload_len = len + sizeof(ra);
 	full_len = sizeof(struct ipv6hdr) + payload_len;
 
+	skb = alloc_skb(hlen + tlen + full_len, GFP_KERNEL);
+
 	rcu_read_lock();
-	IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_OUTREQUESTS);
-	rcu_read_unlock();
 
-	skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
-
+	net = dev_net_rcu(dev);
+	idev = __in6_dev_get(dev);
+	IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
 	if (!skb) {
-		rcu_read_lock();
-		IP6_INC_STATS(net, __in6_dev_get(dev),
-			      IPSTATS_MIB_OUTDISCARDS);
+		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
 		rcu_read_unlock();
 		return;
 	}
+	sk = net->ipv6.igmp_sk;
+	skb_set_owner_w(skb, sk);
+
 	skb->priority = TC_PRIO_CONTROL;
 
 	skb_reserve(skb, hlen);
@@ -2227,9 +2229,6 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
 					 IPPROTO_ICMPV6,
 					 csum_partial(hdr, len, 0));
 
-	rcu_read_lock();
-	idev = __in6_dev_get(skb->dev);
-
 	icmpv6_flow_init(sk, &fl6, type,
 			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
 			 skb->dev->ifindex);


@@ -418,15 +418,11 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev,
 {
 	int hlen = LL_RESERVED_SPACE(dev);
 	int tlen = dev->needed_tailroom;
-	struct sock *sk = dev_net(dev)->ipv6.ndisc_sk;
 	struct sk_buff *skb;
 
 	skb = alloc_skb(hlen + sizeof(struct ipv6hdr) + len + tlen, GFP_ATOMIC);
-	if (!skb) {
-		ND_PRINTK(0, err, "ndisc: %s failed to allocate an skb\n",
-			  __func__);
+	if (!skb)
 		return NULL;
-	}
 
 	skb->protocol = htons(ETH_P_IPV6);
 	skb->dev = dev;
@@ -437,7 +433,9 @@ static void ip6_nd_hdr(struct sk_buff *skb,
 	/* Manually assign socket ownership as we avoid calling
 	 * sock_alloc_send_pskb() to bypass wmem buffer limits
 	 */
-	skb_set_owner_w(skb, sk);
+	rcu_read_lock();
+	skb_set_owner_w(skb, dev_net_rcu(dev)->ipv6.ndisc_sk);
+	rcu_read_unlock();
 
 	return skb;
 }
@@ -473,16 +471,20 @@ static void ip6_nd_hdr(struct sk_buff *skb,
 void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
 		    const struct in6_addr *saddr)
 {
-	struct dst_entry *dst = skb_dst(skb);
-	struct net *net = dev_net(skb->dev);
-	struct sock *sk = net->ipv6.ndisc_sk;
-	struct inet6_dev *idev;
-	int err;
 	struct icmp6hdr *icmp6h = icmp6_hdr(skb);
+	struct dst_entry *dst = skb_dst(skb);
+	struct inet6_dev *idev;
+	struct net *net;
+	struct sock *sk;
+	int err;
 	u8 type;
 
 	type = icmp6h->icmp6_type;
 
+	rcu_read_lock();
+
+	net = dev_net_rcu(skb->dev);
+	sk = net->ipv6.ndisc_sk;
 	if (!dst) {
 		struct flowi6 fl6;
 		int oif = skb->dev->ifindex;
@@ -490,6 +492,7 @@ void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
 		icmpv6_flow_init(sk, &fl6, type, saddr, daddr, oif);
 		dst = icmp6_dst_alloc(skb->dev, &fl6);
 		if (IS_ERR(dst)) {
+			rcu_read_unlock();
 			kfree_skb(skb);
 			return;
 		}
@@ -504,7 +507,6 @@ void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
 	ip6_nd_hdr(skb, saddr, daddr, READ_ONCE(inet6_sk(sk)->hop_limit), skb->len);
 
-	rcu_read_lock();
 	idev = __in6_dev_get(dst->dev);
 	IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
@@ -1694,7 +1696,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
 	bool ret;
 
 	if (netif_is_l3_master(skb->dev)) {
-		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
+		dev = dev_get_by_index_rcu(dev_net(skb->dev), IPCB(skb)->iif);
 		if (!dev)
 			return;
 	}


@@ -2101,6 +2101,7 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
 {
 	struct ovs_header *ovs_header;
 	struct ovs_vport_stats vport_stats;
+	struct net *net_vport;
 	int err;
 
 	ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
@@ -2117,12 +2118,15 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
 	    nla_put_u32(skb, OVS_VPORT_ATTR_IFINDEX, vport->dev->ifindex))
 		goto nla_put_failure;
 
-	if (!net_eq(net, dev_net(vport->dev))) {
-		int id = peernet2id_alloc(net, dev_net(vport->dev), gfp);
+	rcu_read_lock();
+	net_vport = dev_net_rcu(vport->dev);
+	if (!net_eq(net, net_vport)) {
+		int id = peernet2id_alloc(net, net_vport, GFP_ATOMIC);
 
 		if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
-			goto nla_put_failure;
+			goto nla_put_failure_unlock;
 	}
+	rcu_read_unlock();
 
 	ovs_vport_get_stats(vport, &vport_stats);
 	if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
@@ -2143,6 +2147,8 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
 	genlmsg_end(skb, ovs_header);
 	return 0;
 
+nla_put_failure_unlock:
+	rcu_read_unlock();
 nla_put_failure:
 	err = -EMSGSIZE;
 error: