1
0
Fork 0
mirror of synced 2025-03-06 20:59:54 +01:00

net: protect netdev->napi_list with netdev_lock()

Hold netdev->lock when NAPIs are getting added or removed.
This will allow safe access to NAPI instances of a net_device
without rtnl_lock.

Create a family of helpers which assume the lock is already taken.
Switch iavf to them, as it already makes extensive use of
netdev->lock.

Reviewed-by: Joe Damato <jdamato@fastly.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Link: https://patch.msgid.link/20250115035319.559603-6-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski 2025-01-14 19:53:13 -08:00
parent 5112457f3d
commit 1b23cdbd2b
3 changed files with 60 additions and 15 deletions

View file

@@ -1800,8 +1800,8 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
q_vector->v_idx = q_idx; q_vector->v_idx = q_idx;
q_vector->reg_idx = q_idx; q_vector->reg_idx = q_idx;
cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
netif_napi_add(adapter->netdev, &q_vector->napi, netif_napi_add_locked(adapter->netdev, &q_vector->napi,
iavf_napi_poll); iavf_napi_poll);
} }
return 0; return 0;
@@ -1827,7 +1827,7 @@ static void iavf_free_q_vectors(struct iavf_adapter *adapter)
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx]; struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
netif_napi_del(&q_vector->napi); netif_napi_del_locked(&q_vector->napi);
} }
kfree(adapter->q_vectors); kfree(adapter->q_vectors);
adapter->q_vectors = NULL; adapter->q_vectors = NULL;

View file

@@ -2456,7 +2456,7 @@ struct net_device {
* Drivers are free to use it for other protection. * Drivers are free to use it for other protection.
* *
* Protects: * Protects:
* @net_shaper_hierarchy, @reg_state * @napi_list, @net_shaper_hierarchy, @reg_state
* *
* Partially protects (writers must hold both @lock and rtnl_lock): * Partially protects (writers must hold both @lock and rtnl_lock):
* @up * @up
@@ -2712,8 +2712,19 @@ static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
*/ */
#define NAPI_POLL_WEIGHT 64 #define NAPI_POLL_WEIGHT 64
void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi, void netif_napi_add_weight_locked(struct net_device *dev,
int (*poll)(struct napi_struct *, int), int weight); struct napi_struct *napi,
int (*poll)(struct napi_struct *, int),
int weight);
static inline void
netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int weight)
{
netdev_lock(dev);
netif_napi_add_weight_locked(dev, napi, poll, weight);
netdev_unlock(dev);
}
/** /**
* netif_napi_add() - initialize a NAPI context * netif_napi_add() - initialize a NAPI context
@@ -2731,6 +2742,13 @@ netif_napi_add(struct net_device *dev, struct napi_struct *napi,
netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT); netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
} }
static inline void
netif_napi_add_locked(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int))
{
netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT);
}
static inline void static inline void
netif_napi_add_tx_weight(struct net_device *dev, netif_napi_add_tx_weight(struct net_device *dev,
struct napi_struct *napi, struct napi_struct *napi,
@@ -2741,6 +2759,15 @@ netif_napi_add_tx_weight(struct net_device *dev,
netif_napi_add_weight(dev, napi, poll, weight); netif_napi_add_weight(dev, napi, poll, weight);
} }
static inline void
netif_napi_add_config_locked(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int index)
{
napi->index = index;
napi->config = &dev->napi_config[index];
netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT);
}
/** /**
* netif_napi_add_config - initialize a NAPI context with persistent config * netif_napi_add_config - initialize a NAPI context with persistent config
* @dev: network device * @dev: network device
@@ -2752,9 +2779,9 @@ static inline void
netif_napi_add_config(struct net_device *dev, struct napi_struct *napi, netif_napi_add_config(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int index) int (*poll)(struct napi_struct *, int), int index)
{ {
napi->index = index; netdev_lock(dev);
napi->config = &dev->napi_config[index]; netif_napi_add_config_locked(dev, napi, poll, index);
netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT); netdev_unlock(dev);
} }
/** /**
@@ -2774,6 +2801,8 @@ static inline void netif_napi_add_tx(struct net_device *dev,
netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT); netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
} }
void __netif_napi_del_locked(struct napi_struct *napi);
/** /**
* __netif_napi_del - remove a NAPI context * __netif_napi_del - remove a NAPI context
* @napi: NAPI context * @napi: NAPI context
@@ -2782,7 +2811,18 @@ static inline void netif_napi_add_tx(struct net_device *dev,
* containing @napi. Drivers might want to call this helper to combine * containing @napi. Drivers might want to call this helper to combine
* all the needed RCU grace periods into a single one. * all the needed RCU grace periods into a single one.
*/ */
void __netif_napi_del(struct napi_struct *napi); static inline void __netif_napi_del(struct napi_struct *napi)
{
netdev_lock(napi->dev);
__netif_napi_del_locked(napi);
netdev_unlock(napi->dev);
}
static inline void netif_napi_del_locked(struct napi_struct *napi)
{
__netif_napi_del_locked(napi);
synchronize_net();
}
/** /**
* netif_napi_del - remove a NAPI context * netif_napi_del - remove a NAPI context

View file

@@ -6910,9 +6910,12 @@ netif_napi_dev_list_add(struct net_device *dev, struct napi_struct *napi)
list_add_rcu(&napi->dev_list, higher); /* adds after higher */ list_add_rcu(&napi->dev_list, higher); /* adds after higher */
} }
void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi, void netif_napi_add_weight_locked(struct net_device *dev,
int (*poll)(struct napi_struct *, int), int weight) struct napi_struct *napi,
int (*poll)(struct napi_struct *, int),
int weight)
{ {
netdev_assert_locked(dev);
if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state))) if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
return; return;
@@ -6953,7 +6956,7 @@ void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
dev->threaded = false; dev->threaded = false;
netif_napi_set_irq(napi, -1); netif_napi_set_irq(napi, -1);
} }
EXPORT_SYMBOL(netif_napi_add_weight); EXPORT_SYMBOL(netif_napi_add_weight_locked);
void napi_disable(struct napi_struct *n) void napi_disable(struct napi_struct *n)
{ {
@@ -7024,8 +7027,10 @@ static void flush_gro_hash(struct napi_struct *napi)
} }
/* Must be called in process context */ /* Must be called in process context */
void __netif_napi_del(struct napi_struct *napi) void __netif_napi_del_locked(struct napi_struct *napi)
{ {
netdev_assert_locked(napi->dev);
if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state)) if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
return; return;
@@ -7045,7 +7050,7 @@ void __netif_napi_del(struct napi_struct *napi)
napi->thread = NULL; napi->thread = NULL;
} }
} }
EXPORT_SYMBOL(__netif_napi_del); EXPORT_SYMBOL(__netif_napi_del_locked);
static int __napi_poll(struct napi_struct *n, bool *repoll) static int __napi_poll(struct napi_struct *n, bool *repoll)
{ {