
net: protect NAPI enablement with netdev_lock()

Wrap napi_enable() / napi_disable() with netdev_lock().
Provide the "already locked" flavor of the API.

iavf needs the usual adjustment. A number of drivers call
napi_enable() under a spin lock, so they have to be modified
to take netdev_lock() first, then the spin lock, and then call
napi_enable_locked().
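
For illustration, a minimal sketch of that conversion pattern in a
hypothetical driver (foo_open(), struct foo_priv, foo->lock and
foo->napi are invented names, not part of this patch; only
netdev_lock()/netdev_unlock() and napi_enable_locked() come from this
series). netdev_lock() is a sleeping lock, so it has to be taken
before the spin lock:

/* Hypothetical driver, for illustration only. */
struct foo_priv {
        spinlock_t lock;
        struct napi_struct napi;
};

static int foo_open(struct net_device *dev)
{
        struct foo_priv *foo = netdev_priv(dev);
        unsigned long flags;

        netdev_lock(dev);                       /* mutex: before the spin lock */
        spin_lock_irqsave(&foo->lock, flags);

        /* napi_enable() now takes netdev_lock() itself -- a sleeping
         * lock we already hold, and one that may not be taken under a
         * spin lock anyway -- so the "already locked" flavor is used.
         */
        napi_enable_locked(&foo->napi);

        spin_unlock_irqrestore(&foo->lock, flags);
        netdev_unlock(dev);
        return 0;
}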

Protecting napi_enable() implies that napi->napi_id is protected
by netdev_lock().
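
As a hedged illustration of that implication, a hypothetical helper
that wants a stable view of napi_id can hold the instance lock around
the read (foo_read_napi_id() is an invented name):

static unsigned int foo_read_napi_id(struct napi_struct *napi)
{
        unsigned int id;

        netdev_lock(napi->dev);         /* napi_id is stable under this lock */
        id = napi->napi_id;
        netdev_unlock(napi->dev);
        return id;
}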

Acked-by: Francois Romieu <romieu@fr.zoreil.com> # via-velocity
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Link: https://patch.msgid.link/20250115035319.559603-7-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Commit 413f0271f3 by Jakub Kicinski, 2025-01-14 19:53:14 -08:00
Parent: 1b23cdbd2b
6 changed files with 56 additions and 22 deletions

diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c

@@ -462,7 +462,7 @@ static void pcnet32_netif_start(struct net_device *dev)
         val = lp->a->read_csr(ioaddr, CSR3);
         val &= 0x00ff;
         lp->a->write_csr(ioaddr, CSR3, val);
-        napi_enable(&lp->napi);
+        napi_enable_locked(&lp->napi);
 }
 /*
@@ -889,6 +889,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
         if (netif_running(dev))
                 pcnet32_netif_stop(dev);
+        netdev_lock(dev);
         spin_lock_irqsave(&lp->lock, flags);
         lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
@@ -920,6 +921,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
         }
         spin_unlock_irqrestore(&lp->lock, flags);
+        netdev_unlock(dev);
         netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n",
                    lp->rx_ring_size, lp->tx_ring_size);
@@ -985,6 +987,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
         if (netif_running(dev))
                 pcnet32_netif_stop(dev);
+        netdev_lock(dev);
         spin_lock_irqsave(&lp->lock, flags);
         lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
@@ -1122,6 +1125,7 @@ clean_up:
                 lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
         }
         spin_unlock_irqrestore(&lp->lock, flags);
+        netdev_unlock(dev);
         return rc;
 } /* end pcnet32_loopback_test */
@@ -2101,6 +2105,7 @@ static int pcnet32_open(struct net_device *dev)
                 return -EAGAIN;
         }
+        netdev_lock(dev);
         spin_lock_irqsave(&lp->lock, flags);
         /* Check for a valid station address */
         if (!is_valid_ether_addr(dev->dev_addr)) {
@@ -2266,7 +2271,7 @@ static int pcnet32_open(struct net_device *dev)
                 goto err_free_ring;
         }
-        napi_enable(&lp->napi);
+        napi_enable_locked(&lp->napi);
         /* Re-initialize the PCNET32, and start it when done. */
         lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
@@ -2300,6 +2305,7 @@ static int pcnet32_open(struct net_device *dev)
                    lp->a->read_csr(ioaddr, CSR0));
         spin_unlock_irqrestore(&lp->lock, flags);
+        netdev_unlock(dev);
         return 0; /* Always succeed */
@@ -2315,6 +2321,7 @@ err_free_ring:
 err_free_irq:
         spin_unlock_irqrestore(&lp->lock, flags);
+        netdev_unlock(dev);
         free_irq(dev->irq, dev);
         return rc;
 }

diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c

@@ -1180,7 +1180,7 @@ static void iavf_napi_enable_all(struct iavf_adapter *adapter)
                 q_vector = &adapter->q_vectors[q_idx];
                 napi = &q_vector->napi;
-                napi_enable(napi);
+                napi_enable_locked(napi);
         }
 }
@@ -1196,7 +1196,7 @@ static void iavf_napi_disable_all(struct iavf_adapter *adapter)
         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                 q_vector = &adapter->q_vectors[q_idx];
-                napi_disable(&q_vector->napi);
+                napi_disable_locked(&q_vector->napi);
         }
 }

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c

@@ -4392,6 +4392,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
         if (pp->neta_armada3700)
                 return 0;
+        netdev_lock(port->napi.dev);
         spin_lock(&pp->lock);
         /*
          * Configuring the driver for a new CPU while the driver is
@@ -4418,7 +4419,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
         /* Mask all ethernet port interrupts */
         on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
-        napi_enable(&port->napi);
+        napi_enable_locked(&port->napi);
         /*
          * Enable per-CPU interrupts on the CPU that is
@@ -4439,6 +4440,8 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
                             MVNETA_CAUSE_LINK_CHANGE);
         netif_tx_start_all_queues(pp->dev);
         spin_unlock(&pp->lock);
+        netdev_unlock(port->napi.dev);
+
         return 0;
 }

diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c

@@ -2320,7 +2320,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
                 if (ret < 0)
                         goto out_free_tmp_vptr_1;
-                napi_disable(&vptr->napi);
+                netdev_lock(dev);
+                napi_disable_locked(&vptr->napi);
                 spin_lock_irqsave(&vptr->lock, flags);
@@ -2342,12 +2343,13 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
                 velocity_give_many_rx_descs(vptr);
-                napi_enable(&vptr->napi);
+                napi_enable_locked(&vptr->napi);
                 mac_enable_int(vptr->mac_regs);
                 netif_start_queue(dev);
                 spin_unlock_irqrestore(&vptr->lock, flags);
+                netdev_unlock(dev);
                 velocity_free_rings(tmp_vptr);

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h

@@ -382,7 +382,7 @@ struct napi_struct {
         struct sk_buff          *skb;
         struct list_head        rx_list; /* Pending GRO_NORMAL skbs */
         int                     rx_count; /* length of rx_list */
-        unsigned int            napi_id;
+        unsigned int            napi_id; /* protected by netdev_lock */
         struct hrtimer          timer;
         struct task_struct      *thread;
         unsigned long           gro_flush_timeout;
@@ -570,16 +570,11 @@ static inline bool napi_complete(struct napi_struct *n)
 int dev_set_threaded(struct net_device *dev, bool threaded);
-/**
- * napi_disable - prevent NAPI from scheduling
- * @n: NAPI context
- *
- * Stop NAPI from being scheduled on this context.
- * Waits till any outstanding processing completes.
- */
 void napi_disable(struct napi_struct *n);
+void napi_disable_locked(struct napi_struct *n);
 void napi_enable(struct napi_struct *n);
+void napi_enable_locked(struct napi_struct *n);
 /**
  * napi_synchronize - wait until NAPI is not running

diff --git a/net/core/dev.c b/net/core/dev.c

@@ -6958,11 +6958,13 @@ void netif_napi_add_weight_locked(struct net_device *dev,
 }
 EXPORT_SYMBOL(netif_napi_add_weight_locked);
-void napi_disable(struct napi_struct *n)
+void napi_disable_locked(struct napi_struct *n)
 {
         unsigned long val, new;
         might_sleep();
+        netdev_assert_locked(n->dev);
         set_bit(NAPI_STATE_DISABLE, &n->state);
         val = READ_ONCE(n->state);
@@ -6985,16 +6987,25 @@ void napi_disable(struct napi_struct *n)
         clear_bit(NAPI_STATE_DISABLE, &n->state);
 }
-EXPORT_SYMBOL(napi_disable);
+EXPORT_SYMBOL(napi_disable_locked);
 /**
- * napi_enable - enable NAPI scheduling
- * @n: NAPI context
+ * napi_disable() - prevent NAPI from scheduling
+ * @n: NAPI context
  *
- * Resume NAPI from being scheduled on this context.
- * Must be paired with napi_disable.
+ * Stop NAPI from being scheduled on this context.
+ * Waits till any outstanding processing completes.
+ * Takes netdev_lock() for associated net_device.
  */
-void napi_enable(struct napi_struct *n)
+void napi_disable(struct napi_struct *n)
+{
+        netdev_lock(n->dev);
+        napi_disable_locked(n);
+        netdev_unlock(n->dev);
+}
+EXPORT_SYMBOL(napi_disable);
+
+void napi_enable_locked(struct napi_struct *n)
 {
         unsigned long new, val = READ_ONCE(n->state);
@@ -7011,6 +7022,22 @@ void napi_enable(struct napi_struct *n)
                         new |= NAPIF_STATE_THREADED;
         } while (!try_cmpxchg(&n->state, &val, new));
 }
+EXPORT_SYMBOL(napi_enable_locked);
+
+/**
+ * napi_enable() - enable NAPI scheduling
+ * @n: NAPI context
+ *
+ * Enable scheduling of a NAPI instance.
+ * Must be paired with napi_disable().
+ * Takes netdev_lock() for associated net_device.
+ */
+void napi_enable(struct napi_struct *n)
+{
+        netdev_lock(n->dev);
+        napi_enable_locked(n);
+        netdev_unlock(n->dev);
+}
 EXPORT_SYMBOL(napi_enable);
 static void flush_gro_hash(struct napi_struct *napi)