net: ethernet: ti: am65-cpsw: fix memleak in certain XDP cases

If the XDP program doesn't result in XDP_PASS, we leak the memory
allocated by am65_cpsw_build_skb().
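
For reference, a condensed sketch of the old receive path (the function
and constant names are taken from the driver, but the surrounding code
is abridged, so treat this as illustrative rather than the exact
source):

	/* Old flow: the SKB wraps the page before the XDP verdict is known */
	skb = am65_cpsw_build_skb(page_addr, ndev, AM65_CPSW_MAX_PACKET_SIZE);
	if (unlikely(!skb))
		goto requeue;

	if (port->xdp_prog) {
		/* run the XDP program; arguments abridged */
		*xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, &pkt_len);
		if (*xdp_state != AM65_CPSW_XDP_PASS)
			goto allocate;	/* skb is never freed: the leak */
	}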

It is pointless to allocate SKB memory before running the XDP program,
as we would be wasting CPU cycles for any verdict other than XDP_PASS.
Move the SKB allocation after evaluating the XDP program result.
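
The reordered path then looks roughly like this (again condensed from
the diff below, not verbatim):

	if (port->xdp_prog) {
		/* run the XDP program first; arguments abridged */
		*xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, &pkt_len);
		if (*xdp_state != AM65_CPSW_XDP_PASS)
			goto allocate;	/* no SKB was allocated, nothing leaks */
		/* keep whatever headroom the program left before the data */
		headroom = xdp.data - xdp.data_hard_start;
	} else {
		headroom = AM65_CPSW_HEADROOM;
	}

	skb = am65_cpsw_build_skb(page_addr, ndev,
				  AM65_CPSW_MAX_PACKET_SIZE, headroom);
	if (unlikely(!skb))
		goto requeue;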

This fixes the memleak. A performance boost is seen for the XDP_DROP test.

XDP_DROP test:
Before: 460256 rx/s                  0 err/s
After:  784130 rx/s                  0 err/s
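
For context, XDP_DROP throughput is typically measured by attaching a
trivial program that drops every frame. A minimal sketch of such a
program (not necessarily the one used for the numbers above):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	/* return XDP_DROP for every received frame */
	SEC("xdp")
	int xdp_drop(struct xdp_md *ctx)
	{
		return XDP_DROP;
	}

	char _license[] SEC("license") = "GPL";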

Fixes: 8acacc40f7 ("net: ethernet: ti: am65-cpsw: Add minimal XDP support")
Signed-off-by: Roger Quadros <rogerq@kernel.org>
Link: https://patch.msgid.link/20250210-am65-cpsw-xdp-fixes-v1-1-ec6b1f7f1aca@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/ti/am65-cpsw-nuss.c

@@ -842,7 +842,8 @@ static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
 
 static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
 					   struct net_device *ndev,
-					   unsigned int len)
+					   unsigned int len,
+					   unsigned int headroom)
 {
 	struct sk_buff *skb;
 
@@ -852,7 +853,7 @@ static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
 	if (unlikely(!skb))
 		return NULL;
 
-	skb_reserve(skb, AM65_CPSW_HEADROOM);
+	skb_reserve(skb, headroom);
 	skb->dev = ndev;
 
 	return skb;
@@ -1315,16 +1316,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
 	dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
 
 	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
 	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
 
-	skb = am65_cpsw_build_skb(page_addr, ndev,
-				  AM65_CPSW_MAX_PACKET_SIZE);
-	if (unlikely(!skb)) {
-		new_page = page;
-		goto requeue;
-	}
-
 	if (port->xdp_prog) {
 		xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
 		xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
@@ -1334,9 +1327,16 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
 		if (*xdp_state != AM65_CPSW_XDP_PASS)
 			goto allocate;
 
-		/* Compute additional headroom to be reserved */
-		headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb);
-		skb_reserve(skb, headroom);
+		headroom = xdp.data - xdp.data_hard_start;
+	} else {
+		headroom = AM65_CPSW_HEADROOM;
+	}
+
+	skb = am65_cpsw_build_skb(page_addr, ndev,
+				  AM65_CPSW_MAX_PACKET_SIZE, headroom);
+	if (unlikely(!skb)) {
+		new_page = page;
+		goto requeue;
 	}
 
 	ndev_priv = netdev_priv(ndev);