Merge branch 'net-ethernet-ti-am65-cpsw-xdp-fixes'

Roger Quadros says:

====================
net: ethernet: ti: am65-cpsw: XDP fixes

This series fixes a memory leak and incorrect statistics in the XDP paths.
====================

Link: https://patch.msgid.link/20250210-am65-cpsw-xdp-fixes-v1-0-ec6b1f7f1aca@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 0469b410c8 by Jakub Kicinski, 2025-02-12 20:08:47 -08:00

drivers/net/ethernet/ti/am65-cpsw-nuss.c

@@ -828,21 +828,30 @@ static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
 static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
 {
 	struct am65_cpsw_tx_chn *tx_chn = data;
+	enum am65_cpsw_tx_buf_type buf_type;
 	struct cppi5_host_desc_t *desc_tx;
+	struct xdp_frame *xdpf;
 	struct sk_buff *skb;
 	void **swdata;
 
 	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
 	swdata = cppi5_hdesc_get_swdata(desc_tx);
-	skb = *(swdata);
-	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+	buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
+	if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
+		skb = *(swdata);
+		dev_kfree_skb_any(skb);
+	} else {
+		xdpf = *(swdata);
+		xdp_return_frame(xdpf);
+	}
 
-	dev_kfree_skb_any(skb);
+	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
 }
 
 static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
 					   struct net_device *ndev,
-					   unsigned int len)
+					   unsigned int len,
+					   unsigned int headroom)
 {
 	struct sk_buff *skb;
 
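
The hunk above fixes the TX teardown leak: swdata was unconditionally treated as an skb and freed with dev_kfree_skb_any(), so descriptors carrying XDP frames were never returned via xdp_return_frame(). A minimal userspace sketch of the dispatch pattern, using hypothetical stand-in types rather than the driver's actual structures:

/* Sketch only: free a descriptor's buffer according to the buffer type
 * recorded at submit time, instead of assuming it is always an skb.
 * All names here are hypothetical stand-ins for the kernel objects.
 */
#include <stdio.h>
#include <stdlib.h>

enum buf_type { BUF_TYPE_SKB, BUF_TYPE_XDP };

struct desc {
	enum buf_type type;	/* recorded when the descriptor was queued */
	void *buf;		/* skb or xdp_frame, depending on 'type' */
};

static void free_skb(void *skb)   { printf("dev_kfree_skb_any(%p)\n", skb); free(skb); }
static void free_xdpf(void *xdpf) { printf("xdp_return_frame(%p)\n", xdpf); free(xdpf); }

/* Mirrors the shape of am65_cpsw_nuss_tx_cleanup(): pick the destructor
 * by buffer type; the old code took the free_skb() path unconditionally.
 */
static void tx_cleanup(struct desc *d)
{
	if (d->type == BUF_TYPE_SKB)
		free_skb(d->buf);
	else
		free_xdpf(d->buf);	/* previously leaked */
}

int main(void)
{
	struct desc d1 = { BUF_TYPE_SKB, malloc(64) };
	struct desc d2 = { BUF_TYPE_XDP, malloc(64) };

	tx_cleanup(&d1);
	tx_cleanup(&d2);
	return 0;
}
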
@@ -852,7 +861,7 @@ static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
 	if (unlikely(!skb))
 		return NULL;
 
-	skb_reserve(skb, AM65_CPSW_HEADROOM);
+	skb_reserve(skb, headroom);
 	skb->dev = ndev;
 
 	return skb;
@@ -1169,9 +1178,11 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
 	struct xdp_frame *xdpf;
 	struct bpf_prog *prog;
 	struct page *page;
+	int pkt_len;
 	u32 act;
 	int err;
 
+	pkt_len = *len;
 	prog = READ_ONCE(port->xdp_prog);
 	if (!prog)
 		return AM65_CPSW_XDP_PASS;
@@ -1189,8 +1200,10 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
 		netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
 
 		xdpf = xdp_convert_buff_to_frame(xdp);
-		if (unlikely(!xdpf))
+		if (unlikely(!xdpf)) {
+			ndev->stats.tx_dropped++;
 			goto drop;
+		}
 
 		__netif_tx_lock(netif_txq, cpu);
 		err = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
@@ -1199,14 +1212,14 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
 		if (err)
 			goto drop;
 
-		dev_sw_netstats_tx_add(ndev, 1, *len);
+		dev_sw_netstats_rx_add(ndev, pkt_len);
 		ret = AM65_CPSW_XDP_CONSUMED;
 		goto out;
 	case XDP_REDIRECT:
 		if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
 			goto drop;
 
-		dev_sw_netstats_rx_add(ndev, *len);
+		dev_sw_netstats_rx_add(ndev, pkt_len);
 		ret = AM65_CPSW_XDP_REDIRECT;
 		goto out;
 	default:
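
The statistics hunks above make two corrections: packets handled by XDP_TX and XDP_REDIRECT are received packets, so they are now counted through dev_sw_netstats_rx_add() rather than the TX counter, and the length is snapshotted into pkt_len before the BPF program runs, since the program may resize the packet (e.g. via bpf_xdp_adjust_head()). A minimal sketch of that snapshot pattern, with hypothetical names standing in for the driver's:

/* Sketch only: record the received length before the XDP program runs,
 * because the program can grow or shrink the packet; RX byte counters
 * should reflect what actually arrived. run_prog() and rx_bytes are
 * hypothetical stand-ins.
 */
#include <stdio.h>

static unsigned long rx_bytes;	/* stands in for per-CPU sw netstats */

/* A stand-in XDP program that trims 14 bytes, as adjust_head might. */
static int run_prog(int *len) { *len -= 14; return 0; }

static void rx_one(int *len)
{
	int pkt_len = *len;	/* snapshot before the program mutates *len */

	run_prog(len);
	rx_bytes += pkt_len;	/* account the original on-wire length */
}

int main(void)
{
	int len = 128;

	rx_one(&len);
	printf("rx_bytes=%lu len_after_prog=%d\n", rx_bytes, len);
	return 0;
}
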
@@ -1315,16 +1328,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
 	dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
 
 	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
 	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
 
-	skb = am65_cpsw_build_skb(page_addr, ndev,
-				  AM65_CPSW_MAX_PACKET_SIZE);
-	if (unlikely(!skb)) {
-		new_page = page;
-		goto requeue;
-	}
-
 	if (port->xdp_prog) {
 		xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
 		xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
@@ -1334,9 +1339,16 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
 		if (*xdp_state != AM65_CPSW_XDP_PASS)
 			goto allocate;
 
-		/* Compute additional headroom to be reserved */
-		headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb);
-		skb_reserve(skb, headroom);
+		headroom = xdp.data - xdp.data_hard_start;
+	} else {
+		headroom = AM65_CPSW_HEADROOM;
+	}
+
+	skb = am65_cpsw_build_skb(page_addr, ndev,
+				  AM65_CPSW_MAX_PACKET_SIZE, headroom);
+	if (unlikely(!skb)) {
+		new_page = page;
+		goto requeue;
 	}
 
 	ndev_priv = netdev_priv(ndev);
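
The last two hunks move am65_cpsw_build_skb() to after the XDP program has run, so no skb is allocated (and then abandoned) for packets that XDP consumes, and the skb's reserved headroom now matches whatever the program left behind: xdp.data - xdp.data_hard_start. A small userspace model of that headroom computation, with hypothetical names in place of the kernel's xdp_buff:

/* Sketch only: after the XDP program has possibly moved the packet start
 * (bpf_xdp_adjust_head()), the headroom to reserve in the skb is the
 * distance from the start of the buffer to the start of the packet.
 */
#include <stddef.h>
#include <stdio.h>

#define DEFAULT_HEADROOM 64	/* stands in for AM65_CPSW_HEADROOM */

struct xdp_buf {
	unsigned char *data_hard_start;	/* start of the page buffer */
	unsigned char *data;		/* start of the packet within it */
};

int main(void)
{
	unsigned char page[4096];
	struct xdp_buf xdp = {
		.data_hard_start = page,
		.data = page + DEFAULT_HEADROOM,
	};
	ptrdiff_t headroom;

	/* Model an XDP program growing the header by 16 bytes. */
	xdp.data -= 16;

	/* The skb built afterwards reserves exactly what XDP left. */
	headroom = xdp.data - xdp.data_hard_start;
	printf("reserve %td bytes of headroom\n", headroom);
	return 0;
}
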