Fix a memory leak when the user changes ring parameters.
During reallocation of RX buffers, new DMA mappings are created for
those buffers. New buffers with different RX ring count should
substitute older ones, but those buffers were freed in ice_vsi_cfg_rxq
and reallocated again with ice_alloc_rx_buf. kfree on rx_buf caused
leak of already mapped DMA.
Reallocate ZC with xdp_buf struct, when BPF program loads. Reallocate
back to rx_buf, when BPF program unloads.
If BPF program is loaded/unloaded and XSK pools are created, reallocate
RX queues accordingly in XDP_SETUP_XSK_POOL handler.
Steps for reproduction:
while :
do
for ((i=0; i<=8160; i=i+32))
do
ethtool -G enp130s0f0 rx $i tx $i
sleep 0.5
ethtool -g enp130s0f0
done
done
Fixes: 617f3e1b58 ("ice: xsk: allocate separate memory for XDP SW ring")
Signed-off-by: Przemyslaw Patynowski <przemyslawx.patynowski@intel.com>
Signed-off-by: Mateusz Palczewski <mateusz.palczewski@intel.com>
Tested-by: Chandan <chandanx.rout@intel.com> (A Contingent Worker at Intel)
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
84 lines
2.2 KiB
C
84 lines
2.2 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */

#ifndef _ICE_XSK_H_
#define _ICE_XSK_H_

#include "ice_txrx.h"
/* Number of descriptors handled per iteration of the ZC transmit batch. */
#define PKTS_PER_BATCH 8

/* loop_unrolled_for - a "for" that hints the compiler to unroll the loop
 * PKTS_PER_BATCH (8) times.  Uses the compiler-specific unroll pragma when
 * available (clang, or GCC >= 8, which introduced "GCC unroll"); otherwise
 * it degrades to a plain for loop with identical semantics.
 */
#ifdef __clang__
#define loop_unrolled_for _Pragma("clang loop unroll_count(8)") for
#elif __GNUC__ >= 8
#define loop_unrolled_for _Pragma("GCC unroll 8") for
#else
#define loop_unrolled_for for
#endif
/* Only a pointer to the VSI is needed here; avoid pulling in its header. */
struct ice_vsi;

#ifdef CONFIG_XDP_SOCKETS
/* Real AF_XDP zero-copy implementations, defined in ice_xsk.c. */
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
		       u16 qid);
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget);
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
#else
|
|
static inline bool
|
|
ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
|
|
u32 __always_unused budget,
|
|
int __always_unused napi_budget)
|
|
{
|
|
return false;
|
|
}
|
|
|
|
static inline int
|
|
ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
|
|
struct xsk_buff_pool __always_unused *pool,
|
|
u16 __always_unused qid)
|
|
{
|
|
return -EOPNOTSUPP;
|
|
}
|
|
|
|
static inline int
|
|
ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
|
|
int __always_unused budget)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
static inline bool
|
|
ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
|
|
u16 __always_unused count)
|
|
{
|
|
return false;
|
|
}
|
|
|
|
/* No RX ring can have an XSK pool attached without XSK support. */
static inline bool ice_xsk_any_rx_ring_ena(struct ice_vsi __always_unused *vsi)
{
	return false;
}
static inline int
|
|
ice_xsk_wakeup(struct net_device __always_unused *netdev,
|
|
u32 __always_unused queue_id, u32 __always_unused flags)
|
|
{
|
|
return -EOPNOTSUPP;
|
|
}
|
|
|
|
/* Ring-cleanup stubs: there are no ZC buffers to release without XSK. */
static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { }
static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }
static inline int
|
|
ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi,
|
|
bool __always_unused zc)
|
|
{
|
|
return 0;
|
|
}
|
|
#endif /* CONFIG_XDP_SOCKETS */
|
|
#endif /* !_ICE_XSK_H_ */
|