ice: Pull out next_to_clean bump out of ice_put_rx_buf()
Plan is to move ice_put_rx_buf() to the end of ice_clean_rx_irq(), so in order to keep the ability to walk through HW Rx descriptors, pull the next_to_clean handling out of ice_put_rx_buf(). Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Reviewed-by: Alexander Lobakin <alexandr.lobakin@intel.com> Link: https://lore.kernel.org/bpf/20230131204506.219292-5-maciej.fijalkowski@intel.com
This commit is contained in:
parent
ac07533911
commit
d7956d81f1
1 changed file with 16 additions and 13 deletions
|
@ -898,11 +898,12 @@ ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
|
||||||
* for use by the CPU.
|
* for use by the CPU.
|
||||||
*/
|
*/
|
||||||
static struct ice_rx_buf *
|
static struct ice_rx_buf *
|
||||||
ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size)
|
ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
|
||||||
|
const unsigned int ntc)
|
||||||
{
|
{
|
||||||
struct ice_rx_buf *rx_buf;
|
struct ice_rx_buf *rx_buf;
|
||||||
|
|
||||||
rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
|
rx_buf = &rx_ring->rx_buf[ntc];
|
||||||
rx_buf->pgcnt =
|
rx_buf->pgcnt =
|
||||||
#if (PAGE_SIZE < 8192)
|
#if (PAGE_SIZE < 8192)
|
||||||
page_count(rx_buf->page);
|
page_count(rx_buf->page);
|
||||||
|
@ -1040,19 +1041,12 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
|
||||||
* @rx_ring: Rx descriptor ring to transact packets on
|
* @rx_ring: Rx descriptor ring to transact packets on
|
||||||
* @rx_buf: Rx buffer to pull data from
|
* @rx_buf: Rx buffer to pull data from
|
||||||
*
|
*
|
||||||
* This function will update next_to_clean and then clean up the contents
|
* This function will clean up the contents of the rx_buf. It will either
|
||||||
* of the rx_buf. It will either recycle the buffer or unmap it and free
|
* recycle the buffer or unmap it and free the associated resources.
|
||||||
* the associated resources.
|
|
||||||
*/
|
*/
|
||||||
static void
|
static void
|
||||||
ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
|
ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
|
||||||
{
|
{
|
||||||
u16 ntc = rx_ring->next_to_clean + 1;
|
|
||||||
|
|
||||||
/* fetch, update, and store next to clean */
|
|
||||||
ntc = (ntc < rx_ring->count) ? ntc : 0;
|
|
||||||
rx_ring->next_to_clean = ntc;
|
|
||||||
|
|
||||||
if (!rx_buf)
|
if (!rx_buf)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
@ -1114,6 +1108,8 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
|
||||||
unsigned int xdp_res, xdp_xmit = 0;
|
unsigned int xdp_res, xdp_xmit = 0;
|
||||||
struct sk_buff *skb = rx_ring->skb;
|
struct sk_buff *skb = rx_ring->skb;
|
||||||
struct bpf_prog *xdp_prog = NULL;
|
struct bpf_prog *xdp_prog = NULL;
|
||||||
|
u32 ntc = rx_ring->next_to_clean;
|
||||||
|
u32 cnt = rx_ring->count;
|
||||||
bool failure;
|
bool failure;
|
||||||
|
|
||||||
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
|
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
|
||||||
|
@ -1136,7 +1132,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
|
||||||
u16 rx_ptype;
|
u16 rx_ptype;
|
||||||
|
|
||||||
/* get the Rx desc from Rx ring based on 'next_to_clean' */
|
/* get the Rx desc from Rx ring based on 'next_to_clean' */
|
||||||
rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
|
rx_desc = ICE_RX_DESC(rx_ring, ntc);
|
||||||
|
|
||||||
/* status_error_len will always be zero for unused descriptors
|
/* status_error_len will always be zero for unused descriptors
|
||||||
* because it's cleared in cleanup, and overlaps with hdr_addr
|
* because it's cleared in cleanup, and overlaps with hdr_addr
|
||||||
|
@ -1160,6 +1156,8 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
|
||||||
if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
|
if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
|
||||||
ctrl_vsi->vf)
|
ctrl_vsi->vf)
|
||||||
ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
|
ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
|
||||||
|
if (++ntc == cnt)
|
||||||
|
ntc = 0;
|
||||||
ice_put_rx_buf(rx_ring, NULL);
|
ice_put_rx_buf(rx_ring, NULL);
|
||||||
cleaned_count++;
|
cleaned_count++;
|
||||||
continue;
|
continue;
|
||||||
|
@ -1169,7 +1167,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
|
||||||
ICE_RX_FLX_DESC_PKT_LEN_M;
|
ICE_RX_FLX_DESC_PKT_LEN_M;
|
||||||
|
|
||||||
/* retrieve a buffer from the ring */
|
/* retrieve a buffer from the ring */
|
||||||
rx_buf = ice_get_rx_buf(rx_ring, size);
|
rx_buf = ice_get_rx_buf(rx_ring, size, ntc);
|
||||||
|
|
||||||
if (!size) {
|
if (!size) {
|
||||||
xdp->data = NULL;
|
xdp->data = NULL;
|
||||||
|
@ -1203,6 +1201,8 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
|
||||||
total_rx_pkts++;
|
total_rx_pkts++;
|
||||||
|
|
||||||
cleaned_count++;
|
cleaned_count++;
|
||||||
|
if (++ntc == cnt)
|
||||||
|
ntc = 0;
|
||||||
ice_put_rx_buf(rx_ring, rx_buf);
|
ice_put_rx_buf(rx_ring, rx_buf);
|
||||||
continue;
|
continue;
|
||||||
construct_skb:
|
construct_skb:
|
||||||
|
@ -1222,6 +1222,8 @@ construct_skb:
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (++ntc == cnt)
|
||||||
|
ntc = 0;
|
||||||
ice_put_rx_buf(rx_ring, rx_buf);
|
ice_put_rx_buf(rx_ring, rx_buf);
|
||||||
cleaned_count++;
|
cleaned_count++;
|
||||||
|
|
||||||
|
@ -1262,6 +1264,7 @@ construct_skb:
|
||||||
total_rx_pkts++;
|
total_rx_pkts++;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
rx_ring->next_to_clean = ntc;
|
||||||
/* return up to cleaned_count buffers to hardware */
|
/* return up to cleaned_count buffers to hardware */
|
||||||
failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
|
failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
|
||||||
|
|
||||||
|
|
Loading…
Add table
Reference in a new issue