net: enetc: move skb creation into enetc_build_skb

We need to build an skb from two code paths now: from the plain RX data
path and from the XDP data path when the verdict is XDP_PASS.

Create a new enetc_build_skb function which contains the essential steps
for building an skb based on the first and last positions of buffer
descriptors within the RX ring.

We also squash the enetc_process_skb function into enetc_build_skb,
because what that function did wasn't very meaningful on its own.

The "rx_frm_cnt++" instruction has been moved around napi_gro_receive
for cosmetic reasons, to be in the same spot as rx_byte_cnt++, which
itself must be before napi_gro_receive, because that's when we lose
ownership of the skb.

Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2fa423f5f0
commit a800abd3ec
1 changed file with 44 additions and 37 deletions
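For context before the diff, a minimal sketch of how the two consumers are meant to share the new helper. The plain RX call is what this patch introduces in enetc_clean_rx_ring(); the XDP_PASS branch is only an assumption about the follow-up XDP patches (which may use a different final signature), not code from this commit:

/* Plain RX data path, as converted by this patch: */
skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i, &cleaned_cnt);
if (!skb)
        break;

/* Hypothetical XDP data path (not in this commit): on an XDP_PASS
 * verdict, build the skb from the same BD range and hand it to GRO.
 */
switch (xdp_act) {      /* xdp_act: assumed verdict variable */
case XDP_PASS:
        skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i, &cleaned_cnt);
        if (!skb)
                break;
        napi_gro_receive(napi, skb);
        break;
}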
@@ -513,13 +513,6 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
 #endif
 }
 
-static void enetc_process_skb(struct enetc_bdr *rx_ring,
-                              struct sk_buff *skb)
-{
-        skb_record_rx_queue(skb, rx_ring->index);
-        skb->protocol = eth_type_trans(skb, rx_ring->ndev);
-}
-
 static bool enetc_page_reusable(struct page *page)
 {
         return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
@@ -627,6 +620,47 @@ static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring,
         return true;
 }
 
+static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring,
+                                       u32 bd_status, union enetc_rx_bd **rxbd,
+                                       int *i, int *cleaned_cnt)
+{
+        struct sk_buff *skb;
+        u16 size;
+
+        size = le16_to_cpu((*rxbd)->r.buf_len);
+        skb = enetc_map_rx_buff_to_skb(rx_ring, *i, size);
+        if (!skb)
+                return NULL;
+
+        enetc_get_offloads(rx_ring, *rxbd, skb);
+
+        (*cleaned_cnt)++;
+
+        enetc_rxbd_next(rx_ring, rxbd, i);
+
+        /* not last BD in frame? */
+        while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+                bd_status = le32_to_cpu((*rxbd)->r.lstatus);
+                size = ENETC_RXB_DMA_SIZE;
+
+                if (bd_status & ENETC_RXBD_LSTATUS_F) {
+                        dma_rmb();
+                        size = le16_to_cpu((*rxbd)->r.buf_len);
+                }
+
+                enetc_add_rx_buff_to_skb(rx_ring, *i, size, skb);
+
+                (*cleaned_cnt)++;
+
+                enetc_rxbd_next(rx_ring, rxbd, i);
+        }
+
+        skb_record_rx_queue(skb, rx_ring->index);
+        skb->protocol = eth_type_trans(skb, rx_ring->ndev);
+
+        return skb;
+}
+
 #define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
 
 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
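The helper's parameters double as cursor state: rxbd and i are advanced past every BD of the frame, and cleaned_cnt is bumped once per consumed BD so the caller can refill the ring later. A standalone sketch of that contract, assuming the driver's enetc_rxbd() accessor; the surrounding variables mirror enetc_clean_rx_ring():

int i = rx_ring->next_to_clean;         /* first BD of the frame */
union enetc_rx_bd *rxbd = enetc_rxbd(rx_ring, i);
u32 bd_status = le32_to_cpu(rxbd->r.lstatus);
int cleaned_cnt = 0;                    /* BDs consumed so far */
struct sk_buff *skb;

skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i, &cleaned_cnt);
if (skb)
        napi_gro_receive(napi, skb);
/* on return, rxbd/i point at the first BD of the next frame and
 * cleaned_cnt reflects every BD the helper consumed
 */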
@@ -643,7 +677,6 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
                 union enetc_rx_bd *rxbd;
                 struct sk_buff *skb;
                 u32 bd_status;
-                u16 size;
 
                 if (cleaned_cnt >= ENETC_RXBD_BUNDLE)
                         cleaned_cnt -= enetc_refill_rx_ring(rx_ring,
@@ -661,41 +694,15 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
                                                       &rxbd, &i))
                         break;
 
-                size = le16_to_cpu(rxbd->r.buf_len);
-                skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
+                skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i,
+                                      &cleaned_cnt);
                 if (!skb)
                         break;
 
-                enetc_get_offloads(rx_ring, rxbd, skb);
-
-                cleaned_cnt++;
-
-                enetc_rxbd_next(rx_ring, &rxbd, &i);
-
-                /* not last BD in frame? */
-                while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
-                        bd_status = le32_to_cpu(rxbd->r.lstatus);
-                        size = ENETC_RXB_DMA_SIZE;
-
-                        if (bd_status & ENETC_RXBD_LSTATUS_F) {
-                                dma_rmb();
-                                size = le16_to_cpu(rxbd->r.buf_len);
-                        }
-
-                        enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);
-
-                        cleaned_cnt++;
-
-                        enetc_rxbd_next(rx_ring, &rxbd, &i);
-                }
-
                 rx_byte_cnt += skb->len;
+                rx_frm_cnt++;
 
-                enetc_process_skb(rx_ring, skb);
-
                 napi_gro_receive(napi, skb);
-
-                rx_frm_cnt++;
         }
 
         rx_ring->next_to_clean = i;
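The reordered tail of the loop encodes the ownership rule stated in the commit message; restated as comments on the final sequence:

rx_byte_cnt += skb->len;        /* driver still owns the skb: skb->len is safe */
rx_frm_cnt++;                   /* moved here only to sit beside rx_byte_cnt */

napi_gro_receive(napi, skb);    /* ownership passes to the GRO layer; the skb
                                 * may be freed at any time after this call,
                                 * so it must not be dereferenced again
                                 */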