net: stmmac: Prepare to add Split Header support

In order to add Split Header support, stmmac_rx() needs to take into
account that a packet may be split across multiple descriptors.
Refactor the logic of this function to support this scenario.

Changes from v2:
- Fixup if condition detection (Jakub)
- Don't stop NAPI with unfinished packet (Jakub)
- Use napi_alloc_skb() (Jakub)

Signed-off-by: Jose Abreu <joabreu@synopsys.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 25e80cd05f
commit ec222003bd
2 changed files with 98 additions and 63 deletions
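The heart of the refactor is visible in the hunks below: stmmac_rx() now accumulates one frame across any number of descriptors, and when the NAPI budget runs out mid-frame it parks the partial work in the new rx_q->state fields and resumes from it on the next poll. The standalone C sketch below models just that control flow; every name and the descriptor format are invented for illustration, it is not driver code.

#include <stdio.h>

/* One "descriptor": a buffer length plus a flag saying more segments
 * follow (standing in for the driver's (status & rx_not_ls) check). */
struct toy_desc {
	int len;
	int not_last;
};

/* Per-queue reassembly state, mirroring the fields this patch adds to
 * struct stmmac_rx_queue. */
struct toy_state {
	int state_saved;
	int len;	/* bytes accumulated so far */
};

/* Consume up to 'limit' descriptors; a frame may span several of them.
 * An unfinished frame is parked in 'st' and resumed on the next call. */
static void rx_poll(struct toy_state *st, const struct toy_desc *ring,
		    int n, int *pos, int limit)
{
	int count = 0;

	while (count < limit && *pos < n) {
		const struct toy_desc *p;
		int len = st->state_saved ? st->len : 0;

		st->state_saved = 0;

		do {	/* walk this frame's descriptors */
			p = &ring[(*pos)++];
			len += p->len;
			count++;
		} while (p->not_last && count < limit && *pos < n);

		if (p->not_last) {	/* budget gone mid-frame: save */
			st->state_saved = 1;
			st->len = len;
			return;
		}
		printf("frame complete: %d bytes\n", len);
	}
}

int main(void)
{
	/* a 3060-byte frame split over three descriptors, then a short one */
	const struct toy_desc ring[] = { {1500, 1}, {1500, 1}, {60, 0}, {42, 0} };
	struct toy_state st = { 0, 0 };
	int pos = 0;

	rx_poll(&st, ring, 4, &pos, 2);	/* budget expires mid-frame */
	rx_poll(&st, ring, 4, &pos, 2);	/* resumes and finishes */
	return 0;
}

Built with a plain cc, the first rx_poll() call exhausts its budget of two descriptors mid-frame and saves state; the second call resumes and reports the completed 3060-byte frame before handling the following one.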
drivers/net/ethernet/stmicro/stmmac/stmmac.h

@@ -74,6 +74,12 @@ struct stmmac_rx_queue {
 	u32 rx_zeroc_thresh;
 	dma_addr_t dma_rx_phy;
 	u32 rx_tail_addr;
+	unsigned int state_saved;
+	struct {
+		struct sk_buff *skb;
+		unsigned int len;
+		unsigned int error;
+	} state;
 };
 
 struct stmmac_channel {
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c

@@ -3353,9 +3353,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 {
 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 	struct stmmac_channel *ch = &priv->channel[queue];
+	unsigned int count = 0, error = 0, len = 0;
+	int status = 0, coe = priv->hw->rx_csum;
 	unsigned int next_entry = rx_q->cur_rx;
-	int coe = priv->hw->rx_csum;
-	unsigned int count = 0;
+	struct sk_buff *skb = NULL;
 
 	if (netif_msg_rx_status(priv)) {
 		void *rx_head;
@@ -3369,10 +3370,28 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
 	}
 	while (count < limit) {
+		enum pkt_hash_types hash_type;
 		struct stmmac_rx_buffer *buf;
+		unsigned int prev_len = 0;
 		struct dma_desc *np, *p;
-		int entry, status;
+		int entry;
+		u32 hash;
 
+		if (!count && rx_q->state_saved) {
+			skb = rx_q->state.skb;
+			error = rx_q->state.error;
+			len = rx_q->state.len;
+		} else {
+			rx_q->state_saved = false;
+			skb = NULL;
+			error = 0;
+			len = 0;
+		}
+
+		if (count >= limit)
+			break;
+
+read_again:
 		entry = next_entry;
 		buf = &rx_q->buf_pool[entry];
 
@@ -3407,29 +3426,25 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
 			priv->dev->stats.rx_errors++;
 			buf->page = NULL;
-		} else {
-			enum pkt_hash_types hash_type;
-			struct sk_buff *skb;
-			unsigned int des;
-			int frame_len;
-			u32 hash;
-
-			stmmac_get_desc_addr(priv, p, &des);
-			frame_len = stmmac_get_rx_frame_len(priv, p, coe);
-
-			/* If frame length is greater than skb buffer size
-			 * (preallocated during init) then the packet is
-			 * ignored
-			 */
-			if (frame_len > priv->dma_buf_sz) {
-				if (net_ratelimit())
-					netdev_err(priv->dev,
-						   "len %d larger than size (%d)\n",
-						   frame_len, priv->dma_buf_sz);
-				priv->dev->stats.rx_length_errors++;
-				continue;
-			}
-
+			error = 1;
+		}
+
+		if (unlikely(error && (status & rx_not_ls)))
+			goto read_again;
+		if (unlikely(error)) {
+			if (skb)
+				dev_kfree_skb(skb);
+			continue;
+		}
+
+		/* Buffer is good. Go on. */
+
+		if (likely(status & rx_not_ls)) {
+			len += priv->dma_buf_sz;
+		} else {
+			prev_len = len;
+			len = stmmac_get_rx_frame_len(priv, p, coe);
+
 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
 			 * Type frames (LLC/LLC-SNAP)
 			 *
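A detail worth noting in the hunk above: a frame flagged discard_frame can itself span several descriptors, so the new code keeps consuming segments (goto read_again) until the last one before freeing any partially built skb. The toy model below illustrates that drain-then-drop rule; all names are invented for the example, it is not driver code.

#include <stdio.h>

/* A segment of a frame: 'bad' marks a descriptor flagged as discard,
 * 'not_last' says more segments of the same frame follow. */
struct toy_seg {
	int bad;
	int not_last;
};

static void rx_drain(const struct toy_seg *ring, int n)
{
	int pos = 0;

	while (pos < n) {
		const struct toy_seg *p;
		int error = 0;

		do {	/* walk one whole frame, segment by segment */
			p = &ring[pos++];
			if (p->bad)
				error = 1;	/* remember, keep walking */
		} while (p->not_last && pos < n);

		printf(error ? "frame dropped\n" : "frame delivered\n");
	}
}

int main(void)
{
	/* a bad three-segment frame followed by a good short frame */
	const struct toy_seg ring[] = { {1, 1}, {0, 1}, {0, 0}, {0, 0} };

	rx_drain(ring, 4);
	return 0;
}

Without the drain, the second frame would be parsed starting in the middle of the bad one's segments.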
@@ -3439,37 +3454,49 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 			 */
 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
 			    unlikely(status != llc_snap))
-				frame_len -= ETH_FCS_LEN;
-
-			if (netif_msg_rx_status(priv)) {
-				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
-					   p, entry, des);
-				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
-					   frame_len, status);
-			}
-
-			skb = netdev_alloc_skb_ip_align(priv->dev, frame_len);
-			if (unlikely(!skb)) {
+				len -= ETH_FCS_LEN;
+		}
+
+		if (!skb) {
+			skb = napi_alloc_skb(&ch->rx_napi, len);
+			if (!skb) {
 				priv->dev->stats.rx_dropped++;
 				continue;
 			}
 
-			dma_sync_single_for_cpu(priv->device, buf->addr,
-						frame_len, DMA_FROM_DEVICE);
-			skb_copy_to_linear_data(skb, page_address(buf->page),
-						frame_len);
-			skb_put(skb, frame_len);
+			dma_sync_single_for_cpu(priv->device, buf->addr, len,
+						DMA_FROM_DEVICE);
+			skb_copy_to_linear_data(skb, page_address(buf->page),
+						len);
+			skb_put(skb, len);
 
-			if (netif_msg_pktdata(priv)) {
-				netdev_dbg(priv->dev, "frame received (%dbytes)",
-					   frame_len);
-				print_pkt(skb->data, frame_len);
-			}
-
-			stmmac_get_rx_hwtstamp(priv, p, np, skb);
-
-			stmmac_rx_vlan(priv->dev, skb);
-
-			skb->protocol = eth_type_trans(skb, priv->dev);
-
-			if (unlikely(!coe))
+			/* Data payload copied into SKB, page ready for recycle */
+			page_pool_recycle_direct(rx_q->page_pool, buf->page);
+			buf->page = NULL;
+		} else {
+			unsigned int buf_len = len - prev_len;
+
+			if (likely(status & rx_not_ls))
+				buf_len = priv->dma_buf_sz;
+
+			dma_sync_single_for_cpu(priv->device, buf->addr,
+						buf_len, DMA_FROM_DEVICE);
+			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+					buf->page, 0, buf_len,
+					priv->dma_buf_sz);
+
+			/* Data payload appended into SKB */
+			page_pool_release_page(rx_q->page_pool, buf->page);
+			buf->page = NULL;
+		}
+
+		if (likely(status & rx_not_ls))
+			goto read_again;
+
+		/* Got entire packet into SKB. Finish it. */
+
+		stmmac_get_rx_hwtstamp(priv, p, np, skb);
+		stmmac_rx_vlan(priv->dev, skb);
+		skb->protocol = eth_type_trans(skb, priv->dev);
+
+		if (unlikely(!coe))
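The hunk above also splits the fill path in two: the first segment of a frame is copied into the skb linear area (napi_alloc_skb() plus skb_copy_to_linear_data()), while each continuation segment is attached as a page fragment via skb_add_rx_frag() with no copy. A toy userspace model of the same idea, with invented names, not driver code:

#include <stdio.h>
#include <string.h>

/* The head segment is copied into a small linear area (the analogue of
 * skb_copy_to_linear_data()); every later segment is only referenced,
 * like skb_add_rx_frag() attaching a page without copying. */
struct toy_skb {
	char linear[64];
	int linear_len;
	const char *frags[8];
	int frag_len[8];
	int nr_frags;
};

static void rx_segment(struct toy_skb *skb, const char *buf, int len)
{
	if (!skb->linear_len && !skb->nr_frags) {
		memcpy(skb->linear, buf, len);	/* head: copy */
		skb->linear_len = len;
	} else {
		skb->frags[skb->nr_frags] = buf;	/* tail: reference */
		skb->frag_len[skb->nr_frags] = len;
		skb->nr_frags++;
	}
}

int main(void)
{
	static const char seg0[] = "header+start-of-payload";
	static const char seg1[] = "rest-of-payload";
	struct toy_skb skb = { .linear_len = 0, .nr_frags = 0 };

	rx_segment(&skb, seg0, sizeof(seg0) - 1);	/* copied */
	rx_segment(&skb, seg1, sizeof(seg1) - 1);	/* appended as frag */
	printf("linear=%d bytes, frags=%d\n", skb.linear_len, skb.nr_frags);
	return 0;
}

The page_pool handling in the patch differs accordingly: a copied head page is recycled immediately (page_pool_recycle_direct()), while a fragment's page is handed over to the skb (page_pool_release_page()).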
@@ -3483,13 +3510,15 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 		skb_record_rx_queue(skb, queue);
 		napi_gro_receive(&ch->rx_napi, skb);
 
-		/* Data payload copied into SKB, page ready for recycle */
-		page_pool_recycle_direct(rx_q->page_pool, buf->page);
-		buf->page = NULL;
-
 		priv->dev->stats.rx_packets++;
-		priv->dev->stats.rx_bytes += frame_len;
-		}
+		priv->dev->stats.rx_bytes += len;
 	}
 
+	if (status & rx_not_ls) {
+		rx_q->state_saved = true;
+		rx_q->state.skb = skb;
+		rx_q->state.error = error;
+		rx_q->state.len = len;
+	}
+
 	stmmac_rx_refill(priv, queue);