mt76: improve tx queue stop/wake
Instead of stopping and waking only a single queue, handle all phy tx queues mapped to the same hardware queue. Also allow the driver to block tx queues. Signed-off-by: Felix Fietkau <nbd@nbd.name>
This commit is contained in:
parent
d211c00338
commit
90d494c99a
3 changed files with 13 additions and 21 deletions
|
@ -220,7 +220,6 @@ static void
|
||||||
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
|
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
|
||||||
{
|
{
|
||||||
struct mt76_queue_entry entry;
|
struct mt76_queue_entry entry;
|
||||||
bool wake = false;
|
|
||||||
int last;
|
int last;
|
||||||
|
|
||||||
if (!q)
|
if (!q)
|
||||||
|
@ -238,7 +237,6 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
|
||||||
if (entry.txwi) {
|
if (entry.txwi) {
|
||||||
if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
|
if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
|
||||||
mt76_put_txwi(dev, entry.txwi);
|
mt76_put_txwi(dev, entry.txwi);
|
||||||
wake = !flush;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!flush && q->tail == last)
|
if (!flush && q->tail == last)
|
||||||
|
@ -253,16 +251,8 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
|
||||||
spin_unlock_bh(&q->lock);
|
spin_unlock_bh(&q->lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
wake = wake && q->stopped &&
|
|
||||||
q->qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
|
|
||||||
if (wake)
|
|
||||||
q->stopped = false;
|
|
||||||
|
|
||||||
if (!q->queued)
|
if (!q->queued)
|
||||||
wake_up(&dev->tx_wait);
|
wake_up(&dev->tx_wait);
|
||||||
|
|
||||||
if (wake)
|
|
||||||
ieee80211_wake_queue(dev->hw, q->qid);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void *
|
static void *
|
||||||
|
|
|
@ -136,6 +136,7 @@ struct mt76_queue {
|
||||||
int queued;
|
int queued;
|
||||||
int buf_size;
|
int buf_size;
|
||||||
bool stopped;
|
bool stopped;
|
||||||
|
bool blocked;
|
||||||
|
|
||||||
u8 buf_offset;
|
u8 buf_offset;
|
||||||
u8 hw_idx;
|
u8 hw_idx;
|
||||||
|
|
|
@ -291,12 +291,6 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
|
||||||
spin_lock_bh(&q->lock);
|
spin_lock_bh(&q->lock);
|
||||||
__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
|
__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
|
||||||
dev->queue_ops->kick(dev, q);
|
dev->queue_ops->kick(dev, q);
|
||||||
|
|
||||||
if (q->queued > q->ndesc - 8 && !q->stopped) {
|
|
||||||
ieee80211_stop_queue(phy->hw, skb_get_queue_mapping(skb));
|
|
||||||
q->stopped = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
spin_unlock_bh(&q->lock);
|
spin_unlock_bh(&q->lock);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(mt76_tx);
|
EXPORT_SYMBOL_GPL(mt76_tx);
|
||||||
|
@ -381,6 +375,13 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
|
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
|
||||||
|
|
||||||
|
static bool
|
||||||
|
mt76_txq_stopped(struct mt76_queue *q)
|
||||||
|
{
|
||||||
|
return q->stopped || q->blocked ||
|
||||||
|
q->queued + MT_TXQ_FREE_THR >= q->ndesc;
|
||||||
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
|
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
|
||||||
struct mt76_txq *mtxq)
|
struct mt76_txq *mtxq)
|
||||||
|
@ -419,10 +420,7 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
|
||||||
test_bit(MT76_RESET, &phy->state))
|
test_bit(MT76_RESET, &phy->state))
|
||||||
return -EBUSY;
|
return -EBUSY;
|
||||||
|
|
||||||
if (stop)
|
if (stop || mt76_txq_stopped(q))
|
||||||
break;
|
|
||||||
|
|
||||||
if (q->queued + MT_TXQ_FREE_THR >= q->ndesc)
|
|
||||||
break;
|
break;
|
||||||
|
|
||||||
skb = mt76_txq_dequeue(phy, mtxq);
|
skb = mt76_txq_dequeue(phy, mtxq);
|
||||||
|
@ -463,7 +461,7 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (q->queued + MT_TXQ_FREE_THR >= q->ndesc)
|
if (mt76_txq_stopped(q))
|
||||||
break;
|
break;
|
||||||
|
|
||||||
txq = ieee80211_next_txq(phy->hw, qid);
|
txq = ieee80211_next_txq(phy->hw, qid);
|
||||||
|
@ -498,11 +496,14 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
|
||||||
|
|
||||||
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
|
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
|
||||||
{
|
{
|
||||||
|
struct mt76_queue *q;
|
||||||
int len;
|
int len;
|
||||||
|
|
||||||
if (qid >= 4)
|
if (qid >= 4)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
q = phy->q_tx[qid];
|
||||||
|
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
|
|
||||||
do {
|
do {
|
||||||
|
|
Loading…
Add table
Reference in a new issue