linux/drivers/net/wireless/silabs/wfx/queue.c
commit f7385a2024 by Jérôme Pouiller
wifi: wfx: allow to send frames during ROC
Until now, all traffic was blocked during a scan operation. However, the
scan operation is going to be used to implement Remain On Channel (ROC).
In this case, special frames (marked with IEEE80211_TX_CTL_TX_OFFCHAN)
must be sent while the operation is in progress.

These frames need to be sent on virtual interface #2. Until now, this
interface was only used by the device for internal purposes, but since
API 3.9 it can be used to send data during a scan operation (the scan
process is hijacked to implement ROC).

Thus, the way frames are matched to an interface needs to change
slightly.

Fortunately, frames received during the scan are already marked with the
correct interface number, so no change is needed on the receive path.

Signed-off-by: Jérôme Pouiller <jerome.pouiller@silabs.com>
Signed-off-by: Kalle Valo <kvalo@kernel.org>
Link: https://lore.kernel.org/r/20231004172843.195332-8-jerome.pouiller@silabs.com
2023-10-09 09:53:07 +03:00

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Queue between the tx operation and the bh workqueue.
 *
 * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
 * Copyright (c) 2010, ST-Ericsson
 */
#include <linux/sched.h>
#include <net/mac80211.h>
#include "queue.h"
#include "wfx.h"
#include "sta.h"
#include "data_tx.h"
#include "traces.h"
void wfx_tx_lock(struct wfx_dev *wdev)
{
        atomic_inc(&wdev->tx_lock);
}

void wfx_tx_unlock(struct wfx_dev *wdev)
{
        int tx_lock = atomic_dec_return(&wdev->tx_lock);

        WARN(tx_lock < 0, "inconsistent tx_lock value");
        if (!tx_lock)
                wfx_bh_request_tx(wdev);
}
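
/* Wait (up to 3 seconds) until the device has returned all of its tx buffers. On timeout,
 * the stuck frames are dumped and the chip is declared frozen.
 */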
void wfx_tx_flush(struct wfx_dev *wdev)
{
        int ret;

        /* Do not wait for any reply if chip is frozen */
        if (wdev->chip_frozen)
                return;

        wfx_tx_lock(wdev);
        mutex_lock(&wdev->hif_cmd.lock);
        ret = wait_event_timeout(wdev->hif.tx_buffers_empty, !wdev->hif.tx_buffers_used,
                                 msecs_to_jiffies(3000));
        if (!ret) {
                dev_warn(wdev->dev, "cannot flush tx buffers (%d still busy)\n",
                         wdev->hif.tx_buffers_used);
                wfx_pending_dump_old_frames(wdev, 3000);
                /* FIXME: drop pending frames here */
                wdev->chip_frozen = true;
        }
        mutex_unlock(&wdev->hif_cmd.lock);
        wfx_tx_unlock(wdev);
}

void wfx_tx_lock_flush(struct wfx_dev *wdev)
{
        wfx_tx_lock(wdev);
        wfx_tx_flush(wdev);
}
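
/* Each access category is backed by three sub-queues: "normal" for regular traffic, "cab"
 * (Content After Beacon) for multicast frames to send after the DTIM beacon, and "offchan"
 * for off-channel frames sent during a ROC operation.
 */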
void wfx_tx_queues_init(struct wfx_vif *wvif)
{
        /* The device is in charge of respecting the details of the QoS parameters. The
         * driver just ensures that it roughly respects the priorities to avoid any shortage.
         */
        const int priorities[IEEE80211_NUM_ACS] = { 1, 2, 64, 128 };
        int i;

        for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
                skb_queue_head_init(&wvif->tx_queue[i].normal);
                skb_queue_head_init(&wvif->tx_queue[i].cab);
                skb_queue_head_init(&wvif->tx_queue[i].offchan);
                wvif->tx_queue[i].priority = priorities[i];
        }
}

bool wfx_tx_queue_empty(struct wfx_vif *wvif, struct wfx_queue *queue)
{
        return skb_queue_empty_lockless(&queue->normal) &&
               skb_queue_empty_lockless(&queue->cab) &&
               skb_queue_empty_lockless(&queue->offchan);
}

void wfx_tx_queues_check_empty(struct wfx_vif *wvif)
{
        int i;

        for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
                WARN_ON(atomic_read(&wvif->tx_queue[i].pending_frames));
                WARN_ON(!wfx_tx_queue_empty(wvif, &wvif->tx_queue[i]));
        }
}
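
/* Move every frame of the three sub-queues to the "dropped" list, then wake up the waiters
 * on tx_dequeue.
 */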
static void __wfx_tx_queue_drop(struct wfx_vif *wvif,
                                struct sk_buff_head *skb_queue, struct sk_buff_head *dropped)
{
        struct sk_buff *skb, *tmp;

        spin_lock_bh(&skb_queue->lock);
        skb_queue_walk_safe(skb_queue, skb, tmp) {
                __skb_unlink(skb, skb_queue);
                skb_queue_head(dropped, skb);
        }
        spin_unlock_bh(&skb_queue->lock);
}

void wfx_tx_queue_drop(struct wfx_vif *wvif, struct wfx_queue *queue,
                       struct sk_buff_head *dropped)
{
        __wfx_tx_queue_drop(wvif, &queue->normal, dropped);
        __wfx_tx_queue_drop(wvif, &queue->cab, dropped);
        __wfx_tx_queue_drop(wvif, &queue->offchan, dropped);
        wake_up(&wvif->wdev->tx_dequeue);
}
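
/* Enqueue a frame received from mac80211. The destination sub-queue is derived from the
 * tx_info flags: off-channel frames go to "offchan", frames to send after the DTIM beacon
 * go to "cab", anything else goes to "normal".
 */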
void wfx_tx_queues_put(struct wfx_vif *wvif, struct sk_buff *skb)
{
        struct wfx_queue *queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

        if (tx_info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
                skb_queue_tail(&queue->offchan, skb);
        else if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
                skb_queue_tail(&queue->cab, skb);
        else
                skb_queue_tail(&queue->normal, skb);
}
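
/* Move all the frames already sent to the device (but not yet acknowledged) to the
 * "dropped" list, rewinding the per-queue pending_frames counters. Only used to recover a
 * frozen device.
 */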
void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped)
{
        struct wfx_queue *queue;
        struct wfx_vif *wvif;
        struct sk_buff *skb;

        WARN(!wdev->chip_frozen, "%s should only be used to recover a frozen device",
             __func__);
        while ((skb = skb_dequeue(&wdev->tx_pending)) != NULL) {
                wvif = wfx_skb_wvif(wdev, skb);
                if (wvif) {
                        queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
                        WARN_ON(skb_get_queue_mapping(skb) > 3);
                        WARN_ON(!atomic_read(&queue->pending_frames));
                        atomic_dec(&queue->pending_frames);
                }
                skb_queue_head(dropped, skb);
        }
}
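
/* Retrieve (and unlink) the pending frame matching packet_id, typically when the device
 * reports the fate of a frame it was given.
 */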
struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
{
        struct wfx_queue *queue;
        struct wfx_hif_req_tx *req;
        struct wfx_vif *wvif;
        struct wfx_hif_msg *hif;
        struct sk_buff *skb;

        spin_lock_bh(&wdev->tx_pending.lock);
        skb_queue_walk(&wdev->tx_pending, skb) {
                hif = (struct wfx_hif_msg *)skb->data;
                req = (struct wfx_hif_req_tx *)hif->body;
                if (req->packet_id != packet_id)
                        continue;
                spin_unlock_bh(&wdev->tx_pending.lock);
                wvif = wfx_skb_wvif(wdev, skb);
                if (wvif) {
                        queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
                        WARN_ON(skb_get_queue_mapping(skb) > 3);
                        WARN_ON(!atomic_read(&queue->pending_frames));
                        atomic_dec(&queue->pending_frames);
                }
                skb_unlink(skb, &wdev->tx_pending);
                return skb;
        }
        spin_unlock_bh(&wdev->tx_pending.lock);
        WARN(1, "cannot find packet in pending queue");
        return NULL;
}
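
/* Log every pending frame that the device has held for more than limit_ms milliseconds.
 * Used to diagnose a device that does not return its tx buffers.
 */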
void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms)
{
        ktime_t now = ktime_get();
        struct wfx_tx_priv *tx_priv;
        struct wfx_hif_req_tx *req;
        struct sk_buff *skb;
        bool first = true;

        spin_lock_bh(&wdev->tx_pending.lock);
        skb_queue_walk(&wdev->tx_pending, skb) {
                tx_priv = wfx_skb_tx_priv(skb);
                req = wfx_skb_txreq(skb);
                if (ktime_after(now, ktime_add_ms(tx_priv->xmit_timestamp, limit_ms))) {
                        if (first) {
                                dev_info(wdev->dev, "frames stuck in firmware since %dms or more:\n",
                                         limit_ms);
                                first = false;
                        }
                        dev_info(wdev->dev, "  id %08x sent %lldms ago\n",
                                 req->packet_id, ktime_ms_delta(now, tx_priv->xmit_timestamp));
                }
        }
        spin_unlock_bh(&wdev->tx_pending.lock);
}
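
/* Return the time elapsed (in microseconds) since the frame was sent to the device. */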
unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev, struct sk_buff *skb)
{
        ktime_t now = ktime_get();
        struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);

        return ktime_us_delta(now, tx_priv->xmit_timestamp);
}
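
/* Return true if at least one multicast frame is waiting to be sent after the next DTIM
 * beacon. Only meaningful for an AP interface.
 */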
bool wfx_tx_queues_has_cab(struct wfx_vif *wvif)
{
        struct ieee80211_vif *vif = wvif_to_vif(wvif);
        int i;

        if (vif->type != NL80211_IFTYPE_AP)
                return false;
        for (i = 0; i < IEEE80211_NUM_ACS; ++i)
                /* Note: since only an AP can have mcast frames in queue and only one vif
                 * can be an AP, all queued frames have the same interface id
                 */
                if (!skb_queue_empty_lockless(&wvif->tx_queue[i].cab))
                        return true;
        return false;
}

static int wfx_tx_queue_get_weight(struct wfx_queue *queue)
{
        return atomic_read(&queue->pending_frames) * queue->priority;
}
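
/* Pick the next frame to send to the device. Queues are sorted by weight (pending frames
 * times the priority factor) and the lightest queue is served first. Off-channel frames
 * take precedence; then, unless a scan is in progress, "after DTIM" multicast frames; then
 * normal traffic.
 */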
static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev)
{
        struct wfx_queue *queues[IEEE80211_NUM_ACS * ARRAY_SIZE(wdev->vif)];
        int i, j, num_queues = 0;
        struct wfx_vif *wvif;
        struct wfx_hif_msg *hif;
        struct sk_buff *skb;

        /* sort the queues */
        wvif = NULL;
        while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
                for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                        WARN_ON(num_queues >= ARRAY_SIZE(queues));
                        queues[num_queues] = &wvif->tx_queue[i];
                        for (j = num_queues; j > 0; j--)
                                if (wfx_tx_queue_get_weight(queues[j]) <
                                    wfx_tx_queue_get_weight(queues[j - 1]))
                                        swap(queues[j - 1], queues[j]);
                        num_queues++;
                }
        }

        wvif = NULL;
        while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
                for (i = 0; i < num_queues; i++) {
                        skb = skb_dequeue(&queues[i]->offchan);
                        if (!skb)
                                continue;
                        hif = (struct wfx_hif_msg *)skb->data;
                        /* Offchan frames are assigned to a special interface, the only one
                         * allowed to send data during scan.
                         */
                        WARN_ON(hif->interface != 2);
                        atomic_inc(&queues[i]->pending_frames);
                        trace_queues_stats(wdev, queues[i]);
                        return skb;
                }
        }

        if (mutex_is_locked(&wdev->scan_lock))
                return NULL;

        wvif = NULL;
        while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
                if (!wvif->after_dtim_tx_allowed)
                        continue;
                for (i = 0; i < num_queues; i++) {
                        skb = skb_dequeue(&queues[i]->cab);
                        if (!skb)
                                continue;
                        /* Note: since only an AP can have mcast frames in queue and only
                         * one vif can be an AP, all queued frames have the same interface id
                         */
                        hif = (struct wfx_hif_msg *)skb->data;
                        WARN_ON(hif->interface != wvif->id);
                        WARN_ON(queues[i] != &wvif->tx_queue[skb_get_queue_mapping(skb)]);
                        atomic_inc(&queues[i]->pending_frames);
                        trace_queues_stats(wdev, queues[i]);
                        return skb;
                }
                /* No more multicast frames to send */
                wvif->after_dtim_tx_allowed = false;
                schedule_work(&wvif->update_tim_work);
        }

        for (i = 0; i < num_queues; i++) {
                skb = skb_dequeue(&queues[i]->normal);
                if (skb) {
                        atomic_inc(&queues[i]->pending_frames);
                        trace_queues_stats(wdev, queues[i]);
                        return skb;
                }
        }
        return NULL;
}
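
/* Give the bh workqueue the next frame to send to the device. The chosen frame is moved to
 * the tx_pending list and timestamped; its HIF message is returned. Returns NULL if the tx
 * is locked or if all queues are empty.
 */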
struct wfx_hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
{
        struct wfx_tx_priv *tx_priv;
        struct sk_buff *skb;

        if (atomic_read(&wdev->tx_lock))
                return NULL;
        skb = wfx_tx_queues_get_skb(wdev);
        if (!skb)
                return NULL;
        skb_queue_tail(&wdev->tx_pending, skb);
        wake_up(&wdev->tx_dequeue);
        tx_priv = wfx_skb_tx_priv(skb);
        tx_priv->xmit_timestamp = ktime_get();
        return (struct wfx_hif_msg *)skb->data;
}