1
0
Fork 0
mirror of synced 2025-03-06 20:59:54 +01:00

wireless-next patches for v6.14

Multi-Link Operation implementation continues, both in stack and in
 drivers. Otherwise it has been relatively quiet.
 
 Major changes:
 
 cfg80211/mac80211
 
 * define wiphy guard
 
 * get TX power per link
 
 * EHT 320 MHz channel support for mesh
 
 ath11k
 
 * QCA6698AQ support
 
 ath9k
 
 * RX inactivity detection
 
 rtl8xxxu
 
 * add more USB device IDs
 
 rtw88
 
 * add more USB device IDs
 
 * enable USB RX aggregation and USB 3 to improve performance
 
 rtw89
 
 * PowerSave flow for Multi-Link Operation
 -----BEGIN PGP SIGNATURE-----
 
 iQFFBAABCgAvFiEEiBjanGPFTz4PRfLobhckVSbrbZsFAmdkbAoRHGt2YWxvQGtl
 cm5lbC5vcmcACgkQbhckVSbrbZuRIgf/dcjYr+eg3I7iU9qGxvEwHlDAC5CaMRwe
 8+/SO6gy49xf6igleNQ2jBn/qAsJTiro6IzJwb1D6i16ax4TRUTEkTZSiYCzntKI
 9Nkq59qhsRI4Vxrhp6NibUtVnjuRdSruVM5uLCccUCJ8tfq13WGhecR2pmV0TDO3
 bRSza6L64XIuSmqHkuWS3Hz1YQvIvIZMeeiWoC35mtXg6ORRXpYloLtCzFn1zxoP
 YPoeSfoAqIlaVwdB1DoaakU6is8oGZ0oI6zw/qaN8P7pYfqO62ATf6ZzAdwHE1dU
 fow9nvwzln+BqgpdIK81QFR+XC+7LorCGSaQlYu6C0nxjSzycSrgOw==
 =WIP7
 -----END PGP SIGNATURE-----

Merge tag 'wireless-next-2024-12-19' of git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next

Kalle Valo says:

====================
wireless-next patches for v6.14

Multi-Link Operation implementation continues, both in stack and in
drivers. Otherwise it has been relatively quiet.

Major changes:

cfg80211/mac80211
 - define wiphy guard
 - get TX power per link
 - EHT 320 MHz channel support for mesh

ath11k
 - QCA6698AQ support

ath9k
 - RX inactivity detection

rtl8xxxu
 - add more USB device IDs

rtw88
 - add more USB device IDs
 - enable USB RX aggregation and USB 3 to improve performance

rtw89
 - PowerSave flow for Multi-Link Operation

* tag 'wireless-next-2024-12-19' of git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next: (121 commits)
  wifi: wlcore: sysfs: constify 'struct bin_attribute'
  wifi: brcmfmac: clarify unmodifiable headroom log message
  wifi: brcmfmac: add missing header include for brcmf_dbg
  wifi: brcmsmac: add gain range check to wlc_phy_iqcal_gainparams_nphy()
  wifi: qtnfmac: fix spelling error in core.h
  wifi: rtw89: phy: add dummy C2H event handler for report of TAS power
  wifi: rtw89: 8851b: rfk: remove unnecessary assignment of return value of _dpk_dgain_read()
  wifi: rtw89: 8852c: rfk: refine target channel calculation in _rx_dck_channel_calc()
  wifi: rtlwifi: pci: wait for firmware loading before releasing memory
  wifi: rtlwifi: fix memory leaks and invalid access at probe error path
  wifi: rtlwifi: destroy workqueue at rtl_deinit_core
  wifi: rtlwifi: remove unused check_buddy_priv
  wifi: rtw89: 8922a: update format of RFK pre-notify H2C command v2
  wifi: rtw89: regd: update regulatory map to R68-R51
  wifi: rtw89: 8852c: disable ER SU when 4x HE-LTF and 0.8 GI capability differ
  wifi: rtw89: disable firmware training HE GI and LTF
  wifi: rtw89: ps: update data for firmware and settings for hardware before/after PS
  wifi: rtw89: ps: refactor channel info to firmware before entering PS
  wifi: rtw89: ps: refactor PS flow to support MLO
  wifi: mwifiex: decrease timeout waiting for host sleep from 10s to 5s
  ...
====================

Link: https://patch.msgid.link/20241219185709.774EDC4CECE@smtp.kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski 2024-12-19 18:54:07 -08:00
commit e8f3323805
145 changed files with 6347 additions and 1645 deletions

View file

@ -123,6 +123,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_dual_stations = false,
.pdev_suspend = false,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@ -207,6 +208,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
.support_dual_stations = false,
.pdev_suspend = false,
},
{
.name = "qca6390 hw2.0",
@ -296,6 +298,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
.support_dual_stations = true,
.pdev_suspend = false,
},
{
.name = "qcn9074 hw1.0",
@ -379,6 +382,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
.support_dual_stations = false,
.pdev_suspend = false,
},
{
.name = "wcn6855 hw2.0",
@ -468,6 +472,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
.support_dual_stations = true,
.pdev_suspend = false,
},
{
.name = "wcn6855 hw2.1",
@ -555,6 +560,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
.support_dual_stations = true,
.pdev_suspend = false,
},
{
.name = "wcn6750 hw1.0",
@ -637,6 +643,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = true,
.support_fw_mac_sequence = true,
.support_dual_stations = false,
.pdev_suspend = true,
},
{
.hw_rev = ATH11K_HW_IPQ5018_HW10,
@ -719,6 +726,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
.support_dual_stations = false,
.pdev_suspend = false,
},
{
.name = "qca2066 hw2.1",
@ -809,6 +817,94 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.support_fw_mac_sequence = true,
.support_dual_stations = true,
},
{
.name = "qca6698aq hw2.1",
.hw_rev = ATH11K_HW_QCA6698AQ_HW21,
.fw = {
.dir = "QCA6698AQ/hw2.1",
.board_size = 256 * 1024,
.cal_offset = 128 * 1024,
},
.max_radios = 3,
.bdf_addr = 0x4B0C0000,
.hw_ops = &wcn6855_ops,
.ring_mask = &ath11k_hw_ring_mask_qca6390,
.internal_sleep_clock = true,
.regs = &wcn6855_regs,
.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
.host_ce_config = ath11k_host_ce_config_qca6390,
.ce_count = 9,
.target_ce_config = ath11k_target_ce_config_wlan_qca6390,
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxdma_per_pdev = 2,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
.spectral = {
.fft_sz = 0,
.fft_pad_sz = 0,
.summary_pad_sz = 0,
.fft_hdr_len = 0,
.max_fft_bins = 0,
.fragment_160mhz = false,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_DEVICE) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO),
.supports_monitor = false,
.supports_shadow_regs = true,
.idle_ps = true,
.supports_sta_ps = true,
.coldboot_cal_mm = false,
.coldboot_cal_ftm = false,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
.num_vdevs = 2 + 1,
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
.supports_regdb = true,
.fix_l1ss = false,
.credit_flow = true,
.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
.hal_params = &ath11k_hw_hal_params_qca6390,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
.supports_rssi_stats = true,
.fw_wmi_diag_event = true,
.current_cc_support = true,
.dbr_debug_support = false,
.global_reset = true,
.bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
.m3_fw_support = true,
.fixed_bdf_addr = false,
.fixed_mem_region = false,
.static_window_map = false,
.hybrid_bus_type = false,
.fixed_fw_mem = false,
.support_off_channel_tx = true,
.supports_multi_bssid = true,
.sram_dump = {
.start = 0x01400000,
.end = 0x0177ffff,
},
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
.support_dual_stations = true,
.pdev_suspend = false,
},
};
static inline struct ath11k_pdev *ath11k_core_get_single_pdev(struct ath11k_base *ab)
@ -1669,11 +1765,47 @@ err_pdev_debug:
return ret;
}
/* Suspend every pdev's target via WMI before pdev teardown.
 *
 * Only runs on hardware that sets hw_params.pdev_suspend (e.g. WCN6750 per
 * the hw_params table in this commit).  For each radio it sends
 * WMI_PDEV_SUSPEND_AND_DISABLE_INTR and then waits up to 3 s for the
 * target's pause event to complete ab->htc_suspend.  Any failure aborts
 * the loop: if one pdev cannot be suspended, trying the rest is pointless.
 */
static void ath11k_core_pdev_suspend_target(struct ath11k_base *ab)
{
struct ath11k *ar;
struct ath11k_pdev *pdev;
unsigned long time_left;
int ret;
int i;
/* Hardware that doesn't need this quirk skips it entirely. */
if (!ab->hw_params.pdev_suspend)
return;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
/* Arm the completion before issuing the command so the
 * firmware's pause event cannot be missed.
 */
reinit_completion(&ab->htc_suspend);
ret = ath11k_wmi_pdev_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
pdev->pdev_id);
if (ret) {
ath11k_warn(ab, "could not suspend target :%d\n", ret);
/* pointless to try other pdevs */
return;
}
/* Firmware signals completion via the HTC suspend event. */
time_left = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
if (!time_left) {
ath11k_warn(ab, "suspend timed out - target pause event never came\n");
/* pointless to try other pdevs */
return;
}
}
}
static void ath11k_core_pdev_destroy(struct ath11k_base *ab)
{
ath11k_spectral_deinit(ab);
ath11k_thermal_unregister(ab);
ath11k_mac_unregister(ab);
ath11k_core_pdev_suspend_target(ab);
ath11k_hif_irq_disable(ab);
ath11k_dp_pdev_free(ab);
ath11k_debugfs_pdev_destroy(ab);

View file

@ -148,6 +148,7 @@ enum ath11k_hw_rev {
ATH11K_HW_WCN6750_HW10,
ATH11K_HW_IPQ5018_HW10,
ATH11K_HW_QCA2066_HW21,
ATH11K_HW_QCA6698AQ_HW21,
};
enum ath11k_firmware_mode {
@ -340,7 +341,6 @@ struct ath11k_chan_power_info {
* @ap_power_type: type of power (SP/LPI/VLP)
* @num_pwr_levels: number of power levels
* @reg_max: Array of maximum TX power (dBm) per PSD value
* @ap_constraint_power: AP constraint power (dBm)
* @tpe: TPE values processed from TPE IE
* @chan_power_info: power info to send to firmware
*/
@ -350,7 +350,6 @@ struct ath11k_reg_tpc_power_info {
enum wmi_reg_6ghz_ap_type ap_power_type;
u8 num_pwr_levels;
u8 reg_max[ATH11K_NUM_PWR_LEVELS];
u8 ap_constraint_power;
s8 tpe[ATH11K_NUM_PWR_LEVELS];
struct ath11k_chan_power_info chan_power_info[ATH11K_NUM_PWR_LEVELS];
};
@ -370,7 +369,6 @@ struct ath11k_vif {
struct ath11k *ar;
struct ieee80211_vif *vif;
u16 tx_seq_no;
struct wmi_wmm_params_all_arg wmm_params;
struct list_head list;
union {

View file

@ -165,7 +165,6 @@ struct ath11k_mon_data {
struct ath11k_pdev_mon_stats rx_mon_stats;
/* lock for monitor data */
spinlock_t mon_lock;
struct sk_buff_head rx_status_q;
};
struct ath11k_pdev_dp {

View file

@ -3872,6 +3872,7 @@ int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
&rbm);
if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
rbm != HAL_RX_BUF_RBM_SW1_BM &&
rbm != HAL_RX_BUF_RBM_SW3_BM) {
ab->soc_stats.invalid_rbm++;
ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
@ -4690,8 +4691,9 @@ static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
}
}
static u32
ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
/* clang stack usage explodes if this is inlined */
static noinline_for_stack
u32 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
void *ring_entry, struct sk_buff **head_msdu,
struct sk_buff **tail_msdu, u32 *npackets,
u32 *ppdu_id)
@ -5705,8 +5707,6 @@ static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
skb_queue_head_init(&pmon->rx_status_q);
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
memset(&pmon->rx_mon_stats, 0,

View file

@ -700,7 +700,7 @@ enum hal_rx_buf_return_buf_manager {
#define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7)
#define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8)
/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* feilds */
/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */
#define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8)
#define HAL_REO_CMD_UPD0_VLD BIT(9)
#define HAL_REO_CMD_UPD0_ALDC BIT(10)
@ -725,7 +725,7 @@ enum hal_rx_buf_return_buf_manager {
#define HAL_REO_CMD_UPD0_PN_VALID BIT(29)
#define HAL_REO_CMD_UPD0_PN BIT(30)
/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* feilds */
/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* fields */
#define HAL_REO_CMD_UPD1_VLD BIT(16)
#define HAL_REO_CMD_UPD1_ALDC GENMASK(18, 17)
#define HAL_REO_CMD_UPD1_DIS_DUP_DETECTION BIT(19)
@ -741,7 +741,7 @@ enum hal_rx_buf_return_buf_manager {
#define HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE BIT(30)
#define HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG BIT(31)
/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* feilds */
/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* fields */
#define HAL_REO_CMD_UPD2_SVLD BIT(10)
#define HAL_REO_CMD_UPD2_SSN GENMASK(22, 11)
#define HAL_REO_CMD_UPD2_SEQ_2K_ERR BIT(23)

View file

@ -372,7 +372,8 @@ int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
ret_buf_mgr = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
wbm_desc->buf_addr_info.info1);
if (ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) {
if (ret_buf_mgr != HAL_RX_BUF_RBM_SW1_BM &&
ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) {
ab->soc_stats.invalid_rbm++;
return -EINVAL;
}

View file

@ -227,6 +227,7 @@ struct ath11k_hw_params {
bool smp2p_wow_exit;
bool support_fw_mac_sequence;
bool support_dual_stations;
bool pdev_suspend;
};
struct ath11k_hw_ops {

View file

@ -1697,8 +1697,6 @@ static void ath11k_control_beaconing(struct ath11k_vif *arvif,
return;
}
arvif->tx_seq_no = 0x1000;
arvif->aid = 0;
ether_addr_copy(arvif->bssid, info->bssid);
@ -2230,7 +2228,7 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
/* In IPQ8074 platform, VHT mcs rate 10 and 11 is enabled by default.
* VHT mcs rate 10 and 11 is not suppoerted in 11ac standard.
* VHT mcs rate 10 and 11 is not supported in 11ac standard.
* so explicitly disable the VHT MCS rate 10 and 11 in 11ac mode.
*/
arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK;
@ -6952,7 +6950,7 @@ err_vdev_del:
/* Recalc txpower for remaining vdev */
ath11k_mac_txpower_recalc(ar);
/* TODO: recal traffic pause state based on the available vdevs */
/* TODO: recalc traffic pause state based on the available vdevs */
mutex_unlock(&ar->conf_mutex);
}
@ -9356,6 +9354,7 @@ static int ath11k_fw_stats_request(struct ath11k *ar,
static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
unsigned int link_id,
int *dbm)
{
struct ath11k *ar = hw->priv;

View file

@ -398,6 +398,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
case ATH11K_HW_WCN6855_HW20:
case ATH11K_HW_WCN6855_HW21:
case ATH11K_HW_QCA2066_HW21:
case ATH11K_HW_QCA6698AQ_HW21:
ath11k_mhi_config = &ath11k_mhi_config_qca6390;
break;
default:

View file

@ -846,6 +846,9 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
case 0x1019D0E1:
ab->hw_rev = ATH11K_HW_QCA2066_HW21;
break;
case 0x001e60e1:
ab->hw_rev = ATH11K_HW_QCA6698AQ_HW21;
break;
default:
ab->hw_rev = ATH11K_HW_WCN6855_HW21;
}

View file

@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@ -126,6 +126,17 @@ static const struct ath11k_msi_config ath11k_msi_config[] = {
},
.hw_rev = ATH11K_HW_QCA2066_HW21,
},
{
.total_vectors = 32,
.total_users = 4,
.users = (struct ath11k_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
},
.hw_rev = ATH11K_HW_QCA6698AQ_HW21,
},
};
int ath11k_pcic_init_msi_config(struct ath11k_base *ab)

View file

@ -1704,7 +1704,9 @@ static const struct qmi_elem_info qmi_wlfw_fw_init_done_ind_msg_v01_ei[] = {
},
};
static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
/* clang stack usage explodes if this is inlined */
static noinline_for_stack
int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
{
struct qmi_wlanfw_host_cap_req_msg_v01 req;
struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
@ -2570,7 +2572,9 @@ static void ath11k_qmi_m3_free(struct ath11k_base *ab)
m3_mem->size = 0;
}
static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
/* clang stack usage explodes if this is inlined */
static noinline_for_stack
int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
struct qmi_wlanfw_m3_info_req_msg_v01 req;

View file

@ -148,7 +148,9 @@ static int ath11k_wow_cleanup(struct ath11k *ar)
* 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... |
* +--+------------+----+-----------+---------------+-----------+
*/
static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
/* clang stack usage explodes if this is inlined */
static noinline_for_stack
void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
const struct cfg80211_pkt_pattern *old)
{
u8 hdr_8023_pattern[ETH_HLEN] = {};

View file

@ -22,6 +22,11 @@ unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
/* protected with ath12k_hw_group_mutex */
static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);
static DEFINE_MUTEX(ath12k_hw_group_mutex);
static int ath12k_core_rfkill_config(struct ath12k_base *ab)
{
struct ath12k *ar;
@ -79,11 +84,17 @@ int ath12k_core_suspend(struct ath12k_base *ab)
ar = ab->pdevs[i].ar;
if (!ar)
continue;
wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
ret = ath12k_mac_wait_tx_complete(ar);
if (ret) {
wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
return ret;
}
wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
}
/* PM framework skips suspend_late/resume_early callbacks
@ -593,14 +604,17 @@ u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab)
static void ath12k_core_stop(struct ath12k_base *ab)
{
ath12k_core_stopped(ab);
if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
ath12k_qmi_firmware_stop(ab);
ath12k_acpi_stop(ab);
ath12k_dp_rx_pdev_reo_cleanup(ab);
ath12k_hif_stop(ab);
ath12k_wmi_detach(ab);
ath12k_dp_rx_pdev_reo_cleanup(ab);
ath12k_dp_free(ab);
/* De-Init of components as needed */
}
@ -702,7 +716,7 @@ err_qmi_deinit:
static void ath12k_core_soc_destroy(struct ath12k_base *ab)
{
ath12k_dp_free(ab);
ath12k_hif_power_down(ab, false);
ath12k_reg_free(ab);
ath12k_debugfs_soc_destroy(ab);
ath12k_qmi_deinit_service(ab);
@ -712,30 +726,17 @@ static int ath12k_core_pdev_create(struct ath12k_base *ab)
{
int ret;
ret = ath12k_mac_register(ab);
if (ret) {
ath12k_err(ab, "failed register the radio with mac80211: %d\n", ret);
return ret;
}
ret = ath12k_dp_pdev_alloc(ab);
if (ret) {
ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);
goto err_mac_unregister;
return ret;
}
return 0;
err_mac_unregister:
ath12k_mac_unregister(ab);
return ret;
}
static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
{
ath12k_mac_unregister(ab);
ath12k_hif_irq_disable(ab);
ath12k_dp_pdev_free(ab);
}
@ -744,6 +745,8 @@ static int ath12k_core_start(struct ath12k_base *ab,
{
int ret;
lockdep_assert_held(&ab->core_lock);
ret = ath12k_wmi_attach(ab);
if (ret) {
ath12k_err(ab, "failed to attach wmi: %d\n", ret);
@ -793,19 +796,12 @@ static int ath12k_core_start(struct ath12k_base *ab,
goto err_hif_stop;
}
ret = ath12k_mac_allocate(ab);
if (ret) {
ath12k_err(ab, "failed to create new hw device with mac80211 :%d\n",
ret);
goto err_hif_stop;
}
ath12k_dp_cc_config(ab);
ret = ath12k_dp_rx_pdev_reo_setup(ab);
if (ret) {
ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
goto err_mac_destroy;
goto err_hif_stop;
}
ath12k_dp_hal_rx_desc_init(ab);
@ -844,12 +840,14 @@ static int ath12k_core_start(struct ath12k_base *ab,
/* ACPI is optional so continue in case of an error */
ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", ret);
if (!test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags))
/* Indicate the core start in the appropriate group */
ath12k_core_started(ab);
return 0;
err_reo_cleanup:
ath12k_dp_rx_pdev_reo_cleanup(ab);
err_mac_destroy:
ath12k_mac_destroy(ab);
err_hif_stop:
ath12k_hif_stop(ab);
err_wmi_detach:
@ -857,6 +855,95 @@ err_wmi_detach:
return ret;
}
/* Tear down one device's per-pdev state: disable HIF IRQs and destroy
 * the pdev, all under the device's core_lock.  Called for each device in
 * a hardware group from ath12k_core_hw_group_stop().
 */
static void ath12k_core_device_cleanup(struct ath12k_base *ab)
{
mutex_lock(&ab->core_lock);
ath12k_hif_irq_disable(ab);
ath12k_core_pdev_destroy(ab);
mutex_unlock(&ab->core_lock);
}
/* Stop a hardware group: unregister from mac80211, clean up each member
 * device in reverse probe order, then free the mac allocations.
 *
 * Caller must hold ag->mutex (asserted).  Mirrors, in reverse, what
 * ath12k_core_hw_group_start() set up.
 */
static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
{
struct ath12k_base *ab;
int i;
lockdep_assert_held(&ag->mutex);
/* Mark the group unregistered first so a concurrent start re-runs
 * the registration path.
 */
clear_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);
ath12k_mac_unregister(ag);
/* Tear down devices in the opposite order they were brought up. */
for (i = ag->num_devices - 1; i >= 0; i--) {
ab = ag->ab[i];
if (!ab)
continue;
ath12k_core_device_cleanup(ab);
}
ath12k_mac_destroy(ag);
}
/* Start a hardware group: register the group with mac80211 once, then
 * create pdevs, enable IRQs and configure rfkill for every member device.
 *
 * Caller must hold ag->mutex (asserted).  If the group is already
 * registered (ATH12K_GROUP_FLAG_REGISTERED), the mac allocate/register
 * steps are skipped and only the per-device bring-up runs.  On any
 * per-device failure the whole group is stopped again.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag)
{
struct ath12k_base *ab;
int ret, i;
lockdep_assert_held(&ag->mutex);
if (test_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags))
goto core_pdev_create;
ret = ath12k_mac_allocate(ag);
if (WARN_ON(ret))
return ret;
ret = ath12k_mac_register(ag);
if (WARN_ON(ret))
goto err_mac_destroy;
set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);
core_pdev_create:
/* Per-device bring-up, each under its own core_lock. */
for (i = 0; i < ag->num_devices; i++) {
ab = ag->ab[i];
if (!ab)
continue;
mutex_lock(&ab->core_lock);
ret = ath12k_core_pdev_create(ab);
if (ret) {
ath12k_err(ab, "failed to create pdev core %d\n", ret);
mutex_unlock(&ab->core_lock);
goto err;
}
ath12k_hif_irq_enable(ab);
ret = ath12k_core_rfkill_config(ab);
/* -EOPNOTSUPP means rfkill simply isn't supported; not fatal. */
if (ret && ret != -EOPNOTSUPP) {
mutex_unlock(&ab->core_lock);
goto err;
}
mutex_unlock(&ab->core_lock);
}
return 0;
err:
/* Unwinds both the registered state and any devices started above. */
ath12k_core_hw_group_stop(ag);
return ret;
err_mac_destroy:
ath12k_mac_destroy(ag);
return ret;
}
static int ath12k_core_start_firmware(struct ath12k_base *ab,
enum ath12k_firmware_mode mode)
{
@ -874,9 +961,37 @@ static int ath12k_core_start_firmware(struct ath12k_base *ab,
return ret;
}
/* True once every device in the group has started (num_started has
 * reached num_devices), i.e. the group may now be brought up as a whole.
 * Caller must hold ag->mutex (asserted).
 */
static inline
bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
{
lockdep_assert_held(&ag->mutex);
return (ag->num_started == ag->num_devices);
}
/* Kick the QMI host-cap exchange on every partner device that comes
 * *after* @ab in the group's device array.  The 'found' latch flips to
 * true when the loop reaches @ab itself, so only subsequent entries get
 * ath12k_qmi_trigger_host_cap() — devices ahead of @ab are untouched.
 */
static void ath12k_core_trigger_partner(struct ath12k_base *ab)
{
struct ath12k_hw_group *ag = ab->ag;
struct ath12k_base *partner_ab;
bool found = false;
int i;
for (i = 0; i < ag->num_devices; i++) {
partner_ab = ag->ab[i];
if (!partner_ab)
continue;
if (found)
ath12k_qmi_trigger_host_cap(partner_ab);
found = (partner_ab == ab);
}
}
}
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
{
int ret;
struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
int ret, i;
ret = ath12k_core_start_firmware(ab, ATH12K_FIRMWARE_MODE_NORMAL);
if (ret) {
@ -896,41 +1011,52 @@ int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
goto err_firmware_stop;
}
mutex_lock(&ag->mutex);
mutex_lock(&ab->core_lock);
ret = ath12k_core_start(ab, ATH12K_FIRMWARE_MODE_NORMAL);
if (ret) {
ath12k_err(ab, "failed to start core: %d\n", ret);
goto err_dp_free;
}
ret = ath12k_core_pdev_create(ab);
mutex_unlock(&ab->core_lock);
if (ath12k_core_hw_group_start_ready(ag)) {
ret = ath12k_core_hw_group_start(ag);
if (ret) {
ath12k_err(ab, "failed to create pdev core: %d\n", ret);
ath12k_warn(ab, "unable to start hw group\n");
goto err_core_stop;
}
ath12k_hif_irq_enable(ab);
ret = ath12k_core_rfkill_config(ab);
if (ret && ret != -EOPNOTSUPP) {
ath12k_err(ab, "failed to config rfkill: %d\n", ret);
goto err_core_pdev_destroy;
ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
} else {
ath12k_core_trigger_partner(ab);
}
mutex_unlock(&ab->core_lock);
mutex_unlock(&ag->mutex);
return 0;
err_core_pdev_destroy:
ath12k_core_pdev_destroy(ab);
err_core_stop:
for (i = ag->num_devices - 1; i >= 0; i--) {
ab = ag->ab[i];
if (!ab)
continue;
mutex_lock(&ab->core_lock);
ath12k_core_stop(ab);
ath12k_mac_destroy(ab);
mutex_unlock(&ab->core_lock);
}
goto exit;
err_dp_free:
ath12k_dp_free(ab);
mutex_unlock(&ab->core_lock);
err_firmware_stop:
ath12k_qmi_firmware_stop(ab);
exit:
mutex_unlock(&ag->mutex);
return ret;
}
@ -982,8 +1108,8 @@ static void ath12k_rfkill_work(struct work_struct *work)
rfkill_radio_on = ab->rfkill_radio_on;
spin_unlock_bh(&ab->base_lock);
for (i = 0; i < ab->num_hw; i++) {
ah = ab->ah[i];
for (i = 0; i < ath12k_get_num_hw(ab); i++) {
ah = ath12k_ab_to_ah(ab, i);
if (!ah)
continue;
@ -1034,8 +1160,8 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
if (ab->is_reset)
set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
for (i = 0; i < ab->num_hw; i++) {
ah = ab->ah[i];
for (i = 0; i < ath12k_get_num_hw(ab); i++) {
ah = ath12k_ab_to_ah(ab, i);
if (!ah || ah->state == ATH12K_HW_STATE_OFF)
continue;
@ -1073,8 +1199,8 @@ static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
struct ath12k *ar;
int i, j;
for (i = 0; i < ab->num_hw; i++) {
ah = ab->ah[i];
for (i = 0; i < ath12k_get_num_hw(ab); i++) {
ah = ath12k_ab_to_ah(ab, i);
if (!ah || ah->state == ATH12K_HW_STATE_OFF)
continue;
@ -1127,8 +1253,16 @@ static void ath12k_core_restart(struct work_struct *work)
}
if (ab->is_reset) {
for (i = 0; i < ab->num_hw; i++) {
ah = ab->ah[i];
if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
atomic_dec(&ab->reset_count);
complete(&ab->reset_complete);
ab->is_reset = false;
atomic_set(&ab->fail_cont_count, 0);
ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
}
for (i = 0; i < ath12k_get_num_hw(ab); i++) {
ah = ath12k_ab_to_ah(ab, i);
ieee80211_restart_hw(ah->hw);
}
}
@ -1142,7 +1276,7 @@ static void ath12k_core_reset(struct work_struct *work)
int reset_count, fail_cont_count;
long time_left;
if (!(test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))) {
if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
return;
}
@ -1241,38 +1375,246 @@ static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
&ab->panic_nb);
}
int ath12k_core_init(struct ath12k_base *ab)
static inline
bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
{
int ret;
lockdep_assert_held(&ag->mutex);
return (ag->num_probed == ag->num_devices);
}
/* Allocate a new hardware group with the given id and capacity, link it
 * onto the global ath12k_hw_group_list and initialise its mutex.
 *
 * Caller must hold ath12k_hw_group_mutex (asserted), which protects the
 * global list.
 *
 * Return: the new group, or NULL on allocation failure.
 */
static struct ath12k_hw_group *ath12k_core_hw_group_alloc(u8 id, u8 max_devices)
{
struct ath12k_hw_group *ag;
lockdep_assert_held(&ath12k_hw_group_mutex);
ag = kzalloc(sizeof(*ag), GFP_KERNEL);
if (!ag)
return NULL;
ag->id = id;
ag->num_devices = max_devices;
list_add(&ag->list, &ath12k_hw_group_list);
mutex_init(&ag->mutex);
return ag;
}
/* Unlink a hardware group from the global list and free it.  Takes
 * ath12k_hw_group_mutex itself; callers must not hold it.  Called from
 * ath12k_core_hw_group_unassign() once the last device has left the
 * group.
 */
static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
{
mutex_lock(&ath12k_hw_group_mutex);
list_del(&ag->list);
kfree(ag);
mutex_unlock(&ath12k_hw_group_mutex);
}
/* Assign @ab to a hardware group.  Until device-tree based grouping is
 * implemented (see comment below), every device gets its own
 * single-member group with an invalid group id and MLO disabled.
 *
 * Caller must hold ath12k_hw_group_mutex (asserted).
 *
 * Return: the group @ab was added to, or NULL on allocation failure.
 */
static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab)
{
u32 group_id = ATH12K_INVALID_GROUP_ID;
struct ath12k_hw_group *ag;
lockdep_assert_held(&ath12k_hw_group_mutex);
/* The grouping of multiple devices will be done based on device tree file.
 * TODO: device tree file parsing to know about the devices involved in group.
 *
 * The platforms that do not have any valid group information would have each
 * device to be part of its own invalid group.
 *
 * Currently, we are not parsing any device tree information and hence, grouping
 * of multiple devices is not involved. Thus, single device is added to device
 * group.
 */
ag = ath12k_core_hw_group_alloc(group_id, 1);
if (!ag) {
ath12k_warn(ab, "unable to create new hw group\n");
return NULL;
}
ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n");
/* Slot index doubles as the device id within the group. */
ab->device_id = ag->num_probed++;
ag->ab[ab->device_id] = ab;
ab->ag = ag;
ag->mlo_capable = false;
return ag;
}
/* Remove @ab from its hardware group.  Validates the device's slot under
 * ag->mutex, clears the back-pointers, decrements the probed count and —
 * when the last device has left — frees the group itself (outside the
 * group mutex, since ath12k_core_hw_group_free() takes the global lock).
 * Safe to call when the device has no group (ag == NULL).
 */
void ath12k_core_hw_group_unassign(struct ath12k_base *ab)
{
struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
u8 device_id = ab->device_id;
int num_probed;
if (!ag)
return;
mutex_lock(&ag->mutex);
if (WARN_ON(device_id >= ag->num_devices)) {
mutex_unlock(&ag->mutex);
return;
}
if (WARN_ON(ag->ab[device_id] != ab)) {
mutex_unlock(&ag->mutex);
return;
}
ag->ab[device_id] = NULL;
ab->ag = NULL;
ab->device_id = ATH12K_INVALID_DEVICE_ID;
if (ag->num_probed)
ag->num_probed--;
/* Snapshot under the lock; the free below must run unlocked. */
num_probed = ag->num_probed;
mutex_unlock(&ag->mutex);
if (!num_probed)
ath12k_core_hw_group_free(ag);
}
/* Destroy the SoC-level state of every device in the group by calling
 * ath12k_core_soc_destroy() on each populated slot.  Counterpart of
 * ath12k_core_hw_group_create().  A NULL group is a programming error
 * (WARN_ON) and is ignored.
 */
static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
{
struct ath12k_base *ab;
int i;
if (WARN_ON(!ag))
return;
for (i = 0; i < ag->num_devices; i++) {
ab = ag->ab[i];
if (!ab)
continue;
ath12k_core_soc_destroy(ab);
}
}
/* Full shutdown of a hardware group: stop the group (mac unregister and
 * per-device pdev teardown), then stop each member device's core, all
 * under ag->mutex.  Tolerates a NULL group.
 */
static void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
{
struct ath12k_base *ab;
int i;
if (!ag)
return;
mutex_lock(&ag->mutex);
ath12k_core_hw_group_stop(ag);
for (i = 0; i < ag->num_devices; i++) {
ab = ag->ab[i];
if (!ab)
continue;
mutex_lock(&ab->core_lock);
ath12k_core_stop(ab);
mutex_unlock(&ab->core_lock);
}
mutex_unlock(&ag->mutex);
}
/* Create the SoC-level state for every device in the group via
 * ath12k_core_soc_create(), each under its own core_lock.
 *
 * Caller must hold ag->mutex (asserted).  On failure the devices created
 * so far are NOT unwound here — the caller is expected to destroy the
 * group (see the err path in ath12k_core_init()).
 *
 * Return: 0 on success, negative errno from the first failing device.
 */
static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
{
struct ath12k_base *ab;
int i, ret;
lockdep_assert_held(&ag->mutex);
for (i = 0; i < ag->num_devices; i++) {
ab = ag->ab[i];
if (!ab)
continue;
mutex_lock(&ab->core_lock);
ret = ath12k_core_soc_create(ab);
if (ret) {
mutex_unlock(&ab->core_lock);
ath12k_err(ab, "failed to create soc core: %d\n", ret);
return ret;
}
mutex_unlock(&ab->core_lock);
}
return 0;
}
/* Decide whether the group can do MLO.  Multi-device groups are always
 * inter-device MLO capable; a single-device group inherits the device's
 * own intra-chip MLO support flag.  Caller must hold ag->mutex
 * (asserted).
 */
void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
{
lockdep_assert_held(&ag->mutex);
/* If more than one devices are grouped, then inter MLO
 * functionality can work still independent of whether internally
 * each device supports single_chip_mlo or not.
 * Only when there is one device, then it depends whether the
 * device can support intra chip MLO or not
 */
if (ag->num_devices > 1)
ag->mlo_capable = true;
else
ag->mlo_capable = ag->ab[0]->single_chip_mlo_supp;
}
/* Core init entry point for one device: register the panic notifier
 * (failure is only warned about), assign the device to a hardware group
 * under the global group mutex, and — once the whole group has probed
 * (ath12k_core_hw_group_create_ready()) — create the group's SoC state.
 *
 * Return: 0 on success; -ENODEV if no group could be assigned; otherwise
 * the error from group creation, after destroying and unassigning the
 * partially-created group.
 */
int ath12k_core_init(struct ath12k_base *ab)
{
struct ath12k_hw_group *ag;
int ret;
ret = ath12k_core_panic_notifier_register(ab);
if (ret)
ath12k_warn(ab, "failed to register panic handler: %d\n", ret);
mutex_lock(&ath12k_hw_group_mutex);
ag = ath12k_core_hw_group_assign(ab);
if (!ag) {
mutex_unlock(&ath12k_hw_group_mutex);
ath12k_warn(ab, "unable to get hw group\n");
return -ENODEV;
}
mutex_unlock(&ath12k_hw_group_mutex);
mutex_lock(&ag->mutex);
ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n",
ag->num_devices, ag->num_probed);
/* Only the last device to probe actually creates the group. */
if (ath12k_core_hw_group_create_ready(ag)) {
ret = ath12k_core_hw_group_create(ag);
if (ret) {
mutex_unlock(&ag->mutex);
ath12k_warn(ab, "unable to create hw group\n");
goto err;
}
}
mutex_unlock(&ag->mutex);
return 0;
err:
ath12k_core_hw_group_destroy(ab->ag);
ath12k_core_hw_group_unassign(ab);
return ret;
}
void ath12k_core_deinit(struct ath12k_base *ab)
{
ath12k_core_panic_notifier_unregister(ab);
mutex_lock(&ab->core_lock);
ath12k_core_pdev_destroy(ab);
ath12k_core_stop(ab);
mutex_unlock(&ab->core_lock);
ath12k_hif_power_down(ab, false);
ath12k_mac_destroy(ab);
ath12k_core_soc_destroy(ab);
ath12k_fw_unmap(ab);
ath12k_core_hw_group_cleanup(ab->ag);
ath12k_core_hw_group_destroy(ab->ag);
ath12k_core_hw_group_unassign(ab);
}
void ath12k_core_free(struct ath12k_base *ab)
@ -1322,7 +1664,7 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
ab->dev = dev;
ab->hif.bus = bus;
ab->qmi.num_radios = U8_MAX;
ab->mlo_capable_flags = ATH12K_INTRA_DEVICE_MLO_SUPPORT;
ab->single_chip_mlo_supp = false;
/* Device index used to identify the devices in a group.
*

View file

@ -63,6 +63,14 @@
#define ATH12K_RECONFIGURE_TIMEOUT_HZ (10 * HZ)
#define ATH12K_RECOVER_START_TIMEOUT_HZ (20 * HZ)
#define ATH12K_MAX_SOCS 3
#define ATH12K_GROUP_MAX_RADIO (ATH12K_MAX_SOCS * MAX_RADIOS)
#define ATH12K_INVALID_GROUP_ID 0xFF
#define ATH12K_INVALID_DEVICE_ID 0xFF
#define ATH12K_MAX_MLO_PEERS 256
#define ATH12K_MLO_PEER_ID_INVALID 0xFFFF
enum ath12k_bdf_search {
ATH12K_BDF_SEARCH_DEFAULT,
ATH12K_BDF_SEARCH_BUS_AND_BOARD,
@ -115,6 +123,7 @@ struct ath12k_skb_cb {
dma_addr_t paddr_ext_desc;
u32 cipher;
u8 flags;
u8 link_id;
};
struct ath12k_skb_rxcb {
@ -208,6 +217,10 @@ enum ath12k_scan_state {
ATH12K_SCAN_ABORTING,
};
enum ath12k_hw_group_flags {
ATH12K_GROUP_FLAG_REGISTERED,
};
enum ath12k_dev_flags {
ATH12K_CAC_RUNNING,
ATH12K_FLAG_CRASH_FLUSH,
@ -220,6 +233,7 @@ enum ath12k_dev_flags {
ATH12K_FLAG_HTC_SUSPEND_COMPLETE,
ATH12K_FLAG_CE_IRQ_ENABLED,
ATH12K_FLAG_EXT_IRQ_ENABLED,
ATH12K_FLAG_QMI_FW_READY_COMPLETE,
};
struct ath12k_tx_conf {
@ -314,10 +328,11 @@ struct ath12k_vif {
bool ps;
struct ath12k_link_vif deflink;
struct ath12k_link_vif __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
struct ath12k_link_vif __rcu *link[ATH12K_NUM_MAX_LINKS];
struct ath12k_vif_cache *cache[IEEE80211_MLD_MAX_NUM_LINKS];
/* indicates bitmap of link vif created in FW */
u16 links_map;
u8 last_scan_link;
/* Must be last - ends in a flexible-array member.
*
@ -469,6 +484,9 @@ struct ath12k_link_sta {
struct ath12k_link_vif *arvif;
struct ath12k_sta *ahsta;
/* link address similar to ieee80211_link_sta */
u8 addr[ETH_ALEN];
/* the following are protected by ar->data_lock */
u32 changed; /* IEEE80211_RC_* */
u32 bw;
@ -485,14 +503,26 @@ struct ath12k_link_sta {
struct ath12k_rx_peer_stats *rx_stats;
struct ath12k_wbm_tx_stats *wbm_tx_stats;
u32 bw_prev;
/* For now the assoc link will be considered primary */
bool is_assoc_link;
/* for firmware use only */
u8 link_idx;
};
struct ath12k_sta {
struct ath12k_vif *ahvif;
enum hal_pn_type pn_type;
struct ath12k_link_sta deflink;
struct ath12k_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
/* indicates bitmap of link sta created in FW */
u16 links_map;
u8 assoc_link_id;
u16 ml_peer_id;
u8 num_peer;
enum ieee80211_sta_state state;
};
#define ATH12K_MIN_5G_FREQ 4150
@ -657,7 +687,7 @@ struct ath12k {
struct work_struct regd_update_work;
struct work_struct wmi_mgmt_tx_work;
struct wiphy_work wmi_mgmt_tx_work;
struct sk_buff_head wmi_mgmt_tx_queue;
struct ath12k_wow wow;
@ -688,6 +718,8 @@ struct ath12k {
struct ath12k_hw {
struct ieee80211_hw *hw;
struct device *dev;
/* Protect the write operation of the hardware state ath12k_hw::state
* between hardware start<=>reconfigure<=>stop transitions.
*/
@ -698,6 +730,11 @@ struct ath12k_hw {
u8 num_radio;
DECLARE_BITMAP(free_ml_peer_id_map, ATH12K_MAX_MLO_PEERS);
/* protected by wiphy_lock() */
struct list_head ml_peers;
/* Keep last */
struct ath12k radio[] __aligned(sizeof(void *));
};
@ -784,19 +821,30 @@ struct ath12k_soc_dp_stats {
struct ath12k_soc_dp_tx_err_stats tx_err;
};
/**
* enum ath12k_link_capable_flags - link capable flags
*
* Single/Multi link capability information
*
* @ATH12K_INTRA_DEVICE_MLO_SUPPORT: SLO/MLO form between the radio, where all
* the links (radios) present within a device.
* @ATH12K_INTER_DEVICE_MLO_SUPPORT: SLO/MLO form between the radio, where all
* the links (radios) present across the devices.
/* Holds info on the group of devices that are registered as a single
* wiphy, protected with struct ath12k_hw_group::mutex.
*/
enum ath12k_link_capable_flags {
ATH12K_INTRA_DEVICE_MLO_SUPPORT = BIT(0),
ATH12K_INTER_DEVICE_MLO_SUPPORT = BIT(1),
struct ath12k_hw_group {
	/* entry in the driver-global list of device groups */
	struct list_head list;
	/* identifier of this group */
	u8 id;
	/* number of devices expected to form this group */
	u8 num_devices;
	/* devices attached so far — presumably incremented at probe time;
	 * NOTE(review): confirm against the group-assign path
	 */
	u8 num_probed;
	/* devices currently started; see ath12k_core_started()/stopped() */
	u8 num_started;
	/* enum ath12k_hw_group_flags */
	unsigned long flags;
	/* member devices of the group */
	struct ath12k_base *ab[ATH12K_MAX_SOCS];

	/* protects access to this struct */
	struct mutex mutex;

	/* Holds information of wiphy (hw) registration.
	 *
	 * In Multi/Single Link Operation case, all pdevs are registered as
	 * a single wiphy. In other (legacy/Non-MLO) cases, each pdev is
	 * registered as separate wiphys.
	 */
	struct ath12k_hw *ah[ATH12K_GROUP_MAX_RADIO];
	/* number of valid entries in ah[] */
	u8 num_hw;
	/* true when the group as a whole supports MLO */
	bool mlo_capable;
};
/* Master structure to hold the hw data which may be used in core module */
@ -862,15 +910,6 @@ struct ath12k_base {
struct ath12k_pdev __rcu *pdevs_active[MAX_RADIOS];
/* Holds information of wiphy (hw) registration.
*
* In Multi/Single Link Operation case, all pdevs are registered as
* a single wiphy. In other (legacy/Non-MLO) cases, each pdev is
* registered as separate wiphys.
*/
struct ath12k_hw *ah[MAX_RADIOS];
u8 num_hw;
struct ath12k_wmi_hal_reg_capabilities_ext_arg hal_reg_cap[MAX_RADIOS];
unsigned long long free_vdev_map;
unsigned long long free_vdev_stats_id_map;
@ -964,12 +1003,8 @@ struct ath12k_base {
const struct hal_rx_ops *hal_rx_ops;
/* mlo_capable_flags denotes the single/multi link operation
* capabilities of the Device.
*
* See enum ath12k_link_capable_flags
*/
u8 mlo_capable_flags;
/* Denotes the whether MLO is possible within the chip */
bool single_chip_mlo_supp;
struct completion restart_completed;
@ -992,6 +1027,8 @@ struct ath12k_base {
struct notifier_block panic_nb;
struct ath12k_hw_group *ag;
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
@ -1022,6 +1059,7 @@ int ath12k_core_resume_early(struct ath12k_base *ab);
int ath12k_core_resume(struct ath12k_base *ab);
int ath12k_core_suspend(struct ath12k_base *ab);
int ath12k_core_suspend_late(struct ath12k_base *ab);
void ath12k_core_hw_group_unassign(struct ath12k_base *ab);
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
const char *filename);
@ -1029,6 +1067,8 @@ u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab);
u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab);
u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab);
void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag);
static inline const char *ath12k_scan_state_str(enum ath12k_scan_state state)
{
switch (state) {
@ -1129,4 +1169,40 @@ static inline struct ieee80211_hw *ath12k_ar_to_hw(struct ath12k *ar)
#define for_each_ar(ah, ar, index) \
for ((index) = 0; ((index) < (ah)->num_radio && \
((ar) = &(ah)->radio[(index)])); (index)++)
/* Return the idx'th hw (wiphy) instance of the device's hardware group */
static inline struct ath12k_hw *ath12k_ab_to_ah(struct ath12k_base *ab, int idx)
{
	return ab->ag->ah[idx];
}
/* Install @ah as the idx'th hw (wiphy) instance of the device's group */
static inline void ath12k_ab_set_ah(struct ath12k_base *ab, int idx,
				    struct ath12k_hw *ah)
{
	ab->ag->ah[idx] = ah;
}
/* Number of hw (wiphy) instances registered for the device's group */
static inline int ath12k_get_num_hw(struct ath12k_base *ab)
{
	return ab->ag->num_hw;
}
/* Return the hardware group this device belongs to */
static inline struct ath12k_hw_group *ath12k_ab_to_ag(struct ath12k_base *ab)
{
	return ab->ag;
}
/* Account one more started device in the group; caller must hold the
 * group mutex (enforced via lockdep).
 */
static inline void ath12k_core_started(struct ath12k_base *ab)
{
	lockdep_assert_held(&ab->ag->mutex);

	ab->ag->num_started++;
}
/* Account one stopped device in the group; caller must hold the
 * group mutex (enforced via lockdep).
 */
static inline void ath12k_core_stopped(struct ath12k_base *ab)
{
	lockdep_assert_held(&ab->ag->mutex);

	ab->ag->num_started--;
}
#endif /* _CORE_H_ */

View file

@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
@ -36,7 +36,7 @@ void ath12k_err(struct ath12k_base *ab, const char *fmt, ...)
va_end(args);
}
void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...)
void __ath12k_warn(struct device *dev, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
@ -45,7 +45,7 @@ void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...)
va_start(args, fmt);
vaf.va = &args;
dev_warn_ratelimited(ab->dev, "%pV", &vaf);
dev_warn_ratelimited(dev, "%pV", &vaf);
/* TODO: Trace the log */
va_end(args);
}

View file

@ -31,7 +31,10 @@ enum ath12k_debug_mask {
__printf(2, 3) void ath12k_info(struct ath12k_base *ab, const char *fmt, ...);
__printf(2, 3) void ath12k_err(struct ath12k_base *ab, const char *fmt, ...);
__printf(2, 3) void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...);
__printf(2, 3) void __ath12k_warn(struct device *dev, const char *fmt, ...);
#define ath12k_warn(ab, fmt, ...) __ath12k_warn((ab)->dev, fmt, ##__VA_ARGS__)
#define ath12k_hw_warn(ah, fmt, ...) __ath12k_warn((ah)->dev, fmt, ##__VA_ARGS__)
extern unsigned int ath12k_debug_mask;

View file

@ -48,6 +48,28 @@ print_array_to_buf(u8 *buf, u32 offset, const char *header,
footer);
}
/* Map an 11ax RU-size enum value to its tone-count label for debugfs
 * output; anything outside the known set yields "unknown".
 */
static const char *ath12k_htt_ax_tx_rx_ru_size_to_str(u8 ru_size)
{
	if (ru_size == ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26)
		return "26";
	if (ru_size == ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52)
		return "52";
	if (ru_size == ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106)
		return "106";
	if (ru_size == ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242)
		return "242";
	if (ru_size == ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484)
		return "484";
	if (ru_size == ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996)
		return "996";
	if (ru_size == ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2)
		return "996x2";

	return "unknown";
}
static const char *ath12k_htt_be_tx_rx_ru_size_to_str(u8 ru_size)
{
switch (ru_size) {
@ -88,6 +110,17 @@ static const char *ath12k_htt_be_tx_rx_ru_size_to_str(u8 ru_size)
}
}
/* Dispatch RU-size stringification by RU type: single-RU-only stats use
 * the 11ax table, single-and-multi-RU stats the 11be table; any other
 * type yields "unknown".
 */
static const char*
ath12k_tx_ru_size_to_str(enum ath12k_htt_stats_ru_type ru_type, u8 ru_size)
{
	switch (ru_type) {
	case ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY:
		return ath12k_htt_ax_tx_rx_ru_size_to_str(ru_size);
	case ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU:
		return ath12k_htt_be_tx_rx_ru_size_to_str(ru_size);
	default:
		return "unknown";
	}
}
static void
htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf, u16 tag_len,
struct debug_htt_stats_req *stats_req)
@ -1562,7 +1595,8 @@ ath12k_htt_print_tx_selfgen_ac_stats_tlv(const void *tag_buf, u16 tag_len,
le32_to_cpu(htt_stats_buf->ac_mu_mimo_ndp));
len += print_array_to_buf_index(buf, len, "ac_mu_mimo_brpollX_tried = ", 1,
htt_stats_buf->ac_mu_mimo_brpoll,
ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS, "\n\n");
ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS - 1,
"\n\n");
stats_req->buf_len = len;
}
@ -1590,7 +1624,7 @@ ath12k_htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf, u16 tag_len,
le32_to_cpu(htt_stats_buf->ax_mu_mimo_ndp));
len += print_array_to_buf_index(buf, len, "ax_mu_mimo_brpollX_tried = ", 1,
htt_stats_buf->ax_mu_mimo_brpoll,
ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS, "\n");
ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS - 1, "\n");
len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger = %u\n",
le32_to_cpu(htt_stats_buf->ax_basic_trigger));
len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_total_trigger = %u\n",
@ -2276,7 +2310,7 @@ ath12k_htt_print_tx_pdev_mumimo_grp_stats_tlv(const void *tag_buf, u16 tag_len,
len += print_array_to_buf(buf, len, "ul_mumimo_grp_best_grp_size",
htt_stats_buf->ul_mumimo_grp_best_grp_size,
ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ, "\n");
len += print_array_to_buf_index(buf, len, "ul_mumimo_grp_best_num_usrs = ", 1,
len += print_array_to_buf(buf, len, "ul_mumimo_grp_best_num_usrs = ",
htt_stats_buf->ul_mumimo_grp_best_usrs,
ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS, "\n");
len += print_array_to_buf(buf, len,
@ -2542,6 +2576,573 @@ ath12k_htt_print_pdev_obss_pd_stats_tlv(const void *tag_buf, u16 tag_len,
stats_req->buf_len = len;
}
/* Format one DL pager page-history entry into str_buf.
 *
 * NOTE(review): index is advanced by snprintf()'s would-be length, which
 * on truncation can exceed the remaining space (unlike scnprintf()); this
 * relies on ATH12K_HTT_MAX_STRING_LEN being large enough for both writes —
 * confirm, or consider scnprintf() for consistency with the callers.
 */
static void ath12k_htt_print_dlpager_entry(const struct ath12k_htt_pgs_info *pg_info,
					   int idx, char *str_buf)
{
	u64 page_timestamp;
	u16 index = 0;

	/* 64-bit microsecond timestamp carried as two LE 32-bit halves */
	page_timestamp = ath12k_le32hilo_to_u64(pg_info->ts_msb, pg_info->ts_lsb);

	index += snprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index,
			  "Index - %u ; Page Number - %u ; ",
			  idx, le32_to_cpu(pg_info->page_num));
	index += snprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index,
			  "Num of pages - %u ; Timestamp - %lluus\n",
			  le32_to_cpu(pg_info->num_pgs), page_timestamp);
}
/* Render the DL pager stats TLV: page lock/unlock counters followed by the
 * two ring-buffer histories (locked and unlocked pages), each ATH12K_PAGER_MAX
 * entries deep.
 */
static void
ath12k_htt_print_dlpager_stats_tlv(const void *tag_buf, u16 tag_len,
				   struct debug_htt_stats_req *stats_req)
{
	const struct ath12k_htt_dl_pager_stats_tlv *stat_buf = tag_buf;
	u32 len = stats_req->buf_len;
	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
	u32 dword_lock, dword_unlock;
	int i;
	u8 *buf = stats_req->buf;
	u8 pg_locked;
	u8 pg_unlock;
	char str_buf[ATH12K_HTT_MAX_STRING_LEN] = {0};

	/* ignore truncated TLVs from firmware */
	if (tag_len < sizeof(*stat_buf))
		return;

	dword_lock = le32_get_bits(stat_buf->info2,
				   ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO2);
	dword_unlock = le32_get_bits(stat_buf->info2,
				     ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO2);
	pg_locked = ATH12K_HTT_STATS_PAGE_LOCKED;
	pg_unlock = ATH12K_HTT_STATS_PAGE_UNLOCKED;

	len += scnprintf(buf + len, buf_len - len, "HTT_DLPAGER_STATS_TLV:\n");
	len += scnprintf(buf + len, buf_len - len, "ASYNC locked pages = %u\n",
			 le32_get_bits(stat_buf->info0,
				       ATH12K_HTT_DLPAGER_ASYNC_LOCK_PG_CNT_INFO0));
	len += scnprintf(buf + len, buf_len - len, "SYNC locked pages = %u\n",
			 le32_get_bits(stat_buf->info0,
				       ATH12K_HTT_DLPAGER_SYNC_LOCK_PG_CNT_INFO0));
	len += scnprintf(buf + len, buf_len - len, "Total locked pages = %u\n",
			 le32_get_bits(stat_buf->info1,
				       ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO1));
	len += scnprintf(buf + len, buf_len - len, "Total free pages = %u\n",
			 le32_get_bits(stat_buf->info1,
				       ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO1));

	len += scnprintf(buf + len, buf_len - len, "\nLOCKED PAGES HISTORY\n");
	/* count of 0 means the history wrapped; last entry is the final slot */
	len += scnprintf(buf + len, buf_len - len, "last_locked_page_idx = %u\n",
			 dword_lock ? dword_lock - 1 : (ATH12K_PAGER_MAX - 1));

	for (i = 0; i < ATH12K_PAGER_MAX; i++) {
		memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
		ath12k_htt_print_dlpager_entry(&stat_buf->pgs_info[pg_locked][i],
					       i, str_buf);
		len += scnprintf(buf + len, buf_len - len, "%s", str_buf);
	}

	len += scnprintf(buf + len, buf_len - len, "\nUNLOCKED PAGES HISTORY\n");
	len += scnprintf(buf + len, buf_len - len, "last_unlocked_page_idx = %u\n",
			 dword_unlock ? dword_unlock - 1 : ATH12K_PAGER_MAX - 1);

	for (i = 0; i < ATH12K_PAGER_MAX; i++) {
		memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
		ath12k_htt_print_dlpager_entry(&stat_buf->pgs_info[pg_unlock][i],
					       i, str_buf);
		len += scnprintf(buf + len, buf_len - len, "%s", str_buf);
	}

	len += scnprintf(buf + len, buf_len - len, "\n");

	stats_req->buf_len = len;
}
/* Render the PHY stats TLV: per-chain noise floors (BDF-calibrated and
 * runtime), false-radar and radar-confirmation counters, and ANI level.
 */
static void
ath12k_htt_print_phy_stats_tlv(const void *tag_buf, u16 tag_len,
			       struct debug_htt_stats_req *stats_req)
{
	const struct ath12k_htt_phy_stats_tlv *htt_stats_buf = tag_buf;
	u32 len = stats_req->buf_len;
	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
	u8 *buf = stats_req->buf, i;

	/* ignore truncated TLVs from firmware */
	if (tag_len < sizeof(*htt_stats_buf))
		return;

	len += scnprintf(buf + len, buf_len - len, "HTT_PHY_STATS_TLV:\n");

	/* noise floors are signed LE values; a_sle32_to_cpu() converts them */
	for (i = 0; i < ATH12K_HTT_STATS_MAX_CHAINS; i++)
		len += scnprintf(buf + len, buf_len - len, "bdf_nf_chain[%d] = %d\n",
				 i, a_sle32_to_cpu(htt_stats_buf->nf_chain[i]));
	for (i = 0; i < ATH12K_HTT_STATS_MAX_CHAINS; i++)
		len += scnprintf(buf + len, buf_len - len, "runtime_nf_chain[%d] = %d\n",
				 i, a_sle32_to_cpu(htt_stats_buf->runtime_nf_chain[i]));
	/* false radar count is reported against firmware run time (minutes) */
	len += scnprintf(buf + len, buf_len - len, "false_radar_cnt = %u / %u (mins)\n",
			 le32_to_cpu(htt_stats_buf->false_radar_cnt),
			 le32_to_cpu(htt_stats_buf->fw_run_time));
	len += scnprintf(buf + len, buf_len - len, "radar_cs_cnt = %u\n",
			 le32_to_cpu(htt_stats_buf->radar_cs_cnt));
	len += scnprintf(buf + len, buf_len - len, "ani_level = %d\n\n",
			 a_sle32_to_cpu(htt_stats_buf->ani_level));

	stats_req->buf_len = len;
}
/* Render the PHY counters TLV: RX decode/abort error counters plus the
 * per-bucket packet, CRC-pass, block-error and OTA-error count arrays.
 */
static void
ath12k_htt_print_phy_counters_tlv(const void *tag_buf, u16 tag_len,
				  struct debug_htt_stats_req *stats_req)
{
	const struct ath12k_htt_phy_counters_tlv *htt_stats_buf = tag_buf;
	u32 len = stats_req->buf_len;
	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
	u8 *buf = stats_req->buf;

	/* ignore truncated TLVs from firmware */
	if (tag_len < sizeof(*htt_stats_buf))
		return;

	len += scnprintf(buf + len, buf_len - len, "HTT_PHY_COUNTERS_TLV:\n");
	len += scnprintf(buf + len, buf_len - len, "rx_ofdma_timing_err_cnt = %u\n",
			 le32_to_cpu(htt_stats_buf->rx_ofdma_timing_err_cnt));
	len += scnprintf(buf + len, buf_len - len, "rx_cck_fail_cnt = %u\n",
			 le32_to_cpu(htt_stats_buf->rx_cck_fail_cnt));
	len += scnprintf(buf + len, buf_len - len, "mactx_abort_cnt = %u\n",
			 le32_to_cpu(htt_stats_buf->mactx_abort_cnt));
	len += scnprintf(buf + len, buf_len - len, "macrx_abort_cnt = %u\n",
			 le32_to_cpu(htt_stats_buf->macrx_abort_cnt));
	len += scnprintf(buf + len, buf_len - len, "phytx_abort_cnt = %u\n",
			 le32_to_cpu(htt_stats_buf->phytx_abort_cnt));
	len += scnprintf(buf + len, buf_len - len, "phyrx_abort_cnt = %u\n",
			 le32_to_cpu(htt_stats_buf->phyrx_abort_cnt));
	len += scnprintf(buf + len, buf_len - len, "phyrx_defer_abort_cnt = %u\n",
			 le32_to_cpu(htt_stats_buf->phyrx_defer_abort_cnt));
	len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_lstf_event_cnt = %u\n",
			 le32_to_cpu(htt_stats_buf->rx_gain_adj_lstf_event_cnt));
	len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_non_legacy_cnt = %u\n",
			 le32_to_cpu(htt_stats_buf->rx_gain_adj_non_legacy_cnt));
	len += print_array_to_buf(buf, len, "rx_pkt_cnt", htt_stats_buf->rx_pkt_cnt,
				  ATH12K_HTT_MAX_RX_PKT_CNT, "\n");
	len += print_array_to_buf(buf, len, "rx_pkt_crc_pass_cnt",
				  htt_stats_buf->rx_pkt_crc_pass_cnt,
				  ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT, "\n");
	len += print_array_to_buf(buf, len, "per_blk_err_cnt",
				  htt_stats_buf->per_blk_err_cnt,
				  ATH12K_HTT_MAX_PER_BLK_ERR_CNT, "\n");
	len += print_array_to_buf(buf, len, "rx_ota_err_cnt",
				  htt_stats_buf->rx_ota_err_cnt,
				  ATH12K_HTT_MAX_RX_OTA_ERR_CNT, "\n\n");

	stats_req->buf_len = len;
}
/* Render the PHY reset stats TLV: the channel the PHY was on at reset time,
 * the current/previous reset causes, and the calibration/masks/threshold
 * snapshot captured by firmware.
 */
static void
ath12k_htt_print_phy_reset_stats_tlv(const void *tag_buf, u16 tag_len,
				     struct debug_htt_stats_req *stats_req)
{
	const struct ath12k_htt_phy_reset_stats_tlv *htt_stats_buf = tag_buf;
	u32 len = stats_req->buf_len;
	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
	u8 *buf = stats_req->buf;

	/* ignore truncated TLVs from firmware */
	if (tag_len < sizeof(*htt_stats_buf))
		return;

	len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_STATS_TLV:\n");
	len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
			 le32_to_cpu(htt_stats_buf->pdev_id));
	len += scnprintf(buf + len, buf_len - len, "chan_mhz = %u\n",
			 le32_to_cpu(htt_stats_buf->chan_mhz));
	len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq1 = %u\n",
			 le32_to_cpu(htt_stats_buf->chan_band_center_freq1));
	len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq2 = %u\n",
			 le32_to_cpu(htt_stats_buf->chan_band_center_freq2));
	len += scnprintf(buf + len, buf_len - len, "chan_phy_mode = %u\n",
			 le32_to_cpu(htt_stats_buf->chan_phy_mode));
	len += scnprintf(buf + len, buf_len - len, "chan_flags = 0x%0x\n",
			 le32_to_cpu(htt_stats_buf->chan_flags));
	len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n",
			 le32_to_cpu(htt_stats_buf->chan_num));
	len += scnprintf(buf + len, buf_len - len, "reset_cause = 0x%0x\n",
			 le32_to_cpu(htt_stats_buf->reset_cause));
	len += scnprintf(buf + len, buf_len - len, "prev_reset_cause = 0x%0x\n",
			 le32_to_cpu(htt_stats_buf->prev_reset_cause));
	len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_src = 0x%0x\n",
			 le32_to_cpu(htt_stats_buf->phy_warm_reset_src));
	len += scnprintf(buf + len, buf_len - len, "rx_gain_tbl_mode = %d\n",
			 le32_to_cpu(htt_stats_buf->rx_gain_tbl_mode));
	len += scnprintf(buf + len, buf_len - len, "xbar_val = 0x%0x\n",
			 le32_to_cpu(htt_stats_buf->xbar_val));
	len += scnprintf(buf + len, buf_len - len, "force_calibration = %u\n",
			 le32_to_cpu(htt_stats_buf->force_calibration));
	len += scnprintf(buf + len, buf_len - len, "phyrf_mode = %u\n",
			 le32_to_cpu(htt_stats_buf->phyrf_mode));
	len += scnprintf(buf + len, buf_len - len, "phy_homechan = %u\n",
			 le32_to_cpu(htt_stats_buf->phy_homechan));
	len += scnprintf(buf + len, buf_len - len, "phy_tx_ch_mask = 0x%0x\n",
			 le32_to_cpu(htt_stats_buf->phy_tx_ch_mask));
	len += scnprintf(buf + len, buf_len - len, "phy_rx_ch_mask = 0x%0x\n",
			 le32_to_cpu(htt_stats_buf->phy_rx_ch_mask));
	len += scnprintf(buf + len, buf_len - len, "phybb_ini_mask = 0x%0x\n",
			 le32_to_cpu(htt_stats_buf->phybb_ini_mask));
	len += scnprintf(buf + len, buf_len - len, "phyrf_ini_mask = 0x%0x\n",
			 le32_to_cpu(htt_stats_buf->phyrf_ini_mask));
	len += scnprintf(buf + len, buf_len - len, "phy_dfs_en_mask = 0x%0x\n",
			 le32_to_cpu(htt_stats_buf->phy_dfs_en_mask));
	len += scnprintf(buf + len, buf_len - len, "phy_sscan_en_mask = 0x%0x\n",
			 le32_to_cpu(htt_stats_buf->phy_sscan_en_mask));
	len += scnprintf(buf + len, buf_len - len, "phy_synth_sel_mask = 0x%0x\n",
			 le32_to_cpu(htt_stats_buf->phy_synth_sel_mask));
	len += scnprintf(buf + len, buf_len - len, "phy_adfs_freq = %u\n",
			 le32_to_cpu(htt_stats_buf->phy_adfs_freq));
	len += scnprintf(buf + len, buf_len - len, "cck_fir_settings = 0x%0x\n",
			 le32_to_cpu(htt_stats_buf->cck_fir_settings));
	len += scnprintf(buf + len, buf_len - len, "phy_dyn_pri_chan = %u\n",
			 le32_to_cpu(htt_stats_buf->phy_dyn_pri_chan));
	len += scnprintf(buf + len, buf_len - len, "cca_thresh = 0x%0x\n",
			 le32_to_cpu(htt_stats_buf->cca_thresh));
	len += scnprintf(buf + len, buf_len - len, "dyn_cca_status = %u\n",
			 le32_to_cpu(htt_stats_buf->dyn_cca_status));
	len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_hw = 0x%x\n",
			 le32_to_cpu(htt_stats_buf->rxdesense_thresh_hw));
	len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_sw = 0x%x\n\n",
			 le32_to_cpu(htt_stats_buf->rxdesense_thresh_sw));

	stats_req->buf_len = len;
}
/* Render the PHY reset counters TLV: how often the PHY was reset and why
 * (clock-fail pass/fail, vreg power-off, forced calibration, RF mode
 * switches, temperature recalibration).
 */
static void
ath12k_htt_print_phy_reset_counters_tlv(const void *tag_buf, u16 tag_len,
					struct debug_htt_stats_req *stats_req)
{
	const struct ath12k_htt_phy_reset_counters_tlv *htt_stats_buf = tag_buf;
	u32 len = stats_req->buf_len;
	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
	u8 *buf = stats_req->buf;

	/* ignore truncated TLVs from firmware */
	if (tag_len < sizeof(*htt_stats_buf))
		return;

	len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_COUNTERS_TLV:\n");
	len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
			 le32_to_cpu(htt_stats_buf->pdev_id));
	len += scnprintf(buf + len, buf_len - len, "cf_active_low_fail_cnt = %u\n",
			 le32_to_cpu(htt_stats_buf->cf_active_low_fail_cnt));
	len += scnprintf(buf + len, buf_len - len, "cf_active_low_pass_cnt = %u\n",
			 le32_to_cpu(htt_stats_buf->cf_active_low_pass_cnt));
	len += scnprintf(buf + len, buf_len - len, "phy_off_through_vreg_cnt = %u\n",
			 le32_to_cpu(htt_stats_buf->phy_off_through_vreg_cnt));
	len += scnprintf(buf + len, buf_len - len, "force_calibration_cnt = %u\n",
			 le32_to_cpu(htt_stats_buf->force_calibration_cnt));
	len += scnprintf(buf + len, buf_len - len, "rf_mode_switch_phy_off_cnt = %u\n",
			 le32_to_cpu(htt_stats_buf->rf_mode_switch_phy_off_cnt));
	len += scnprintf(buf + len, buf_len - len, "temperature_recal_cnt = %u\n\n",
			 le32_to_cpu(htt_stats_buf->temperature_recal_cnt));

	stats_req->buf_len = len;
}
/* Render the PHY transmit-power-control stats TLV: power scaling values,
 * regulatory limits (including the 6 GHz power type), and the per-chain /
 * per-sub-band allowed-power arrays.
 */
static void
ath12k_htt_print_phy_tpc_stats_tlv(const void *tag_buf, u16 tag_len,
				   struct debug_htt_stats_req *stats_req)
{
	const struct ath12k_htt_phy_tpc_stats_tlv *htt_stats_buf = tag_buf;
	u32 len = stats_req->buf_len;
	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
	u8 *buf = stats_req->buf;

	/* ignore truncated TLVs from firmware */
	if (tag_len < sizeof(*htt_stats_buf))
		return;

	len += scnprintf(buf + len, buf_len - len, "HTT_PHY_TPC_STATS_TLV:\n");
	len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
			 le32_to_cpu(htt_stats_buf->pdev_id));
	len += scnprintf(buf + len, buf_len - len, "tx_power_scale = %u\n",
			 le32_to_cpu(htt_stats_buf->tx_power_scale));
	len += scnprintf(buf + len, buf_len - len, "tx_power_scale_db = %u\n",
			 le32_to_cpu(htt_stats_buf->tx_power_scale_db));
	len += scnprintf(buf + len, buf_len - len, "min_negative_tx_power = %d\n",
			 le32_to_cpu(htt_stats_buf->min_negative_tx_power));
	len += scnprintf(buf + len, buf_len - len, "reg_ctl_domain = %u\n",
			 le32_to_cpu(htt_stats_buf->reg_ctl_domain));
	len += scnprintf(buf + len, buf_len - len, "twice_max_rd_power = %u\n",
			 le32_to_cpu(htt_stats_buf->twice_max_rd_power));
	len += scnprintf(buf + len, buf_len - len, "max_tx_power = %u\n",
			 le32_to_cpu(htt_stats_buf->max_tx_power));
	len += scnprintf(buf + len, buf_len - len, "home_max_tx_power = %u\n",
			 le32_to_cpu(htt_stats_buf->home_max_tx_power));
	len += scnprintf(buf + len, buf_len - len, "psd_power = %d\n",
			 le32_to_cpu(htt_stats_buf->psd_power));
	len += scnprintf(buf + len, buf_len - len, "eirp_power = %u\n",
			 le32_to_cpu(htt_stats_buf->eirp_power));
	len += scnprintf(buf + len, buf_len - len, "power_type_6ghz = %u\n",
			 le32_to_cpu(htt_stats_buf->power_type_6ghz));
	len += print_array_to_buf(buf, len, "max_reg_allowed_power",
				  htt_stats_buf->max_reg_allowed_power,
				  ATH12K_HTT_STATS_MAX_CHAINS, "\n");
	len += print_array_to_buf(buf, len, "max_reg_allowed_power_6ghz",
				  htt_stats_buf->max_reg_allowed_power_6ghz,
				  ATH12K_HTT_STATS_MAX_CHAINS, "\n");
	len += print_array_to_buf(buf, len, "sub_band_cfreq",
				  htt_stats_buf->sub_band_cfreq,
				  ATH12K_HTT_MAX_CH_PWR_INFO_SIZE, "\n");
	len += print_array_to_buf(buf, len, "sub_band_txpower",
				  htt_stats_buf->sub_band_txpower,
				  ATH12K_HTT_MAX_CH_PWR_INFO_SIZE, "\n\n");

	stats_req->buf_len = len;
}
/* Render the SoC common tx/rx stats TLV: currently just the 64-bit count of
 * MSDUs dropped for invalid peers, carried as two LE 32-bit halves.
 */
static void
ath12k_htt_print_soc_txrx_stats_common_tlv(const void *tag_buf, u16 tag_len,
					   struct debug_htt_stats_req *stats_req)
{
	const struct ath12k_htt_t2h_soc_txrx_stats_common_tlv *htt_stats_buf = tag_buf;
	u64 drop_count;
	u32 len = stats_req->buf_len;
	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
	u8 *buf = stats_req->buf;

	/* ignore truncated TLVs from firmware */
	if (tag_len < sizeof(*htt_stats_buf))
		return;

	drop_count = ath12k_le32hilo_to_u64(htt_stats_buf->inv_peers_msdu_drop_count_hi,
					    htt_stats_buf->inv_peers_msdu_drop_count_lo);

	len += scnprintf(buf + len, buf_len - len, "HTT_SOC_COMMON_STATS_TLV:\n");
	len += scnprintf(buf + len, buf_len - len, "soc_drop_count = %llu\n\n",
			 drop_count);

	stats_req->buf_len = len;
}
/* Render the per-rate PER stats TLV for one rate-control mode (DL SU,
 * DL MU-MIMO, DL OFDMA, UL MU-MIMO, UL OFDMA). For each dimension — BW,
 * NSS, MCS and (OFDMA modes only) RU size — ppdus tried/ack-failed and
 * mpdus tried/failed counters are printed. UL modes label the ppdu
 * counters data/non-data instead of tried/ack-failed.
 */
static void
ath12k_htt_print_tx_per_rate_stats_tlv(const void *tag_buf, u16 tag_len,
				       struct debug_htt_stats_req *stats_req)
{
	const struct ath12k_htt_tx_per_rate_stats_tlv *stats_buf = tag_buf;
	u32 len = stats_req->buf_len;
	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
	u32 ru_size_cnt = 0;
	u32 rc_mode, ru_type;
	u8 *buf = stats_req->buf, i;
	const char *mode_prefix;

	/* ignore truncated TLVs from firmware */
	if (tag_len < sizeof(*stats_buf))
		return;

	rc_mode = le32_to_cpu(stats_buf->rc_mode);
	ru_type = le32_to_cpu(stats_buf->ru_type);

	/* pick the section header, counter-name suffix and — for OFDMA
	 * modes — how many RU-size buckets the firmware reported
	 */
	switch (rc_mode) {
	case ATH12K_HTT_STATS_RC_MODE_DLSU:
		len += scnprintf(buf + len, buf_len - len, "HTT_TX_PER_STATS:\n");
		len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_SU:\n");
		mode_prefix = "su";
		break;
	case ATH12K_HTT_STATS_RC_MODE_DLMUMIMO:
		len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_DL_MUMIMO:\n");
		mode_prefix = "mu";
		break;
	case ATH12K_HTT_STATS_RC_MODE_DLOFDMA:
		len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_DL_OFDMA:\n");
		mode_prefix = "ofdma";
		if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY)
			ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS;
		else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU)
			ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS;
		break;
	case ATH12K_HTT_STATS_RC_MODE_ULMUMIMO:
		len += scnprintf(buf + len, buf_len - len, "HTT_RX_PER_STATS:\n");
		len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_UL_MUMIMO:\n");
		mode_prefix = "ulmu";
		break;
	case ATH12K_HTT_STATS_RC_MODE_ULOFDMA:
		len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_UL_OFDMA:\n");
		mode_prefix = "ulofdma";
		if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY)
			ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS;
		else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU)
			ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS;
		break;
	default:
		/* unknown rc_mode: print nothing */
		return;
	}

	/* per-BW counters; after each loop i == NUM_BW_CNTRS and is reused
	 * as the label for the separate 320 MHz bucket (per_bw320)
	 */
	len += scnprintf(buf + len, buf_len - len, "\nPER per BW:\n");

	if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
	    rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
		len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
				 mode_prefix);
	else
		len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
				 mode_prefix);
	for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
		len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
				 le32_to_cpu(stats_buf->per_bw[i].ppdus_tried));
	len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
			 le32_to_cpu(stats_buf->per_bw320.ppdus_tried));

	if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
	    rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
		len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ",
				 mode_prefix);
	else
		len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ",
				 mode_prefix);
	for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
		len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
				 le32_to_cpu(stats_buf->per_bw[i].ppdus_ack_failed));
	len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
			 le32_to_cpu(stats_buf->per_bw320.ppdus_ack_failed));

	len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix);
	for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
		len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
				 le32_to_cpu(stats_buf->per_bw[i].mpdus_tried));
	len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
			 le32_to_cpu(stats_buf->per_bw320.mpdus_tried));

	len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix);
	for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
		len += scnprintf(buf + len, buf_len - len, " %u:%u", i,
				 le32_to_cpu(stats_buf->per_bw[i].mpdus_failed));
	len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
			 le32_to_cpu(stats_buf->per_bw320.mpdus_failed));

	/* per-NSS counters; labels are 1-based (i + 1 spatial streams) */
	len += scnprintf(buf + len, buf_len - len, "\nPER per NSS:\n");

	if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
	    rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
		len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
				 mode_prefix);
	else
		len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
				 mode_prefix);
	for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
		len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
				 le32_to_cpu(stats_buf->per_nss[i].ppdus_tried));
	len += scnprintf(buf + len, buf_len - len, "\n");

	if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
	    rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
		len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ",
				 mode_prefix);
	else
		len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ",
				 mode_prefix);
	for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
		len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
				 le32_to_cpu(stats_buf->per_nss[i].ppdus_ack_failed));
	len += scnprintf(buf + len, buf_len - len, "\n");

	len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix);
	for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
		len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
				 le32_to_cpu(stats_buf->per_nss[i].mpdus_tried));
	len += scnprintf(buf + len, buf_len - len, "\n");

	len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix);
	for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
		len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
				 le32_to_cpu(stats_buf->per_nss[i].mpdus_failed));
	len += scnprintf(buf + len, buf_len - len, "\n");

	/* per-MCS counters */
	len += scnprintf(buf + len, buf_len - len, "\nPER per MCS:\n");

	if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
	    rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
		len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
				 mode_prefix);
	else
		len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
				 mode_prefix);
	for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
		len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
				 le32_to_cpu(stats_buf->per_mcs[i].ppdus_tried));
	len += scnprintf(buf + len, buf_len - len, "\n");

	if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
	    rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
		len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ",
				 mode_prefix);
	else
		len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ",
				 mode_prefix);
	for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
		len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
				 le32_to_cpu(stats_buf->per_mcs[i].ppdus_ack_failed));
	len += scnprintf(buf + len, buf_len - len, "\n");

	len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix);
	for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
		len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
				 le32_to_cpu(stats_buf->per_mcs[i].mpdus_tried));
	len += scnprintf(buf + len, buf_len - len, "\n");

	len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix);
	for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
		len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
				 le32_to_cpu(stats_buf->per_mcs[i].mpdus_failed));
	len += scnprintf(buf + len, buf_len - len, "\n");

	/* per-RU counters: OFDMA modes only, and only when the firmware
	 * reported a valid RU type (ru_size_cnt set in the switch above)
	 */
	if ((rc_mode == ATH12K_HTT_STATS_RC_MODE_DLOFDMA ||
	     rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA) &&
	    ru_type != ATH12K_HTT_STATS_RU_TYPE_INVALID) {
		len += scnprintf(buf + len, buf_len - len, "\nPER per RU:\n");

		if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA)
			len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
					 mode_prefix);
		else
			len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
					 mode_prefix);
		for (i = 0; i < ru_size_cnt; i++)
			len += scnprintf(buf + len, buf_len - len, " %s:%u ",
					 ath12k_tx_ru_size_to_str(ru_type, i),
					 le32_to_cpu(stats_buf->ru[i].ppdus_tried));
		len += scnprintf(buf + len, buf_len - len, "\n");

		if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA)
			len += scnprintf(buf + len, buf_len - len,
					 "non_data_ppdus_%s = ", mode_prefix);
		else
			len += scnprintf(buf + len, buf_len - len,
					 "ppdus_ack_failed_%s = ", mode_prefix);
		for (i = 0; i < ru_size_cnt; i++)
			len += scnprintf(buf + len, buf_len - len, " %s:%u ",
					 ath12k_tx_ru_size_to_str(ru_type, i),
					 le32_to_cpu(stats_buf->ru[i].ppdus_ack_failed));
		len += scnprintf(buf + len, buf_len - len, "\n");

		len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ",
				 mode_prefix);
		for (i = 0; i < ru_size_cnt; i++)
			len += scnprintf(buf + len, buf_len - len, " %s:%u ",
					 ath12k_tx_ru_size_to_str(ru_type, i),
					 le32_to_cpu(stats_buf->ru[i].mpdus_tried));
		len += scnprintf(buf + len, buf_len - len, "\n");

		len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ",
				 mode_prefix);
		for (i = 0; i < ru_size_cnt; i++)
			len += scnprintf(buf + len, buf_len - len, " %s:%u ",
					 ath12k_tx_ru_size_to_str(ru_type, i),
					 le32_to_cpu(stats_buf->ru[i].mpdus_failed));
		len += scnprintf(buf + len, buf_len - len, "\n\n");
	}

	/* DL MU-MIMO additionally reports the last probed rate and the
	 * MU probe counts per rate-control mode
	 */
	if (rc_mode == ATH12K_HTT_STATS_RC_MODE_DLMUMIMO) {
		len += scnprintf(buf + len, buf_len - len, "\nlast_probed_bw = %u\n",
				 le32_to_cpu(stats_buf->last_probed_bw));
		len += scnprintf(buf + len, buf_len - len, "last_probed_nss = %u\n",
				 le32_to_cpu(stats_buf->last_probed_nss));
		len += scnprintf(buf + len, buf_len - len, "last_probed_mcs = %u\n",
				 le32_to_cpu(stats_buf->last_probed_mcs));
		len += print_array_to_buf(buf, len, "MU Probe count per RC MODE",
					  stats_buf->probe_cnt,
					  ATH12K_HTT_RC_MODE_2D_COUNT, "\n\n");
	}

	stats_req->buf_len = len;
}
static void
ath12k_htt_print_dmac_reset_stats_tlv(const void *tag_buf, u16 tag_len,
struct debug_htt_stats_req *stats_req)
@ -2561,7 +3162,6 @@ ath12k_htt_print_dmac_reset_stats_tlv(const void *tag_buf, u16 tag_len,
time = ath12k_le32hilo_to_u64(htt_stats_buf->reset_time_hi_ms,
htt_stats_buf->reset_time_lo_ms);
len += scnprintf(buf + len, buf_len - len, "reset_time_ms = %llu\n", time);
time = ath12k_le32hilo_to_u64(htt_stats_buf->disengage_time_hi_ms,
htt_stats_buf->disengage_time_lo_ms);
len += scnprintf(buf + len, buf_len - len, "disengage_time_ms = %llu\n", time);
@ -2680,7 +3280,7 @@ ath12k_htt_print_tx_pdev_rate_stats_be_ofdma_tlv(const void *tag_buf, u16 tag_le
len += scnprintf(buf + len, buf_len - len, "\n");
len += print_array_to_buf_index(buf, len, "be_ofdma_tx_nss = ", 1,
htt_stats_buf->be_ofdma_tx_nss,
ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS,
ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS,
"\n");
len += print_array_to_buf(buf, len, "be_ofdma_tx_bw",
htt_stats_buf->be_ofdma_tx_bw,
@ -2696,6 +3296,45 @@ ath12k_htt_print_tx_pdev_rate_stats_be_ofdma_tlv(const void *tag_buf, u16 tag_le
stats_req->buf_len = len;
}
/* Format the HTT_STATS_PDEV_MBSSID_CTRL_FRAME_STATS_TAG TLV into the
 * debugfs stats buffer as human-readable "name = value" lines.
 *
 * tag_buf/tag_len: raw TLV payload and its length as reported by firmware.
 * stats_req: accumulates output in stats_req->buf; buf_len tracks the
 * current write offset and is updated on return.
 */
static void
ath12k_htt_print_pdev_mbssid_ctrl_frame_stats_tlv(const void *tag_buf, u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct ath12k_htt_pdev_mbssid_ctrl_frame_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
u32 mac_id_word;
/* Reject truncated TLVs before touching any field */
if (tag_len < sizeof(*htt_stats_buf))
return;
/* mac id is packed into the low bits of mac_id__word */
mac_id_word = le32_to_cpu(htt_stats_buf->mac_id__word);
len += scnprintf(buf + len, buf_len - len, "HTT_MBSSID_CTRL_FRAME_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
len += scnprintf(buf + len, buf_len - len, "basic_trigger_across_bss = %u\n",
le32_to_cpu(htt_stats_buf->basic_trigger_across_bss));
len += scnprintf(buf + len, buf_len - len, "basic_trigger_within_bss = %u\n",
le32_to_cpu(htt_stats_buf->basic_trigger_within_bss));
len += scnprintf(buf + len, buf_len - len, "bsr_trigger_across_bss = %u\n",
le32_to_cpu(htt_stats_buf->bsr_trigger_across_bss));
len += scnprintf(buf + len, buf_len - len, "bsr_trigger_within_bss = %u\n",
le32_to_cpu(htt_stats_buf->bsr_trigger_within_bss));
len += scnprintf(buf + len, buf_len - len, "mu_rts_across_bss = %u\n",
le32_to_cpu(htt_stats_buf->mu_rts_across_bss));
len += scnprintf(buf + len, buf_len - len, "mu_rts_within_bss = %u\n",
le32_to_cpu(htt_stats_buf->mu_rts_within_bss));
len += scnprintf(buf + len, buf_len - len, "ul_mumimo_trigger_across_bss = %u\n",
le32_to_cpu(htt_stats_buf->ul_mumimo_trigger_across_bss));
len += scnprintf(buf + len, buf_len - len,
"ul_mumimo_trigger_within_bss = %u\n\n",
le32_to_cpu(htt_stats_buf->ul_mumimo_trigger_within_bss));
/* Publish the new end-of-data offset for subsequent TLV printers */
stats_req->buf_len = len;
}
static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
u16 tag, u16 len, const void *tag_buf,
void *user_data)
@ -2869,6 +3508,30 @@ static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
case HTT_STATS_PDEV_OBSS_PD_TAG:
ath12k_htt_print_pdev_obss_pd_stats_tlv(tag_buf, len, stats_req);
break;
case HTT_STATS_DLPAGER_STATS_TAG:
ath12k_htt_print_dlpager_stats_tlv(tag_buf, len, stats_req);
break;
case HTT_STATS_PHY_STATS_TAG:
ath12k_htt_print_phy_stats_tlv(tag_buf, len, stats_req);
break;
case HTT_STATS_PHY_COUNTERS_TAG:
ath12k_htt_print_phy_counters_tlv(tag_buf, len, stats_req);
break;
case HTT_STATS_PHY_RESET_STATS_TAG:
ath12k_htt_print_phy_reset_stats_tlv(tag_buf, len, stats_req);
break;
case HTT_STATS_PHY_RESET_COUNTERS_TAG:
ath12k_htt_print_phy_reset_counters_tlv(tag_buf, len, stats_req);
break;
case HTT_STATS_PHY_TPC_STATS_TAG:
ath12k_htt_print_phy_tpc_stats_tlv(tag_buf, len, stats_req);
break;
case HTT_STATS_SOC_TXRX_STATS_COMMON_TAG:
ath12k_htt_print_soc_txrx_stats_common_tlv(tag_buf, len, stats_req);
break;
case HTT_STATS_PER_RATE_STATS_TAG:
ath12k_htt_print_tx_per_rate_stats_tlv(tag_buf, len, stats_req);
break;
case HTT_STATS_DMAC_RESET_STATS_TAG:
ath12k_htt_print_dmac_reset_stats_tlv(tag_buf, len, stats_req);
break;
@ -2878,6 +3541,10 @@ static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
case HTT_STATS_TX_PDEV_RATE_STATS_BE_OFDMA_TAG:
ath12k_htt_print_tx_pdev_rate_stats_be_ofdma_tlv(tag_buf, len, stats_req);
break;
case HTT_STATS_PDEV_MBSSID_CTRL_FRAME_STATS_TAG:
ath12k_htt_print_pdev_mbssid_ctrl_frame_stats_tlv(tag_buf, len,
stats_req);
break;
default:
break;
}

View file

@ -135,9 +135,14 @@ enum ath12k_dbg_htt_ext_stats_type {
ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17,
ATH12K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19,
ATH12K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23,
ATH12K_DBG_HTT_EXT_STATS_DLPAGER_STATS = 36,
ATH12K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS = 37,
ATH12K_DBG_HTT_EXT_VDEVS_TXRX_STATS = 38,
ATH12K_DBG_HTT_EXT_PDEV_PER_STATS = 40,
ATH12K_DBG_HTT_EXT_STATS_SOC_ERROR = 45,
ATH12K_DBG_HTT_EXT_STATS_PDEV_SCHED_ALGO = 49,
ATH12K_DBG_HTT_EXT_STATS_MANDATORY_MUOFDMA = 51,
ATH12K_DGB_HTT_EXT_STATS_PDEV_MBSSID_CTRL_FRAME = 54,
/* keep this last */
ATH12K_DBG_HTT_NUM_EXT_STATS,
@ -194,6 +199,13 @@ enum ath12k_dbg_htt_tlv_tag {
HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG = 102,
HTT_STATS_TX_SELFGEN_AC_SCHED_STATUS_STATS_TAG = 111,
HTT_STATS_TX_SELFGEN_AX_SCHED_STATUS_STATS_TAG = 112,
HTT_STATS_DLPAGER_STATS_TAG = 120,
HTT_STATS_PHY_COUNTERS_TAG = 121,
HTT_STATS_PHY_STATS_TAG = 122,
HTT_STATS_PHY_RESET_COUNTERS_TAG = 123,
HTT_STATS_PHY_RESET_STATS_TAG = 124,
HTT_STATS_SOC_TXRX_STATS_COMMON_TAG = 125,
HTT_STATS_PER_RATE_STATS_TAG = 128,
HTT_STATS_MU_PPDU_DIST_TAG = 129,
HTT_STATS_TX_PDEV_MUMIMO_GRP_STATS_TAG = 130,
HTT_STATS_TX_PDEV_RATE_STATS_BE_OFDMA_TAG = 135,
@ -201,7 +213,9 @@ enum ath12k_dbg_htt_tlv_tag {
HTT_STATS_TX_SELFGEN_BE_STATS_TAG = 138,
HTT_STATS_TX_SELFGEN_BE_SCHED_STATUS_STATS_TAG = 139,
HTT_STATS_DMAC_RESET_STATS_TAG = 155,
HTT_STATS_PHY_TPC_STATS_TAG = 157,
HTT_STATS_PDEV_SCHED_ALGO_OFDMA_STATS_TAG = 165,
HTT_STATS_PDEV_MBSSID_CTRL_FRAME_STATS_TAG = 176,
HTT_STATS_MAX_TAG,
};
@ -1054,6 +1068,132 @@ struct ath12k_htt_pdev_obss_pd_stats_tlv {
__le32 num_sr_ppdu_abort_flush_cnt;
} __packed;
/* Lock state of a download-pager page; indexes the first dimension of
 * ath12k_htt_dl_pager_stats_tlv::pgs_info.
 */
enum ath12k_htt_stats_page_lock_state {
ATH12K_HTT_STATS_PAGE_LOCKED = 0,
ATH12K_HTT_STATS_PAGE_UNLOCKED = 1,
ATH12K_NUM_PG_LOCK_STATE
};
/* Number of most-recent page events recorded per lock state */
#define ATH12K_PAGER_MAX 10
/* Bitfield layout of ath12k_htt_dl_pager_stats_tlv::info0/info1/info2 */
#define ATH12K_HTT_DLPAGER_ASYNC_LOCK_PG_CNT_INFO0 GENMASK(7, 0)
#define ATH12K_HTT_DLPAGER_SYNC_LOCK_PG_CNT_INFO0 GENMASK(15, 8)
#define ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO1 GENMASK(15, 0)
#define ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO1 GENMASK(31, 16)
#define ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO2 GENMASK(15, 0)
#define ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO2 GENMASK(31, 16)
/* One download-pager page event: page number, page count and a split
 * 64-bit timestamp (ts_lsb/ts_msb). All fields little-endian, as sent
 * by firmware.
 */
struct ath12k_htt_pgs_info {
__le32 page_num;
__le32 num_pgs;
__le32 ts_lsb;
__le32 ts_msb;
} __packed;
/* Firmware layout of the HTT_STATS_DLPAGER_STATS_TAG TLV */
struct ath12k_htt_dl_pager_stats_tlv {
__le32 info0;
__le32 info1;
__le32 info2;
struct ath12k_htt_pgs_info pgs_info[ATH12K_NUM_PG_LOCK_STATE][ATH12K_PAGER_MAX];
} __packed;
/* Array bounds for the PHY stats/counters TLVs below */
#define ATH12K_HTT_STATS_MAX_CHAINS 8
#define ATH12K_HTT_MAX_RX_PKT_CNT 8
#define ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT 8
#define ATH12K_HTT_MAX_PER_BLK_ERR_CNT 20
#define ATH12K_HTT_MAX_RX_OTA_ERR_CNT 14
#define ATH12K_HTT_MAX_CH_PWR_INFO_SIZE 16
/* Firmware layout of the HTT_STATS_PHY_STATS_TAG TLV.
 * a_sle32 fields carry signed little-endian values (noise floor and ANI
 * level can be negative) — see the driver's endian typedefs.
 */
struct ath12k_htt_phy_stats_tlv {
a_sle32 nf_chain[ATH12K_HTT_STATS_MAX_CHAINS];
__le32 false_radar_cnt;
__le32 radar_cs_cnt;
a_sle32 ani_level;
__le32 fw_run_time;
a_sle32 runtime_nf_chain[ATH12K_HTT_STATS_MAX_CHAINS];
} __packed;
/* Firmware layout of the HTT_STATS_PHY_COUNTERS_TAG TLV */
struct ath12k_htt_phy_counters_tlv {
__le32 rx_ofdma_timing_err_cnt;
__le32 rx_cck_fail_cnt;
__le32 mactx_abort_cnt;
__le32 macrx_abort_cnt;
__le32 phytx_abort_cnt;
__le32 phyrx_abort_cnt;
__le32 phyrx_defer_abort_cnt;
__le32 rx_gain_adj_lstf_event_cnt;
__le32 rx_gain_adj_non_legacy_cnt;
__le32 rx_pkt_cnt[ATH12K_HTT_MAX_RX_PKT_CNT];
__le32 rx_pkt_crc_pass_cnt[ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT];
__le32 per_blk_err_cnt[ATH12K_HTT_MAX_PER_BLK_ERR_CNT];
__le32 rx_ota_err_cnt[ATH12K_HTT_MAX_RX_OTA_ERR_CNT];
} __packed;
/* Firmware layout of the HTT_STATS_PHY_RESET_STATS_TAG TLV: snapshot of
 * channel/PHY configuration captured at the most recent PHY reset.
 * All fields little-endian.
 */
struct ath12k_htt_phy_reset_stats_tlv {
__le32 pdev_id;
__le32 chan_mhz;
__le32 chan_band_center_freq1;
__le32 chan_band_center_freq2;
__le32 chan_phy_mode;
__le32 chan_flags;
__le32 chan_num;
__le32 reset_cause;
__le32 prev_reset_cause;
__le32 phy_warm_reset_src;
__le32 rx_gain_tbl_mode;
__le32 xbar_val;
__le32 force_calibration;
__le32 phyrf_mode;
__le32 phy_homechan;
__le32 phy_tx_ch_mask;
__le32 phy_rx_ch_mask;
__le32 phybb_ini_mask;
__le32 phyrf_ini_mask;
__le32 phy_dfs_en_mask;
__le32 phy_sscan_en_mask;
__le32 phy_synth_sel_mask;
__le32 phy_adfs_freq;
__le32 cck_fir_settings;
__le32 phy_dyn_pri_chan;
__le32 cca_thresh;
__le32 dyn_cca_status;
__le32 rxdesense_thresh_hw;
__le32 rxdesense_thresh_sw;
} __packed;
/* Firmware layout of the HTT_STATS_PHY_RESET_COUNTERS_TAG TLV:
 * cumulative PHY reset event counters for a pdev.
 */
struct ath12k_htt_phy_reset_counters_tlv {
__le32 pdev_id;
__le32 cf_active_low_fail_cnt;
__le32 cf_active_low_pass_cnt;
__le32 phy_off_through_vreg_cnt;
__le32 force_calibration_cnt;
__le32 rf_mode_switch_phy_off_cnt;
__le32 temperature_recal_cnt;
} __packed;
/* Firmware layout of the HTT_STATS_PHY_TPC_STATS_TAG TLV: transmit power
 * control state per pdev. Power units are firmware-defined (not verified
 * here — confirm against firmware interface docs before interpreting).
 */
struct ath12k_htt_phy_tpc_stats_tlv {
__le32 pdev_id;
__le32 tx_power_scale;
__le32 tx_power_scale_db;
__le32 min_negative_tx_power;
__le32 reg_ctl_domain;
__le32 max_reg_allowed_power[ATH12K_HTT_STATS_MAX_CHAINS];
__le32 max_reg_allowed_power_6ghz[ATH12K_HTT_STATS_MAX_CHAINS];
__le32 twice_max_rd_power;
__le32 max_tx_power;
__le32 home_max_tx_power;
__le32 psd_power;
__le32 eirp_power;
__le32 power_type_6ghz;
__le32 sub_band_cfreq[ATH12K_HTT_MAX_CH_PWR_INFO_SIZE];
__le32 sub_band_txpower[ATH12K_HTT_MAX_CH_PWR_INFO_SIZE];
} __packed;
/* Firmware layout of the HTT_STATS_SOC_TXRX_STATS_COMMON_TAG TLV:
 * hi/lo halves of a 64-bit dropped-MSDU counter for invalid peers.
 */
struct ath12k_htt_t2h_soc_txrx_stats_common_tlv {
__le32 inv_peers_msdu_drop_count_hi;
__le32 inv_peers_msdu_drop_count_lo;
} __packed;
struct ath12k_htt_dmac_reset_stats_tlv {
__le32 reset_count;
__le32 reset_time_lo_ms;
@ -1085,6 +1225,10 @@ struct ath12k_htt_pdev_sched_algo_ofdma_stats_tlv {
__le32 dlofdma_disabled_consec_no_mpdus_success[ATH12K_HTT_NUM_AC_WMM];
} __packed;
#define ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS 4
#define ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS 8
#define ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS 14
enum ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE {
ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_26,
ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_52,
@ -1105,7 +1249,65 @@ enum ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE {
ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS,
};
#define ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
/* Rate-control 2D mode index; sizes the probe_cnt array of
 * ath12k_htt_tx_per_rate_stats_tlv.
 */
enum ATH12K_HTT_RC_MODE {
ATH12K_HTT_RC_MODE_SU_OL,
ATH12K_HTT_RC_MODE_SU_BF,
ATH12K_HTT_RC_MODE_MU1_INTF,
ATH12K_HTT_RC_MODE_MU2_INTF,
ATH12K_HTT_RC_MODE_MU3_INTF,
ATH12K_HTT_RC_MODE_MU4_INTF,
ATH12K_HTT_RC_MODE_MU5_INTF,
ATH12K_HTT_RC_MODE_MU6_INTF,
ATH12K_HTT_RC_MODE_MU7_INTF,
ATH12K_HTT_RC_MODE_2D_COUNT
};
/* 802.11ax (HE) resource-unit size counters */
enum ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE {
ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26,
ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52,
ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106,
ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242,
ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484,
ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996,
ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2,
ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS
};
/* rc_mode value reported in ath12k_htt_tx_per_rate_stats_tlv::rc_mode */
enum ath12k_htt_stats_rc_mode {
ATH12K_HTT_STATS_RC_MODE_DLSU = 0,
ATH12K_HTT_STATS_RC_MODE_DLMUMIMO = 1,
ATH12K_HTT_STATS_RC_MODE_DLOFDMA = 2,
ATH12K_HTT_STATS_RC_MODE_ULMUMIMO = 3,
ATH12K_HTT_STATS_RC_MODE_ULOFDMA = 4,
};
/* ru_type value reported in ath12k_htt_tx_per_rate_stats_tlv::ru_type */
enum ath12k_htt_stats_ru_type {
ATH12K_HTT_STATS_RU_TYPE_INVALID,
ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY,
ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU,
};
/* Per-rate PPDU/MPDU attempt and failure counters (little-endian) */
struct ath12k_htt_tx_rate_stats {
__le32 ppdus_tried;
__le32 ppdus_ack_failed;
__le32 mpdus_tried;
__le32 mpdus_failed;
} __packed;
/* Firmware layout of the HTT_STATS_PER_RATE_STATS_TAG TLV: per-rate TX
 * statistics broken down by bandwidth, NSS, MCS and (for OFDMA) RU size.
 */
struct ath12k_htt_tx_per_rate_stats_tlv {
__le32 rc_mode;
__le32 last_probed_mcs;
__le32 last_probed_nss;
__le32 last_probed_bw;
struct ath12k_htt_tx_rate_stats per_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS];
struct ath12k_htt_tx_rate_stats per_nss[ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS];
struct ath12k_htt_tx_rate_stats per_mcs[ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS];
struct ath12k_htt_tx_rate_stats per_bw320;
__le32 probe_cnt[ATH12K_HTT_RC_MODE_2D_COUNT];
__le32 ru_type;
struct ath12k_htt_tx_rate_stats ru[ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS];
} __packed;
#define ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS 16
#define ATH12K_HTT_TX_PDEV_NUM_BE_BW_CNTRS 5
#define ATH12K_HTT_TX_PDEV_NUM_EHT_SIG_MCS_CNTRS 4
@ -1115,11 +1317,23 @@ struct ath12k_htt_tx_pdev_rate_stats_be_ofdma_tlv {
__le32 mac_id__word;
__le32 be_ofdma_tx_ldpc;
__le32 be_ofdma_tx_mcs[ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS];
__le32 be_ofdma_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
__le32 be_ofdma_tx_nss[ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS];
__le32 be_ofdma_tx_bw[ATH12K_HTT_TX_PDEV_NUM_BE_BW_CNTRS];
__le32 gi[ATH12K_HTT_TX_PDEV_NUM_GI_CNTRS][ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS];
__le32 be_ofdma_tx_ru_size[ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS];
__le32 be_ofdma_eht_sig_mcs[ATH12K_HTT_TX_PDEV_NUM_EHT_SIG_MCS_CNTRS];
} __packed;
/* Firmware layout of the HTT_STATS_PDEV_MBSSID_CTRL_FRAME_STATS_TAG TLV:
 * multi-BSSID control-frame counters, split into across-BSS vs within-BSS
 * events. mac_id is packed into the low bits of mac_id__word.
 */
struct ath12k_htt_pdev_mbssid_ctrl_frame_tlv {
__le32 mac_id__word;
__le32 basic_trigger_across_bss;
__le32 basic_trigger_within_bss;
__le32 bsr_trigger_across_bss;
__le32 bsr_trigger_within_bss;
__le32 mu_rts_across_bss;
__le32 mu_rts_within_bss;
__le32 ul_mumimo_trigger_across_bss;
__le32 ul_mumimo_trigger_within_bss;
} __packed;
#endif

View file

@ -41,6 +41,11 @@ void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr)
return;
}
if (!peer->primary_link) {
spin_unlock_bh(&ab->base_lock);
return;
}
ath12k_dp_rx_peer_tid_cleanup(ar, peer);
crypto_free_shash(peer->tfm_mmic);
peer->dp_setup_done = false;
@ -977,28 +982,24 @@ void ath12k_dp_pdev_free(struct ath12k_base *ab)
{
int i;
if (!ab->mon_reap_timer.function)
return;
del_timer_sync(&ab->mon_reap_timer);
for (i = 0; i < ab->num_radios; i++)
ath12k_dp_rx_pdev_free(ab, i);
}
void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab)
void ath12k_dp_pdev_pre_alloc(struct ath12k *ar)
{
struct ath12k *ar;
struct ath12k_pdev_dp *dp;
int i;
struct ath12k_pdev_dp *dp = &ar->dp;
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
dp = &ar->dp;
dp->mac_id = i;
dp->mac_id = ar->pdev_idx;
atomic_set(&dp->num_tx_pending, 0);
init_waitqueue_head(&dp->tx_empty_waitq);
/* TODO: Add any RXDMA setup required per pdev */
}
}
bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab)
{
@ -1260,15 +1261,23 @@ static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
if (!ab->hw_params->reoq_lut_support)
return;
if (!dp->reoq_lut.vaddr)
return;
if (dp->reoq_lut.vaddr) {
ath12k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_REO_REG +
HAL_REO1_QDESC_LUT_BASE0(ab), 0);
dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
dp->reoq_lut.vaddr = NULL;
}
if (dp->ml_reoq_lut.vaddr) {
ath12k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab), 0);
HAL_SEQ_WCSS_UMAC_REO_REG +
HAL_REO1_QDESC_LUT_BASE1(ab), 0);
dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
dp->ml_reoq_lut.vaddr, dp->ml_reoq_lut.paddr);
dp->ml_reoq_lut.vaddr = NULL;
}
}
void ath12k_dp_free(struct ath12k_base *ab)
@ -1276,6 +1285,9 @@ void ath12k_dp_free(struct ath12k_base *ab)
struct ath12k_dp *dp = &ab->dp;
int i;
if (!dp->ab)
return;
ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
@ -1293,6 +1305,7 @@ void ath12k_dp_free(struct ath12k_base *ab)
ath12k_dp_rx_free(ab);
/* Deinit any SOC level resource */
dp->ab = NULL;
}
void ath12k_dp_cc_config(struct ath12k_base *ab)
@ -1594,8 +1607,23 @@ static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
return -ENOMEM;
}
dp->ml_reoq_lut.vaddr = dma_alloc_coherent(ab->dev,
DP_REOQ_LUT_SIZE,
&dp->ml_reoq_lut.paddr,
GFP_KERNEL | __GFP_ZERO);
if (!dp->ml_reoq_lut.vaddr) {
ath12k_warn(ab, "failed to allocate memory for ML reoq table");
dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
dp->reoq_lut.vaddr = NULL;
return -ENOMEM;
}
ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab),
dp->reoq_lut.paddr);
ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE1(ab),
dp->ml_reoq_lut.paddr >> 8);
return 0;
}

View file

@ -368,6 +368,7 @@ struct ath12k_dp {
struct dp_rxdma_mon_ring rxdma_mon_buf_ring;
struct dp_rxdma_mon_ring tx_mon_buf_ring;
struct ath12k_reo_q_addr_lut reoq_lut;
struct ath12k_reo_q_addr_lut ml_reoq_lut;
};
/* HTT definitions */
@ -1805,7 +1806,7 @@ void ath12k_dp_free(struct ath12k_base *ab);
int ath12k_dp_alloc(struct ath12k_base *ab);
void ath12k_dp_cc_config(struct ath12k_base *ab);
int ath12k_dp_pdev_alloc(struct ath12k_base *ab);
void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab);
void ath12k_dp_pdev_pre_alloc(struct ath12k *ar);
void ath12k_dp_pdev_free(struct ath12k_base *ab);
int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
int mac_id, enum hal_ring_type ring_type);

View file

@ -740,13 +740,20 @@ static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u
{
struct ath12k_reo_queue_ref *qref;
struct ath12k_dp *dp = &ab->dp;
bool ml_peer = false;
if (!ab->hw_params->reoq_lut_support)
return;
/* TODO: based on ML peer or not, select the LUT. below assumes non
* ML peer
*/
if (peer_id & ATH12K_PEER_ML_ID_VALID) {
peer_id &= ~ATH12K_PEER_ML_ID_VALID;
ml_peer = true;
}
if (ml_peer)
qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
else
qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
@ -761,13 +768,20 @@ static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u
{
struct ath12k_reo_queue_ref *qref;
struct ath12k_dp *dp = &ab->dp;
bool ml_peer = false;
if (!ab->hw_params->reoq_lut_support)
return;
/* TODO: based on ML peer or not, select the LUT. below assumes non
* ML peer
*/
if (peer_id & ATH12K_PEER_ML_ID_VALID) {
peer_id &= ~ATH12K_PEER_ML_ID_VALID;
ml_peer = true;
}
if (ml_peer)
qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
else
qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
@ -802,6 +816,9 @@ void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
rx_tid->vaddr = NULL;
}
if (peer->mlo)
ath12k_peer_rx_tid_qref_reset(ar->ab, peer->ml_id, tid);
else
ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
rx_tid->active = false;
@ -940,7 +957,13 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
return -ENOENT;
}
if (ab->hw_params->reoq_lut_support && !dp->reoq_lut.vaddr) {
if (!peer->primary_link) {
spin_unlock_bh(&ab->base_lock);
return 0;
}
if (ab->hw_params->reoq_lut_support &&
(!dp->reoq_lut.vaddr || !dp->ml_reoq_lut.vaddr)) {
spin_unlock_bh(&ab->base_lock);
ath12k_warn(ab, "reo qref table is not setup\n");
return -EINVAL;
@ -1021,7 +1044,11 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
/* Update the REO queue LUT at the corresponding peer id
* and tid with qaddr.
*/
if (peer->mlo)
ath12k_peer_rx_tid_qref_setup(ab, peer->ml_id, tid, paddr);
else
ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
spin_unlock_bh(&ab->base_lock);
} else {
spin_unlock_bh(&ab->base_lock);
@ -1038,15 +1065,25 @@ err_mem_free:
}
int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
struct ieee80211_ampdu_params *params)
struct ieee80211_ampdu_params *params,
u8 link_id)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
struct ath12k_link_sta *arsta = &ahsta->deflink;
int vdev_id = arsta->arvif->vdev_id;
struct ath12k_link_sta *arsta;
int vdev_id;
int ret;
ret = ath12k_dp_rx_peer_tid_setup(ar, params->sta->addr, vdev_id,
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
ahsta->link[link_id]);
if (!arsta)
return -ENOLINK;
vdev_id = arsta->arvif->vdev_id;
ret = ath12k_dp_rx_peer_tid_setup(ar, arsta->addr, vdev_id,
params->tid, params->buf_size,
params->ssn, arsta->ahsta->pn_type);
if (ret)
@ -1056,19 +1093,29 @@ int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
}
int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
struct ieee80211_ampdu_params *params)
struct ieee80211_ampdu_params *params,
u8 link_id)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_peer *peer;
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
struct ath12k_link_sta *arsta = &ahsta->deflink;
int vdev_id = arsta->arvif->vdev_id;
struct ath12k_link_sta *arsta;
int vdev_id;
bool active;
int ret;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
ahsta->link[link_id]);
if (!arsta)
return -ENOLINK;
vdev_id = arsta->arvif->vdev_id;
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find(ab, vdev_id, params->sta->addr);
peer = ath12k_peer_find(ab, vdev_id, arsta->addr);
if (!peer) {
spin_unlock_bh(&ab->base_lock);
ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
@ -2781,6 +2828,11 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev
return -ENOENT;
}
if (!peer->primary_link) {
spin_unlock_bh(&ab->base_lock);
return 0;
}
for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
rx_tid = &peer->rx_tid[i];
rx_tid->ab = ab;
@ -3912,7 +3964,7 @@ void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
ath12k_hal_srng_access_begin(ab, srng);
while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
tag = u64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
tag = le64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
switch (tag) {
case HAL_REO_GET_QUEUE_STATS_STATUS:

View file

@ -85,9 +85,11 @@ static inline u32 ath12k_he_gi_to_nl80211_he_gi(u8 sgi)
}
int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
struct ieee80211_ampdu_params *params);
struct ieee80211_ampdu_params *params,
u8 link_id);
int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
struct ieee80211_ampdu_params *params);
struct ieee80211_ampdu_params *params,
u8 link_id);
int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
const u8 *peer_addr,
enum set_key_cmd key_cmd,

View file

@ -581,7 +581,7 @@ struct hal_tlv_hdr {
#define HAL_TLV_64_HDR_LEN GENMASK(21, 10)
struct hal_tlv_64_hdr {
u64 tl;
__le64 tl;
u8 value[];
} __packed;

View file

@ -26,8 +26,8 @@ static int ath12k_hal_reo_cmd_queue_stats(struct hal_tlv_64_hdr *tlv,
{
struct hal_reo_get_queue_stats *desc;
tlv->tl = u32_encode_bits(HAL_REO_GET_QUEUE_STATS, HAL_TLV_HDR_TAG) |
u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
tlv->tl = le64_encode_bits(HAL_REO_GET_QUEUE_STATS, HAL_TLV_HDR_TAG) |
le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
desc = (struct hal_reo_get_queue_stats *)tlv->value;
memset_startat(desc, 0, queue_addr_lo);
@ -59,8 +59,8 @@ static int ath12k_hal_reo_cmd_flush_cache(struct ath12k_hal *hal,
hal->current_blk_index = avail_slot;
}
tlv->tl = u32_encode_bits(HAL_REO_FLUSH_CACHE, HAL_TLV_HDR_TAG) |
u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
tlv->tl = le64_encode_bits(HAL_REO_FLUSH_CACHE, HAL_TLV_HDR_TAG) |
le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
desc = (struct hal_reo_flush_cache *)tlv->value;
memset_startat(desc, 0, cache_addr_lo);
@ -97,8 +97,8 @@ static int ath12k_hal_reo_cmd_update_rx_queue(struct hal_tlv_64_hdr *tlv,
{
struct hal_reo_update_rx_queue *desc;
tlv->tl = u32_encode_bits(HAL_REO_UPDATE_RX_REO_QUEUE, HAL_TLV_HDR_TAG) |
u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
tlv->tl = le64_encode_bits(HAL_REO_UPDATE_RX_REO_QUEUE, HAL_TLV_HDR_TAG) |
le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
desc = (struct hal_reo_update_rx_queue *)tlv->value;
memset_startat(desc, 0, queue_addr_lo);

File diff suppressed because it is too large Load diff

View file

@ -14,6 +14,7 @@
struct ath12k;
struct ath12k_base;
struct ath12k_hw;
struct ath12k_hw_group;
struct ath12k_pdev_map;
struct ath12k_generic_iter {
@ -44,6 +45,12 @@ struct ath12k_generic_iter {
#define ATH12K_DEFAULT_LINK_ID 0
#define ATH12K_INVALID_LINK_ID 255
/* Default link after the IEEE802.11 defined Max link id limit
* for driver usage purpose.
*/
#define ATH12K_DEFAULT_SCAN_LINK IEEE80211_MLD_MAX_NUM_LINKS
#define ATH12K_NUM_MAX_LINKS (IEEE80211_MLD_MAX_NUM_LINKS + 1)
enum ath12k_supported_bw {
ATH12K_BW_20 = 0,
ATH12K_BW_40 = 1,
@ -54,10 +61,10 @@ enum ath12k_supported_bw {
extern const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default;
void ath12k_mac_destroy(struct ath12k_base *ab);
void ath12k_mac_unregister(struct ath12k_base *ab);
int ath12k_mac_register(struct ath12k_base *ab);
int ath12k_mac_allocate(struct ath12k_base *ab);
void ath12k_mac_destroy(struct ath12k_hw_group *ag);
void ath12k_mac_unregister(struct ath12k_hw_group *ag);
int ath12k_mac_register(struct ath12k_hw_group *ag);
int ath12k_mac_allocate(struct ath12k_hw_group *ag);
int ath12k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
u16 *rate);
u8 ath12k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
@ -89,5 +96,6 @@ int ath12k_mac_vif_set_keepalive(struct ath12k_link_vif *arvif,
enum wmi_sta_keepalive_method method,
u32 interval);
u8 ath12k_mac_get_target_pdev_id(struct ath12k *ar);
int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif);
#endif

View file

@ -1123,6 +1123,9 @@ void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
void ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
{
if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
return;
__ath12k_pci_ext_irq_disable(ab);
ath12k_pci_sync_ext_irqs(ab);
}
@ -1147,6 +1150,11 @@ int ath12k_pci_hif_resume(struct ath12k_base *ab)
void ath12k_pci_stop(struct ath12k_base *ab)
{
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
if (!test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags))
return;
ath12k_pci_ce_irq_disable_sync(ab);
ath12k_ce_cleanup_pipes(ab);
}
@ -1717,6 +1725,7 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
ath12k_pci_power_down(ab, false);
ath12k_qmi_deinit_service(ab);
ath12k_core_hw_group_unassign(ab);
goto qmi_fail;
}
@ -1725,6 +1734,7 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
cancel_work_sync(&ab->reset_work);
cancel_work_sync(&ab->dump_work);
ath12k_core_deinit(ab);
ath12k_fw_unmap(ab);
qmi_fail:
ath12k_mhi_unregister(ab_pci);

View file

@ -8,6 +8,22 @@
#include "peer.h"
#include "debug.h"
/* Look up an ML (multi-link) peer entry by MLD address.
 * Returns the matching entry from ah->ml_peers, or NULL if none exists.
 * Caller must hold the wiphy lock.
 */
static struct ath12k_ml_peer *ath12k_peer_ml_find(struct ath12k_hw *ah, const u8 *addr)
{
	struct ath12k_ml_peer *entry;

	lockdep_assert_wiphy(ah->hw->wiphy);

	list_for_each_entry(entry, &ah->ml_peers, list) {
		if (ether_addr_equal(entry->addr, addr))
			return entry;
	}

	return NULL;
}
struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id,
const u8 *addr)
{
@ -63,6 +79,20 @@ struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab,
return NULL;
}
static struct ath12k_peer *ath12k_peer_find_by_ml_id(struct ath12k_base *ab,
int ml_peer_id)
{
struct ath12k_peer *peer;
lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list)
if (ml_peer_id == peer->ml_id)
return peer;
return NULL;
}
struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab,
int peer_id)
{
@ -70,6 +100,9 @@ struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab,
lockdep_assert_held(&ab->base_lock);
if (peer_id & ATH12K_PEER_ML_ID_VALID)
return ath12k_peer_find_by_ml_id(ab, peer_id);
list_for_each_entry(peer, &ab->peers, list)
if (peer_id == peer->peer_id)
return peer;
@ -231,8 +264,9 @@ int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
return 0;
}
int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
static int ath12k_peer_delete_send(struct ath12k *ar, u32 vdev_id, const u8 *addr)
{
struct ath12k_base *ab = ar->ab;
int ret;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@ -241,12 +275,25 @@ int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
ret = ath12k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
if (ret) {
ath12k_warn(ar->ab,
ath12k_warn(ab,
"failed to delete peer vdev_id %d addr %pM ret %d\n",
vdev_id, addr, ret);
return ret;
}
return 0;
}
int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
{
int ret;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
ret = ath12k_peer_delete_send(ar, vdev_id, addr);
if (ret)
return ret;
ret = ath12k_wait_for_peer_delete_done(ar, vdev_id, addr);
if (ret)
return ret;
@ -266,7 +313,11 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
struct ath12k_wmi_peer_create_arg *arg)
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ath12k_link_sta *arsta;
u8 link_id = arvif->link_id;
struct ath12k_peer *peer;
struct ath12k_sta *ahsta;
u16 ml_peer_id;
int ret;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@ -332,6 +383,27 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
arvif->ast_idx = peer->hw_peer_id;
}
if (sta) {
ahsta = ath12k_sta_to_ahsta(sta);
arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
ahsta->link[link_id]);
/* Fill ML info into created peer */
if (sta->mlo) {
ml_peer_id = ahsta->ml_peer_id;
peer->ml_id = ml_peer_id | ATH12K_PEER_ML_ID_VALID;
ether_addr_copy(peer->ml_addr, sta->addr);
/* the assoc link is considered primary for now */
peer->primary_link = arsta->is_assoc_link;
peer->mlo = true;
} else {
peer->ml_id = ATH12K_MLO_PEER_ID_INVALID;
peer->primary_link = true;
peer->mlo = false;
}
}
peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
@ -341,3 +413,150 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
return 0;
}
/* Allocate the lowest free ML peer id from ah->free_ml_peer_id_map and
 * mark it used. Returns ATH12K_MLO_PEER_ID_INVALID when all
 * ATH12K_MAX_MLO_PEERS ids are taken. Caller must hold the wiphy lock.
 */
static u16 ath12k_peer_ml_alloc(struct ath12k_hw *ah)
{
	u16 id;

	lockdep_assert_wiphy(ah->hw->wiphy);

	for (id = 0; id < ATH12K_MAX_MLO_PEERS; id++) {
		if (test_bit(id, ah->free_ml_peer_id_map))
			continue;

		/* claim this id before returning it */
		set_bit(id, ah->free_ml_peer_id_map);
		return id;
	}

	return ATH12K_MLO_PEER_ID_INVALID;
}
/* Create the hw-level ML peer entry for an MLO station and assign it an
 * ML peer id (stored in ahsta->ml_peer_id).
 *
 * Returns 0 on success, -EINVAL for a non-MLO station, -EEXIST if an
 * entry for this MLD address already exists, -ENOMEM on allocation or
 * id exhaustion. Caller must hold the wiphy lock.
 */
int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta)
{
	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
	struct ath12k_ml_peer *entry;

	lockdep_assert_wiphy(ah->hw->wiphy);

	if (!sta->mlo)
		return -EINVAL;

	/* one entry per MLD address */
	entry = ath12k_peer_ml_find(ah, sta->addr);
	if (entry) {
		ath12k_hw_warn(ah, "ML peer %d exists already, unable to add new entry for %pM",
			       entry->id, sta->addr);
		return -EEXIST;
	}

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	ahsta->ml_peer_id = ath12k_peer_ml_alloc(ah);
	if (ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) {
		ath12k_hw_warn(ah, "unable to allocate ML peer id for sta %pM",
			       sta->addr);
		kfree(entry);
		return -ENOMEM;
	}

	ether_addr_copy(entry->addr, sta->addr);
	entry->id = ahsta->ml_peer_id;
	list_add(&entry->list, &ah->ml_peers);

	return 0;
}
/* Tear down the hw-level ML peer entry for an MLO station: release its
 * ML peer id back to the bitmap, then unlink and free the list entry.
 *
 * Returns 0 on success, -EINVAL for a non-MLO station or if no entry for
 * this MLD address is found (the id is released either way, matching the
 * original ordering). Caller must hold the wiphy lock.
 */
int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta)
{
	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
	struct ath12k_ml_peer *entry;

	lockdep_assert_wiphy(ah->hw->wiphy);

	if (!sta->mlo)
		return -EINVAL;

	/* release the id first, then invalidate the station's copy */
	clear_bit(ahsta->ml_peer_id, ah->free_ml_peer_id_map);
	ahsta->ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;

	entry = ath12k_peer_ml_find(ah, sta->addr);
	if (!entry) {
		ath12k_hw_warn(ah, "ML peer for %pM not found", sta->addr);
		return -EINVAL;
	}

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
/* Delete all link peers of an MLO station across their radios.
 *
 * Two passes are required: firmware expects the delete commands for all
 * link peers to be issued before the driver waits for any unmap/delete
 * responses (see the comment in the first loop). Errors are accumulated
 * in err_ret so remaining links are still processed; the last error (if
 * any) is returned, 0 otherwise. Caller must hold the wiphy lock.
 */
int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta)
{
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
struct ath12k_hw *ah = ahvif->ah;
struct ath12k_link_vif *arvif;
struct ath12k_link_sta *arsta;
unsigned long links;
struct ath12k *ar;
int ret, err_ret = 0;
u8 link_id;
lockdep_assert_wiphy(ah->hw->wiphy);
if (!sta->mlo)
return -EINVAL;
/* FW expects delete of all link peers at once before waiting for reception
 * of peer unmap or delete responses
 */
links = ahsta->links_map;
/* Pass 1: clean up DP state and issue the delete command per link */
for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
/* skip links with no vif/sta/radio bound — nothing to delete */
if (!arvif || !arsta)
continue;
ar = arvif->ar;
if (!ar)
continue;
ath12k_dp_peer_cleanup(ar, arvif->vdev_id, arsta->addr);
ret = ath12k_peer_delete_send(ar, arvif->vdev_id, arsta->addr);
if (ret) {
ath12k_warn(ar->ab,
"failed to delete peer vdev_id %d addr %pM ret %d\n",
arvif->vdev_id, arsta->addr, ret);
err_ret = ret;
continue;
}
}
/* Ensure all link peers are deleted and unmapped */
links = ahsta->links_map;
/* Pass 2: wait for each delete to complete before dropping the count */
for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
if (!arvif || !arsta)
continue;
ar = arvif->ar;
if (!ar)
continue;
ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id, arsta->addr);
if (ret) {
err_ret = ret;
continue;
}
/* peer confirmed gone on this radio */
ar->num_peers--;
}
return err_ret;
}

View file

@ -19,6 +19,8 @@ struct ppdu_user_delayba {
u32 resp_rate_flags;
};
#define ATH12K_PEER_ML_ID_VALID BIT(13)
struct ath12k_peer {
struct list_head list;
struct ieee80211_sta *sta;
@ -44,9 +46,25 @@ struct ath12k_peer {
struct ppdu_user_delayba ppdu_stats_delayba;
bool delayba_flag;
bool is_authorized;
bool mlo;
/* protected by ab->data_lock */
bool dp_setup_done;
u16 ml_id;
/* any other ML info common for all partners can be added
* here and would be same for all partner peers.
*/
u8 ml_addr[ETH_ALEN];
/* To ensure only certain work related to dp is done once */
bool primary_link;
};
/* Per-MLD peer bookkeeping entry, linked into ath12k_hw::ml_peers and
 * managed by ath12k_peer_ml_create()/ath12k_peer_ml_delete().
 */
struct ath12k_ml_peer {
	struct list_head list;
	/* lookup key; copied from sta->addr at creation */
	u8 addr[ETH_ALEN];
	/* id allocated from ath12k_hw::free_ml_peer_id_map */
	u16 id;
};
void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id);
@ -66,5 +84,8 @@ int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
const u8 *addr);
bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id);
struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab, int ast_hash);
int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta);
int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta);
int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta);
#endif /* _PEER_H_ */

View file

@ -2023,14 +2023,14 @@ static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
u8 hw_link_id = 0;
int i;
if (!(ab->mlo_capable_flags & ATH12K_INTRA_DEVICE_MLO_SUPPORT)) {
if (!ab->ag->mlo_capable) {
ath12k_dbg(ab, ATH12K_DBG_QMI,
"intra device MLO is disabled hence skip QMI MLO cap");
"MLO is disabled hence skip QMI MLO cap");
return;
}
if (!ab->qmi.num_radios || ab->qmi.num_radios == U8_MAX) {
ab->mlo_capable_flags = 0;
ab->single_chip_mlo_supp = false;
ath12k_dbg(ab, ATH12K_DBG_QMI,
"skip QMI MLO cap due to invalid num_radio %d\n",
@ -2066,7 +2066,9 @@ static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
req->mlo_chip_info_valid = 1;
}
static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
/* clang stack usage explodes if this is inlined */
static noinline_for_stack
int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
{
struct qmi_wlanfw_host_cap_req_msg_v01 req = {};
struct qmi_wlanfw_host_cap_resp_msg_v01 resp = {};
@ -2174,12 +2176,9 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
goto out;
}
if (resp.single_chip_mlo_support_valid) {
if (resp.single_chip_mlo_support)
ab->mlo_capable_flags |= ATH12K_INTRA_DEVICE_MLO_SUPPORT;
else
ab->mlo_capable_flags &= ~ATH12K_INTRA_DEVICE_MLO_SUPPORT;
}
if (resp.single_chip_mlo_support_valid &&
resp.single_chip_mlo_support)
ab->single_chip_mlo_supp = true;
if (!resp.num_phy_valid) {
ret = -ENODATA;
@ -2275,7 +2274,9 @@ resp_out:
return ret;
}
static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
/* clang stack usage explodes if this is inlined */
static noinline_for_stack
int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
{
struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
struct qmi_wlanfw_respond_mem_resp_msg_v01 resp = {};
@ -2433,7 +2434,9 @@ this_chunk_done:
return 0;
}
static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
/* clang stack usage explodes if this is inlined */
static noinline_for_stack
int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
{
struct qmi_wlanfw_cap_req_msg_v01 req = {};
struct qmi_wlanfw_cap_resp_msg_v01 resp = {};
@ -2619,7 +2622,9 @@ out:
return ret;
}
static int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
/* clang stack usage explodes if this is inlined */
static noinline_for_stack
int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
enum ath12k_qmi_bdf_type type)
{
struct device *dev = ab->dev;
@ -2791,7 +2796,9 @@ out:
return ret;
}
static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
/* clang stack usage explodes if this is inlined */
static noinline_for_stack
int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
struct qmi_wlanfw_m3_info_req_msg_v01 req = {};
@ -3023,6 +3030,8 @@ void ath12k_qmi_firmware_stop(struct ath12k_base *ab)
{
int ret;
clear_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags);
ret = ath12k_qmi_wlanfw_mode_send(ab, ATH12K_FIRMWARE_MODE_OFF);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to send wlan mode off\n");
@ -3079,9 +3088,69 @@ ath12k_qmi_driver_event_post(struct ath12k_qmi *qmi,
return 0;
}
static int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
/* Unblock QMI event processing for @ab and queue a HOST_CAP event so
 * the deferred host capability request is finally sent to firmware.
 *
 * block_event is written only under qmi->event_lock, matching the
 * ath12k_qmi_set/get_event_block() contract.
 */
void ath12k_qmi_trigger_host_cap(struct ath12k_base *ab)
{
	struct ath12k_qmi *qmi = &ab->qmi;

	spin_lock(&qmi->event_lock);

	if (ath12k_qmi_get_event_block(qmi))
		ath12k_qmi_set_event_block(qmi, false);

	spin_unlock(&qmi->event_lock);

	ath12k_dbg(ab, ATH12K_DBG_QMI, "trigger host cap for device id %d\n",
		   ab->device_id);

	ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_HOST_CAP, NULL);
}
/* Return true when every device in the hw group has reported a valid
 * radio count (num_radios != U8_MAX) and is therefore ready for the
 * host capability exchange; false as soon as one device is missing or
 * has not reported yet.
 */
static bool ath12k_qmi_hw_group_host_cap_ready(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];

		if (!ab || ab->qmi.num_radios == U8_MAX)
			return false;
	}

	return true;
}
/* Find the first device in @ag whose QMI event processing is currently
 * blocked (deferred until the whole group is host-cap ready).
 *
 * Caller must hold ag->mutex (asserted below); each device's block flag
 * is additionally sampled under its own qmi.event_lock.
 *
 * Returns the blocked device, or NULL when none is blocked.
 */
static struct ath12k_base *ath12k_qmi_hw_group_find_blocked(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	lockdep_assert_held(&ag->mutex);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		spin_lock(&ab->qmi.event_lock);

		if (ath12k_qmi_get_event_block(&ab->qmi)) {
			spin_unlock(&ab->qmi.event_lock);
			return ab;
		}

		spin_unlock(&ab->qmi.event_lock);
	}

	return NULL;
}
/* clang stack usage explodes if this is inlined */
static noinline_for_stack
int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
{
struct ath12k_base *ab = qmi->ab, *block_ab;
struct ath12k_hw_group *ag = ab->ag;
int ret;
ath12k_qmi_phy_cap_send(ab);
@ -3092,16 +3161,30 @@ static int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
return ret;
}
ret = ath12k_qmi_host_cap_send(ab);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to send host cap QMI:%d\n", ret);
return ret;
spin_lock(&qmi->event_lock);
ath12k_qmi_set_event_block(qmi, true);
spin_unlock(&qmi->event_lock);
mutex_lock(&ag->mutex);
if (ath12k_qmi_hw_group_host_cap_ready(ag)) {
ath12k_core_hw_group_set_mlo_capable(ag);
block_ab = ath12k_qmi_hw_group_find_blocked(ag);
if (block_ab)
ath12k_qmi_trigger_host_cap(block_ab);
}
mutex_unlock(&ag->mutex);
return ret;
}
static int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi)
/* clang stack usage explodes if this is inlined */
static noinline_for_stack
int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi)
{
struct ath12k_base *ab = qmi->ab;
int ret;
@ -3115,7 +3198,9 @@ static int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi)
return ret;
}
static int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi)
/* clang stack usage explodes if this is inlined */
static noinline_for_stack
int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi)
{
struct ath12k_base *ab = qmi->ab;
int ret;
@ -3280,6 +3365,21 @@ static const struct qmi_ops ath12k_qmi_ops = {
.del_server = ath12k_qmi_ops_del_server,
};
/* Handle ATH12K_QMI_EVENT_HOST_CAP: send the (previously deferred) host
 * capability request to firmware.
 *
 * Returns the ath12k_qmi_host_cap_send() result; negative on failure.
 */
static int ath12k_qmi_event_host_cap(struct ath12k_qmi *qmi)
{
	struct ath12k_base *ab = qmi->ab;
	int ret;

	ret = ath12k_qmi_host_cap_send(ab);
	if (ret < 0)
		ath12k_warn(ab, "failed to send qmi host cap for device id %d: %d\n",
			    ab->device_id, ret);

	return ret;
}
static void ath12k_qmi_driver_event_work(struct work_struct *work)
{
struct ath12k_qmi *qmi = container_of(work, struct ath12k_qmi,
@ -3306,7 +3406,6 @@ static void ath12k_qmi_driver_event_work(struct work_struct *work)
break;
case ATH12K_QMI_EVENT_SERVER_EXIT:
set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
break;
case ATH12K_QMI_EVENT_REQUEST_MEM:
ret = ath12k_qmi_event_mem_request(qmi);
@ -3320,20 +3419,28 @@ static void ath12k_qmi_driver_event_work(struct work_struct *work)
break;
case ATH12K_QMI_EVENT_FW_READY:
clear_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
if (test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags)) {
if (ab->is_reset)
ath12k_hal_dump_srng_stats(ab);
set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
queue_work(ab->workqueue, &ab->restart_work);
break;
}
clear_bit(ATH12K_FLAG_CRASH_FLUSH,
&ab->dev_flags);
clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
ath12k_core_qmi_firmware_ready(ab);
set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
ret = ath12k_core_qmi_firmware_ready(ab);
if (!ret)
set_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE,
&ab->dev_flags);
break;
case ATH12K_QMI_EVENT_HOST_CAP:
ret = ath12k_qmi_event_host_cap(qmi);
if (ret < 0)
set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
break;
default:
ath12k_warn(ab, "invalid event type: %d", event->type);
break;
@ -3386,11 +3493,15 @@ int ath12k_qmi_init_service(struct ath12k_base *ab)
void ath12k_qmi_deinit_service(struct ath12k_base *ab)
{
if (!ab->qmi.ab)
return;
qmi_handle_release(&ab->qmi.handle);
cancel_work_sync(&ab->qmi.event_work);
destroy_workqueue(ab->qmi.event_wq);
ath12k_qmi_m3_free(ab);
ath12k_qmi_free_target_mem_chunk(ab);
ab->qmi.ab = NULL;
}
void ath12k_qmi_free_resource(struct ath12k_base *ab)

View file

@ -68,6 +68,7 @@ enum ath12k_qmi_event_type {
ATH12K_QMI_EVENT_FORCE_FW_ASSERT,
ATH12K_QMI_EVENT_POWER_UP,
ATH12K_QMI_EVENT_POWER_DOWN,
ATH12K_QMI_EVENT_HOST_CAP,
ATH12K_QMI_EVENT_MAX,
};
@ -142,6 +143,10 @@ struct ath12k_qmi {
u32 target_mem_mode;
bool target_mem_delayed;
u8 cal_done;
/* protected with struct ath12k_qmi::event_lock */
bool block_event;
u8 num_radios;
struct target_info target;
struct m3_mem_region m3_mem;
@ -594,11 +599,26 @@ struct qmi_wlanfw_wlan_ini_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
/* Mark QMI event processing for this device as blocked/unblocked.
 * Caller must hold qmi->event_lock (asserted).
 */
static inline void ath12k_qmi_set_event_block(struct ath12k_qmi *qmi, bool block)
{
	lockdep_assert_held(&qmi->event_lock);

	qmi->block_event = block;
}
/* Return whether QMI event processing for this device is blocked.
 * Caller must hold qmi->event_lock (asserted).
 */
static inline bool ath12k_qmi_get_event_block(struct ath12k_qmi *qmi)
{
	lockdep_assert_held(&qmi->event_lock);

	return qmi->block_event;
}
int ath12k_qmi_firmware_start(struct ath12k_base *ab,
u32 mode);
void ath12k_qmi_firmware_stop(struct ath12k_base *ab);
void ath12k_qmi_deinit_service(struct ath12k_base *ab);
int ath12k_qmi_init_service(struct ath12k_base *ab);
void ath12k_qmi_free_resource(struct ath12k_base *ab);
void ath12k_qmi_trigger_host_cap(struct ath12k_base *ab);
#endif

View file

@ -821,6 +821,8 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
struct wmi_vdev_create_cmd *cmd;
struct sk_buff *skb;
struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
bool is_ml_vdev = is_valid_ether_addr(args->mld_addr);
struct wmi_vdev_create_mlo_params *ml_params;
struct wmi_tlv *tlv;
int ret, len;
void *ptr;
@ -830,7 +832,8 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
* both the bands.
*/
len = sizeof(*cmd) + TLV_HDR_SIZE +
(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));
(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)) +
(is_ml_vdev ? TLV_HDR_SIZE + sizeof(*ml_params) : 0);
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
@ -879,6 +882,21 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
txrx_streams->supported_rx_streams =
cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx);
ptr += WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
if (is_ml_vdev) {
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
sizeof(*ml_params));
ptr += TLV_HDR_SIZE;
ml_params = ptr;
ml_params->tlv_header =
ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_CREATE_PARAMS,
sizeof(*ml_params));
ether_addr_copy(ml_params->mld_macaddr.addr, args->mld_addr);
}
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
args->if_id, args->type, args->subtype,
@ -1020,19 +1038,27 @@ static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
bool restart)
{
struct wmi_vdev_start_mlo_params *ml_params;
struct wmi_partner_link_info *partner_info;
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_vdev_start_request_cmd *cmd;
struct sk_buff *skb;
struct ath12k_wmi_channel_params *chan;
struct wmi_tlv *tlv;
void *ptr;
int ret, len;
int ret, len, i, ml_arg_size = 0;
if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
return -EINVAL;
len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
if (!restart && arg->ml.enabled) {
ml_arg_size = TLV_HDR_SIZE + sizeof(*ml_params) +
TLV_HDR_SIZE + (arg->ml.num_partner_links *
sizeof(*partner_info));
len += ml_arg_size;
}
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
@ -1085,6 +1111,61 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
ptr += sizeof(*tlv);
if (ml_arg_size) {
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
sizeof(*ml_params));
ptr += TLV_HDR_SIZE;
ml_params = ptr;
ml_params->tlv_header =
ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_START_PARAMS,
sizeof(*ml_params));
ml_params->flags = le32_encode_bits(arg->ml.enabled,
ATH12K_WMI_FLAG_MLO_ENABLED) |
le32_encode_bits(arg->ml.assoc_link,
ATH12K_WMI_FLAG_MLO_ASSOC_LINK) |
le32_encode_bits(arg->ml.mcast_link,
ATH12K_WMI_FLAG_MLO_MCAST_VDEV) |
le32_encode_bits(arg->ml.link_add,
ATH12K_WMI_FLAG_MLO_LINK_ADD);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %d start ml flags 0x%x\n",
arg->vdev_id, ml_params->flags);
ptr += sizeof(*ml_params);
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
arg->ml.num_partner_links *
sizeof(*partner_info));
ptr += TLV_HDR_SIZE;
partner_info = ptr;
for (i = 0; i < arg->ml.num_partner_links; i++) {
partner_info->tlv_header =
ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PARTNER_LINK_PARAMS,
sizeof(*partner_info));
partner_info->vdev_id =
cpu_to_le32(arg->ml.partner_info[i].vdev_id);
partner_info->hw_link_id =
cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
ether_addr_copy(partner_info->vdev_addr.addr,
arg->ml.partner_info[i].addr);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "partner vdev %d hw_link_id %d macaddr%pM\n",
partner_info->vdev_id, partner_info->hw_link_id,
partner_info->vdev_addr.addr);
partner_info++;
}
ptr = partner_info;
}
ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
restart ? "restart" : "start", arg->vdev_id,
arg->freq, arg->mode);
@ -1149,9 +1230,14 @@ int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_peer_create_cmd *cmd;
struct sk_buff *skb;
int ret;
int ret, len;
struct wmi_peer_create_mlo_params *ml_param;
void *ptr;
struct wmi_tlv *tlv;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*ml_param);
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
@ -1163,9 +1249,23 @@ int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
cmd->peer_type = cpu_to_le32(arg->peer_type);
cmd->vdev_id = cpu_to_le32(arg->vdev_id);
ptr = skb->data + sizeof(*cmd);
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
sizeof(*ml_param));
ptr += TLV_HDR_SIZE;
ml_param = ptr;
ml_param->tlv_header =
ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_CREATE_PARAMS,
sizeof(*ml_param));
if (arg->ml_enabled)
ml_param->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
ptr += sizeof(*ml_param);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI peer create vdev_id %d peer_addr %pM\n",
arg->vdev_id, arg->peer_addr);
"WMI peer create vdev_id %d peer_addr %pM ml_flags 0x%x\n",
arg->vdev_id, arg->peer_addr, ml_param->flags);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
if (ret) {
@ -2001,12 +2101,15 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
struct ath12k_wmi_vht_rate_set_params *mcs;
struct ath12k_wmi_he_rate_set_params *he_mcs;
struct ath12k_wmi_eht_rate_set_params *eht_mcs;
struct wmi_peer_assoc_mlo_params *ml_params;
struct wmi_peer_assoc_mlo_partner_info_params *partner_info;
struct sk_buff *skb;
struct wmi_tlv *tlv;
void *ptr;
u32 peer_legacy_rates_align;
u32 peer_ht_rates_align;
int i, ret, len;
__le32 v;
peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
sizeof(u32));
@ -2018,8 +2121,13 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
sizeof(*mcs) + TLV_HDR_SIZE +
(sizeof(*he_mcs) * arg->peer_he_mcs_count) +
TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count) +
TLV_HDR_SIZE + TLV_HDR_SIZE;
TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count);
if (arg->ml.enabled)
len += TLV_HDR_SIZE + sizeof(*ml_params) +
TLV_HDR_SIZE + (arg->ml.num_partner_links * sizeof(*partner_info));
else
len += (2 * TLV_HDR_SIZE);
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
@ -2143,12 +2251,38 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
ptr += sizeof(*he_mcs);
}
/* MLO header tag with 0 length */
len = 0;
tlv = ptr;
len = arg->ml.enabled ? sizeof(*ml_params) : 0;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
ptr += TLV_HDR_SIZE;
if (!len)
goto skip_ml_params;
ml_params = ptr;
ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_ASSOC_PARAMS,
len);
ml_params->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
if (arg->ml.assoc_link)
ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
if (arg->ml.primary_umac)
ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
if (arg->ml.logical_link_idx_valid)
ml_params->flags |=
cpu_to_le32(ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID);
if (arg->ml.peer_id_valid)
ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PEER_ID_VALID);
ether_addr_copy(ml_params->mld_addr.addr, arg->ml.mld_addr);
ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx);
ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id);
ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id);
ptr += sizeof(*ml_params);
skip_ml_params:
/* Loop through the EHT rate set */
len = arg->peer_eht_mcs_count * sizeof(*eht_mcs);
tlv = ptr;
@ -2165,12 +2299,45 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
ptr += sizeof(*eht_mcs);
}
/* ML partner links tag with 0 length */
len = 0;
tlv = ptr;
len = arg->ml.enabled ? arg->ml.num_partner_links * sizeof(*partner_info) : 0;
/* fill ML Partner links */
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
ptr += TLV_HDR_SIZE;
if (len == 0)
goto send;
for (i = 0; i < arg->ml.num_partner_links; i++) {
u32 cmd = WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC;
partner_info = ptr;
partner_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(cmd,
sizeof(*partner_info));
partner_info->vdev_id = cpu_to_le32(arg->ml.partner_info[i].vdev_id);
partner_info->hw_link_id =
cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
partner_info->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
if (arg->ml.partner_info[i].assoc_link)
partner_info->flags |=
cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
if (arg->ml.partner_info[i].primary_umac)
partner_info->flags |=
cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
if (arg->ml.partner_info[i].logical_link_idx_valid) {
v = cpu_to_le32(ATH12K_WMI_FLAG_MLO_LINK_ID_VALID);
partner_info->flags |= v;
}
partner_info->logical_link_idx =
cpu_to_le32(arg->ml.partner_info[i].logical_link_idx);
ptr += sizeof(*partner_info);
}
send:
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x\n",
cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
@ -6687,6 +6854,7 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
const u32 *vdev_ids)
{
int i;
struct ieee80211_bss_conf *conf;
struct ath12k_link_vif *arvif;
struct ath12k_vif *ahvif;
@ -6705,7 +6873,20 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
}
ahvif = arvif->ahvif;
if (arvif->is_up && ahvif->vif->bss_conf.csa_active)
if (arvif->link_id > IEEE80211_MLD_MAX_NUM_LINKS) {
ath12k_warn(ab, "Invalid CSA switch count even link id: %d\n",
arvif->link_id);
continue;
}
conf = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]);
if (!conf) {
ath12k_warn(ab, "unable to access bss link conf in process csa for vif %pM link %u\n",
ahvif->vif->addr, arvif->link_id);
continue;
}
if (arvif->is_up && conf->csa_active)
ieee80211_csa_finish(ahvif->vif, 0);
}
rcu_read_unlock();

View file

@ -1929,6 +1929,19 @@ enum wmi_tlv_tag {
WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9,
WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT,
WMI_TAG_EHT_RATE_SET = 0x3C4,
WMI_TAG_DCS_AWGN_INT_TYPE = 0x3C5,
WMI_TAG_MLO_TX_SEND_PARAMS,
WMI_TAG_MLO_PARTNER_LINK_PARAMS,
WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC,
WMI_TAG_MLO_SETUP_CMD = 0x3C9,
WMI_TAG_MLO_SETUP_COMPLETE_EVENT,
WMI_TAG_MLO_READY_CMD,
WMI_TAG_MLO_TEARDOWN_CMD,
WMI_TAG_MLO_TEARDOWN_COMPLETE,
WMI_TAG_MLO_PEER_ASSOC_PARAMS = 0x3D0,
WMI_TAG_MLO_PEER_CREATE_PARAMS = 0x3D5,
WMI_TAG_MLO_VDEV_START_PARAMS = 0x3D6,
WMI_TAG_MLO_VDEV_CREATE_PARAMS = 0x3D7,
WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8,
WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD = 0x3D9,
WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD = 0x3FB,
@ -2740,6 +2753,7 @@ struct ath12k_wmi_vdev_create_arg {
u8 if_stats_id;
u32 mbssid_flags;
u32 mbssid_tx_vdev_id;
u8 mld_addr[ETH_ALEN];
};
#define ATH12K_MAX_VDEV_STATS_ID 0x30
@ -2766,6 +2780,33 @@ struct ath12k_wmi_vdev_txrx_streams_params {
__le32 supported_rx_streams;
} __packed;
/* WMI_TAG_MLO_VDEV_CREATE_PARAMS TLV appended to WMI_VDEV_CREATE_CMDID;
 * carries the vdev's MLD MAC address (firmware wire format).
 */
struct wmi_vdev_create_mlo_params {
	__le32 tlv_header;
	struct ath12k_wmi_mac_addr_params mld_macaddr;
} __packed;
#define ATH12K_WMI_FLAG_MLO_ENABLED BIT(0)
#define ATH12K_WMI_FLAG_MLO_ASSOC_LINK BIT(1)
#define ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC BIT(2)
#define ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID BIT(3)
#define ATH12K_WMI_FLAG_MLO_PEER_ID_VALID BIT(4)
#define ATH12K_WMI_FLAG_MLO_MCAST_VDEV BIT(5)
#define ATH12K_WMI_FLAG_MLO_EMLSR_SUPPORT BIT(6)
#define ATH12K_WMI_FLAG_MLO_FORCED_INACTIVE BIT(7)
#define ATH12K_WMI_FLAG_MLO_LINK_ADD BIT(8)
/* WMI_TAG_MLO_VDEV_START_PARAMS TLV appended to the vdev start request;
 * 'flags' holds ATH12K_WMI_FLAG_MLO_* bits (firmware wire format).
 */
struct wmi_vdev_start_mlo_params {
	__le32 tlv_header;
	__le32 flags;
} __packed;
/* WMI_TAG_MLO_PARTNER_LINK_PARAMS TLV: one partner link entry in the
 * vdev start request (firmware wire format).
 */
struct wmi_partner_link_info {
	__le32 tlv_header;
	__le32 vdev_id;
	__le32 hw_link_id;
	struct ath12k_wmi_mac_addr_params vdev_addr;
} __packed;
struct wmi_vdev_delete_cmd {
__le32 tlv_header;
__le32 vdev_id;
@ -2909,6 +2950,27 @@ enum wmi_phy_mode {
MODE_MAX = 33,
};
#define ATH12K_WMI_MLO_MAX_LINKS 4
/* Host-side (not wire format) description of one MLO partner link;
 * used to fill the WMI_TAG_MLO_PARTNER_LINK_PARAMS* TLVs.
 */
struct wmi_ml_partner_info {
	u32 vdev_id;
	u32 hw_link_id;
	u8 addr[ETH_ALEN];
	bool assoc_link;
	bool primary_umac;
	bool logical_link_idx_valid;
	u32 logical_link_idx;
};
/* Host-side MLO arguments for vdev start; consumed by
 * ath12k_wmi_vdev_start() to build the WMI_TAG_MLO_VDEV_START_PARAMS
 * TLV and the partner link list (only when starting, not restarting).
 */
struct wmi_ml_arg {
	bool enabled;
	bool assoc_link;
	bool mcast_link;
	bool link_add;
	u8 num_partner_links;
	struct wmi_ml_partner_info partner_info[ATH12K_WMI_MLO_MAX_LINKS];
};
struct wmi_vdev_start_req_arg {
u32 vdev_id;
u32 freq;
@ -2946,12 +3008,19 @@ struct wmi_vdev_start_req_arg {
u32 mbssid_flags;
u32 mbssid_tx_vdev_id;
u32 punct_bitmap;
struct wmi_ml_arg ml;
};
struct ath12k_wmi_peer_create_arg {
const u8 *peer_addr;
u32 peer_type;
u32 vdev_id;
bool ml_enabled;
};
/* WMI_TAG_MLO_PEER_CREATE_PARAMS TLV appended to WMI_PEER_CREATE_CMDID;
 * 'flags' holds ATH12K_WMI_FLAG_MLO_* bits.
 *
 * Firmware wire format, hence __packed — previously missing here,
 * unlike every sibling WMI MLO TLV struct in this header (e.g.
 * wmi_vdev_create_mlo_params, wmi_vdev_start_mlo_params).
 */
struct wmi_peer_create_mlo_params {
	__le32 tlv_header;
	__le32 flags;
} __packed;
struct ath12k_wmi_pdev_set_regdomain_arg {
@ -3618,6 +3687,24 @@ struct wmi_vdev_install_key_arg {
#define WMI_HECAP_TXRX_MCS_NSS_IDX_160 1
#define WMI_HECAP_TXRX_MCS_NSS_IDX_80_80 2
#define ATH12K_WMI_MLO_MAX_PARTNER_LINKS \
(ATH12K_WMI_MLO_MAX_LINKS + ATH12K_MAX_NUM_BRIDGE_LINKS - 1)
/* Host-side MLO parameters for peer assoc; translated by
 * ath12k_wmi_send_peer_assoc_cmd() into the
 * WMI_TAG_MLO_PEER_ASSOC_PARAMS TLV and partner link entries.
 */
struct peer_assoc_mlo_params {
	bool enabled;
	bool assoc_link;
	bool primary_umac;
	bool peer_id_valid;
	bool logical_link_idx_valid;
	bool bridge_peer;
	u8 mld_addr[ETH_ALEN];
	u32 logical_link_idx;
	u32 ml_peer_id;
	u32 ieee_link_id;
	u8 num_partner_links;
	struct wmi_ml_partner_info partner_info[ATH12K_WMI_MLO_MAX_LINKS];
};
struct wmi_rate_set_arg {
u32 num_rates;
u8 rates[WMI_MAX_SUPPORTED_RATES];
@ -3692,8 +3779,36 @@ struct ath12k_wmi_peer_assoc_arg {
u32 peer_eht_tx_mcs_set[WMI_MAX_EHTCAP_RATE_SET];
struct ath12k_wmi_ppe_threshold_arg peer_eht_ppet;
u32 punct_bitmap;
bool is_assoc;
struct peer_assoc_mlo_params ml;
};
#define ATH12K_WMI_FLAG_MLO_ENABLED BIT(0)
#define ATH12K_WMI_FLAG_MLO_ASSOC_LINK BIT(1)
#define ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC BIT(2)
#define ATH12K_WMI_FLAG_MLO_LINK_ID_VALID BIT(3)
#define ATH12K_WMI_FLAG_MLO_PEER_ID_VALID BIT(4)
/* WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC TLV: one partner link
 * entry in the peer assoc command (firmware wire format).
 */
struct wmi_peer_assoc_mlo_partner_info_params {
	__le32 tlv_header;
	__le32 vdev_id;
	__le32 hw_link_id;
	__le32 flags;
	__le32 logical_link_idx;
} __packed;
/* WMI_TAG_MLO_PEER_ASSOC_PARAMS TLV in the peer assoc command
 * (firmware wire format); 'flags' holds ATH12K_WMI_FLAG_MLO_* bits.
 */
struct wmi_peer_assoc_mlo_params {
	__le32 tlv_header;
	__le32 flags;
	struct ath12k_wmi_mac_addr_params mld_addr;
	__le32 logical_link_idx;
	__le32 ml_peer_id;
	__le32 ieee_link_id;
	/* eMLSR timing fields; not populated by the visible send path */
	__le32 emlsr_trans_timeout_us;
	__le32 emlsr_trans_delay_us;
	__le32 emlsr_padding_delay_us;
} __packed;
struct wmi_peer_assoc_complete_cmd {
__le32 tlv_header;
struct ath12k_wmi_mac_addr_params peer_macaddr;

View file

@ -1441,6 +1441,7 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy,
struct wireless_dev *wdev,
unsigned int link_id,
int *dbm)
{
struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);

View file

@ -193,7 +193,7 @@ static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
static void ath_ant_set_alt_ratio(struct ath_ant_comb *antcomb,
struct ath_hw_antcomb_conf *conf)
{
/* set alt to the conf with maximun ratio */
/* set alt to the conf with maximum ratio */
if (antcomb->first_ratio && antcomb->second_ratio) {
if (antcomb->rssi_second > antcomb->rssi_third) {
/* first alt*/

View file

@ -395,7 +395,7 @@ static void ar9002_hw_init_hang_checks(struct ath_hw *ah)
ah->config.hw_hang_checks |= HW_MAC_HANG;
}
/* Sets up the AR5008/AR9001/AR9002 hardware familiy callbacks */
/* Sets up the AR5008/AR9001/AR9002 hardware family callbacks */
int ar9002_hw_attach_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);

View file

@ -1170,7 +1170,7 @@ exit:
return false;
}
/* Sets up the AR9003 hardware familiy callbacks */
/* Sets up the AR9003 hardware family callbacks */
void ar9003_hw_attach_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);

View file

@ -637,7 +637,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
* same time. Since BT's calibration doesn't happen
* that often, we'll let BT completes calibration then
* we continue to wait for cal_grant from BT.
* Orginal: Wait BT_CAL_GRANT.
* Original: Wait BT_CAL_GRANT.
* New: Receive BT_CAL_REQ -> send WLAN_CAL_GRANT->wait
* BT_CAL_DONE -> Wait BT_CAL_GRANT.
*/
@ -747,7 +747,7 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
* BT is sleeping. Check if BT wakes up during
* WLAN calibration. If BT wakes up during
* WLAN calibration, need to go through all
* message exchanges again and recal.
* message exchanges again and recalibrate.
*/
REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
(AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |

View file

@ -246,7 +246,7 @@
/*
* MRC Feild Definitions
* MRC Field Definitions
*/
#define AR_PHY_SGI_DSC_MAN 0x0007FFF0
#define AR_PHY_SGI_DSC_MAN_S 4

View file

@ -1018,6 +1018,8 @@ struct ath_softc {
u8 gtt_cnt;
u32 intrstatus;
u32 rx_active_check_time;
u32 rx_active_count;
u16 ps_flags; /* PS_* */
bool ps_enabled;
bool ps_idle;

View file

@ -17,7 +17,7 @@
#include "ath9k.h"
/* Set/change channels. If the channel is really being changed, it's done
* by reseting the chip. To accomplish this we must first cleanup any pending
* by resetting the chip. To accomplish this we must first cleanup any pending
* DMA, then restart stuff.
*/
static int ath_set_channel(struct ath_softc *sc)

View file

@ -734,7 +734,7 @@ void ath9k_cmn_spectral_scan_trigger(struct ath_common *common,
ATH9K_RX_FILTER_PHYRADAR |
ATH9K_RX_FILTER_PHYERR);
/* TODO: usually this should not be neccesary, but for some reason
/* TODO: usually this should not be necessary, but for some reason
* (or in some mode?) the trigger must be called after the
* configuration, otherwise the register will have its values reset
* (on my ar9220 to value 0x01002310)

View file

@ -750,6 +750,7 @@ static int read_file_reset(struct seq_file *file, void *data)
[RESET_TYPE_CALIBRATION] = "Calibration error",
[RESET_TX_DMA_ERROR] = "Tx DMA stop error",
[RESET_RX_DMA_ERROR] = "Rx DMA stop error",
[RESET_TYPE_RX_INACTIVE] = "Rx path inactive",
};
int i;

View file

@ -53,6 +53,7 @@ enum ath_reset_type {
RESET_TYPE_CALIBRATION,
RESET_TX_DMA_ERROR,
RESET_RX_DMA_ERROR,
RESET_TYPE_RX_INACTIVE,
__RESET_TYPE_MAX
};

View file

@ -79,7 +79,7 @@ static int ath9k_get_max_index_ht40(struct ath9k_dfs_fft_40 *fft,
const int DFS_UPPER_BIN_OFFSET = 64;
/* if detected radar on both channels, select the significant one */
if (is_ctl && is_ext) {
/* first check wether channels have 'strong' bins */
/* first check whether channels have 'strong' bins */
is_ctl = fft_bitmap_weight(fft->lower_bins) != 0;
is_ext = fft_bitmap_weight(fft->upper_bins) != 0;

View file

@ -1198,7 +1198,7 @@ static int ath9k_hif_request_firmware(struct hif_device_usb *hif_dev,
filename = FIRMWARE_AR9271;
/* expected fw locations:
* - htc_9271.fw (stable version 1.3, depricated)
* - htc_9271.fw (stable version 1.3, deprecated)
*/
snprintf(hif_dev->fw_name, sizeof(hif_dev->fw_name),
"%s", filename);

View file

@ -2149,7 +2149,7 @@ static void ath9k_set_power_network_sleep(struct ath_hw *ah)
/* When chip goes into network sleep, it could be waken
* up by MCI_INT interrupt caused by BT's HW messages
* (LNA_xxx, CONT_xxx) which chould be in a very fast
* (LNA_xxx, CONT_xxx) which could be in a very fast
* rate (~100us). This will cause chip to leave and
* re-enter network sleep mode frequently, which in
* consequence will have WLAN MCI HW to generate lots of
@ -2544,7 +2544,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->tx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_TX_MASK);
/*
* For AR9271 we will temporarilly uses the rx chainmax as read from
* For AR9271 we will temporarily use the rx chainmax as read from
* the EEPROM.
*/
if ((ah->hw_version.devid == AR5416_DEVID_PCI) &&

View file

@ -282,7 +282,7 @@ enum ath9k_hw_caps {
* an exact user defined pattern or de-authentication/disassoc pattern.
* @ATH9K_HW_WOW_PATTERN_MATCH_DWORD: device requires the first four
* bytes of the pattern for user defined pattern, de-authentication and
* disassociation patterns for all types of possible frames recieved
* disassociation patterns for all types of possible frames received
* of those types.
*/

View file

@ -50,7 +50,36 @@ reset:
"tx hung, resetting the chip\n");
ath9k_queue_reset(sc, RESET_TYPE_TX_HANG);
return false;
}
/* Minimum window (ms) over which RX interrupt activity is judged */
#define RX_INACTIVE_CHECK_INTERVAL (4 * MSEC_PER_SEC)

/* Detect a stalled RX path: rx_active_count is bumped by the ISR
 * tasklet on each RX; if too few were counted over the check window,
 * queue a chip reset.
 *
 * Returns true when RX looks healthy (or it is too early / the window
 * was too distorted to judge), false when a reset has been queued.
 */
static bool ath_hw_rx_inactive_check(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u32 interval, count;

	interval = jiffies_to_msecs(jiffies - sc->rx_active_check_time);
	count = sc->rx_active_count;

	if (interval < RX_INACTIVE_CHECK_INTERVAL)
		return true; /* too soon to check */

	/* Start a fresh accounting window */
	sc->rx_active_count = 0;
	sc->rx_active_check_time = jiffies;

	/* Need at least one interrupt per second, and we should only react if
	 * we are within a factor two of the expected interval
	 */
	if (interval > RX_INACTIVE_CHECK_INTERVAL * 2 ||
	    count >= interval / MSEC_PER_SEC)
		return true;

	ath_dbg(common, RESET,
		"RX inactivity detected. Schedule chip reset\n");
	ath9k_queue_reset(sc, RESET_TYPE_RX_INACTIVE);

	return false;
}
void ath_hw_check_work(struct work_struct *work)
@ -58,8 +87,8 @@ void ath_hw_check_work(struct work_struct *work)
struct ath_softc *sc = container_of(work, struct ath_softc,
hw_check_work.work);
if (!ath_hw_check(sc) ||
!ath_tx_complete_check(sc))
if (!ath_hw_check(sc) || !ath_tx_complete_check(sc) ||
!ath_hw_rx_inactive_check(sc))
return;
ieee80211_queue_delayed_work(sc->hw, &sc->hw_check_work,

View file

@ -251,7 +251,7 @@ struct ath_desc {
* when the descriptor is specifically marked to generate
* an interrupt with this flag. Descriptors should be
* marked periodically to insure timely replenishing of the
* supply needed for sending frames. Defering interrupts
* supply needed for sending frames. Deferring interrupts
* reduces system load and potentially allows more concurrent
* work to be done but if done to aggressively can cause
* senders to backup. When the hardware queue is left too

View file

@ -453,6 +453,7 @@ void ath9k_tasklet(struct tasklet_struct *t)
ath_rx_tasklet(sc, 0, true);
ath_rx_tasklet(sc, 0, false);
sc->rx_active_count++;
}
if (status & ATH9K_INT_TX) {
@ -1001,7 +1002,7 @@ static bool ath9k_uses_beacons(int type)
static void ath9k_vif_iter_set_beacon(struct ath9k_vif_iter_data *iter_data,
struct ieee80211_vif *vif)
{
/* Use the first (configured) interface, but prefering AP interfaces. */
/* Use the first (configured) interface, but preferring AP interfaces. */
if (!iter_data->primary_beacon_vif) {
iter_data->primary_beacon_vif = vif;
} else {
@ -2767,7 +2768,7 @@ void ath9k_fill_chanctx_ops(void)
#endif
static int ath9k_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int *dbm)
unsigned int link_id, int *dbm)
{
struct ath_softc *sc = hw->priv;
struct ath_vif *avp = (void *)vif->drv_priv;

View file

@ -60,7 +60,7 @@ static int ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
memset(dis_deauth_mask, 0, MAX_PATTERN_SIZE);
/*
* Create Dissassociate / Deauthenticate packet filter
* Create Disassociate / Deauthenticate packet filter
*
* 2 bytes 2 byte 6 bytes 6 bytes 6 bytes
* +--------------+----------+---------+--------+--------+----
@ -70,7 +70,7 @@ static int ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
* The above is the management frame format for disassociate/
* deauthenticate pattern, from this we need to match the first byte
* of 'Frame Control' and DA, SA, and BSSID fields
* (skipping 2nd byte of FC and Duration feild.
* (skipping 2nd byte of FC and Duration field.
*
* Disassociate pattern
* --------------------
@ -225,7 +225,7 @@ int ath9k_suspend(struct ieee80211_hw *hw,
ath9k_stop_btcoex(sc);
/*
* Enable wake up on recieving disassoc/deauth
* Enable wake up on receiving disassoc/deauth
* frame by default.
*/
ret = ath9k_wow_add_disassoc_deauth_pattern(sc);

View file

@ -557,7 +557,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
/*
* AR5416 can become deaf/mute when BA
* issue happens. Chip needs to be reset.
* But AP code may have sychronization issues
* But AP code may have synchronization issues
* when perform internal reset in this routine.
* Only enable reset in STA mode for now.
*/

View file

@ -1590,7 +1590,10 @@ static int wcn36xx_probe(struct platform_device *pdev)
}
n_channels = wcn_band_2ghz.n_channels + wcn_band_5ghz.n_channels;
wcn->chan_survey = devm_kmalloc(wcn->dev, n_channels, GFP_KERNEL);
wcn->chan_survey = devm_kcalloc(wcn->dev,
n_channels,
sizeof(struct wcn36xx_chan_survey),
GFP_KERNEL);
if (!wcn->chan_survey) {
ret = -ENOMEM;
goto out_wq;

View file

@ -455,6 +455,11 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
if (sg_data_sz > max_req_sz - req_sz)
sg_data_sz = max_req_sz - req_sz;
if (!sgl) {
/* out of (pre-allocated) scatterlist entries */
ret = -ENOMEM;
goto exit;
}
sg_set_buf(sgl, pkt_data, sg_data_sz);
sg_cnt++;

View file

@ -2676,7 +2676,7 @@ done:
static s32
brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
s32 *dbm)
unsigned int link_id, s32 *dbm)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_cfg80211_vif *vif = wdev_to_vif(wdev);
@ -4999,12 +4999,16 @@ exit:
s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif)
{
static const s32 pktflags[] = {
BRCMF_VNDR_IE_PRBREQ_FLAG,
BRCMF_VNDR_IE_PRBRSP_FLAG,
BRCMF_VNDR_IE_BEACON_FLAG
};
int i;
if (vif->wdev.iftype == NL80211_IFTYPE_AP)
brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_ASSOCRSP_FLAG, NULL, 0);
else
brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_PRBREQ_FLAG, NULL, 0);
for (i = 0; i < ARRAY_SIZE(pktflags); i++)
brcmf_vif_set_mgmt_ie(vif, pktflags[i], NULL, 0);

View file

@ -327,8 +327,8 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
if (skb_headroom(skb) < drvr->hdrlen || skb_header_cloned(skb)) {
head_delta = max_t(int, drvr->hdrlen - skb_headroom(skb), 0);
brcmf_dbg(INFO, "%s: insufficient headroom (%d)\n",
brcmf_ifname(ifp), head_delta);
brcmf_dbg(INFO, "%s: %s headroom\n", brcmf_ifname(ifp),
head_delta ? "insufficient" : "unmodifiable");
atomic_inc(&drvr->bus_if->stats.pktcowed);
ret = pskb_expand_head(skb, ALIGN(head_delta, NET_SKB_PAD), 0,
GFP_ATOMIC);

View file

@ -6,6 +6,8 @@
#ifndef _fwil_h_
#define _fwil_h_
#include "debug.h"
/*******************************************************************************
* Dongle command codes that are interpreted by firmware
******************************************************************************/

View file

@ -23423,6 +23423,9 @@ wlc_phy_iqcal_gainparams_nphy(struct brcms_phy *pi, u16 core_no,
break;
}
if (WARN_ON(k == NPHY_IQCAL_NUMGAINS))
return;
params->txgm = tbl_iqcal_gainparams_nphy[band_idx][k][1];
params->pga = tbl_iqcal_gainparams_nphy[band_idx][k][2];
params->pad = tbl_iqcal_gainparams_nphy[band_idx][k][3];

View file

@ -410,7 +410,7 @@ mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
static int
mwifiex_cfg80211_get_tx_power(struct wiphy *wiphy,
struct wireless_dev *wdev,
int *dbm)
unsigned int link_id, int *dbm)
{
struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
struct mwifiex_private *priv = mwifiex_get_priv(adapter,

View file

@ -545,7 +545,7 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
if (wait_event_interruptible_timeout(adapter->hs_activate_wait_q,
adapter->hs_activate_wait_q_woken,
(10 * HZ)) <= 0) {
(5 * HZ)) <= 0) {
mwifiex_dbg(adapter, ERROR,
"hs_activate_wait_q terminated\n");
return false;

View file

@ -1596,7 +1596,7 @@ void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int *dbm)
unsigned int link_id, int *dbm)
{
struct mt76_phy *phy = hw->priv;
int n_chains = hweight16(phy->chainmask);

View file

@ -1431,7 +1431,7 @@ void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int *dbm);
unsigned int link_id, int *dbm);
int mt76_init_sar_power(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar);
int mt76_get_sar_power(struct mt76_phy *phy,

View file

@ -1669,7 +1669,7 @@ static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
}
static int get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
int *dbm)
unsigned int link_id, int *dbm)
{
int ret;
struct wilc_vif *vif = netdev_priv(wdev->netdev);

View file

@ -881,7 +881,7 @@ static int qtnf_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
}
static int qtnf_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
int *dbm)
unsigned int link_id, int *dbm)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(wdev->netdev);
int ret;

View file

@ -102,7 +102,7 @@ struct qtnf_wmac {
struct qtnf_mac_info macinfo;
struct qtnf_vif iflist[QTNF_MAX_INTF];
struct cfg80211_scan_request *scan_req;
struct mutex mac_lock; /* lock during wmac speicific ops */
struct mutex mac_lock; /* lock during wmac specific ops */
struct delayed_work scan_timeout;
struct ieee80211_regdomain *rd;
struct platform_device *pdev;

View file

@ -8147,6 +8147,8 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817e, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8186, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818a, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x317f, 0xff, 0xff, 0xff),
@ -8157,12 +8159,18 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1102, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x11f2, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe033, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8188, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8189, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9041, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9043, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17ba, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x1e1e, 0xff, 0xff, 0xff),
@ -8179,6 +8187,10 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3357, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3358, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3359, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330b, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0x4902, 0xff, 0xff, 0xff),
@ -8193,6 +8205,8 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x4856, 0x0091, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x9846, 0x9041, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0xcdab, 0x8010, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff7, 0xff, 0xff, 0xff),
@ -8218,6 +8232,8 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0586, 0x341f, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe033, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe035, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17ab, 0xff, 0xff, 0xff),
@ -8226,6 +8242,8 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0070, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0077, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0789, 0x016d, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x07aa, 0x0056, 0xff, 0xff, 0xff),
@ -8248,6 +8266,8 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330a, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330d, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab2b, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x624d, 0xff, 0xff, 0xff),

View file

@ -575,9 +575,15 @@ static void rtl_free_entries_from_ack_queue(struct ieee80211_hw *hw,
void rtl_deinit_core(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtl_c2hcmd_launcher(hw, 0);
rtl_free_entries_from_scan_list(hw);
rtl_free_entries_from_ack_queue(hw, false);
if (rtlpriv->works.rtl_wq) {
destroy_workqueue(rtlpriv->works.rtl_wq);
rtlpriv->works.rtl_wq = NULL;
}
}
EXPORT_SYMBOL_GPL(rtl_deinit_core);
@ -2696,9 +2702,6 @@ MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
struct rtl_global_var rtl_global_var = {};
EXPORT_SYMBOL_GPL(rtl_global_var);
static int __init rtl_core_module_init(void)
{
BUILD_BUG_ON(TX_PWR_BY_RATE_NUM_RATE < TX_PWR_BY_RATE_NUM_SECTION);
@ -2712,10 +2715,6 @@ static int __init rtl_core_module_init(void)
/* add debugfs */
rtl_debugfs_add_topdir();
/* init some global vars */
INIT_LIST_HEAD(&rtl_global_var.glb_priv_list);
spin_lock_init(&rtl_global_var.glb_list_lock);
return 0;
}

View file

@ -124,7 +124,6 @@ int rtl_send_smps_action(struct ieee80211_hw *hw,
u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie);
void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
u8 rtl_tid_to_ac(u8 tid);
extern struct rtl_global_var rtl_global_var;
void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
#endif

View file

@ -295,46 +295,6 @@ static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
return status;
}
static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
struct rtl_priv **buddy_priv)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_priv *tpriv = NULL, *iter;
struct rtl_pci_priv *tpcipriv = NULL;
if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
list_for_each_entry(iter, &rtlpriv->glb_var->glb_priv_list,
list) {
tpcipriv = (struct rtl_pci_priv *)iter->priv;
rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"pcipriv->ndis_adapter.funcnumber %x\n",
pcipriv->ndis_adapter.funcnumber);
rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"tpcipriv->ndis_adapter.funcnumber %x\n",
tpcipriv->ndis_adapter.funcnumber);
if (pcipriv->ndis_adapter.busnumber ==
tpcipriv->ndis_adapter.busnumber &&
pcipriv->ndis_adapter.devnumber ==
tpcipriv->ndis_adapter.devnumber &&
pcipriv->ndis_adapter.funcnumber !=
tpcipriv->ndis_adapter.funcnumber) {
tpriv = iter;
break;
}
}
}
rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"find_buddy_priv %d\n", tpriv != NULL);
if (tpriv)
*buddy_priv = tpriv;
return tpriv != NULL;
}
static void rtl_pci_parse_configuration(struct pci_dev *pdev,
struct ieee80211_hw *hw)
{
@ -1696,8 +1656,6 @@ static void rtl_pci_deinit(struct ieee80211_hw *hw)
synchronize_irq(rtlpci->pdev->irq);
tasklet_kill(&rtlpriv->works.irq_tasklet);
cancel_work_sync(&rtlpriv->works.lps_change_work);
destroy_workqueue(rtlpriv->works.rtl_wq);
}
static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
@ -2011,7 +1969,6 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
pcipriv->ndis_adapter.amd_l1_patch);
rtl_pci_parse_configuration(pdev, hw);
list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list);
return true;
}
@ -2158,7 +2115,6 @@ int rtl_pci_probe(struct pci_dev *pdev,
rtlpriv->rtlhal.interface = INTF_PCI;
rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
rtlpriv->intf_ops = &rtl_pci_ops;
rtlpriv->glb_var = &rtl_global_var;
rtl_efuse_ops_init(hw);
/* MEM map */
@ -2209,7 +2165,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
pr_err("Can't init_sw_vars\n");
err = -ENODEV;
goto fail3;
goto fail2;
}
rtl_init_sw_leds(hw);
@ -2227,14 +2183,14 @@ int rtl_pci_probe(struct pci_dev *pdev,
err = rtl_pci_init(hw, pdev);
if (err) {
pr_err("Failed to init PCI\n");
goto fail3;
goto fail4;
}
err = ieee80211_register_hw(hw);
if (err) {
pr_err("Can't register mac80211 hw.\n");
err = -ENODEV;
goto fail3;
goto fail5;
}
rtlpriv->mac80211.mac80211_registered = 1;
@ -2257,16 +2213,19 @@ int rtl_pci_probe(struct pci_dev *pdev,
set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
return 0;
fail3:
pci_set_drvdata(pdev, NULL);
fail5:
rtl_pci_deinit(hw);
fail4:
rtl_deinit_core(hw);
fail3:
wait_for_completion(&rtlpriv->firmware_loading_complete);
rtlpriv->cfg->ops->deinit_sw_vars(hw);
fail2:
if (rtlpriv->io.pci_mem_start != 0)
pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
pci_release_regions(pdev);
complete(&rtlpriv->firmware_loading_complete);
fail1:
if (hw)
@ -2317,7 +2276,6 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
if (rtlpci->using_msi)
pci_disable_msi(rtlpci->pdev);
list_del(&rtlpriv->list);
if (rtlpriv->io.pci_mem_start != 0) {
pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
pci_release_regions(pdev);
@ -2376,7 +2334,6 @@ EXPORT_SYMBOL(rtl_pci_resume);
const struct rtl_intf_ops rtl_pci_ops = {
.adapter_start = rtl_pci_start,
.adapter_stop = rtl_pci_stop,
.check_buddy_priv = rtl_pci_check_buddy_priv,
.adapter_tx = rtl_pci_tx,
.flush = rtl_pci_flush,
.reset_trx_ring = rtl_pci_reset_trx_ring,

View file

@ -64,22 +64,23 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void *context)
rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
"Firmware callback routine entered!\n");
complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
pr_err("Firmware %s not available\n", fw_name);
rtlpriv->max_fw_size = 0;
return;
goto exit;
}
if (firmware->size > rtlpriv->max_fw_size) {
pr_err("Firmware is too big!\n");
rtlpriv->max_fw_size = 0;
release_firmware(firmware);
return;
goto exit;
}
pfirmware = (struct rt_firmware *)rtlpriv->rtlhal.pfirmware;
memcpy(pfirmware->sz_fw_tmpbuffer, firmware->data, firmware->size);
pfirmware->sz_fw_tmpbufferlen = firmware->size;
release_firmware(firmware);
exit:
complete(&rtlpriv->firmware_loading_complete);
}
static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)

View file

@ -2033,8 +2033,10 @@ static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
if (!_rtl8821ae_check_condition(hw, v1)) {
i += 2; /* skip the pair of expression*/
v2 = array[i+1];
while (v2 != 0xDEAD)
while (v2 != 0xDEAD) {
i += 3;
v2 = array[i + 1];
}
}
}
}

View file

@ -629,11 +629,6 @@ static void _rtl_usb_cleanup_rx(struct ieee80211_hw *hw)
tasklet_kill(&rtlusb->rx_work_tasklet);
cancel_work_sync(&rtlpriv->works.lps_change_work);
if (rtlpriv->works.rtl_wq) {
destroy_workqueue(rtlpriv->works.rtl_wq);
rtlpriv->works.rtl_wq = NULL;
}
skb_queue_purge(&rtlusb->rx_queue);
while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) {
@ -1028,19 +1023,22 @@ int rtl_usb_probe(struct usb_interface *intf,
err = ieee80211_register_hw(hw);
if (err) {
pr_err("Can't register mac80211 hw.\n");
goto error_out;
goto error_init_vars;
}
rtlpriv->mac80211.mac80211_registered = 1;
set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
return 0;
error_init_vars:
wait_for_completion(&rtlpriv->firmware_loading_complete);
rtlpriv->cfg->ops->deinit_sw_vars(hw);
error_out:
rtl_usb_deinit(hw);
rtl_deinit_core(hw);
error_out2:
_rtl_usb_io_handler_release(hw);
usb_put_dev(udev);
complete(&rtlpriv->firmware_loading_complete);
kfree(rtlpriv->usb_data);
ieee80211_free_hw(hw);
return -ENODEV;

View file

@ -2270,8 +2270,6 @@ struct rtl_intf_ops {
/*com */
int (*adapter_start)(struct ieee80211_hw *hw);
void (*adapter_stop)(struct ieee80211_hw *hw);
bool (*check_buddy_priv)(struct ieee80211_hw *hw,
struct rtl_priv **buddy_priv);
int (*adapter_tx)(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
@ -2514,14 +2512,6 @@ struct dig_t {
u32 rssi_max;
};
struct rtl_global_var {
/* from this list we can get
* other adapter's rtl_priv
*/
struct list_head glb_priv_list;
spinlock_t glb_list_lock;
};
#define IN_4WAY_TIMEOUT_TIME (30 * MSEC_PER_SEC) /* 30 seconds */
struct rtl_btc_info {
@ -2667,9 +2657,7 @@ struct rtl_scan_list {
struct rtl_priv {
struct ieee80211_hw *hw;
struct completion firmware_loading_complete;
struct list_head list;
struct rtl_priv *buddy_priv;
struct rtl_global_var *glb_var;
struct rtl_dmsp_ctl dmsp_ctl;
struct rtl_locks locks;
struct rtl_works works;

View file

@ -9,8 +9,74 @@
#include "usb.h"
static const struct usb_device_id rtw_8812au_id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2604, 0x0012, 0xff, 0xff, 0xff),
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8812, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x881a, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x881b, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x881c, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x0409, 0x0408, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* NEC */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0411, 0x025d, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Buffalo */
{ USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x0952, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* I-O DATA */
{ USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1106, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Belkin */
{ USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1109, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Belkin */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0586, 0x3426, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* ZyXEL */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0789, 0x016e, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Logitec */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8812, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Abocom */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9051, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Netgear */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17d2, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* ASUS */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0074, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Sitecom */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0e66, 0x0022, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Hawking */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1058, 0x0632, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* WD */
{ USB_DEVICE_AND_INTERFACE_INFO(0x13b1, 0x003f, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Linksys */
{ USB_DEVICE_AND_INTERFACE_INFO(0x148f, 0x9097, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Amped Wireless */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1740, 0x0100, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* EnGenius */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330e, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* D-Link */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3313, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* D-Link */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3315, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* D-Link */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3316, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* D-Link */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab30, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Planex */
{ USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x805b, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TRENDnet */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0101, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0103, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x010d, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x010e, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x010f, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0122, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2604, 0x0012, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Tenda */
{ USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa822, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Edimax */
{},
};
MODULE_DEVICE_TABLE(usb, rtw_8812au_id_table);

View file

@ -9,8 +9,58 @@
#include "usb.h"
static const struct usb_device_id rtw_8821au_id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x011e, 0xff, 0xff, 0xff),
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x0811, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x0820, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x0821, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8822, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x0823, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xa811, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x0411, 0x0242, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Buffalo */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0411, 0x029b, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Buffalo */
{ USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x0953, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* I-O DATA */
{ USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x4007, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* ELECOM */
{ USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x400e, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* ELECOM */
{ USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x400f, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* ELECOM */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9052, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Netgear */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0e66, 0x0023, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* HAWKING */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3314, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* D-Link */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3318, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* D-Link */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab32, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Planex */
{ USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x804b, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* TRENDnet */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x011e, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* TP Link */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x011f, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* TP Link */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0120, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* TP Link */
{ USB_DEVICE_AND_INTERFACE_INFO(0x3823, 0x6249, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Obihai */
{ USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa811, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Edimax */
{ USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa812, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Edimax */
{ USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa813, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Edimax */
{ USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xb611, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Edimax */
{},
};
MODULE_DEVICE_TABLE(usb, rtw_8821au_id_table);

View file

@ -67,6 +67,12 @@ static const struct usb_device_id rtw_8822bu_id_table[] = {
.driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* LiteOn */
{ USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x808a, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* TRENDnet TEW-808UBM */
{ USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x805a, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* TRENDnet TEW-805UBH */
{ USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x4011, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* ELECOM WDB-867DU3S */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2c4e, 0x0107, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Mercusys MA30H */
{},
};
MODULE_DEVICE_TABLE(usb, rtw_8822bu_id_table);

View file

@ -789,6 +789,30 @@ static void rtw_usb_dynamic_rx_agg_v1(struct rtw_dev *rtwdev, bool enable)
rtw_write16(rtwdev, REG_RXDMA_AGG_PG_TH, val16);
}
/*
 * Program RX DMA aggregation for the v2 register layout.
 *
 * Picks an aggregation page threshold and timeout based on whether
 * aggregation is enabled and on the USB link speed (larger/longer for
 * SuperSpeed), writes them to REG_RXDMA_AGG_PG_TH, and sets the
 * aggregation enable bit in REG_TXDMA_PQ_MAP.
 */
static void rtw_usb_dynamic_rx_agg_v2(struct rtw_dev *rtwdev, bool enable)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	u8 agg_size = 0x0;
	u8 agg_timeout = 0x1;
	u16 reg_val;

	if (enable) {
		if (rtwusb->udev->speed == USB_SPEED_SUPER) {
			agg_size = 0x6;
			agg_timeout = 0x1a;
		} else {
			agg_size = 0x5;
			agg_timeout = 0x20;
		}
	}

	reg_val = u16_encode_bits(agg_size, BIT_RXDMA_AGG_PG_TH) |
		  u16_encode_bits(agg_timeout, BIT_DMA_AGG_TO_V1);
	rtw_write16(rtwdev, REG_RXDMA_AGG_PG_TH, reg_val);

	rtw_write8_set(rtwdev, REG_TXDMA_PQ_MAP, BIT_RXDMA_AGG_EN);
}
static void rtw_usb_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable)
{
switch (rtwdev->chip->id) {
@ -797,6 +821,10 @@ static void rtw_usb_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable)
case RTW_CHIP_TYPE_8821C:
rtw_usb_dynamic_rx_agg_v1(rtwdev, enable);
break;
case RTW_CHIP_TYPE_8821A:
case RTW_CHIP_TYPE_8812A:
rtw_usb_dynamic_rx_agg_v2(rtwdev, enable);
break;
case RTW_CHIP_TYPE_8723D:
/* Doesn't like aggregation. */
break;
@ -930,6 +958,32 @@ static void rtw_usb_intf_deinit(struct rtw_dev *rtwdev,
usb_set_intfdata(intf, NULL);
}
/*
 * USB 3 mode switch for the older chip generation (RTW_CHIP_TYPE_8812A,
 * see rtw_usb3_chip_old()).
 *
 * If the device enumerated at High Speed but the controller is not already
 * configured for USB 3 operation, reprogram it and request a USB disconnect
 * so the device re-enumerates. If it is already at SuperSpeed, clear two
 * control bits instead.
 *
 * Returns 1 when a disconnect/re-enumeration was triggered (caller should
 * stop the current probe), 0 when no mode switch was performed.
 *
 * NOTE(review): the register write sequence and its order come from vendor
 * init code; do not reorder.
 */
static int rtw_usb_switch_mode_old(struct rtw_dev *rtwdev)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	enum usb_device_speed cur_speed = rtwusb->udev->speed;
	u8 hci_opt;

	if (cur_speed == USB_SPEED_HIGH) {
		hci_opt = rtw_read8(rtwdev, REG_HCI_OPT_CTRL);

		/* Only act if BIT(3)-set/BIT(2)-clear (already-switched
		 * state) is not yet programmed.
		 */
		if ((hci_opt & (BIT(2) | BIT(3))) != BIT(3)) {
			rtw_write8(rtwdev, REG_HCI_OPT_CTRL, 0x8);
			rtw_write8(rtwdev, REG_SYS_SDIO_CTRL, 0x2);
			rtw_write8(rtwdev, REG_ACLK_MON, 0x1);
			/* 0x3d: undocumented register, value from vendor driver */
			rtw_write8(rtwdev, 0x3d, 0x3);

			/* usb disconnect */
			rtw_write8(rtwdev, REG_SYS_PW_CTRL + 1, 0x80);

			return 1;
		}
	} else if (cur_speed == USB_SPEED_SUPER) {
		rtw_write8_clr(rtwdev, REG_SYS_SDIO_CTRL, BIT(1));
		rtw_write8_clr(rtwdev, REG_ACLK_MON, BIT(0));
	}

	return 0;
}
static int rtw_usb_switch_mode_new(struct rtw_dev *rtwdev)
{
enum usb_device_speed cur_speed;
@ -979,11 +1033,22 @@ static int rtw_usb_switch_mode_new(struct rtw_dev *rtwdev)
return 1;
}
/* Older-generation chip that is USB 3 capable and uses the legacy
 * USB mode-switch sequence.
 */
static bool rtw_usb3_chip_old(u8 chip_id)
{
	switch (chip_id) {
	case RTW_CHIP_TYPE_8812A:
		return true;
	default:
		return false;
	}
}
/* Newer-generation chip that is USB 3 capable and uses the new
 * USB mode-switch sequence.
 */
static bool rtw_usb3_chip_new(u8 chip_id)
{
	switch (chip_id) {
	case RTW_CHIP_TYPE_8822C:
	case RTW_CHIP_TYPE_8822B:
		return true;
	default:
		return false;
	}
}
static int rtw_usb_switch_mode(struct rtw_dev *rtwdev)
{
u8 id = rtwdev->chip->id;
if (id != RTW_CHIP_TYPE_8822C && id != RTW_CHIP_TYPE_8822B)
if (!rtw_usb3_chip_new(id) && !rtw_usb3_chip_old(id))
return 0;
if (!rtwdev->efuse.usb_mode_switch) {
@ -998,6 +1063,9 @@ static int rtw_usb_switch_mode(struct rtw_dev *rtwdev)
return 0;
}
if (rtw_usb3_chip_old(id))
return rtw_usb_switch_mode_old(rtwdev);
else
return rtw_usb_switch_mode_new(rtwdev);
}

View file

@ -148,3 +148,50 @@ int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
ACPI_FREE(obj);
return ret;
}
/* Evaluate the platform ACPI "RTAG" object (antenna-gain table).
 *
 * Looks up the RTAG method under the device's ACPI companion node,
 * evaluates it, and copies the returned buffer into @res after
 * validating its type and exact size.
 *
 * Return: 0 on success; -EOPNOTSUPP if the device has no ACPI
 * companion, -EIO if ACPI lookup/evaluation fails, -EINVAL if the
 * result is not a buffer of the expected length.
 */
int rtw89_acpi_evaluate_rtag(struct rtw89_dev *rtwdev,
			     struct rtw89_acpi_rtag_result *res)
{
	struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
	acpi_handle root, handle;
	union acpi_object *obj;
	acpi_status status;
	u32 buf_len;
	int ret = 0;

	root = ACPI_HANDLE(rtwdev->dev);
	if (!root)
		return -EOPNOTSUPP;

	status = acpi_get_handle(root, (acpi_string)"RTAG", &handle);
	if (ACPI_FAILURE(status))
		return -EIO;

	status = acpi_evaluate_object(handle, NULL, NULL, &buf);
	if (ACPI_FAILURE(status))
		return -EIO;

	obj = buf.pointer;
	if (obj->type != ACPI_TYPE_BUFFER) {
		rtw89_debug(rtwdev, RTW89_DBG_ACPI,
			    "acpi: expect buffer but type: %d\n", obj->type);
		ret = -EINVAL;
		goto out;
	}

	/* The buffer must match struct rtw89_acpi_rtag_result exactly. */
	buf_len = obj->buffer.length;
	if (buf_len != sizeof(*res)) {
		rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: invalid buffer length: %u\n",
			    __func__, buf_len);
		ret = -EINVAL;
		goto out;
	}

	*res = *(struct rtw89_acpi_rtag_result *)obj->buffer.pointer;

	rtw89_hex_dump(rtwdev, RTW89_DBG_ACPI, "antenna_gain: ", res, sizeof(*res));

out:
	/* ACPI_ALLOCATE_BUFFER: callee-allocated, we must free it. */
	ACPI_FREE(obj);
	return ret;
}

View file

@ -63,8 +63,17 @@ struct rtw89_acpi_dsm_result {
} u;
};
/* Layout of the ACPI "RTAG" buffer (see rtw89_acpi_evaluate_rtag()).
 * Must match the platform firmware byte-for-byte, hence __packed.
 */
struct rtw89_acpi_rtag_result {
	u8 tag[4];	/* identifier bytes of the table */
	u8 revision;	/* table revision */
	__le32 domain;	/* presumably a regulatory-domain selector — TODO confirm encoding */
	u8 ant_gain_table[RTW89_ANT_GAIN_CHAIN_NUM][RTW89_ANT_GAIN_SUBBAND_NR];
} __packed;
int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
enum rtw89_acpi_dsm_func func,
struct rtw89_acpi_dsm_result *res);
int rtw89_acpi_evaluate_rtag(struct rtw89_dev *rtwdev,
struct rtw89_acpi_rtag_result *res);
#endif

View file

@ -135,8 +135,8 @@ again:
}
static int rtw89_cam_get_addr_cam_key_idx(struct rtw89_addr_cam_entry *addr_cam,
struct rtw89_sec_cam_entry *sec_cam,
struct ieee80211_key_conf *key,
const struct rtw89_sec_cam_entry *sec_cam,
const struct ieee80211_key_conf *key,
u8 *key_idx)
{
u8 idx;
@ -246,8 +246,8 @@ static int __rtw89_cam_detach_sec_cam(struct rtw89_dev *rtwdev,
static int __rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
struct ieee80211_key_conf *key,
struct rtw89_sec_cam_entry *sec_cam)
const struct ieee80211_key_conf *key,
const struct rtw89_sec_cam_entry *sec_cam)
{
struct rtw89_addr_cam_entry *addr_cam;
u8 key_idx = 0;
@ -286,6 +286,22 @@ static int __rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
return 0;
}
/* Attach an already-installed security CAM entry to a specific link's
 * address CAM.  Presumably used when a link is set up after the key was
 * installed — verify against callers.
 *
 * Return: 0 on success, -ENOENT if @sec_cam_idx has no installed entry,
 * or the error from __rtw89_cam_attach_sec_cam().
 */
int rtw89_cam_attach_link_sec_cam(struct rtw89_dev *rtwdev,
				  struct rtw89_vif_link *rtwvif_link,
				  struct rtw89_sta_link *rtwsta_link,
				  u8 sec_cam_idx)
{
	struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
	const struct rtw89_sec_cam_entry *sec_cam;

	sec_cam = cam_info->sec_entries[sec_cam_idx];
	if (!sec_cam)
		return -ENOENT;

	return __rtw89_cam_attach_sec_cam(rtwdev, rtwvif_link, rtwsta_link,
					  sec_cam->key_conf, sec_cam);
}
static int rtw89_cam_detach_sec_cam(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@ -306,6 +322,9 @@ static int rtw89_cam_detach_sec_cam(struct rtw89_dev *rtwdev,
rtwvif = vif_to_rtwvif(vif);
if (rtwsta)
clear_bit(sec_cam->sec_cam_idx, rtwsta->pairwise_sec_cam_map);
rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
rtwsta_link = rtwsta ? rtwsta->links[link_id] : NULL;
if (rtwsta && !rtwsta_link)
@ -369,6 +388,8 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
return ret;
}
set_bit(sec_cam->sec_cam_idx, rtwsta->pairwise_sec_cam_map);
return 0;
}
@ -410,6 +431,9 @@ static int rtw89_cam_sec_key_install(struct rtw89_dev *rtwdev,
sec_cam->len = RTW89_SEC_CAM_LEN;
sec_cam->ext_key = ext_key;
memcpy(sec_cam->key, key->key, key->keylen);
sec_cam->key_conf = key;
ret = rtw89_cam_send_sec_key_cmd(rtwdev, sec_cam);
if (ret) {
rtw89_err(rtwdev, "failed to send sec key cmd: %d\n", ret);

View file

@ -578,4 +578,9 @@ int rtw89_cam_sec_key_del(struct rtw89_dev *rtwdev,
void rtw89_cam_bssid_changed(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
void rtw89_cam_reset_keys(struct rtw89_dev *rtwdev);
int rtw89_cam_attach_link_sec_cam(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
u8 sec_cam_idx);
#endif

View file

@ -203,6 +203,55 @@ static const struct ieee80211_iface_combination rtw89_iface_combs[] = {
},
};
/* First overlapping 6 GHz center frequency (MHz); table indices below
 * are computed relative to it in 10 MHz steps.
 */
#define RTW89_6GHZ_SPAN_HEAD 6145
#define RTW89_6GHZ_SPAN_IDX(center_freq) \
	((((int)(center_freq) - RTW89_6GHZ_SPAN_HEAD) / 5) / 2)

/* Declare one overlap entry, filling both the SAR and antenna-gain
 * subband pairs from the same low/high suffixes.
 */
#define RTW89_DECL_6GHZ_SPAN(center_freq, subband_l, subband_h) \
	[RTW89_6GHZ_SPAN_IDX(center_freq)] = { \
		.sar_subband_low = RTW89_SAR_6GHZ_ ## subband_l, \
		.sar_subband_high = RTW89_SAR_6GHZ_ ## subband_h, \
		.ant_gain_subband_low = RTW89_ANT_GAIN_6GHZ_ ## subband_l, \
		.ant_gain_subband_high = RTW89_ANT_GAIN_6GHZ_ ## subband_h, \
	}

/* Since 6GHz subbands are not edge aligned, some cases span two subbands.
 * In the following, we describe each of them with rtw89_6ghz_span.
 */
static const struct rtw89_6ghz_span rtw89_overlapping_6ghz[] = {
	RTW89_DECL_6GHZ_SPAN(6145, SUBBAND_5_L, SUBBAND_5_H),
	RTW89_DECL_6GHZ_SPAN(6165, SUBBAND_5_L, SUBBAND_5_H),
	RTW89_DECL_6GHZ_SPAN(6185, SUBBAND_5_L, SUBBAND_5_H),
	RTW89_DECL_6GHZ_SPAN(6505, SUBBAND_6, SUBBAND_7_L),
	RTW89_DECL_6GHZ_SPAN(6525, SUBBAND_6, SUBBAND_7_L),
	RTW89_DECL_6GHZ_SPAN(6545, SUBBAND_6, SUBBAND_7_L),
	RTW89_DECL_6GHZ_SPAN(6665, SUBBAND_7_L, SUBBAND_7_H),
	RTW89_DECL_6GHZ_SPAN(6705, SUBBAND_7_L, SUBBAND_7_H),
	RTW89_DECL_6GHZ_SPAN(6825, SUBBAND_7_H, SUBBAND_8),
	RTW89_DECL_6GHZ_SPAN(6865, SUBBAND_7_H, SUBBAND_8),
	RTW89_DECL_6GHZ_SPAN(6875, SUBBAND_7_H, SUBBAND_8),
	RTW89_DECL_6GHZ_SPAN(6885, SUBBAND_7_H, SUBBAND_8),
};
/* Look up the rtw89_6ghz_span entry for a 6 GHz center frequency that
 * may straddle two regulatory subbands.
 *
 * Return: the matching span entry, or NULL when @center_freq falls
 * outside the table (no overlap handling needed).
 */
const struct rtw89_6ghz_span *
rtw89_get_6ghz_span(struct rtw89_dev *rtwdev, u32 center_freq)
{
	int idx;

	if (center_freq >= RTW89_6GHZ_SPAN_HEAD) {
		idx = RTW89_6GHZ_SPAN_IDX(center_freq);
		/* To decrease size of rtw89_overlapping_6ghz[],
		 * RTW89_6GHZ_SPAN_IDX() truncates the leading NULLs
		 * to make first span as index 0 of the table. So, if center
		 * frequency is less than the first one, it will get negative.
		 */
		if (idx >= 0 && idx < ARRAY_SIZE(rtw89_overlapping_6ghz))
			return &rtw89_overlapping_6ghz[idx];
	}

	return NULL;
}
bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate)
{
struct ieee80211_rate rate;
@ -2140,6 +2189,8 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
if (phy_ppdu)
ewma_rssi_add(&rtwdev->phystat.bcn_rssi, phy_ppdu->rssi_avg);
pkt_stat->beacon_rate = desc_info->data_rate;
}
if (!ether_addr_equal(bss_conf->addr, hdr->addr1))
@ -2317,6 +2368,12 @@ static void rtw89_core_update_radiotap(struct rtw89_dev *rtwdev,
}
}
/* Mark the RX status as carrying no signal-strength value when none was
 * filled in, so mac80211 does not treat the zero as a real reading.
 */
static void rtw89_core_validate_rx_signal(struct ieee80211_rx_status *rx_status)
{
	if (rx_status->signal != 0)
		return;

	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
}
static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct rtw89_rx_desc_info *desc_info,
@ -2333,6 +2390,8 @@ static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu);
rtw89_core_update_rx_status_by_ppdu(rtwdev, rx_status, phy_ppdu);
rtw89_core_update_radiotap(rtwdev, skb_ppdu, rx_status);
rtw89_core_validate_rx_signal(rx_status);
/* In low power mode, it does RX in thread context. */
local_bh_disable();
ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, napi);
@ -2468,6 +2527,7 @@ void rtw89_core_query_rxdesc_v2(struct rtw89_dev *rtwdev,
struct rtw89_rx_desc_info *desc_info,
u8 *data, u32 data_offset)
{
struct rtw89_rxdesc_phy_rpt_v2 *rxd_rpt;
struct rtw89_rxdesc_short_v2 *rxd_s;
struct rtw89_rxdesc_long_v2 *rxd_l;
u16 shift_len, drv_info_len, phy_rtp_len, hdr_cnv_len;
@ -2515,6 +2575,12 @@ void rtw89_core_query_rxdesc_v2(struct rtw89_dev *rtwdev,
desc_info->rxd_len = sizeof(struct rtw89_rxdesc_short_v2);
desc_info->ready = true;
if (phy_rtp_len == sizeof(*rxd_rpt)) {
rxd_rpt = (struct rtw89_rxdesc_phy_rpt_v2 *)(data + data_offset +
desc_info->rxd_len);
desc_info->rssi = le32_get_bits(rxd_rpt->dword0, BE_RXD_PHY_RSSI);
}
if (!desc_info->long_rxdesc)
return;
@ -2657,6 +2723,7 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
rx_status->flag |= RX_FLAG_MACTIME_START;
rx_status->mactime = desc_info->free_run_cnt;
rtw89_chip_phy_rpt_to_rssi(rtwdev, desc_info, rx_status);
rtw89_core_stats_sta_rx_status(rtwdev, desc_info, rx_status);
}
@ -2664,10 +2731,6 @@ static enum rtw89_ps_mode rtw89_update_ps_mode(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
/* FIXME: Fix __rtw89_enter_ps_mode() to consider MLO cases. */
if (rtwdev->support_mlo)
return RTW89_PS_MODE_NONE;
if (rtw89_disable_ps_mode || !chip->ps_mode_supported ||
RTW89_CHK_FW_FEATURE(NO_DEEP_PS, &rtwdev->fw))
return RTW89_PS_MODE_NONE;
@ -2700,6 +2763,41 @@ static void rtw89_core_flush_ppdu_rx_queue(struct rtw89_dev *rtwdev,
}
}
/* Inspect an RX frame for powersave signaling from a remote station.
 *
 * Only active while at least one AP-info consumer exists
 * (refcount_ap_info).  Under RCU, maps the RX descriptor's MAC ID to
 * its associated station link; if the remote station is known to be
 * dozing, forwards PS-Poll and U-APSD trigger frames to mac80211 so
 * buffered traffic can be released.
 */
static
void rtw89_core_rx_pkt_hdl(struct rtw89_dev *rtwdev, const struct sk_buff *skb,
			   const struct rtw89_rx_desc_info *desc)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rtw89_sta_link *rtwsta_link;
	struct ieee80211_sta *sta;
	struct rtw89_sta *rtwsta;
	u8 macid = desc->mac_id;

	/* Fast path: skip entirely unless AP-mode handling is active. */
	if (!refcount_read(&rtwdev->refcount_ap_info))
		return;

	rcu_read_lock();

	rtwsta_link = rtw89_assoc_link_rcu_dereference(rtwdev, macid);
	if (!rtwsta_link)
		goto out;

	rtwsta = rtwsta_link->rtwsta;
	if (!test_bit(RTW89_REMOTE_STA_IN_PS, rtwsta->flags))
		goto out;

	sta = rtwsta_to_sta(rtwsta);

	if (ieee80211_is_pspoll(hdr->frame_control))
		ieee80211_sta_pspoll(sta);
	else if (ieee80211_has_pm(hdr->frame_control) &&
		 (ieee80211_is_data_qos(hdr->frame_control) ||
		  ieee80211_is_qos_nullfunc(hdr->frame_control)))
		/* QoS data / QoS-Null with PM set acts as a U-APSD trigger. */
		ieee80211_sta_uapsd_trigger(sta, ieee80211_get_tid(hdr));

out:
	rcu_read_unlock();
}
void rtw89_core_rx(struct rtw89_dev *rtwdev,
struct rtw89_rx_desc_info *desc_info,
struct sk_buff *skb)
@ -2722,6 +2820,7 @@ void rtw89_core_rx(struct rtw89_dev *rtwdev,
rx_status = IEEE80211_SKB_RXCB(skb);
memset(rx_status, 0, sizeof(*rx_status));
rtw89_core_update_rx_status(rtwdev, desc_info, rx_status);
rtw89_core_rx_pkt_hdl(rtwdev, skb, desc_info);
if (desc_info->long_rxdesc &&
BIT(desc_info->frame_type) & PPDU_FILTER_BITMAP)
skb_queue_tail(&ppdu_sts->rx_queue[band], skb);
@ -3131,6 +3230,7 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link, bool qos, bool ps)
{
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
int link_id = ieee80211_vif_is_mld(vif) ? rtwvif_link->link_id : -1;
struct ieee80211_sta *sta;
struct ieee80211_hdr *hdr;
struct sk_buff *skb;
@ -3146,7 +3246,7 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
goto out;
}
skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, qos);
skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, qos);
if (!skb) {
ret = -ENOMEM;
goto out;
@ -3367,21 +3467,10 @@ static bool rtw89_traffic_stats_track(struct rtw89_dev *rtwdev)
return tfc_changed;
}
static void rtw89_vif_enter_lps(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link)
{
if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION &&
rtwvif_link->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT)
return;
rtw89_enter_lps(rtwdev, rtwvif_link, true);
}
static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev)
{
struct rtw89_vif_link *rtwvif_link;
struct ieee80211_vif *vif;
struct rtw89_vif *rtwvif;
unsigned int link_id;
rtw89_for_each_rtwvif(rtwdev, rtwvif) {
if (rtwvif->tdls_peer)
@ -3393,8 +3482,13 @@ static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev)
rtwvif->stats.rx_tfc_lv != RTW89_TFC_IDLE)
continue;
rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
rtw89_vif_enter_lps(rtwdev, rtwvif_link);
vif = rtwvif_to_vif(rtwvif);
if (!(vif->type == NL80211_IFTYPE_STATION ||
vif->type == NL80211_IFTYPE_P2P_CLIENT))
continue;
rtw89_enter_lps(rtwdev, rtwvif, true);
}
}
@ -3699,6 +3793,8 @@ int rtw89_core_sta_link_disassoc(struct rtw89_dev *rtwdev,
{
const struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
rtw89_assoc_link_clr(rtwsta_link);
if (vif->type == NL80211_IFTYPE_STATION)
rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, false);
@ -3748,6 +3844,22 @@ int rtw89_core_sta_link_disconnect(struct rtw89_dev *rtwdev,
return ret;
}
/* Decide whether HE extended-range (ER SU) may be enabled for this
 * link: the BSS must support HE and must not disable ER SU in the HE
 * operation element; RTL8852C additionally requires matching 4x LTF +
 * 0.8us GI ER capability when SU/MU 4xLTF+0.8us is advertised.
 */
static bool rtw89_sta_link_can_er(struct rtw89_dev *rtwdev,
				  struct ieee80211_bss_conf *bss_conf,
				  struct ieee80211_link_sta *link_sta)
{
	bool er_su_disabled;

	if (!bss_conf->he_support)
		return false;

	er_su_disabled = bss_conf->he_oper.params &
			 IEEE80211_HE_OPERATION_ER_SU_DISABLE;
	if (er_su_disabled)
		return false;

	if (rtwdev->chip->chip_id == RTL8852C &&
	    rtw89_sta_link_has_su_mu_4xhe08(link_sta) &&
	    !rtw89_sta_link_has_er_su_4xhe08(link_sta))
		return false;

	return true;
}
int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link)
@ -3758,12 +3870,11 @@ int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
rtwsta_link);
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
rtwvif_link->chanctx_idx);
struct ieee80211_link_sta *link_sta;
int ret;
if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
if (sta->tdls) {
struct ieee80211_link_sta *link_sta;
rcu_read_lock();
link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
@ -3814,9 +3925,8 @@ int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
rcu_read_lock();
bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
if (bss_conf->he_support &&
!(bss_conf->he_oper.params & IEEE80211_HE_OPERATION_ER_SU_DISABLE))
rtwsta_link->er_cap = true;
link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
rtwsta_link->er_cap = rtw89_sta_link_can_er(rtwdev, bss_conf, link_sta);
rcu_read_unlock();
@ -3834,6 +3944,7 @@ int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true);
}
rtw89_assoc_link_set(rtwsta_link);
return ret;
}
@ -4433,6 +4544,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
rtw89_phy_dm_init(rtwdev);
rtw89_mac_cfg_ppdu_status_bands(rtwdev, true);
rtw89_mac_cfg_phy_rpt_bands(rtwdev, true);
rtw89_mac_update_rts_threshold(rtwdev);
rtw89_tas_reset(rtwdev);
@ -4755,6 +4867,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtw89_ser_init(rtwdev);
rtw89_entity_init(rtwdev);
rtw89_tas_init(rtwdev);
rtw89_phy_ant_gain_init(rtwdev);
return 0;
}
@ -5100,6 +5213,9 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
if (RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
ieee80211_hw_set(hw, CONNECTION_MONITOR);
if (RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &rtwdev->fw))
ieee80211_hw_set(hw, AP_LINK_PS);
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |

View file

@ -830,6 +830,7 @@ enum rtw89_phy_idx {
};
#define __RTW89_MLD_MAX_LINK_NUM 2
#define RTW89_MLD_NON_STA_LINK_NUM 1
enum rtw89_chanctx_idx {
RTW89_CHANCTX_0 = 0,
@ -1083,6 +1084,7 @@ struct rtw89_rx_desc_info {
u16 offset;
u16 rxd_len;
bool ready;
u16 rssi;
};
struct rtw89_rxdesc_short {
@ -1125,6 +1127,11 @@ struct rtw89_rxdesc_long_v2 {
__le32 dword9;
} __packed;
/* Optional PHY report appended after the v2 RX descriptor; dword0
 * carries the RSSI field (see rtw89_core_query_rxdesc_v2()).
 */
struct rtw89_rxdesc_phy_rpt_v2 {
	__le32 dword0;
	__le32 dword1;
} __packed;
struct rtw89_tx_desc_info {
u16 pkt_size;
u8 wp_offset;
@ -3358,6 +3365,8 @@ struct rtw89_sec_cam_entry {
u8 spp_mode : 1;
/* 256 bits */
u8 key[32];
struct ieee80211_key_conf *key_conf;
};
struct rtw89_sta_link {
@ -3621,6 +3630,9 @@ struct rtw89_chip_ops {
struct ieee80211_rx_status *status);
void (*convert_rpl_to_rssi)(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu);
void (*phy_rpt_to_rssi)(struct rtw89_dev *rtwdev,
struct rtw89_rx_desc_info *desc_info,
struct ieee80211_rx_status *rx_status);
void (*ctrl_nbtg_bt_tx)(struct rtw89_dev *rtwdev, bool en,
enum rtw89_phy_idx phy_idx);
void (*cfg_txrx_path)(struct rtw89_dev *rtwdev);
@ -4255,6 +4267,7 @@ struct rtw89_chip_info {
u16 support_bandwidths;
bool support_unii4;
bool support_rnr;
bool support_ant_gain;
bool ul_tb_waveform_ctrl;
bool ul_tb_pwr_diff;
bool hw_sec_hdr;
@ -4296,6 +4309,7 @@ struct rtw89_chip_info {
const struct rtw89_rfe_parms *dflt_parms;
const struct rtw89_chanctx_listener *chanctx_listener;
u8 txpwr_factor_bb;
u8 txpwr_factor_rf;
u8 txpwr_factor_mac;
@ -4448,8 +4462,12 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_SCAN_OFFLOAD_BE_V0,
RTW89_FW_FEATURE_WOW_REASON_V1,
RTW89_FW_FEATURE_RFK_PRE_NOTIFY_V0,
RTW89_FW_FEATURE_RFK_PRE_NOTIFY_V1,
RTW89_FW_FEATURE_RFK_RXDCK_V0,
RTW89_FW_FEATURE_NO_WOW_CPU_IO_RX,
RTW89_FW_FEATURE_NOTIFY_AP_INFO,
RTW89_FW_FEATURE_CH_INFO_BE_V0,
RTW89_FW_FEATURE_LPS_CH_INFO,
};
struct rtw89_fw_suit {
@ -4597,6 +4615,44 @@ struct rtw89_sar_info {
};
};
/* Antenna-gain subband indices (per U-NII band plan for 5/6 GHz);
 * used to index rtw89_ant_gain_info::offset.
 */
enum rtw89_ant_gain_subband {
	RTW89_ANT_GAIN_2GHZ_SUBBAND,
	RTW89_ANT_GAIN_5GHZ_SUBBAND_1, /* U-NII-1 */
	RTW89_ANT_GAIN_5GHZ_SUBBAND_2, /* U-NII-2 */
	RTW89_ANT_GAIN_5GHZ_SUBBAND_2E, /* U-NII-2-Extended */
	RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4, /* U-NII-3 and U-NII-4 */
	RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L, /* U-NII-5 lower part */
	RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H, /* U-NII-5 higher part */
	RTW89_ANT_GAIN_6GHZ_SUBBAND_6, /* U-NII-6 */
	RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L, /* U-NII-7 lower part */
	RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H, /* U-NII-7 higher part */
	RTW89_ANT_GAIN_6GHZ_SUBBAND_8, /* U-NII-8 */

	RTW89_ANT_GAIN_SUBBAND_NR,
};
/* Regulatory domains for which antenna-gain offsets apply;
 * currently only ETSI is defined.
 */
enum rtw89_ant_gain_domain_type {
	RTW89_ANT_GAIN_ETSI = 0,

	RTW89_ANT_GAIN_DOMAIN_NUM,
};
#define RTW89_ANT_GAIN_CHAIN_NUM 2
/* Runtime antenna-gain state.
 * @offset: per-chain, per-subband gain offsets (from the ACPI RTAG table).
 * @regd_enabled: presumably a bitmap of enabled regulatory domains —
 *	TODO confirm against rtw89_phy_ant_gain_init().
 */
struct rtw89_ant_gain_info {
	s8 offset[RTW89_ANT_GAIN_CHAIN_NUM][RTW89_ANT_GAIN_SUBBAND_NR];
	u32 regd_enabled;
};
/* Describes a 6 GHz channel placement that straddles two subbands,
 * carrying the low/high pair for both SAR and antenna-gain lookups
 * (see rtw89_get_6ghz_span()).
 */
struct rtw89_6ghz_span {
	enum rtw89_sar_subband sar_subband_low;
	enum rtw89_sar_subband sar_subband_high;
	enum rtw89_ant_gain_subband ant_gain_subband_low;
	enum rtw89_ant_gain_subband ant_gain_subband_high;
};
#define RTW89_SAR_SPAN_VALID(span) ((span)->sar_subband_high)
#define RTW89_ANT_GAIN_SPAN_VALID(span) ((span)->ant_gain_subband_high)
enum rtw89_tas_state {
RTW89_TAS_STATE_DPR_OFF,
RTW89_TAS_STATE_DPR_ON,
@ -4781,6 +4837,7 @@ struct rtw89_pkt_drop_params {
struct rtw89_pkt_stat {
u16 beacon_nr;
u8 beacon_rate;
u32 rx_rate_cnt[RTW89_HW_RATE_NR];
};
@ -5556,6 +5613,9 @@ struct rtw89_dev {
struct rtw89_rfe_data *rfe_data;
enum rtw89_custid custid;
struct rtw89_sta_link __rcu *assoc_link_on_macid[RTW89_MAX_MAC_ID_NUM];
refcount_t refcount_ap_info;
/* ensures exclusive access from mac80211 callbacks */
struct mutex mutex;
struct list_head rtwvifs_list;
@ -5636,6 +5696,7 @@ struct rtw89_dev {
struct rtw89_regulatory_info regulatory;
struct rtw89_sar_info sar;
struct rtw89_tas_info tas;
struct rtw89_ant_gain_info ant_gain;
struct rtw89_btc btc;
enum rtw89_ps_mode ps_mode;
@ -5654,10 +5715,17 @@ struct rtw89_dev {
u8 priv[] __aligned(sizeof(void *));
};
struct rtw89_link_conf_container {
struct ieee80211_bss_conf *link_conf[IEEE80211_MLD_MAX_NUM_LINKS];
};
#define RTW89_VIF_IDLE_LINK_ID 0
struct rtw89_vif {
struct rtw89_dev *rtwdev;
struct list_head list;
struct list_head mgnt_entry;
struct rtw89_link_conf_container __rcu *snap_link_confs;
u8 mac_addr[ETH_ALEN];
__be32 ip_addr;
@ -5689,10 +5757,18 @@ static inline bool rtw89_vif_assign_link_is_valid(struct rtw89_vif_link **rtwvif
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) \
if (rtw89_vif_assign_link_is_valid(&(rtwvif_link), rtwvif, link_id))
enum rtw89_sta_flags {
RTW89_REMOTE_STA_IN_PS,
NUM_OF_RTW89_STA_FLAGS,
};
struct rtw89_sta {
struct rtw89_dev *rtwdev;
struct rtw89_vif *rtwvif;
DECLARE_BITMAP(flags, NUM_OF_RTW89_STA_FLAGS);
bool disassoc;
struct sk_buff_head roc_queue;
@ -5700,6 +5776,8 @@ struct rtw89_sta {
struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS];
DECLARE_BITMAP(ampdu_map, IEEE80211_NUM_TIDS);
DECLARE_BITMAP(pairwise_sec_cam_map, RTW89_MAX_SEC_CAM_NUM);
u8 links_inst_valid_num;
DECLARE_BITMAP(links_inst_map, __RTW89_MLD_MAX_LINK_NUM);
struct rtw89_sta_link *links[IEEE80211_MLD_MAX_NUM_LINKS];
@ -5770,6 +5848,31 @@ u8 rtw89_sta_link_inst_get_index(struct rtw89_sta_link *rtwsta_link)
return rtwsta_link - rtwsta->links_inst;
}
/* Publish @rtwsta_link as the association-lookup target for its MAC ID,
 * making it visible to RCU readers via rtw89_assoc_link_rcu_dereference().
 */
static inline void rtw89_assoc_link_set(struct rtw89_sta_link *rtwsta_link)
{
	struct rtw89_sta *rtwsta = rtwsta_link->rtwsta;
	struct rtw89_dev *rtwdev = rtwsta->rtwdev;

	rcu_assign_pointer(rtwdev->assoc_link_on_macid[rtwsta_link->mac_id],
			   rtwsta_link);
}
/* Clear the MAC ID -> station-link mapping and wait for in-flight RCU
 * readers to drain, so the link may be torn down safely afterwards.
 */
static inline void rtw89_assoc_link_clr(struct rtw89_sta_link *rtwsta_link)
{
	struct rtw89_sta *rtwsta = rtwsta_link->rtwsta;
	struct rtw89_dev *rtwdev = rtwsta->rtwdev;

	rcu_assign_pointer(rtwdev->assoc_link_on_macid[rtwsta_link->mac_id],
			   NULL);
	synchronize_rcu();
}
/* RCU-protected lookup of the associated station link for @macid;
 * caller must hold rcu_read_lock().
 */
static inline struct rtw89_sta_link *
rtw89_assoc_link_rcu_dereference(struct rtw89_dev *rtwdev, u8 macid)
{
	return rcu_dereference(rtwdev->assoc_link_on_macid[macid]);
}
static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
@ -6194,9 +6297,19 @@ static inline struct ieee80211_bss_conf *
__rtw89_vif_rcu_dereference_link(struct rtw89_vif_link *rtwvif_link, bool *nolink)
{
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
struct rtw89_link_conf_container *snap;
struct ieee80211_bss_conf *bss_conf;
snap = rcu_dereference(rtwvif->snap_link_confs);
if (snap) {
bss_conf = snap->link_conf[rtwvif_link->link_id];
goto out;
}
bss_conf = rcu_dereference(vif->link_conf[rtwvif_link->link_id]);
out:
if (unlikely(!bss_conf)) {
*nolink = true;
return &vif->bss_conf;
@ -6605,6 +6718,16 @@ static inline void rtw89_chip_convert_rpl_to_rssi(struct rtw89_dev *rtwdev,
chip->ops->convert_rpl_to_rssi(rtwdev, phy_ppdu);
}
/* Translate the RX descriptor's PHY report into RSSI on @rx_status,
 * for chips that implement the optional phy_rpt_to_rssi op; a no-op
 * otherwise.
 */
static inline void rtw89_chip_phy_rpt_to_rssi(struct rtw89_dev *rtwdev,
					      struct rtw89_rx_desc_info *desc_info,
					      struct ieee80211_rx_status *rx_status)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;

	if (chip->ops->phy_rpt_to_rssi)
		chip->ops->phy_rpt_to_rssi(rtwdev, desc_info, rx_status);
}
static inline void rtw89_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en,
enum rtw89_phy_idx phy_idx)
{
@ -6753,6 +6876,26 @@ bool rtw89_sta_has_beamformer_cap(struct ieee80211_link_sta *link_sta)
return false;
}
static inline
bool rtw89_sta_link_has_su_mu_4xhe08(struct ieee80211_link_sta *link_sta)
{
if (link_sta->he_cap.he_cap_elem.phy_cap_info[7] &
IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI)
return true;
return false;
}
static inline
bool rtw89_sta_link_has_er_su_4xhe08(struct ieee80211_link_sta *link_sta)
{
if (link_sta->he_cap.he_cap_elem.phy_cap_info[8] &
IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI)
return true;
return false;
}
static inline struct rtw89_fw_suit *rtw89_fw_suit_get(struct rtw89_dev *rtwdev,
enum rtw89_fw_type type)
{
@ -6908,6 +7051,8 @@ struct rtw89_sta_link *rtw89_sta_set_link(struct rtw89_sta *rtwsta,
unsigned int link_id);
void rtw89_sta_unset_link(struct rtw89_sta *rtwsta, unsigned int link_id);
void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev);
const struct rtw89_6ghz_span *
rtw89_get_6ghz_span(struct rtw89_dev *rtwdev, u32 center_freq);
void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef);
void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef,
struct rtw89_chan *chan);

View file

@ -9,6 +9,7 @@
#include "fw.h"
#include "mac.h"
#include "pci.h"
#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "sar.h"
@ -882,6 +883,9 @@ static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v)
seq_puts(m, "[TAS]\n");
rtw89_print_tas(m, rtwdev);
seq_puts(m, "[DAG]\n");
rtw89_print_ant_gain(m, rtwdev, chan);
tbl = dbgfs_txpwr_tables[chip_gen];
if (!tbl) {
ret = -EOPNOTSUPP;

View file

@ -709,11 +709,13 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 7, BEACON_FILTER),
__CFG_FW_FEAT(RTL8852B, lt, 0, 29, 30, 0, NO_WOW_CPU_IO_RX),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, BEACON_FILTER),
__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
@ -727,7 +729,11 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, LPS_CH_INFO),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 42, 0, RFK_RXDCK_V0),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 46, 0, NOTIFY_AP_INFO),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 47, 0, CH_INFO_BE_V0),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 49, 0, RFK_PRE_NOTIFY_V1),
};
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
@ -2414,6 +2420,7 @@ static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
u8 *id)
{
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
int link_id = ieee80211_vif_is_mld(vif) ? rtwvif_link->link_id : -1;
struct rtw89_pktofld_info *info;
struct sk_buff *skb;
int ret;
@ -2430,10 +2437,10 @@ static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
skb = ieee80211_proberesp_get(rtwdev->hw, vif);
break;
case RTW89_PKT_OFLD_TYPE_NULL_DATA:
skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false);
skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, false);
break;
case RTW89_PKT_OFLD_TYPE_QOS_NULL:
skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true);
skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, true);
break;
case RTW89_PKT_OFLD_TYPE_EAPOL_KEY:
skb = rtw89_eapol_get(rtwdev, rtwvif_link);
@ -2589,14 +2596,17 @@ fail:
return ret;
}
int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
rtwvif_link->chanctx_idx);
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_chan *chan;
struct rtw89_vif_link *rtwvif_link;
struct rtw89_h2c_lps_ch_info *h2c;
u32 len = sizeof(*h2c);
unsigned int link_id;
struct sk_buff *skb;
bool no_chan = true;
u8 phy_idx;
u32 done;
int ret;
@ -2611,11 +2621,27 @@ int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rt
skb_put(skb, len);
h2c = (struct rtw89_h2c_lps_ch_info *)skb->data;
h2c->info[0].central_ch = chan->channel;
h2c->info[0].pri_ch = chan->primary_channel;
h2c->info[0].band = chan->band_type;
h2c->info[0].bw = chan->band_width;
h2c->mlo_dbcc_mode_lps = cpu_to_le32(MLO_2_PLUS_0_1RF);
rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
phy_idx = rtwvif_link->phy_idx;
if (phy_idx >= ARRAY_SIZE(h2c->info))
continue;
chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
no_chan = false;
h2c->info[phy_idx].central_ch = chan->channel;
h2c->info[phy_idx].pri_ch = chan->primary_channel;
h2c->info[phy_idx].band = chan->band_type;
h2c->info[phy_idx].bw = chan->band_width;
}
if (no_chan) {
rtw89_err(rtwdev, "no chan for h2c lps_ch_info\n");
ret = -ENOENT;
goto fail;
}
h2c->mlo_dbcc_mode_lps = cpu_to_le32(rtwdev->mlo_dbcc_mode);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
@ -2640,6 +2666,87 @@ fail:
return ret;
}
/* Send the multi-link common-info H2C used by BE-generation firmware
 * for leave-power-save (LPS) operation.
 *
 * For each active link of @rtwvif, fills per-PHY channel parameters,
 * the beacon rate class, and the BW20 RX gain tables (TIA/LNA) the
 * firmware needs to receive beacons while in LPS.  After transmission,
 * polls R_CHK_LPS_STAT until firmware acknowledges the command.
 *
 * Return: 0 on success (or on non-BE chips, where this is a no-op),
 * -ENOMEM on skb allocation failure, or the H2C transmit error.
 */
int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev,
				 struct rtw89_vif *rtwvif)
{
	const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
	struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_h2c_lps_ml_cmn_info *h2c;
	struct rtw89_vif_link *rtwvif_link;
	const struct rtw89_chan *chan;
	u8 bw_idx = RTW89_BB_BW_20_40;
	u32 len = sizeof(*h2c);
	unsigned int link_id;
	struct sk_buff *skb;
	u8 gain_band;
	u32 done;
	u8 path;
	int ret;
	int i;

	/* Only BE-generation firmware understands this command. */
	if (chip->chip_gen != RTW89_CHIP_BE)
		return 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ml_cmn_info\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_lps_ml_cmn_info *)skb->data;

	h2c->fmt_id = 0x1;

	h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);

	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
		/* PHY 1 maps to RF path B, PHY 0 to RF path A. */
		path = rtwvif_link->phy_idx == RTW89_PHY_1 ? RF_PATH_B : RF_PATH_A;
		chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
		gain_band = rtw89_subband_to_gain_band_be(chan->subband_type);

		h2c->central_ch[rtwvif_link->phy_idx] = chan->channel;
		h2c->pri_ch[rtwvif_link->phy_idx] = chan->primary_channel;
		h2c->band[rtwvif_link->phy_idx] = chan->band_type;
		h2c->bw[rtwvif_link->phy_idx] = chan->band_width;
		/* 0x1: CCK-class beacon rate, 0x2: OFDM or above —
		 * NOTE(review): value meanings inferred from the rate
		 * threshold; confirm against firmware docs.
		 */
		if (pkt_stat->beacon_rate < RTW89_HW_RATE_OFDM6)
			h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x1;
		else
			h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x2;

		/* Fill BW20 RX gain table for beacon mode */
		for (i = 0; i < TIA_GAIN_NUM; i++) {
			h2c->tia_gain[rtwvif_link->phy_idx][i] =
				cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]);
		}
		memcpy(h2c->lna_gain[rtwvif_link->phy_idx],
		       gain->lna_gain[gain_band][bw_idx][path],
		       LNA_GAIN_NUM);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
			      H2C_FUNC_FW_LPS_ML_CMN_INFO, 0, 0, len);

	/* Clear the ack bit before sending so the poll below observes
	 * the firmware's completion, not a stale value.
	 */
	rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0);
	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000,
				true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT);
	if (ret)
		rtw89_warn(rtwdev, "h2c_lps_ml_cmn_info done polling timeout\n");

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
#define H2C_P2P_ACT_LEN 20
int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
@ -4954,13 +5061,14 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
struct rtw89_h2c_chinfo_elem_be *elem;
struct rtw89_mac_chinfo_be *ch_info;
struct rtw89_h2c_chinfo *h2c;
struct rtw89_h2c_chinfo_be *h2c;
struct sk_buff *skb;
unsigned int cond;
u8 ver = U8_MAX;
int skb_len;
int ret;
static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE);
static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE_BE);
skb_len = struct_size(h2c, elem, ch_num);
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
@ -4969,8 +5077,11 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
return -ENOMEM;
}
if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw))
ver = 0;
skb_put(skb, sizeof(*h2c));
h2c = (struct rtw89_h2c_chinfo *)skb->data;
h2c = (struct rtw89_h2c_chinfo_be *)skb->data;
h2c->ch_num = ch_num;
h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */
@ -4980,8 +5091,7 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
list_for_each_entry(ch_info, chan_list, list) {
elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem));
elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD) |
le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) |
elem->w0 = le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) |
le32_encode_bits(ch_info->central_ch,
RTW89_H2C_CHINFO_BE_W0_CENTER_CH) |
le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH);
@ -5028,6 +5138,12 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) |
le32_encode_bits(ch_info->fw_probe0_bssids,
RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS);
if (ver == 0)
elem->w0 |=
le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD);
else
elem->w7 = le32_encode_bits(ch_info->period,
RTW89_H2C_CHINFO_BE_W7_PERIOD_V1);
}
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
@ -5171,6 +5287,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
u8 probe_id[NUM_NL80211_BANDS];
u8 cfg_len = sizeof(*h2c);
unsigned int cond;
u8 ver = U8_MAX;
void *ptr;
int ret;
u32 len;
@ -5191,6 +5308,9 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id));
if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw))
ver = 0;
if (!wowlan) {
list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) {
if (pkt_info->wildcard_6ghz) {
@ -5286,9 +5406,7 @@ flex_member:
le32_encode_bits(RTW89_OFF_CHAN_TIME / 10,
RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL);
opch->w1 = le32_encode_bits(RTW89_CHANNEL_TIME,
RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION) |
le32_encode_bits(op->band_type,
opch->w1 = le32_encode_bits(op->band_type,
RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) |
le32_encode_bits(op->band_width,
RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) |
@ -5314,6 +5432,13 @@ flex_member:
RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) |
le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3);
if (ver == 0)
opch->w1 |= le32_encode_bits(RTW89_CHANNEL_TIME,
RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION);
else
opch->w4 = le32_encode_bits(RTW89_CHANNEL_TIME,
RTW89_H2C_SCANOFLD_BE_OPCH_W4_DURATION_V1);
ptr += sizeof(*opch);
}
@ -5416,7 +5541,9 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx)
{
struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
struct rtw89_fw_h2c_rfk_pre_info_common *common;
struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0;
struct rtw89_fw_h2c_rfk_pre_info_v1 *h2c_v1;
struct rtw89_fw_h2c_rfk_pre_info *h2c;
u8 tbl_sel[NUM_OF_RTW89_FW_RFK_PATH];
u32 len = sizeof(*h2c);
@ -5426,7 +5553,10 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
u32 val32;
int ret;
if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) {
if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V1, &rtwdev->fw)) {
len = sizeof(*h2c_v1);
ver = 1;
} else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) {
len = sizeof(*h2c_v0);
ver = 0;
}
@ -5438,17 +5568,18 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
}
skb_put(skb, len);
h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data;
common = &h2c->base_v1.common;
h2c->common.mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
common->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR);
BUILD_BUG_ON(ARRAY_SIZE(rfk_mcc->data) < NUM_OF_RTW89_FW_RFK_PATH);
for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) {
for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
h2c->common.dbcc.ch[path][tbl] =
common->dbcc.ch[path][tbl] =
cpu_to_le32(rfk_mcc->data[path].ch[tbl]);
h2c->common.dbcc.band[path][tbl] =
common->dbcc.band[path][tbl] =
cpu_to_le32(rfk_mcc->data[path].band[tbl]);
}
}
@ -5456,13 +5587,19 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
tbl_sel[path] = rfk_mcc->data[path].table_idx;
h2c->common.tbl.cur_ch[path] =
common->tbl.cur_ch[path] =
cpu_to_le32(rfk_mcc->data[path].ch[tbl_sel[path]]);
h2c->common.tbl.cur_band[path] =
common->tbl.cur_band[path] =
cpu_to_le32(rfk_mcc->data[path].band[tbl_sel[path]]);
if (ver <= 1)
continue;
h2c->cur_bandwidth[path] =
cpu_to_le32(rfk_mcc->data[path].bw[tbl_sel[path]]);
}
h2c->common.phy_idx = cpu_to_le32(phy_idx);
common->phy_idx = cpu_to_le32(phy_idx);
if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */
h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data;
@ -5488,8 +5625,10 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
goto done;
}
if (rtw89_is_mlo_1_1(rtwdev))
h2c->mlo_1_1 = cpu_to_le32(1);
if (rtw89_is_mlo_1_1(rtwdev)) {
h2c_v1 = &h2c->base_v1;
h2c_v1->mlo_1_1 = cpu_to_le32(1);
}
done:
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
@ -6496,7 +6635,7 @@ int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
INIT_LIST_HEAD(&chan_list);
for (idx = 0, list_len = 0;
idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX;
idx++, list_len++) {
channel = nd_config->channels[idx];
ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
@ -6547,7 +6686,7 @@ int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
INIT_LIST_HEAD(&chan_list);
for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX;
idx++, list_len++) {
channel = req->channels[idx];
ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
@ -6624,7 +6763,7 @@ int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
INIT_LIST_HEAD(&chan_list);
for (idx = 0, list_len = 0;
idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE;
idx++, list_len++) {
channel = nd_config->channels[idx];
ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
@ -6679,7 +6818,7 @@ int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
INIT_LIST_HEAD(&chan_list);
for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE;
idx++, list_len++) {
channel = req->channels[idx];
ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
@ -8164,6 +8303,71 @@ int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
return 0;
}
static int rtw89_fw_h2c_ap_info(struct rtw89_dev *rtwdev, bool en)
{
struct rtw89_h2c_ap_info *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for ap info\n");
return -ENOMEM;
}
skb_put(skb, len);
h2c = (struct rtw89_h2c_ap_info *)skb->data;
h2c->w0 = le32_encode_bits(en, RTW89_H2C_AP_INFO_W0_PWR_INT_EN);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC,
H2C_CL_AP,
H2C_FUNC_AP_INFO, 0, 0,
len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
dev_kfree_skb_any(skb);
return -EBUSY;
}
return 0;
}
int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en)
{
int ret;
if (en) {
if (refcount_inc_not_zero(&rtwdev->refcount_ap_info))
return 0;
} else {
if (!refcount_dec_and_test(&rtwdev->refcount_ap_info))
return 0;
}
ret = rtw89_fw_h2c_ap_info(rtwdev, en);
if (ret) {
if (!test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
return ret;
/* During recovery, neither driver nor stack has full error
* handling, so show a warning, but return 0 with refcount
* increased normally. It can avoid underflow when calling
* with @en == false later.
*/
rtw89_warn(rtwdev, "h2c ap_info failed during SER\n");
}
if (en)
refcount_set(&rtwdev->refcount_ap_info, 1);
return 0;
}
static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len)
{
static const u8 zeros[U8_MAX] = {};

View file

@ -310,9 +310,12 @@ struct rtw89_fw_macid_pause_sleep_grp {
#define RTW89_SCANOFLD_DEBUG_MASK 0x1F
#define RTW89_CHAN_INVALID 0xFF
#define RTW89_MAC_CHINFO_SIZE 28
#define RTW89_MAC_CHINFO_SIZE_BE 32
#define RTW89_SCAN_LIST_GUARD 4
#define RTW89_SCAN_LIST_LIMIT \
((RTW89_H2C_MAX_SIZE / RTW89_MAC_CHINFO_SIZE) - RTW89_SCAN_LIST_GUARD)
#define RTW89_SCAN_LIST_LIMIT(size) \
((RTW89_H2C_MAX_SIZE / (size)) - RTW89_SCAN_LIST_GUARD)
#define RTW89_SCAN_LIST_LIMIT_AX RTW89_SCAN_LIST_LIMIT(RTW89_MAC_CHINFO_SIZE)
#define RTW89_SCAN_LIST_LIMIT_BE RTW89_SCAN_LIST_LIMIT(RTW89_MAC_CHINFO_SIZE_BE)
#define RTW89_BCN_LOSS_CNT 10
@ -1780,6 +1783,21 @@ struct rtw89_h2c_lps_ch_info {
__le32 mlo_dbcc_mode_lps;
} __packed;
struct rtw89_h2c_lps_ml_cmn_info {
u8 fmt_id;
u8 rsvd0[3];
__le32 mlo_dbcc_mode;
u8 central_ch[RTW89_PHY_MAX];
u8 pri_ch[RTW89_PHY_MAX];
u8 bw[RTW89_PHY_MAX];
u8 band[RTW89_PHY_MAX];
u8 bcn_rate_type[RTW89_PHY_MAX];
u8 rsvd1[2];
__le16 tia_gain[RTW89_PHY_MAX][TIA_GAIN_NUM];
u8 lna_gain[RTW89_PHY_MAX][LNA_GAIN_NUM];
u8 rsvd2[2];
} __packed;
static inline void RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(void *cmd, u32 val)
{
le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 0));
@ -2647,6 +2665,7 @@ struct rtw89_h2c_chinfo_elem_be {
__le32 w4;
__le32 w5;
__le32 w6;
__le32 w7;
} __packed;
#define RTW89_H2C_CHINFO_BE_W0_PERIOD GENMASK(7, 0)
@ -2678,6 +2697,7 @@ struct rtw89_h2c_chinfo_elem_be {
#define RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS GENMASK(31, 16)
#define RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS GENMASK(15, 0)
#define RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS GENMASK(31, 16)
#define RTW89_H2C_CHINFO_BE_W7_PERIOD_V1 GENMASK(15, 0)
struct rtw89_h2c_chinfo {
u8 ch_num;
@ -2687,6 +2707,14 @@ struct rtw89_h2c_chinfo {
struct rtw89_h2c_chinfo_elem elem[] __counted_by(ch_num);
} __packed;
struct rtw89_h2c_chinfo_be {
u8 ch_num;
u8 elem_size;
u8 arg;
u8 rsvd0;
struct rtw89_h2c_chinfo_elem_be elem[] __counted_by(ch_num);
} __packed;
#define RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK BIT(0)
#define RTW89_H2C_CHINFO_ARG_APPEND_MASK BIT(1)
@ -2733,6 +2761,7 @@ struct rtw89_h2c_scanofld_be_opch {
__le32 w1;
__le32 w2;
__le32 w3;
__le32 w4;
} __packed;
#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID GENMASK(15, 0)
@ -2754,6 +2783,7 @@ struct rtw89_h2c_scanofld_be_opch {
#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1 GENMASK(15, 8)
#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2 GENMASK(23, 16)
#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3 GENMASK(31, 24)
#define RTW89_H2C_SCANOFLD_BE_OPCH_W4_DURATION_V1 GENMASK(15, 0)
struct rtw89_h2c_scanofld_be {
__le32 w0;
@ -3466,6 +3496,12 @@ struct rtw89_h2c_wow_aoac {
__le32 w0;
} __packed;
struct rtw89_h2c_ap_info {
__le32 w0;
} __packed;
#define RTW89_H2C_AP_INFO_W0_PWR_INT_EN BIT(0)
#define RTW89_C2H_HEADER_LEN 8
struct rtw89_c2h_hdr {
@ -3590,6 +3626,7 @@ struct rtw89_c2h_scanofld {
__le32 w5;
__le32 w6;
__le32 w7;
__le32 w8;
} __packed;
#define RTW89_C2H_SCANOFLD_W2_PRI_CH GENMASK(7, 0)
@ -3604,6 +3641,8 @@ struct rtw89_c2h_scanofld {
#define RTW89_C2H_SCANOFLD_W6_EXPECT_PERIOD GENMASK(15, 8)
#define RTW89_C2H_SCANOFLD_W6_FW_DEF GENMASK(23, 16)
#define RTW89_C2H_SCANOFLD_W7_REPORT_TSF GENMASK(31, 0)
#define RTW89_C2H_SCANOFLD_W8_PERIOD_V1 GENMASK(15, 0)
#define RTW89_C2H_SCANOFLD_W8_EXPECT_PERIOD_V1 GENMASK(31, 16)
#define RTW89_GET_MAC_C2H_MCC_RCV_ACK_GROUP(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(1, 0))
@ -3725,6 +3764,14 @@ struct rtw89_c2h_wow_aoac_report {
#define RTW89_C2H_WOW_AOAC_RPT_REKEY_IDX BIT(0)
struct rtw89_c2h_pwr_int_notify {
struct rtw89_c2h_hdr hdr;
__le32 w2;
} __packed;
#define RTW89_C2H_PWR_INT_NOTIFY_W2_MACID GENMASK(15, 0)
#define RTW89_C2H_PWR_INT_NOTIFY_W2_PWR_STATUS BIT(16)
struct rtw89_h2c_tx_duty {
__le32 w0;
__le32 w1;
@ -4168,6 +4215,10 @@ enum rtw89_mrc_h2c_func {
#define RTW89_MRC_WAIT_COND_REQ_TSF \
RTW89_MRC_WAIT_COND(0 /* don't care */, H2C_FUNC_MRC_REQ_TSF)
/* CLASS 36 - AP */
#define H2C_CL_AP 0x24
#define H2C_FUNC_AP_INFO 0x0
#define H2C_CAT_OUTSRC 0x2
#define H2C_CL_OUTSRC_RA 0x1
@ -4175,6 +4226,7 @@ enum rtw89_mrc_h2c_func {
#define H2C_CL_OUTSRC_DM 0x2
#define H2C_FUNC_FW_LPS_CH_INFO 0xb
#define H2C_FUNC_FW_LPS_ML_CMN_INFO 0xe
#define H2C_CL_OUTSRC_RF_REG_A 0x8
#define H2C_CL_OUTSRC_RF_REG_B 0x9
@ -4241,11 +4293,16 @@ struct rtw89_fw_h2c_rfk_pre_info_v0 {
} __packed mlo;
} __packed;
struct rtw89_fw_h2c_rfk_pre_info {
struct rtw89_fw_h2c_rfk_pre_info_v1 {
struct rtw89_fw_h2c_rfk_pre_info_common common;
__le32 mlo_1_1;
} __packed;
struct rtw89_fw_h2c_rfk_pre_info {
struct rtw89_fw_h2c_rfk_pre_info_v1 base_v1;
__le32 cur_bandwidth[NUM_OF_RTW89_FW_RFK_PATH];
} __packed;
struct rtw89_h2c_rf_tssi {
__le16 len;
u8 phy;
@ -4602,8 +4659,9 @@ int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users,
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
struct rtw89_lps_parm *lps_param);
int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
bool enable);
struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len);
@ -4697,6 +4755,7 @@ int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev,
const struct rtw89_fw_mrc_sync_arg *arg);
int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
const struct rtw89_fw_mrc_upd_duration_arg *arg);
int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en);
static inline void rtw89_fw_h2c_init_ba_cam(struct rtw89_dev *rtwdev)
{

View file

@ -4788,9 +4788,11 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
struct rtw89_vif *rtwvif;
struct rtw89_chan new;
u8 reason, status, tx_fail, band, actual_period, expect_period;
u32 last_chan = rtwdev->scan_info.last_chan_idx, report_tsf;
u16 actual_period, expect_period;
u8 reason, status, tx_fail, band;
u8 mac_idx, sw_def, fw_def;
u8 ver = U8_MAX;
u16 chan;
int ret;
@ -4799,6 +4801,9 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
rtwvif = rtwvif_link->rtwvif;
if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw))
ver = 0;
tx_fail = le32_get_bits(c2h->w5, RTW89_C2H_SCANOFLD_W5_TX_FAIL);
status = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_STATUS);
chan = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_PRI_CH);
@ -4811,21 +4816,28 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
if (!(rtwdev->chip->support_bands & BIT(NL80211_BAND_6GHZ)))
band = chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G;
rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
"mac_idx[%d] band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d, actual: %d\n",
mac_idx, band, chan, reason, status, tx_fail, actual_period);
if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
sw_def = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_SW_DEF);
expect_period = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_EXPECT_PERIOD);
fw_def = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_FW_DEF);
report_tsf = le32_get_bits(c2h->w7, RTW89_C2H_SCANOFLD_W7_REPORT_TSF);
if (ver == 0) {
expect_period =
le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_EXPECT_PERIOD);
} else {
actual_period = le32_get_bits(c2h->w8, RTW89_C2H_SCANOFLD_W8_PERIOD_V1);
expect_period =
le32_get_bits(c2h->w8, RTW89_C2H_SCANOFLD_W8_EXPECT_PERIOD_V1);
}
rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
"sw_def: %d, fw_def: %d, tsf: %x, expect: %d\n",
sw_def, fw_def, report_tsf, expect_period);
}
rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
"mac_idx[%d] band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d, actual: %d\n",
mac_idx, band, chan, reason, status, tx_fail, actual_period);
switch (reason) {
case RTW89_SCAN_LEAVE_OP_NOTIFY:
case RTW89_SCAN_LEAVE_CH_NOTIFY:
@ -5364,6 +5376,39 @@ rtw89_mac_c2h_mrc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32
rtw89_complete_cond(wait, cond, &data);
}
static void
rtw89_mac_c2h_pwr_int_notify(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 len)
{
const struct rtw89_c2h_pwr_int_notify *c2h;
struct rtw89_sta_link *rtwsta_link;
struct ieee80211_sta *sta;
struct rtw89_sta *rtwsta;
u16 macid;
bool ps;
c2h = (const struct rtw89_c2h_pwr_int_notify *)skb->data;
macid = le32_get_bits(c2h->w2, RTW89_C2H_PWR_INT_NOTIFY_W2_MACID);
ps = le32_get_bits(c2h->w2, RTW89_C2H_PWR_INT_NOTIFY_W2_PWR_STATUS);
rcu_read_lock();
rtwsta_link = rtw89_assoc_link_rcu_dereference(rtwdev, macid);
if (unlikely(!rtwsta_link))
goto out;
rtwsta = rtwsta_link->rtwsta;
if (ps)
set_bit(RTW89_REMOTE_STA_IN_PS, rtwsta->flags);
else
clear_bit(RTW89_REMOTE_STA_IN_PS, rtwsta->flags);
sta = rtwsta_to_sta(rtwsta);
ieee80211_sta_ps_transition(sta, ps);
out:
rcu_read_unlock();
}
static
void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
@ -5409,6 +5454,12 @@ void (* const rtw89_mac_c2h_wow_handler[])(struct rtw89_dev *rtwdev,
[RTW89_MAC_C2H_FUNC_AOAC_REPORT] = rtw89_mac_c2h_wow_aoac_rpt,
};
static
void (* const rtw89_mac_c2h_ap_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
[RTW89_MAC_C2H_FUNC_PWR_INT_NOTIFY] = rtw89_mac_c2h_pwr_int_notify,
};
static void rtw89_mac_c2h_scanofld_rsp_atomic(struct rtw89_dev *rtwdev,
struct sk_buff *skb)
{
@ -5463,6 +5514,13 @@ bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
return true;
case RTW89_MAC_C2H_CLASS_WOW:
return true;
case RTW89_MAC_C2H_CLASS_AP:
switch (func) {
default:
return false;
case RTW89_MAC_C2H_FUNC_PWR_INT_NOTIFY:
return true;
}
}
}
@ -5493,14 +5551,18 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
if (func < NUM_OF_RTW89_MAC_C2H_FUNC_WOW)
handler = rtw89_mac_c2h_wow_handler[func];
break;
case RTW89_MAC_C2H_CLASS_AP:
if (func < NUM_OF_RTW89_MAC_C2H_FUNC_AP)
handler = rtw89_mac_c2h_ap_handler[func];
break;
case RTW89_MAC_C2H_CLASS_FWDBG:
return;
default:
rtw89_info(rtwdev, "c2h class %d not support\n", class);
rtw89_info(rtwdev, "MAC c2h class %d not support\n", class);
return;
}
if (!handler) {
rtw89_info(rtwdev, "c2h class %d func %d not support\n", class,
rtw89_info(rtwdev, "MAC c2h class %d func %d not support\n", class,
func);
return;
}
@ -6674,6 +6736,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.typ_fltr_opt = rtw89_mac_typ_fltr_opt_ax,
.cfg_ppdu_status = rtw89_mac_cfg_ppdu_status_ax,
.cfg_phy_rpt = NULL,
.dle_mix_cfg = dle_mix_cfg_ax,
.chk_dle_rdy = chk_dle_rdy_ax,

View file

@ -169,6 +169,20 @@ enum rtw89_mac_ax_l0_to_l1_event {
MAC_AX_L0_TO_L1_EVENT_MAX = 15,
};
enum rtw89_mac_phy_rpt_size {
MAC_AX_PHY_RPT_SIZE_0 = 0,
MAC_AX_PHY_RPT_SIZE_8 = 1,
MAC_AX_PHY_RPT_SIZE_16 = 2,
MAC_AX_PHY_RPT_SIZE_24 = 3,
};
enum rtw89_mac_hdr_cnv_size {
MAC_AX_HDR_CNV_SIZE_0 = 0,
MAC_AX_HDR_CNV_SIZE_32 = 1,
MAC_AX_HDR_CNV_SIZE_64 = 2,
MAC_AX_HDR_CNV_SIZE_96 = 3,
};
enum rtw89_mac_wow_fw_status {
WOWLAN_NOT_READY = 0x00,
WOWLAN_SLEEP_READY = 0x01,
@ -426,6 +440,12 @@ enum rtw89_mac_c2h_wow_func {
NUM_OF_RTW89_MAC_C2H_FUNC_WOW,
};
enum rtw89_mac_c2h_ap_func {
RTW89_MAC_C2H_FUNC_PWR_INT_NOTIFY = 0,
NUM_OF_RTW89_MAC_C2H_FUNC_AP,
};
enum rtw89_mac_c2h_class {
RTW89_MAC_C2H_CLASS_INFO = 0x0,
RTW89_MAC_C2H_CLASS_OFLD = 0x1,
@ -434,6 +454,7 @@ enum rtw89_mac_c2h_class {
RTW89_MAC_C2H_CLASS_MCC = 0x4,
RTW89_MAC_C2H_CLASS_FWDBG = 0x5,
RTW89_MAC_C2H_CLASS_MRC = 0xe,
RTW89_MAC_C2H_CLASS_AP = 0x18,
RTW89_MAC_C2H_CLASS_MAX,
};
@ -961,6 +982,7 @@ struct rtw89_mac_gen_def {
enum rtw89_mac_fwd_target fwd_target,
u8 mac_idx);
int (*cfg_ppdu_status)(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable);
void (*cfg_phy_rpt)(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable);
int (*dle_mix_cfg)(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg);
int (*chk_dle_rdy)(struct rtw89_dev *rtwdev, bool wde_or_ple);
@ -1216,6 +1238,27 @@ int rtw89_mac_stop_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx,
int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
int rtw89_mac_resume_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
int rtw89_mac_resume_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
void rtw89_mac_cfg_phy_rpt_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable);
static inline
void rtw89_mac_cfg_phy_rpt(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
if (mac->cfg_phy_rpt)
mac->cfg_phy_rpt(rtwdev, mac_idx, enable);
}
static inline
void rtw89_mac_cfg_phy_rpt_bands(struct rtw89_dev *rtwdev, bool enable)
{
rtw89_mac_cfg_phy_rpt(rtwdev, RTW89_MAC_0, enable);
if (!rtwdev->dbcc_en)
return;
rtw89_mac_cfg_phy_rpt(rtwdev, RTW89_MAC_1, enable);
}
static inline
int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)

View file

@ -202,7 +202,7 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
rtw89_traffic_stats_init(rtwdev, &rtwvif->stats);
rtwvif_link = rtw89_vif_set_link(rtwvif, 0);
rtwvif_link = rtw89_vif_set_link(rtwvif, RTW89_VIF_IDLE_LINK_ID);
if (!rtwvif_link) {
ret = -EINVAL;
goto release_port;
@ -218,7 +218,7 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
return 0;
unset_link:
rtw89_vif_unset_link(rtwvif, 0);
rtw89_vif_unset_link(rtwvif, RTW89_VIF_IDLE_LINK_ID);
release_port:
list_del_init(&rtwvif->list);
rtw89_core_release_bit_map(rtwdev->hw_port, port);
@ -246,17 +246,17 @@ static void rtw89_ops_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&rtwdev->mutex);
rtwvif_link = rtwvif->links[0];
rtwvif_link = rtwvif->links[RTW89_VIF_IDLE_LINK_ID];
if (unlikely(!rtwvif_link)) {
rtw89_err(rtwdev,
"%s: rtwvif link (link_id %u) is not active\n",
__func__, 0);
__func__, RTW89_VIF_IDLE_LINK_ID);
goto bottom;
}
__rtw89_ops_remove_iface_link(rtwdev, rtwvif_link);
rtw89_vif_unset_link(rtwvif, 0);
rtw89_vif_unset_link(rtwvif, RTW89_VIF_IDLE_LINK_ID);
bottom:
list_del_init(&rtwvif->list);
@ -509,6 +509,7 @@ static int __rtw89_ops_sta_add(struct rtw89_dev *rtwdev,
rtw89_core_txq_init(rtwdev, sta->txq[i]);
skb_queue_head_init(&rtwsta->roc_queue);
bitmap_zero(rtwsta->pairwise_sec_cam_map, RTW89_MAX_SEC_CAM_NUM);
rtwsta_link = rtw89_sta_set_link(rtwsta, sta->deflink.link_id);
if (!rtwsta_link) {
@ -775,6 +776,7 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_vif_link *rtwvif_link;
const struct rtw89_chan *chan;
int ret = 0;
mutex_lock(&rtwdev->mutex);
@ -783,6 +785,7 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
rtw89_err(rtwdev,
"%s: rtwvif link (link_id %u) is not active\n",
__func__, link_conf->link_id);
ret = -ENOLINK;
goto out;
}
@ -804,12 +807,18 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
rtw89_fw_h2c_cam(rtwdev, rtwvif_link, NULL, NULL);
rtw89_chip_rfk_channel(rtwdev, rtwvif_link);
if (RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &rtwdev->fw)) {
ret = rtw89_fw_h2c_ap_info_refcount(rtwdev, true);
if (ret)
goto out;
}
rtw89_queue_chanctx_work(rtwdev);
out:
mutex_unlock(&rtwdev->mutex);
return 0;
return ret;
}
static
@ -830,6 +839,9 @@ void rtw89_ops_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
goto out;
}
if (RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &rtwdev->fw))
rtw89_fw_h2c_ap_info_refcount(rtwdev, false);
rtw89_mac_stop_ap(rtwdev, rtwvif_link);
rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, rtwvif_link, NULL);
rtw89_fw_h2c_join_info(rtwdev, rtwvif_link, NULL, true);
@ -1295,10 +1307,15 @@ static void rtw89_ops_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_link_sta *link_sta,
u32 changed)
{
struct ieee80211_sta *sta = link_sta->sta;
struct rtw89_sta *rtwsta = sta_to_rtwsta(link_sta->sta);
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_sta_link *rtwsta_link;
rtw89_phy_ra_update_sta(rtwdev, sta, changed);
rtwsta_link = rtwsta->links[link_sta->link_id];
if (unlikely(!rtwsta_link))
return;
rtw89_phy_ra_update_sta_link(rtwdev, rtwsta_link, changed);
}
static int rtw89_ops_add_chanctx(struct ieee80211_hw *hw,
@ -1473,6 +1490,259 @@ static int rtw89_ops_set_tid_config(struct ieee80211_hw *hw,
return 0;
}
static bool rtw89_can_work_on_links(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif, u16 links)
{
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
u8 w = hweight16(links);
if (vif->type != NL80211_IFTYPE_STATION &&
w > RTW89_MLD_NON_STA_LINK_NUM)
return false;
return w <= rtwvif->links_inst_valid_num;
}
static bool rtw89_ops_can_activate_links(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u16 active_links)
{
struct rtw89_dev *rtwdev = hw->priv;
guard(mutex)(&rtwdev->mutex);
return rtw89_can_work_on_links(rtwdev, vif, active_links);
}
static void __rtw89_ops_clr_vif_links(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
unsigned long clr_links)
{
struct rtw89_vif_link *rtwvif_link;
unsigned int link_id;
for_each_set_bit(link_id, &clr_links, IEEE80211_MLD_MAX_NUM_LINKS) {
rtwvif_link = rtwvif->links[link_id];
if (unlikely(!rtwvif_link))
continue;
__rtw89_ops_remove_iface_link(rtwdev, rtwvif_link);
rtw89_vif_unset_link(rtwvif, link_id);
}
}
static int __rtw89_ops_set_vif_links(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
unsigned long set_links)
{
struct rtw89_vif_link *rtwvif_link;
unsigned int link_id;
int ret;
for_each_set_bit(link_id, &set_links, IEEE80211_MLD_MAX_NUM_LINKS) {
rtwvif_link = rtw89_vif_set_link(rtwvif, link_id);
if (!rtwvif_link)
return -EINVAL;
ret = __rtw89_ops_add_iface_link(rtwdev, rtwvif_link);
if (ret) {
rtw89_err(rtwdev, "%s: failed to add iface (link id %u)\n",
__func__, link_id);
return ret;
}
}
return 0;
}
static
int rtw89_ops_change_vif_links(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u16 old_links, u16 new_links,
struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
{
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
unsigned long clr_links = old_links & ~new_links;
unsigned long set_links = new_links & ~old_links;
bool removing_links = !old_links || clr_links;
struct rtw89_link_conf_container *snap;
int ret = 0;
int i;
guard(mutex)(&rtwdev->mutex);
rtw89_debug(rtwdev, RTW89_DBG_STATE,
"%s: old_links (0x%08x) -> new_links (0x%08x)\n",
__func__, old_links, new_links);
if (!rtw89_can_work_on_links(rtwdev, vif, new_links))
return -EOPNOTSUPP;
if (removing_links) {
snap = kzalloc(sizeof(*snap), GFP_KERNEL);
if (!snap)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(snap->link_conf); i++)
snap->link_conf[i] = old[i];
rcu_assign_pointer(rtwvif->snap_link_confs, snap);
}
/* might depend on @snap; don't change order */
rtw89_leave_ips_by_hwflags(rtwdev);
if (rtwdev->scanning)
rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
if (!old_links)
__rtw89_ops_clr_vif_links(rtwdev, rtwvif,
BIT(RTW89_VIF_IDLE_LINK_ID));
else if (clr_links)
__rtw89_ops_clr_vif_links(rtwdev, rtwvif, clr_links);
if (removing_links) {
/* @snap is required if and only if during removing links.
* However, it's done here. So, cleanup @snap immediately.
*/
rcu_assign_pointer(rtwvif->snap_link_confs, NULL);
/* The pointers in @old will free after this function return,
* so synchronously wait for all readers of snap to be done.
*/
synchronize_rcu();
kfree(snap);
}
if (set_links) {
ret = __rtw89_ops_set_vif_links(rtwdev, rtwvif, set_links);
if (ret)
__rtw89_ops_clr_vif_links(rtwdev, rtwvif, set_links);
} else if (!new_links) {
ret = __rtw89_ops_set_vif_links(rtwdev, rtwvif,
BIT(RTW89_VIF_IDLE_LINK_ID));
if (ret)
__rtw89_ops_clr_vif_links(rtwdev, rtwvif,
BIT(RTW89_VIF_IDLE_LINK_ID));
}
rtw89_enter_ips_by_hwflags(rtwdev);
return ret;
}
static void __rtw89_ops_clr_sta_links(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta,
unsigned long clr_links)
{
struct rtw89_vif_link *rtwvif_link;
struct rtw89_sta_link *rtwsta_link;
unsigned int link_id;
for_each_set_bit(link_id, &clr_links, IEEE80211_MLD_MAX_NUM_LINKS) {
rtwsta_link = rtwsta->links[link_id];
if (unlikely(!rtwsta_link))
continue;
rtwvif_link = rtwsta_link->rtwvif_link;
rtw89_core_sta_link_disassoc(rtwdev, rtwvif_link, rtwsta_link);
rtw89_core_sta_link_disconnect(rtwdev, rtwvif_link, rtwsta_link);
rtw89_core_sta_link_remove(rtwdev, rtwvif_link, rtwsta_link);
rtw89_sta_unset_link(rtwsta, link_id);
}
}
static int __rtw89_ops_set_sta_links(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta,
unsigned long set_links)
{
struct rtw89_vif_link *rtwvif_link;
struct rtw89_sta_link *rtwsta_link;
unsigned int link_id;
u8 sec_cam_idx;
int ret;
for_each_set_bit(link_id, &set_links, IEEE80211_MLD_MAX_NUM_LINKS) {
rtwsta_link = rtw89_sta_set_link(rtwsta, link_id);
if (!rtwsta_link)
return -EINVAL;
rtwvif_link = rtwsta_link->rtwvif_link;
ret = rtw89_core_sta_link_add(rtwdev, rtwvif_link, rtwsta_link);
if (ret) {
rtw89_err(rtwdev, "%s: failed to add sta (link id %u)\n",
__func__, link_id);
return ret;
}
rtw89_vif_type_mapping(rtwvif_link, true);
ret = rtw89_core_sta_link_assoc(rtwdev, rtwvif_link, rtwsta_link);
if (ret) {
rtw89_err(rtwdev, "%s: failed to assoc sta (link id %u)\n",
__func__, link_id);
return ret;
}
__rtw89_ops_bss_link_assoc(rtwdev, rtwvif_link);
for_each_set_bit(sec_cam_idx, rtwsta->pairwise_sec_cam_map,
RTW89_MAX_SEC_CAM_NUM) {
ret = rtw89_cam_attach_link_sec_cam(rtwdev,
rtwvif_link,
rtwsta_link,
sec_cam_idx);
if (ret) {
rtw89_err(rtwdev,
"%s: failed to apply pairwise key (link id %u)\n",
__func__, link_id);
return ret;
}
}
}
return 0;
}
static
int rtw89_ops_change_sta_links(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
u16 old_links, u16 new_links)
{
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
unsigned long clr_links = old_links & ~new_links;
unsigned long set_links = new_links & ~old_links;
int ret = 0;
guard(mutex)(&rtwdev->mutex);
rtw89_debug(rtwdev, RTW89_DBG_STATE,
"%s: old_links (0x%08x) -> new_links (0x%08x)\n",
__func__, old_links, new_links);
if (!rtw89_can_work_on_links(rtwdev, vif, new_links))
return -EOPNOTSUPP;
rtw89_leave_ps_mode(rtwdev);
if (clr_links)
__rtw89_ops_clr_sta_links(rtwdev, rtwsta, clr_links);
if (set_links) {
ret = __rtw89_ops_set_sta_links(rtwdev, rtwsta, set_links);
if (ret)
__rtw89_ops_clr_sta_links(rtwdev, rtwsta, set_links);
}
return ret;
}
#ifdef CONFIG_PM
static int rtw89_ops_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan)
@ -1600,6 +1870,9 @@ const struct ieee80211_ops rtw89_ops = {
.set_sar_specs = rtw89_ops_set_sar_specs,
.link_sta_rc_update = rtw89_ops_sta_rc_update,
.set_tid_config = rtw89_ops_set_tid_config,
.can_activate_links = rtw89_ops_can_activate_links,
.change_vif_links = rtw89_ops_change_vif_links,
.change_sta_links = rtw89_ops_change_sta_links,
#ifdef CONFIG_PM
.suspend = rtw89_ops_suspend,
.resume = rtw89_ops_resume,

View file

@ -1988,6 +1988,20 @@ int rtw89_mac_resume_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
}
EXPORT_SYMBOL(rtw89_mac_resume_sch_tx_v2);
void rtw89_mac_cfg_phy_rpt_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
{
u32 reg, val;
reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_RCR, mac_idx);
val = enable ? MAC_AX_PHY_RPT_SIZE_8 : MAC_AX_PHY_RPT_SIZE_0;
rtw89_write32_mask(rtwdev, reg, B_BE_PHY_RPT_SZ_MASK, val);
rtw89_write32_mask(rtwdev, reg, B_BE_HDR_CNV_SZ_MASK, MAC_AX_HDR_CNV_SIZE_0);
reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_DRV_INFO_OPTION, mac_idx);
rtw89_write32_mask(rtwdev, reg, B_BE_DRV_INFO_PHYRPT_EN, enable);
}
EXPORT_SYMBOL(rtw89_mac_cfg_phy_rpt_be);
static
int rtw89_mac_cfg_ppdu_status_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
{
@ -2583,6 +2597,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.typ_fltr_opt = rtw89_mac_typ_fltr_opt_be,
.cfg_ppdu_status = rtw89_mac_cfg_ppdu_status_be,
.cfg_phy_rpt = rtw89_mac_cfg_phy_rpt_be,
.dle_mix_cfg = dle_mix_cfg_be,
.chk_dle_rdy = chk_dle_rdy_be,

View file

@ -2516,7 +2516,7 @@ static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev)
PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1);
}
static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up)
static void rtw89_pci_power_wake_ax(struct rtw89_dev *rtwdev, bool pwr_up)
{
if (pwr_up)
rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
@ -2825,6 +2825,8 @@ static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
rtw89_pci_power_wake(rtwdev, false);
if (rtwdev->chip->chip_id == RTL8852A) {
/* ltr sw trigger */
rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE);
@ -2867,7 +2869,7 @@ static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
return ret;
}
rtw89_pci_power_wake(rtwdev, true);
rtw89_pci_power_wake_ax(rtwdev, true);
rtw89_pci_autoload_hang(rtwdev);
rtw89_pci_l12_vmain(rtwdev);
rtw89_pci_gen2_force_ib(rtwdev);
@ -2912,6 +2914,13 @@ static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
return 0;
}
/* AX-generation hook run before MAC deinit: drop the PCIe wake request
 * (mirrors the power_wake(true) done in mac_pre_init). Always succeeds.
 */
static int rtw89_pci_ops_mac_pre_deinit_ax(struct rtw89_dev *rtwdev)
{
	rtw89_pci_power_wake_ax(rtwdev, false);

	return 0;
}
int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en)
{
u32 val;
@ -4325,7 +4334,7 @@ const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
B_AX_RDU_INT},
.mac_pre_init = rtw89_pci_ops_mac_pre_init_ax,
.mac_pre_deinit = NULL,
.mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_ax,
.mac_post_init = rtw89_pci_ops_mac_post_init_ax,
.clr_idx_all = rtw89_pci_clr_idx_all_ax,
@ -4343,6 +4352,7 @@ const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
.l1ss_set = rtw89_pci_l1ss_set_ax,
.disable_eq = rtw89_pci_disable_eq_ax,
.power_wake = rtw89_pci_power_wake_ax,
};
EXPORT_SYMBOL(rtw89_pci_gen_ax);

View file

@ -1290,6 +1290,7 @@ struct rtw89_pci_gen_def {
void (*l1ss_set)(struct rtw89_dev *rtwdev, bool enable);
void (*disable_eq)(struct rtw89_dev *rtwdev);
void (*power_wake)(struct rtw89_dev *rtwdev, bool pwr_up);
};
#define RTW89_PCI_SSID(v, d, ssv, ssd, cust) \
@ -1805,4 +1806,12 @@ static inline void rtw89_pci_disable_eq(struct rtw89_dev *rtwdev)
gen_def->disable_eq(rtwdev);
}
/* Generation-dispatched PCI power-wake control: forwards to the
 * chip-generation implementation selected via pci_info->gen_def
 * (e.g. the AX or BE variant). @pwr_up asserts or releases the wake.
 */
static inline void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_pci_gen_def *gen_def = info->gen_def;

	gen_def->power_wake(rtwdev, pwr_up);
}
#endif

View file

@ -691,5 +691,6 @@ const struct rtw89_pci_gen_def rtw89_pci_gen_be = {
.l1ss_set = rtw89_pci_l1ss_set_be,
.disable_eq = rtw89_pci_disable_eq_be,
.power_wake = _patch_pcie_power_wake_be,
};
EXPORT_SYMBOL(rtw89_pci_gen_be);

View file

@ -2,6 +2,7 @@
/* Copyright(c) 2019-2020 Realtek Corporation
*/
#include "acpi.h"
#include "chan.h"
#include "coex.h"
#include "debug.h"
@ -263,16 +264,26 @@ rtw89_ra_mask_eht_rates[4] = {RA_MASK_EHT_1SS_RATES, RA_MASK_EHT_2SS_RATES,
static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link,
struct ieee80211_link_sta *link_sta,
const struct rtw89_chan *chan,
bool *fix_giltf_en, u8 *fix_giltf)
{
struct cfg80211_bitrate_mask *mask = &rtwsta_link->mask;
u8 band = chan->band_type;
enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
u8 he_gi = mask->control[nl_band].he_gi;
u8 he_ltf = mask->control[nl_band].he_ltf;
u8 he_gi = mask->control[nl_band].he_gi;
if (!rtwsta_link->use_cfg_mask)
*fix_giltf_en = true;
if (rtwdev->chip->chip_id == RTL8852C &&
chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
rtw89_sta_link_has_su_mu_4xhe08(link_sta))
*fix_giltf = RTW89_GILTF_SGI_4XHE08;
else
*fix_giltf = RTW89_GILTF_2XHE08;
if (!(rtwsta_link->use_cfg_mask && link_sta->he_cap.has_he))
return;
if (he_ltf == 2 && he_gi == 2) {
@ -287,12 +298,7 @@ static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
*fix_giltf = RTW89_GILTF_1XHE16;
} else if (he_ltf == 0 && he_gi == 0) {
*fix_giltf = RTW89_GILTF_1XHE08;
} else {
*fix_giltf_en = false;
return;
}
*fix_giltf_en = true;
}
static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
@ -325,6 +331,8 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
mode |= RTW89_RA_MODE_EHT;
ra_mask |= get_eht_ra_mask(link_sta);
high_rate_masks = rtw89_ra_mask_eht_rates;
rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, link_sta,
chan, &fix_giltf_en, &fix_giltf);
} else if (link_sta->he_cap.has_he) {
mode |= RTW89_RA_MODE_HE;
csi_mode = RTW89_RA_RPT_MODE_HE;
@ -336,7 +344,8 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
if (link_sta->he_cap.he_cap_elem.phy_cap_info[1] &
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
ldpc_en = 1;
rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, chan, &fix_giltf_en, &fix_giltf);
rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, link_sta,
chan, &fix_giltf_en, &fix_giltf);
} else if (link_sta->vht_cap.vht_supported) {
u16 mcs_map = le16_to_cpu(link_sta->vht_cap.vht_mcs.rx_mcs_map);
@ -466,11 +475,11 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
ra->csi_mode = csi_mode;
}
static void __rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
void rtw89_phy_ra_update_sta_link(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link,
u32 changed)
{
struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link;
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
struct rtw89_ra_info *ra = &rtwsta_link->ra;
struct ieee80211_link_sta *link_sta;
@ -503,14 +512,11 @@ void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta
u32 changed)
{
struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
struct rtw89_vif_link *rtwvif_link;
struct rtw89_sta_link *rtwsta_link;
unsigned int link_id;
rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
rtwvif_link = rtwsta_link->rtwvif_link;
__rtw89_phy_ra_update_sta(rtwdev, rtwvif_link, rtwsta_link, changed);
}
rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id)
rtw89_phy_ra_update_sta_link(rtwdev, rtwsta_link, changed);
}
static bool __check_rate_pattern(struct rtw89_phy_rate_pattern *next,
@ -1854,6 +1860,228 @@ void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_phy_write_reg3_tbl);
/* Translate an ACPI antenna-gain domain index into the driver's
 * regulatory enum. Unknown domains are logged and mapped to
 * RTW89_REGD_NUM so the caller can ignore them.
 */
static u8 rtw89_phy_ant_gain_domain_to_regd(struct rtw89_dev *rtwdev, u8 ant_gain_regd)
{
	if (ant_gain_regd == RTW89_ANT_GAIN_ETSI)
		return RTW89_ETSI;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "unknown antenna gain domain: %d\n",
		    ant_gain_regd);
	return RTW89_REGD_NUM;
}
/* antenna gain in unit of 0.25 dbm */
#define RTW89_ANT_GAIN_2GHZ_MIN -8
#define RTW89_ANT_GAIN_2GHZ_MAX 14
#define RTW89_ANT_GAIN_5GHZ_MIN -8
#define RTW89_ANT_GAIN_5GHZ_MAX 20
#define RTW89_ANT_GAIN_6GHZ_MIN -8
#define RTW89_ANT_GAIN_6GHZ_MAX 20
#define RTW89_ANT_GAIN_REF_2GHZ 14
#define RTW89_ANT_GAIN_REF_5GHZ 20
#define RTW89_ANT_GAIN_REF_6GHZ 20
/* Initialize dynamic antenna gain (DAG) state from the platform's ACPI
 * RTAG data.
 *
 * Records which regulatory domains have antenna gain enabled
 * (ant_gain->regd_enabled bitmap) and converts each per-chain,
 * per-subband raw table entry into a TX-power offset relative to the
 * band's reference gain. All gain values are in units of 0.25 dBm (see
 * the RTW89_ANT_GAIN_* defines above). Bails out silently (debug log
 * only) if the chip lacks DAG support, the RTAG method cannot be
 * evaluated, or the table revision is unknown.
 */
void rtw89_phy_ant_gain_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_acpi_rtag_result res = {};
	u32 domain;
	int ret;
	u8 i, j;
	u8 regd;
	u8 val;

	/* Unsupported chips keep ant_gain zeroed, i.e. DAG disabled. */
	if (!chip->support_ant_gain)
		return;

	ret = rtw89_acpi_evaluate_rtag(rtwdev, &res);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
			    "acpi: cannot eval rtag: %d\n", ret);
		return;
	}

	/* Only revision 0 of the RTAG layout is understood. */
	if (res.revision != 0) {
		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
			    "unknown rtag revision: %d\n", res.revision);
		return;
	}

	/* res.domain is a little-endian bitmap of ACPI gain domains;
	 * map each set bit onto the driver regulatory enum and mark it
	 * enabled, skipping domains we do not recognize.
	 */
	domain = get_unaligned_le32(&res.domain);

	for (i = 0; i < RTW89_ANT_GAIN_DOMAIN_NUM; i++) {
		if (!(domain & BIT(i)))
			continue;
		regd = rtw89_phy_ant_gain_domain_to_regd(rtwdev, i);
		if (regd >= RTW89_REGD_NUM)
			continue;
		ant_gain->regd_enabled |= BIT(regd);
	}

	/* Convert each raw entry to an offset: clamp to the band's
	 * min/max then subtract from the band reference, so offset is
	 * (REF - clamped gain). Unknown subband indices fall through
	 * to the 2 GHz conversion (default: precedes the 2 GHz case).
	 */
	for (i = 0; i < RTW89_ANT_GAIN_CHAIN_NUM; i++) {
		for (j = 0; j < RTW89_ANT_GAIN_SUBBAND_NR; j++) {
			val = res.ant_gain_table[i][j];
			switch (j) {
			default:
			case RTW89_ANT_GAIN_2GHZ_SUBBAND:
				val = RTW89_ANT_GAIN_REF_2GHZ -
				      clamp_t(s8, val,
					      RTW89_ANT_GAIN_2GHZ_MIN,
					      RTW89_ANT_GAIN_2GHZ_MAX);
				break;
			case RTW89_ANT_GAIN_5GHZ_SUBBAND_1:
			case RTW89_ANT_GAIN_5GHZ_SUBBAND_2:
			case RTW89_ANT_GAIN_5GHZ_SUBBAND_2E:
			case RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4:
				val = RTW89_ANT_GAIN_REF_5GHZ -
				      clamp_t(s8, val,
					      RTW89_ANT_GAIN_5GHZ_MIN,
					      RTW89_ANT_GAIN_5GHZ_MAX);
				break;
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_6:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_8:
				val = RTW89_ANT_GAIN_REF_6GHZ -
				      clamp_t(s8, val,
					      RTW89_ANT_GAIN_6GHZ_MIN,
					      RTW89_ANT_GAIN_6GHZ_MAX);
			}
			ant_gain->offset[i][j] = val;
		}
	}
}
/* Map a channel center frequency (MHz) onto its antenna-gain subband.
 *
 * Frequencies outside all known ranges are logged and treated as the
 * 2 GHz subband, matching the conservative fallback of the original
 * lookup.
 *
 * freq 6875 (ch 185, 20MHz) spans RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H
 * and RTW89_ANT_GAIN_6GHZ_SUBBAND_8, so it is intentionally absent
 * here and described directly with struct rtw89_6ghz_span instead.
 */
static
enum rtw89_ant_gain_subband rtw89_phy_ant_gain_get_subband(struct rtw89_dev *rtwdev,
							   u32 center_freq)
{
	static const struct {
		u32 first;
		u32 last;
		enum rtw89_ant_gain_subband subband;
	} tbl[] = {
		{2412, 2484, RTW89_ANT_GAIN_2GHZ_SUBBAND},
		{5180, 5240, RTW89_ANT_GAIN_5GHZ_SUBBAND_1},
		{5250, 5320, RTW89_ANT_GAIN_5GHZ_SUBBAND_2},
		{5500, 5720, RTW89_ANT_GAIN_5GHZ_SUBBAND_2E},
		{5745, 5885, RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4},
		{5955, 6155, RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L},
		{6175, 6415, RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H},
		{6435, 6515, RTW89_ANT_GAIN_6GHZ_SUBBAND_6},
		{6535, 6695, RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L},
		{6715, 6855, RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H},
		{6895, 7115, RTW89_ANT_GAIN_6GHZ_SUBBAND_8},
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(tbl); i++) {
		if (center_freq >= tbl[i].first && center_freq <= tbl[i].last)
			return tbl[i].subband;
	}

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "center freq: %u to antenna gain subband is unhandled\n",
		    center_freq);
	return RTW89_ANT_GAIN_2GHZ_SUBBAND;
}
/* Look up the antenna-gain offset of one RF path at @center_freq.
 *
 * A valid 6 GHz span entry supplies a low/high subband pair directly;
 * otherwise the single subband derived from the frequency is used for
 * both bounds. The smaller of the two subband offsets is returned.
 */
static s8 rtw89_phy_ant_gain_query(struct rtw89_dev *rtwdev,
				   enum rtw89_rf_path path, u32 center_freq)
{
	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
	const struct rtw89_6ghz_span *span = rtw89_get_6ghz_span(rtwdev, center_freq);
	enum rtw89_ant_gain_subband sub_lo;
	enum rtw89_ant_gain_subband sub_hi;

	if (span && RTW89_ANT_GAIN_SPAN_VALID(span)) {
		sub_lo = span->ant_gain_subband_low;
		sub_hi = span->ant_gain_subband_high;
	} else {
		sub_lo = rtw89_phy_ant_gain_get_subband(rtwdev, center_freq);
		sub_hi = sub_lo;
	}

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "center_freq %u: antenna gain subband {%u, %u}\n",
		    center_freq, sub_lo, sub_hi);

	return min(ant_gain->offset[path][sub_lo],
		   ant_gain->offset[path][sub_hi]);
}
/* Return the antenna-gain power offset for @band at @center_freq, or 0
 * when the chip has no DAG support or the active regulatory domain is
 * not DAG-enabled. The larger of the two chain offsets is used.
 */
static s8 rtw89_phy_ant_gain_offset(struct rtw89_dev *rtwdev, u8 band, u32 center_freq)
{
	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 regd = rtw89_regd_get(rtwdev, band);
	s8 gain_a, gain_b;

	if (!chip->support_ant_gain || !(ant_gain->regd_enabled & BIT(regd)))
		return 0;

	gain_a = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, center_freq);
	gain_b = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, center_freq);

	return max(gain_a, gain_b);
}
s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan)
{
struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
u8 regd = rtw89_regd_get(rtwdev, chan->band_type);
s8 offset_patha, offset_pathb;
if (!(ant_gain->regd_enabled & BIT(regd)))
return 0;
offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq);
offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq);
return rtw89_phy_txpwr_rf_to_bb(rtwdev, offset_patha - offset_pathb);
}
EXPORT_SYMBOL(rtw89_phy_ant_gain_pwr_offset);
/* Debugfs helper: print the dynamic antenna gain (DAG) offset of each
 * RF chain on @chan, or state that no DAG is applied when the chip
 * lacks support or the current regulatory domain is not DAG-enabled.
 *
 * NOTE(review): the stored offsets are in 0.25 dBm units per the
 * RTW89_ANT_GAIN_* defines, yet they are printed with a plain "dBm"
 * label — confirm the intended unit of this debug output.
 */
void rtw89_print_ant_gain(struct seq_file *m, struct rtw89_dev *rtwdev,
			  const struct rtw89_chan *chan)
{
	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 regd = rtw89_regd_get(rtwdev, chan->band_type);
	s8 offset_patha, offset_pathb;

	if (!chip->support_ant_gain || !(ant_gain->regd_enabled & BIT(regd))) {
		seq_puts(m, "no DAG is applied\n");
		return;
	}

	offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq);
	offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq);

	seq_printf(m, "ChainA offset: %d dBm\n", offset_patha);
	seq_printf(m, "ChainB offset: %d dBm\n", offset_pathb);
}
static const u8 rtw89_rs_idx_num_ax[] = {
[RTW89_RS_CCK] = RTW89_RATE_CCK_NUM,
[RTW89_RS_OFDM] = RTW89_RATE_OFDM_NUM,
@ -1917,20 +2145,6 @@ void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate);
static s8 rtw89_phy_txpwr_rf_to_mac(struct rtw89_dev *rtwdev, s8 txpwr_rf)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
return txpwr_rf >> (chip->txpwr_factor_rf - chip->txpwr_factor_mac);
}
static s8 rtw89_phy_txpwr_dbm_to_mac(struct rtw89_dev *rtwdev, s8 dbm)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
return clamp_t(s16, dbm << chip->txpwr_factor_mac, -64, 63);
}
static s8 rtw89_phy_txpwr_dbm_without_tolerance(s8 dbm)
{
const u8 tssi_deviation_point = 0;
@ -2027,7 +2241,7 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
u8 regd = rtw89_regd_get(rtwdev, band);
u8 reg6 = regulatory->reg_6ghz_power;
s8 lmt = 0, sar;
s8 lmt = 0, sar, offset;
s8 cstr;
switch (band) {
@ -2059,7 +2273,8 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
return 0;
}
lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt);
offset = rtw89_phy_ant_gain_offset(rtwdev, band, freq);
lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt + offset);
sar = rtw89_query_sar(rtwdev, freq);
cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);
@ -2286,7 +2501,7 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
u8 regd = rtw89_regd_get(rtwdev, band);
u8 reg6 = regulatory->reg_6ghz_power;
s8 lmt_ru = 0, sar;
s8 lmt_ru = 0, sar, offset;
s8 cstr;
switch (band) {
@ -2318,7 +2533,8 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
return 0;
}
lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru);
offset = rtw89_phy_ant_gain_offset(rtwdev, band, freq);
lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru + offset);
sar = rtw89_query_sar(rtwdev, freq);
cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);
@ -3228,10 +3444,16 @@ rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u3
(int)(len - sizeof(report->hdr)), &report->state);
}
/* Intentionally empty: accepts RFK TAS power C2H reports without
 * parsing them, so they are not flagged as unsupported by the
 * dispatcher.
 */
static void
rtw89_phy_c2h_rfk_log_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
}
/* Dispatch table for PHY C2H RFK report messages, indexed by the
 * report function ID (enum rtw89_phy_c2h_rfk_report_func).
 */
static
void (* const rtw89_phy_c2h_rfk_report_handler[])(struct rtw89_dev *rtwdev,
						  struct sk_buff *c2h, u32 len) = {
	[RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE] = rtw89_phy_c2h_rfk_report_state,
	[RTW89_PHY_C2H_RFK_LOG_TAS_PWR] = rtw89_phy_c2h_rfk_log_tas_pwr,
};
bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
@ -3285,11 +3507,11 @@ void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
return;
fallthrough;
default:
rtw89_info(rtwdev, "c2h class %d not support\n", class);
rtw89_info(rtwdev, "PHY c2h class %d not support\n", class);
return;
}
if (!handler) {
rtw89_info(rtwdev, "c2h class %d func %d not support\n", class,
rtw89_info(rtwdev, "PHY c2h class %d func %d not support\n", class,
func);
return;
}
@ -4058,7 +4280,6 @@ static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev,
if (!force && cfo->crystal_cap == crystal_cap)
return;
crystal_cap = clamp_t(u8, crystal_cap, 0, 127);
if (chip->chip_id == RTL8852A || chip->chip_id == RTL8851B) {
rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
@ -4181,7 +4402,7 @@ static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev,
s32 curr_cfo)
{
struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
s8 crystal_cap = cfo->crystal_cap;
int crystal_cap = cfo->crystal_cap;
s32 cfo_abs = abs(curr_cfo);
int sign;
@ -4202,15 +4423,17 @@ static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev,
}
sign = curr_cfo > 0 ? 1 : -1;
if (cfo_abs > CFO_TRK_STOP_TH_4)
crystal_cap += 7 * sign;
else if (cfo_abs > CFO_TRK_STOP_TH_3)
crystal_cap += 5 * sign;
else if (cfo_abs > CFO_TRK_STOP_TH_2)
crystal_cap += 3 * sign;
else if (cfo_abs > CFO_TRK_STOP_TH_3)
crystal_cap += 3 * sign;
else if (cfo_abs > CFO_TRK_STOP_TH_2)
crystal_cap += 1 * sign;
else if (cfo_abs > CFO_TRK_STOP_TH_1)
crystal_cap += 1 * sign;
else
return;
crystal_cap = clamp(crystal_cap, 0, 127);
rtw89_phy_cfo_set_crystal_cap(rtwdev, (u8)crystal_cap, false);
rtw89_debug(rtwdev, RTW89_DBG_CFO,
"X_cap{Curr,Default}={0x%x,0x%x}\n",
@ -6310,6 +6533,12 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
rtw89_chip_cfg_txrx_path(rtwdev);
}
/* Re-run the subset of PHY dynamic-mechanism initialization that must
 * be refreshed after leaving low-power state (called from
 * rtw89_leave_lps()): the environment monitor and PHY status parsing.
 * Unlike rtw89_phy_dm_init(), nothing else is reprogrammed.
 */
void rtw89_phy_dm_reinit(struct rtw89_dev *rtwdev)
{
	rtw89_phy_env_monitor_init(rtwdev);
	rtw89_physts_parsing_init(rtwdev);
}
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link)
{

View file

@ -57,7 +57,7 @@
#define CFO_TRK_STOP_TH_4 (30 << 2)
#define CFO_TRK_STOP_TH_3 (20 << 2)
#define CFO_TRK_STOP_TH_2 (10 << 2)
#define CFO_TRK_STOP_TH_1 (00 << 2)
#define CFO_TRK_STOP_TH_1 (03 << 2)
#define CFO_TRK_STOP_TH (2 << 2)
#define CFO_SW_COMP_FINE_TUNE (2 << 2)
#define CFO_PERIOD_CNT 15
@ -151,6 +151,7 @@ enum rtw89_phy_c2h_rfk_log_func {
enum rtw89_phy_c2h_rfk_report_func {
RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE = 0,
RTW89_PHY_C2H_RFK_LOG_TAS_PWR = 6,
};
enum rtw89_phy_c2h_dm_func {
@ -813,6 +814,7 @@ void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
enum rtw89_rf_path rf_path,
void *extra_data);
void rtw89_phy_dm_init(struct rtw89_dev *rtwdev);
void rtw89_phy_dm_reinit(struct rtw89_dev *rtwdev);
void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
u32 data, enum rtw89_phy_idx phy_idx);
void rtw89_phy_write32_idx_set(struct rtw89_dev *rtwdev, u32 addr, u32 bits,
@ -826,6 +828,11 @@ s8 *rtw89_phy_raw_byr_seek(struct rtw89_dev *rtwdev,
const struct rtw89_rate_desc *desc);
s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw,
const struct rtw89_rate_desc *rate_desc);
void rtw89_phy_ant_gain_init(struct rtw89_dev *rtwdev);
s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan);
void rtw89_print_ant_gain(struct seq_file *m, struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan);
void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
const struct rtw89_txpwr_table *tbl);
s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
@ -896,10 +903,34 @@ void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
phy->set_txpwr_limit_ru(rtwdev, chan, phy_idx);
}
/* Convert a TX power value from RF fractional units to BB fractional
 * units by shifting with the difference of the chip's unit factors.
 * Assumes txpwr_factor_bb >= txpwr_factor_rf (shift count would be
 * negative, i.e. UB, otherwise) — TODO confirm per-chip.
 */
static inline s8 rtw89_phy_txpwr_rf_to_bb(struct rtw89_dev *rtwdev, s8 txpwr_rf)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;

	return txpwr_rf << (chip->txpwr_factor_bb - chip->txpwr_factor_rf);
}
/* Convert a TX power value from RF fractional units down to MAC
 * fractional units. Assumes txpwr_factor_rf >= txpwr_factor_mac
 * (shift count would be negative, i.e. UB, otherwise) — TODO confirm
 * per-chip.
 */
static inline s8 rtw89_phy_txpwr_rf_to_mac(struct rtw89_dev *rtwdev, s8 txpwr_rf)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;

	return txpwr_rf >> (chip->txpwr_factor_rf - chip->txpwr_factor_mac);
}
/* Convert a dBm value to MAC fractional units, saturating the result
 * to [-64, 63] (the range of a signed 7-bit register field). The
 * intermediate shift is done as s16 to avoid overflowing s8 before the
 * clamp.
 */
static inline s8 rtw89_phy_txpwr_dbm_to_mac(struct rtw89_dev *rtwdev, s8 dbm)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;

	return clamp_t(s16, dbm << chip->txpwr_factor_mac, -64, 63);
}
void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link);
void rtw89_phy_ra_update(struct rtw89_dev *rtwdev);
void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
u32 changed);
void rtw89_phy_ra_update_sta_link(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link,
u32 changed);
void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask);

View file

@ -8,6 +8,7 @@
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "util.h"
@ -62,11 +63,8 @@ static void rtw89_ps_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
rtw89_mac_power_mode_change(rtwdev, enter);
}
void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev)
{
if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
return;
if (!rtwdev->ps_mode)
return;
@ -85,7 +83,7 @@ void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
rtw89_ps_power_mode_change(rtwdev, false);
}
static void __rtw89_enter_lps(struct rtw89_dev *rtwdev,
static void __rtw89_enter_lps_link(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link)
{
struct rtw89_lps_parm lps_param = {
@ -96,7 +94,6 @@ static void __rtw89_enter_lps(struct rtw89_dev *rtwdev,
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_FW_CTRL);
rtw89_fw_h2c_lps_parm(rtwdev, &lps_param);
rtw89_fw_h2c_lps_ch_info(rtwdev, rtwvif_link);
}
static void __rtw89_leave_lps(struct rtw89_dev *rtwdev,
@ -121,17 +118,32 @@ void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
__rtw89_leave_ps_mode(rtwdev);
}
void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool ps_mode)
{
struct rtw89_vif_link *rtwvif_link;
bool can_ps_mode = true;
unsigned int link_id;
lockdep_assert_held(&rtwdev->mutex);
if (test_and_set_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags))
return;
__rtw89_enter_lps(rtwdev, rtwvif_link);
if (ps_mode)
__rtw89_enter_ps_mode(rtwdev, rtwvif_link);
rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
__rtw89_enter_lps_link(rtwdev, rtwvif_link);
if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
can_ps_mode = false;
}
if (RTW89_CHK_FW_FEATURE(LPS_CH_INFO, &rtwdev->fw))
rtw89_fw_h2c_lps_ch_info(rtwdev, rtwvif);
else
rtw89_fw_h2c_lps_ml_cmn_info(rtwdev, rtwvif);
if (ps_mode && can_ps_mode)
__rtw89_enter_ps_mode(rtwdev);
}
static void rtw89_leave_lps_vif(struct rtw89_dev *rtwdev,
@ -157,6 +169,8 @@ void rtw89_leave_lps(struct rtw89_dev *rtwdev)
__rtw89_leave_ps_mode(rtwdev);
rtw89_phy_dm_reinit(rtwdev);
rtw89_for_each_rtwvif(rtwdev, rtwvif)
rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
rtw89_leave_lps_vif(rtwdev, rtwvif_link);
@ -282,12 +296,6 @@ void rtw89_recalc_lps(struct rtw89_dev *rtwdev)
enum rtw89_entity_mode mode;
int count = 0;
/* FIXME: Fix rtw89_enter_lps() and __rtw89_enter_ps_mode()
* to take MLO cases into account before doing the following.
*/
if (rtwdev->support_mlo)
goto disable_lps;
mode = rtw89_get_entity_mode(rtwdev);
if (mode == RTW89_ENTITY_MODE_MCC)
goto disable_lps;

View file

@ -5,11 +5,11 @@
#ifndef __RTW89_PS_H_
#define __RTW89_PS_H_
void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool ps_mode);
void rtw89_leave_lps(struct rtw89_dev *rtwdev);
void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev);
void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link);
void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev);
void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev);
void rtw89_enter_ips(struct rtw89_dev *rtwdev);
void rtw89_leave_ips(struct rtw89_dev *rtwdev);

View file

@ -7447,6 +7447,10 @@
#define B_BE_CSIPRT_HESU_AID_EN BIT(25)
#define B_BE_CSIPRT_VHTSU_AID_EN BIT(24)
#define R_BE_DRV_INFO_OPTION 0x11470
#define R_BE_DRV_INFO_OPTION_C1 0x15470
#define B_BE_DRV_INFO_PHYRPT_EN BIT(0)
#define R_BE_RX_ERR_ISR 0x114F4
#define R_BE_RX_ERR_ISR_C1 0x154F4
#define B_BE_RX_ERR_TRIG_ACT_TO BIT(9)

Some files were not shown because too many files have changed in this diff Show more