qed: Update qed_hsi.h for fw 8.59.1.0
The qed_hsi.h has been updated to support the new FW version 8.59.1.0 with the following changes:
- Updates FW HSI (Hardware Software Interface) structures.
- Additions/updates in function declarations and defines as per HSI.
- Add generic infrastructure for FW error reporting as part of common event queue handling.
- Move malicious VF error reporting to the FW error reporting infrastructure.
- Move consolidation queue initialization from FW context to the ramrod message.

The qed_hsi.h header file changes lead to changes in many files to ensure compilation. This patch also fixes the existing checkpatch warnings and a few important checks.

Signed-off-by: Ariel Elior <aelior@marvell.com>
Signed-off-by: Shai Malin <smalin@marvell.com>
Signed-off-by: Omkar Kulkarni <okulkarni@marvell.com>
Signed-off-by: Prabhakar Kushwaha <pkushwaha@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f2a74107f1
commit fe40a830dc

12 changed files with 1589 additions and 307 deletions
drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1397,12 +1397,13 @@ void qed_resc_free(struct qed_dev *cdev)
 			qed_rdma_info_free(p_hwfn);
 		}
 
+		qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
 		qed_iov_free(p_hwfn);
 		qed_l2_free(p_hwfn);
 		qed_dmae_info_free(p_hwfn);
 		qed_dcbx_info_free(p_hwfn);
 		qed_dbg_user_data_free(p_hwfn);
-		qed_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem);
+		qed_fw_overlay_mem_free(p_hwfn, &p_hwfn->fw_overlay_mem);
 
 		/* Destroy doorbell recovery mechanism */
 		qed_db_recovery_teardown(p_hwfn);
@@ -1629,9 +1630,9 @@ static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn)
  */
 
 /* flags for pq init */
-#define PQ_INIT_SHARE_VPORT	(1 << 0)
-#define PQ_INIT_PF_RL		(1 << 1)
-#define PQ_INIT_VF_RL		(1 << 2)
+#define PQ_INIT_SHARE_VPORT	BIT(0)
+#define PQ_INIT_PF_RL		BIT(1)
+#define PQ_INIT_VF_RL		BIT(2)
 
 /* defines for pq init */
 #define PQ_INIT_DEFAULT_WRR_GROUP	1
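BIT(n) is the kernel's preferred spelling for single-bit flag masks, and this conversion is one of the checkpatch cleanups the commit message mentions. A minimal userspace model of the macro (the kernel's real definition lives in include/linux/bits.h):

#include <stdio.h>

/* Userspace model of the kernel's BIT() macro */
#define BIT(nr) (1UL << (nr))

#define PQ_INIT_SHARE_VPORT	BIT(0)
#define PQ_INIT_PF_RL		BIT(1)
#define PQ_INIT_VF_RL		BIT(2)

int main(void)
{
	unsigned long flags = PQ_INIT_SHARE_VPORT | PQ_INIT_VF_RL;

	/* Prints "flags = 0x5" */
	printf("flags = 0x%lx\n", flags);
	return 0;
}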
@@ -2376,6 +2377,49 @@ alloc_err:
 	return rc;
 }
 
+static int qed_fw_err_handler(struct qed_hwfn *p_hwfn,
+			      u8 opcode,
+			      u16 echo,
+			      union event_ring_data *data, u8 fw_return_code)
+{
+	if (fw_return_code != COMMON_ERR_CODE_ERROR)
+		goto eqe_unexpected;
+
+	if (data->err_data.recovery_scope == ERR_SCOPE_FUNC &&
+	    le16_to_cpu(data->err_data.entity_id) >= MAX_NUM_PFS) {
+		qed_sriov_vfpf_malicious(p_hwfn, &data->err_data);
+		return 0;
+	}
+
+eqe_unexpected:
+	DP_ERR(p_hwfn,
+	       "Skipping unexpected eqe 0x%02x, FW return code 0x%x, echo 0x%x\n",
+	       opcode, fw_return_code, echo);
+	return -EINVAL;
+}
+
+static int qed_common_eqe_event(struct qed_hwfn *p_hwfn,
+				u8 opcode,
+				__le16 echo,
+				union event_ring_data *data,
+				u8 fw_return_code)
+{
+	switch (opcode) {
+	case COMMON_EVENT_VF_PF_CHANNEL:
+	case COMMON_EVENT_VF_FLR:
+		return qed_sriov_eqe_event(p_hwfn, opcode, echo, data,
+					   fw_return_code);
+	case COMMON_EVENT_FW_ERROR:
+		return qed_fw_err_handler(p_hwfn, opcode,
+					  le16_to_cpu(echo), data,
+					  fw_return_code);
+	default:
+		DP_INFO(p_hwfn->cdev, "Unknown eqe event 0x%02x, echo 0x%x\n",
+			opcode, echo);
+		return -EINVAL;
+	}
+}
+
 void qed_resc_setup(struct qed_dev *cdev)
 {
 	int i;
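The two functions added here form the new generic FW error path: qed_common_eqe_event() is the PROTOCOLID_COMMON event-queue callback, and it routes COMMON_EVENT_FW_ERROR entries to qed_fw_err_handler(), which forwards function-scoped errors whose entity ID falls in the VF range to the malicious-VF handler. A stripped-down userspace model of that routing (all names and constants here are simplified stand-ins, not the driver's API):

#include <stdio.h>

/* Simplified stand-ins for the driver's types and constants */
enum { EVT_VF_PF_CHANNEL, EVT_VF_FLR, EVT_FW_ERROR };
enum { ERR_CODE_ERROR = 1 };
enum { SCOPE_FUNC = 0 };
#define MAX_NUM_PFS 16

struct err_data {
	int recovery_scope;
	unsigned short entity_id;	/* PF ids first, VF ids above MAX_NUM_PFS */
};

static int fw_err_handler(unsigned char opcode, struct err_data *d,
			  unsigned char fw_return_code)
{
	if (fw_return_code != ERR_CODE_ERROR)
		goto unexpected;

	/* Entity IDs past the PF range identify VFs: treat as malicious VF */
	if (d->recovery_scope == SCOPE_FUNC && d->entity_id >= MAX_NUM_PFS) {
		printf("malicious VF %u\n", d->entity_id - MAX_NUM_PFS);
		return 0;
	}

unexpected:
	fprintf(stderr, "unexpected eqe 0x%02x\n", opcode);
	return -1;
}

static int common_eqe_event(unsigned char opcode, struct err_data *d,
			    unsigned char fw_return_code)
{
	switch (opcode) {
	case EVT_FW_ERROR:
		return fw_err_handler(opcode, d, fw_return_code);
	default:
		return -1;	/* VF_PF_CHANNEL/VF_FLR would go to SRIOV code */
	}
}

int main(void)
{
	struct err_data d = { SCOPE_FUNC, MAX_NUM_PFS + 3 };

	return common_eqe_event(EVT_FW_ERROR, &d, ERR_CODE_ERROR);
}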
@@ -2404,6 +2448,8 @@ void qed_resc_setup(struct qed_dev *cdev)
 
 		qed_l2_setup(p_hwfn);
 		qed_iov_setup(p_hwfn);
+		qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
+					  qed_common_eqe_event);
 #ifdef CONFIG_QED_LL2
 		if (p_hwfn->using_ll2)
 			qed_ll2_setup(p_hwfn);
@@ -2593,7 +2639,7 @@ static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn,
 			  cache_line_size);
 	}
 
-	if (L1_CACHE_BYTES > wr_mbs)
+	if (wr_mbs < L1_CACHE_BYTES)
 		DP_INFO(p_hwfn,
 			"The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
 			L1_CACHE_BYTES, wr_mbs);
@@ -2609,13 +2655,21 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
 			      struct qed_ptt *p_ptt, int hw_mode)
 {
 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
-	struct qed_qm_common_rt_init_params params;
+	struct qed_qm_common_rt_init_params *params;
 	struct qed_dev *cdev = p_hwfn->cdev;
 	u8 vf_id, max_num_vfs;
 	u16 num_pfs, pf_id;
 	u32 concrete_fid;
 	int rc = 0;
 
+	params = kzalloc(sizeof(*params), GFP_KERNEL);
+	if (!params) {
+		DP_NOTICE(p_hwfn->cdev,
+			  "Failed to allocate common init params\n");
+
+		return -ENOMEM;
+	}
+
 	qed_init_cau_rt_data(cdev);
 
 	/* Program GTT windows */
@@ -2628,16 +2682,15 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
 		qm_info->pf_wfq_en = true;
 	}
 
-	memset(&params, 0, sizeof(params));
-	params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
-	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
-	params.pf_rl_en = qm_info->pf_rl_en;
-	params.pf_wfq_en = qm_info->pf_wfq_en;
-	params.global_rl_en = qm_info->vport_rl_en;
-	params.vport_wfq_en = qm_info->vport_wfq_en;
-	params.port_params = qm_info->qm_port_params;
+	params->max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
+	params->max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
+	params->pf_rl_en = qm_info->pf_rl_en;
+	params->pf_wfq_en = qm_info->pf_wfq_en;
+	params->global_rl_en = qm_info->vport_rl_en;
+	params->vport_wfq_en = qm_info->vport_wfq_en;
+	params->port_params = qm_info->qm_port_params;
 
-	qed_qm_common_rt_init(p_hwfn, &params);
+	qed_qm_common_rt_init(p_hwfn, params);
 
 	qed_cxt_hw_init_common(p_hwfn);
 
@@ -2645,7 +2698,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
 
 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
 	if (rc)
-		return rc;
+		goto out;
 
 	qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
 	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
@@ -2673,6 +2726,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
 	/* pretend to original PF */
 	qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
 
+out:
+	kfree(params);
+
 	return rc;
 }
 
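The qed_hw_init_common() changes above follow a common kernel pattern: a large parameter struct moves off the stack onto the heap, and every early return becomes goto out so a single kfree() covers all exit paths. A minimal sketch of the pattern, with generic names in place of the driver's:

#include <stdlib.h>

struct big_params {
	char scratch[4096];	/* too large to keep on a kernel stack */
};

static int hw_init_common(void)
{
	struct big_params *params;
	int rc = 0;

	/* kzalloc(sizeof(*params), GFP_KERNEL) in the kernel */
	params = calloc(1, sizeof(*params));
	if (!params)
		return -12;	/* -ENOMEM */

	/* ... programming steps; a failure must not leak params ... */
	if (rc)
		goto out;

	/* ... more steps ... */

out:
	free(params);
	return rc;
}

int main(void)
{
	return hw_init_common();
}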
@@ -3671,12 +3727,14 @@ u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type)
 
 	return qed_hsi_def_val[type][chip_id];
 }
 
 static int
 qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 	u32 resc_max_val, mcp_resp;
 	u8 res_id;
 	int rc;
 
 	for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
 		switch (res_id) {
 		case QED_LL2_RAM_QUEUE:
@@ -3922,7 +3980,7 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	 * resources allocation queries should be atomic. Since several PFs can
 	 * run in parallel - a resource lock is needed.
 	 * If either the resource lock or resource set value commands are not
-	 * supported - skip the the max values setting, release the lock if
+	 * supported - skip the max values setting, release the lock if
 	 * needed, and proceed to the queries. Other failures, including a
 	 * failure to acquire the lock, will cause this function to fail.
 	 */
drivers/net/ethernet/qlogic/qed/qed_hsi.h
(file diff suppressed because it is too large)
drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -920,7 +920,8 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
 }
 
 int qed_init_global_rl(struct qed_hwfn *p_hwfn,
-		       struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit)
+		       struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit,
+		       enum init_qm_rl_type vport_rl_type)
 {
 	u32 inc_val;
 
@@ -1645,7 +1646,7 @@ struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
 
 	/* If memory allocation has failed, free all allocated memory */
 	if (buf_offset < buf_size) {
-		qed_fw_overlay_mem_free(p_hwfn, allocated_mem);
+		qed_fw_overlay_mem_free(p_hwfn, &allocated_mem);
 		return NULL;
 	}
 
@@ -1679,16 +1680,16 @@ void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
 }
 
 void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
-			     struct phys_mem_desc *fw_overlay_mem)
+			     struct phys_mem_desc **fw_overlay_mem)
 {
 	u8 storm_id;
 
-	if (!fw_overlay_mem)
+	if (!fw_overlay_mem || !(*fw_overlay_mem))
 		return;
 
 	for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
 		struct phys_mem_desc *storm_mem_desc =
-		    (struct phys_mem_desc *)fw_overlay_mem + storm_id;
+		    (struct phys_mem_desc *)*fw_overlay_mem + storm_id;
 
 		/* Free Storm's physical memory */
 		if (storm_mem_desc->virt_addr)
@@ -1699,5 +1700,6 @@ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
 	}
 
 	/* Free allocated virtual memory */
-	kfree(fw_overlay_mem);
+	kfree(*fw_overlay_mem);
+	*fw_overlay_mem = NULL;
 }
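Taking struct phys_mem_desc ** instead of * lets qed_fw_overlay_mem_free() both free the buffer and clear the caller's pointer, so a repeated call (say, once from an error path and again from qed_resc_free()) degrades to a no-op rather than a double free. A generic sketch of the idiom:

#include <stdlib.h>

/* Free *mem and NULL the caller's pointer; safe to call twice */
static void mem_free(void **mem)
{
	if (!mem || !*mem)
		return;

	free(*mem);
	*mem = NULL;
}

int main(void)
{
	void *buf = malloc(64);

	mem_free(&buf);
	mem_free(&buf);	/* no-op: buf is already NULL */
	return 0;
}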
drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -38,7 +38,6 @@
 #include "qed_sp.h"
 #include "qed_sriov.h"
 
-
 #define QED_MAX_SGES_NUM 16
 #define CRC32_POLY 0x1edc6f41
 
@@ -1112,7 +1111,6 @@ qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
 {
 	int rc;
 
-
 	rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
 				      pbl_addr, pbl_size,
 				      qed_get_cm_pq_idx_mcos(p_hwfn, tc));
@@ -2011,7 +2009,7 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
 			       struct qed_spq_comp_cb *p_cb,
 			       struct qed_ntuple_filter_params *p_params)
 {
-	struct rx_update_gft_filter_data *p_ramrod = NULL;
+	struct rx_update_gft_filter_ramrod_data *p_ramrod = NULL;
 	struct qed_spq_entry *p_ent = NULL;
 	struct qed_sp_init_data init_data;
 	u16 abs_rx_q_id = 0;
@@ -2032,7 +2030,7 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
 	}
 
 	rc = qed_sp_init_request(p_hwfn, &p_ent,
-				 ETH_RAMROD_GFT_UPDATE_FILTER,
+				 ETH_RAMROD_RX_UPDATE_GFT_FILTER,
 				 PROTOCOLID_ETH, &init_data);
 	if (rc)
 		return rc;
drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -146,7 +146,6 @@ struct qed_sp_vport_start_params {
 int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
 			   struct qed_sp_vport_start_params *p_params);
 
-
 struct qed_filter_accept_flags {
 	u8 update_rx_mode_config;
 	u8 update_tx_mode_config;
drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -23,9 +23,9 @@ enum spq_mode {
 };
 
 struct qed_spq_comp_cb {
-	void	(*function)(struct qed_hwfn *,
-			    void *,
-			    union event_ring_data *,
+	void	(*function)(struct qed_hwfn *p_hwfn,
+			    void *cookie,
+			    union event_ring_data *data,
 			    u8 fw_return_code);
 	void	*cookie;
 };
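Naming the parameters of a function-pointer member is another of the checkpatch fixes: void (*function)(struct qed_hwfn *, void *, ...) compiles identically, but named parameters document the callback contract at the declaration site. A contrived before/after illustration (hypothetical types, not from the driver):

/* Unnamed parameters give the reader no hint what to pass: */
struct cb_old {
	void (*function)(void *, int);
};

/* Named parameters carry the contract with no runtime cost: */
struct cb_new {
	void (*function)(void *cookie, int fw_return_code);
};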
@@ -53,7 +53,7 @@ union ramrod_data {
 	struct tx_queue_stop_ramrod_data tx_queue_stop;
 	struct vport_start_ramrod_data vport_start;
 	struct vport_stop_ramrod_data vport_stop;
-	struct rx_update_gft_filter_data rx_update_gft;
+	struct rx_update_gft_filter_ramrod_data rx_update_gft;
 	struct vport_update_ramrod_data vport_update;
 	struct core_rx_start_ramrod_data core_rx_queue_start;
 	struct core_rx_stop_ramrod_data core_rx_queue_stop;
drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -369,8 +369,12 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 		       qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain));
 	page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
 	p_ramrod->event_ring_num_pages = page_cnt;
-	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
+
+	/* Place consolidation queue address in ramrod */
+	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_base_addr,
 		       qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain));
+	page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_consq->chain);
+	p_ramrod->consolid_q_num_pages = page_cnt;
 
 	qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
 
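DMA_REGPAIR_LE() stores a 64-bit DMA address into the ramrod as a little-endian lo/hi register pair. A userspace model of the split (the kernel macro additionally converts each half with cpu_to_le32(); regpair_set() here is an illustrative name):

#include <stdint.h>
#include <stdio.h>

struct regpair {
	uint32_t lo;
	uint32_t hi;
};

/* Split a 64-bit address into a lo/hi pair, as DMA_REGPAIR_LE() does */
static void regpair_set(struct regpair *rp, uint64_t addr)
{
	rp->lo = (uint32_t)addr;
	rp->hi = (uint32_t)(addr >> 32);
}

int main(void)
{
	struct regpair rp;

	regpair_set(&rp, 0x12345678abcdef00ULL);
	printf("hi=0x%08x lo=0x%08x\n", rp.hi, rp.lo);	/* hi=0x12345678 */
	return 0;
}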
drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -218,13 +218,10 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
 	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
 	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);
 
-	p_cxt->xstorm_st_context.spq_base_lo =
+	p_cxt->xstorm_st_context.spq_base_addr.lo =
 		DMA_LO_LE(p_spq->chain.p_phys_addr);
-	p_cxt->xstorm_st_context.spq_base_hi =
+	p_cxt->xstorm_st_context.spq_base_addr.hi =
 		DMA_HI_LE(p_spq->chain.p_phys_addr);
-
-	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
-		       p_hwfn->p_consq->chain.p_phys_addr);
 }
 
 static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
@@ -549,7 +546,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
 	int ret;
 
 	/* SPQ struct */
-	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
+	p_spq = kzalloc(sizeof(*p_spq), GFP_KERNEL);
 	if (!p_spq)
 		return -ENOMEM;
 
@@ -677,7 +674,6 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
 	struct qed_spq *p_spq = p_hwfn->p_spq;
 
 	if (p_ent->queue == &p_spq->unlimited_pending) {
-
 		if (list_empty(&p_spq->free_pool)) {
 			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
 			p_spq->unlimited_pending_count++;
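The kzalloc(sizeof(*p_spq), ...) form is preferred over kzalloc(sizeof(struct qed_spq), ...) because the allocation size tracks the pointee's type automatically if the declaration ever changes. A small sketch:

#include <stdlib.h>

struct spq {
	int chain;
	/* ... */
};

int main(void)
{
	/* sizeof(*p) follows the pointee type; if the declaration of p
	 * changes, the allocation stays correct with no other edits.
	 */
	struct spq *p = calloc(1, sizeof(*p));

	free(p);
	return 0;
}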
drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -20,12 +20,13 @@
 #include "qed_sp.h"
 #include "qed_sriov.h"
 #include "qed_vf.h"
-static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
-			       u8 opcode,
-			       __le16 echo,
-			       union event_ring_data *data, u8 fw_return_code);
 static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid);
 
+static u16 qed_vf_from_entity_id(__le16 entity_id)
+{
+	return le16_to_cpu(entity_id) - MAX_NUM_PFS;
+}
+
 static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
 {
 	u8 legacy = 0;
@@ -170,8 +171,8 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
 			      b_enabled_only, false))
 		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
 	else
-		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
-		       relative_vf_id);
+		DP_ERR(p_hwfn, "%s: VF[%d] is not enabled\n",
+		       __func__, relative_vf_id);
 
 	return vf;
 }
@@ -421,7 +422,7 @@ static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
 	bulletin_p = p_iov_info->bulletins_phys;
 	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
 		DP_ERR(p_hwfn,
-		       "qed_iov_setup_vfdb called without allocating mem first\n");
+		       "%s called without allocating mem first\n", __func__);
 		return;
 	}
 
@@ -465,7 +466,7 @@ static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
 	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
-		   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);
+		   "%s for %d VFs\n", __func__, num_vfs);
 
 	/* Allocate PF Mailbox buffer (per-VF) */
 	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
@@ -609,7 +610,7 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
 	if (rc)
 		return rc;
 
-	/* We want PF IOV to be synonemous with the existance of p_iov_info;
+	/* We want PF IOV to be synonemous with the existence of p_iov_info;
 	 * In case the capability is published but there are no VFs, simply
 	 * de-allocate the struct.
 	 */
@@ -989,7 +990,7 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
 
 	vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
 	if (!vf) {
-		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
+		DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__);
 		return -EINVAL;
 	}
 
@@ -1093,7 +1094,7 @@ static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
 
 	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
 	if (!vf) {
-		DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
+		DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__);
 		return -EINVAL;
 	}
 
@@ -1546,7 +1547,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
 	memset(resp, 0, sizeof(*resp));
 
 	/* Write the PF version so that VF would know which version
-	 * is supported - might be later overriden. This guarantees that
+	 * is supported - might be later overridden. This guarantees that
 	 * VF could recognize legacy PF based on lack of versions in reply.
 	 */
 	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
@@ -1958,7 +1959,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
 	rc = qed_sp_eth_vport_start(p_hwfn, &params);
 	if (rc) {
 		DP_ERR(p_hwfn,
-		       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
+		       "%s returned error %d\n", __func__, rc);
 		status = PFVF_STATUS_FAILURE;
 	} else {
 		vf->vport_instance++;
@@ -1994,8 +1995,8 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
 
 	rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
 	if (rc) {
-		DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
-		       rc);
+		DP_ERR(p_hwfn, "%s returned error %d\n",
+		       __func__, rc);
 		status = PFVF_STATUS_FAILURE;
 	}
 
@@ -3031,7 +3032,7 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
 		goto out;
 	}
 	p_rss_params = vzalloc(sizeof(*p_rss_params));
-	if (p_rss_params == NULL) {
+	if (!p_rss_params) {
 		status = PFVF_STATUS_FAILURE;
 		goto out;
 	}
@@ -3551,6 +3552,7 @@ out:
 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
 			     sizeof(struct pfvf_def_resp_tlv), status);
 }
 
 static int
 qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
 			 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
@@ -4014,13 +4016,13 @@ static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
 	return 0;
 }
 
-static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
-				     struct malicious_vf_eqe_data *p_data)
+void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
+			      struct fw_err_data *p_data)
 {
 	struct qed_vf_info *p_vf;
 
-	p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
+	p_vf = qed_sriov_get_vf_from_absid(p_hwfn, qed_vf_from_entity_id
+					   (p_data->entity_id));
 	if (!p_vf)
 		return;
 
@@ -4037,16 +4039,13 @@ static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
 	}
 }
 
-static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
-			       union event_ring_data *data, u8 fw_return_code)
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
+			union event_ring_data *data, u8 fw_return_code)
 {
 	switch (opcode) {
 	case COMMON_EVENT_VF_PF_CHANNEL:
 		return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
 					  &data->vf_pf_channel.msg_addr);
-	case COMMON_EVENT_MALICIOUS_VF:
-		qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
-		return 0;
 	default:
 		DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
 			opcode);
@@ -4346,7 +4345,8 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
 		return rc;
 
 	rl_id = abs_vp_id;	/* The "rl_id" is set as the "vport_id" */
-	return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
+	return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val,
+				  QM_RL_TYPE_NORMAL);
}
 
 static int
@@ -4396,8 +4396,10 @@ static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
  */
 void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
 {
+	/* Memory barrier for setting atomic bit */
 	smp_mb__before_atomic();
 	set_bit(flag, &hwfn->iov_task_flags);
+	/* Memory barrier after setting atomic bit */
 	smp_mb__after_atomic();
 	DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
 	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
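The comments added to qed_schedule_iov() document a standard pairing: smp_mb__before_atomic() and smp_mb__after_atomic() order the set_bit() update against surrounding memory accesses, so the worker that later calls test_and_clear_bit() reliably observes the flag. A loose userspace analogue using C11 atomics (a model of the ordering, not the kernel primitives themselves):

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong task_flags;

/* Model of set_bit() bracketed by smp_mb__{before,after}_atomic():
 * the fences order the flag update against surrounding work so a
 * worker thread polling the flags observes them.
 */
static void schedule_task(int flag)
{
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb__before_atomic() */
	atomic_fetch_or(&task_flags, 1UL << flag);	/* set_bit() */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb__after_atomic() */
	/* queue_delayed_work() would kick the worker here */
}

int main(void)
{
	schedule_task(3);
	printf("flags = 0x%lx\n", atomic_load(&task_flags));
	return 0;
}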
drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -142,7 +142,7 @@ struct qed_vf_queue {
 
 enum vf_state {
 	VF_FREE = 0,		/* VF ready to be acquired holds no resc */
-	VF_ACQUIRED,		/* VF, acquired, but not initalized */
+	VF_ACQUIRED,		/* VF, acquired, but not initialized */
 	VF_ENABLED,		/* VF, Enabled */
 	VF_RESET,		/* VF, FLR'd, pending cleanup */
 	VF_STOPPED		/* VF, Stopped */
@@ -313,6 +313,31 @@ void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
  */
 void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);
 
+/**
+ * qed_sriov_vfpf_malicious(): Handle malicious VF/PF.
+ *
+ * @p_hwfn: HW device data.
+ * @p_data: Pointer to data.
+ *
+ * Return: Void.
+ */
+void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
+			      struct fw_err_data *p_data);
+
+/**
+ * qed_sriov_eqe_event(): Callback for SRIOV events.
+ *
+ * @p_hwfn: HW device data.
+ * @opcode: Opcode.
+ * @echo: Echo.
+ * @data: data
+ * @fw_return_code: FW return code.
+ *
+ * Return: Int.
+ */
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
+			union event_ring_data *data, u8 fw_return_code);
+
 /**
  * qed_iov_alloc(): allocate sriov related resources
  *
include/linux/qed/eth_common.h
@@ -67,6 +67,7 @@
 /* Ethernet vport update constants */
 #define ETH_FILTER_RULES_COUNT			10
 #define ETH_RSS_IND_TABLE_ENTRIES_NUM		128
+#define ETH_RSS_IND_TABLE_MASK_SIZE_REGS	(ETH_RSS_IND_TABLE_ENTRIES_NUM / 32)
 #define ETH_RSS_KEY_SIZE_REGS			10
 #define ETH_RSS_ENGINE_NUM_K2			207
 #define ETH_RSS_ENGINE_NUM_BB			127
include/linux/qed/rdma_common.h
@@ -27,6 +27,7 @@
 #define RDMA_MAX_PDS			(64 * 1024)
 #define RDMA_MAX_XRC_SRQS		(1024)
 #define RDMA_MAX_SRQS			(32 * 1024)
+#define RDMA_MAX_IRQ_ELEMS_IN_PAGE	(128)
 
 #define RDMA_NUM_STATISTIC_COUNTERS	MAX_NUM_VPORTS
 #define RDMA_NUM_STATISTIC_COUNTERS_K2	MAX_NUM_VPORTS_K2