RDMA/bnxt_re: Refactor net ring allocation function
Introducing a new attribute structure to reduce the long list of arguments
passed in bnxt_re_net_ring_alloc() function. The caller of
bnxt_re_net_ring_alloc should fill in the list of attributes in the
bnxt_re_ring_attr structure and then pass the pointer to the function.

Link: https://lore.kernel.org/r/1581786665-23705-5-git-send-email-devesh.sharma@broadcom.com
Signed-off-by: Naresh Kumar PBS <nareshkumar.pbs@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Devesh Sharma <devesh.sharma@broadcom.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:
parent
0c4dcd6028
commit
b08fe048a6
2 changed files with 44 additions and 29 deletions
|
@@ -89,6 +89,15 @@
|
||||||
|
|
||||||
#define BNXT_RE_DEFAULT_ACK_DELAY 16
|
#define BNXT_RE_DEFAULT_ACK_DELAY 16
|
||||||
|
|
||||||
|
struct bnxt_re_ring_attr {
|
||||||
|
dma_addr_t *dma_arr;
|
||||||
|
int pages;
|
||||||
|
int type;
|
||||||
|
u32 depth;
|
||||||
|
u32 lrid; /* Logical ring id */
|
||||||
|
u8 mode;
|
||||||
|
};
|
||||||
|
|
||||||
struct bnxt_re_work {
|
struct bnxt_re_work {
|
||||||
struct work_struct work;
|
struct work_struct work;
|
||||||
unsigned long event;
|
unsigned long event;
|
||||||
|
|
|
@@ -427,9 +427,9 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
|
static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
|
||||||
int pages, int type, u32 ring_mask,
|
struct bnxt_re_ring_attr *ring_attr,
|
||||||
u32 map_index, u16 *fw_ring_id)
|
u16 *fw_ring_id)
|
||||||
{
|
{
|
||||||
struct bnxt_en_dev *en_dev = rdev->en_dev;
|
struct bnxt_en_dev *en_dev = rdev->en_dev;
|
||||||
struct hwrm_ring_alloc_input req = {0};
|
struct hwrm_ring_alloc_input req = {0};
|
||||||
|
@@ -443,18 +443,18 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
|
||||||
memset(&fw_msg, 0, sizeof(fw_msg));
|
memset(&fw_msg, 0, sizeof(fw_msg));
|
||||||
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
|
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
|
||||||
req.enables = 0;
|
req.enables = 0;
|
||||||
req.page_tbl_addr = cpu_to_le64(dma_arr[0]);
|
req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]);
|
||||||
if (pages > 1) {
|
if (ring_attr->pages > 1) {
|
||||||
/* Page size is in log2 units */
|
/* Page size is in log2 units */
|
||||||
req.page_size = BNXT_PAGE_SHIFT;
|
req.page_size = BNXT_PAGE_SHIFT;
|
||||||
req.page_tbl_depth = 1;
|
req.page_tbl_depth = 1;
|
||||||
}
|
}
|
||||||
req.fbo = 0;
|
req.fbo = 0;
|
||||||
/* Association of ring index with doorbell index and MSIX number */
|
/* Association of ring index with doorbell index and MSIX number */
|
||||||
req.logical_id = cpu_to_le16(map_index);
|
req.logical_id = cpu_to_le16(ring_attr->lrid);
|
||||||
req.length = cpu_to_le32(ring_mask + 1);
|
req.length = cpu_to_le32(ring_attr->depth + 1);
|
||||||
req.ring_type = type;
|
req.ring_type = ring_attr->type;
|
||||||
req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
|
req.int_mode = ring_attr->mode;
|
||||||
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
|
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
|
||||||
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
|
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
|
||||||
rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
|
rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
|
||||||
|
@@ -1006,10 +1006,10 @@ static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
|
||||||
|
|
||||||
static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
|
static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
|
||||||
{
|
{
|
||||||
|
struct bnxt_re_ring_attr rattr = {};
|
||||||
|
struct bnxt_qplib_ctx *qplib_ctx;
|
||||||
int num_vec_created = 0;
|
int num_vec_created = 0;
|
||||||
dma_addr_t *pg_map;
|
|
||||||
int rc = 0, i;
|
int rc = 0, i;
|
||||||
int pages;
|
|
||||||
u8 type;
|
u8 type;
|
||||||
|
|
||||||
/* Configure and allocate resources for qplib */
|
/* Configure and allocate resources for qplib */
|
||||||
|
@@ -1030,10 +1030,13 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
|
||||||
if (rc)
|
if (rc)
|
||||||
goto dealloc_res;
|
goto dealloc_res;
|
||||||
|
|
||||||
|
qplib_ctx = &rdev->qplib_ctx;
|
||||||
for (i = 0; i < rdev->num_msix - 1; i++) {
|
for (i = 0; i < rdev->num_msix - 1; i++) {
|
||||||
rdev->nq[i].res = &rdev->qplib_res;
|
struct bnxt_qplib_nq *nq;
|
||||||
rdev->nq[i].hwq.max_elements = BNXT_RE_MAX_CQ_COUNT +
|
|
||||||
BNXT_RE_MAX_SRQC_COUNT + 2;
|
nq = &rdev->nq[i];
|
||||||
|
nq->hwq.max_elements = (qplib_ctx->cq_count +
|
||||||
|
qplib_ctx->srqc_count + 2);
|
||||||
rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]);
|
rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]);
|
||||||
if (rc) {
|
if (rc) {
|
||||||
dev_err(rdev_to_dev(rdev), "Alloc Failed NQ%d rc:%#x",
|
dev_err(rdev_to_dev(rdev), "Alloc Failed NQ%d rc:%#x",
|
||||||
|
@@ -1041,12 +1044,13 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
|
||||||
goto free_nq;
|
goto free_nq;
|
||||||
}
|
}
|
||||||
type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
|
type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
|
||||||
pg_map = rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr;
|
rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr;
|
||||||
pages = rdev->nq[i].hwq.pbl[rdev->nq[i].hwq.level].pg_count;
|
rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count;
|
||||||
rc = bnxt_re_net_ring_alloc(rdev, pg_map, pages, type,
|
rattr.type = type;
|
||||||
BNXT_QPLIB_NQE_MAX_CNT - 1,
|
rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
|
||||||
rdev->msix_entries[i + 1].ring_idx,
|
rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1;
|
||||||
&rdev->nq[i].ring_id);
|
rattr.lrid = rdev->msix_entries[i + 1].ring_idx;
|
||||||
|
rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
|
||||||
if (rc) {
|
if (rc) {
|
||||||
dev_err(rdev_to_dev(rdev),
|
dev_err(rdev_to_dev(rdev),
|
||||||
"Failed to allocate NQ fw id with rc = 0x%x",
|
"Failed to allocate NQ fw id with rc = 0x%x",
|
||||||
|
@@ -1371,10 +1375,10 @@ static void bnxt_re_worker(struct work_struct *work)
|
||||||
|
|
||||||
static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
|
static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
|
||||||
{
|
{
|
||||||
dma_addr_t *pg_map;
|
struct bnxt_re_ring_attr rattr;
|
||||||
u32 db_offt, ridx;
|
u32 db_offt;
|
||||||
int pages, vid;
|
|
||||||
bool locked;
|
bool locked;
|
||||||
|
int vid;
|
||||||
u8 type;
|
u8 type;
|
||||||
int rc;
|
int rc;
|
||||||
|
|
||||||
|
@@ -1383,6 +1387,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
|
||||||
locked = true;
|
locked = true;
|
||||||
|
|
||||||
/* Registered a new RoCE device instance to netdev */
|
/* Registered a new RoCE device instance to netdev */
|
||||||
|
memset(&rattr, 0, sizeof(rattr));
|
||||||
rc = bnxt_re_register_netdev(rdev);
|
rc = bnxt_re_register_netdev(rdev);
|
||||||
if (rc) {
|
if (rc) {
|
||||||
rtnl_unlock();
|
rtnl_unlock();
|
||||||
|
@@ -1422,12 +1427,13 @@
|
||||||
}
|
}
|
||||||
|
|
||||||
type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
|
type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
|
||||||
pg_map = rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr;
|
rattr.dma_arr = rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr;
|
||||||
pages = rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count;
|
rattr.pages = rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count;
|
||||||
ridx = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
|
rattr.type = type;
|
||||||
rc = bnxt_re_net_ring_alloc(rdev, pg_map, pages, type,
|
rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
|
||||||
BNXT_QPLIB_CREQE_MAX_CNT - 1,
|
rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1;
|
||||||
ridx, &rdev->rcfw.creq_ring_id);
|
rattr.lrid = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
|
||||||
|
rc = bnxt_re_net_ring_alloc(rdev, &rattr, &rdev->rcfw.creq_ring_id);
|
||||||
if (rc) {
|
if (rc) {
|
||||||
pr_err("Failed to allocate CREQ: %#x\n", rc);
|
pr_err("Failed to allocate CREQ: %#x\n", rc);
|
||||||
goto free_rcfw;
|
goto free_rcfw;
|
||||||
|
|
Loading…
Add table
Reference in a new issue