1
0
Fork 0
mirror of synced 2025-03-06 20:59:54 +01:00

RDMA: Globally allocate and release QP memory

Convert the QP object to follow the IB/core general allocation scheme.  This
change allows us to make sure that restrack properly krefs the memory.

Link: https://lore.kernel.org/r/48e767124758aeecc433360ddd85eaa6325b34d9.1627040189.git.leonro@nvidia.com
Reviewed-by: Gal Pressman <galpress@amazon.com> #efa
Tested-by: Gal Pressman <galpress@amazon.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com> #rdma and core
Tested-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Tested-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
Leon Romanovsky 2021-07-23 14:39:50 +03:00 committed by Jason Gunthorpe
parent 44da3730e0
commit 514aee660d
53 changed files with 405 additions and 550 deletions

View file

@ -322,13 +322,14 @@ _ib_create_qp(struct ib_device *dev, struct ib_pd *pd,
struct ib_uqp_object *uobj, const char *caller) struct ib_uqp_object *uobj, const char *caller)
{ {
struct ib_qp *qp; struct ib_qp *qp;
int ret;
if (!dev->ops.create_qp) if (!dev->ops.create_qp)
return ERR_PTR(-EOPNOTSUPP); return ERR_PTR(-EOPNOTSUPP);
qp = dev->ops.create_qp(pd, attr, udata); qp = rdma_zalloc_drv_obj_numa(dev, ib_qp);
if (IS_ERR(qp)) if (!qp)
return qp; return ERR_PTR(-ENOMEM);
qp->device = dev; qp->device = dev;
qp->pd = pd; qp->pd = pd;
@ -337,14 +338,10 @@ _ib_create_qp(struct ib_device *dev, struct ib_pd *pd,
qp->qp_type = attr->qp_type; qp->qp_type = attr->qp_type;
qp->rwq_ind_tbl = attr->rwq_ind_tbl; qp->rwq_ind_tbl = attr->rwq_ind_tbl;
qp->send_cq = attr->send_cq;
qp->recv_cq = attr->recv_cq;
qp->srq = attr->srq; qp->srq = attr->srq;
qp->rwq_ind_tbl = attr->rwq_ind_tbl;
qp->event_handler = attr->event_handler; qp->event_handler = attr->event_handler;
qp->port = attr->port_num; qp->port = attr->port_num;
atomic_set(&qp->usecnt, 0);
spin_lock_init(&qp->mr_lock); spin_lock_init(&qp->mr_lock);
INIT_LIST_HEAD(&qp->rdma_mrs); INIT_LIST_HEAD(&qp->rdma_mrs);
INIT_LIST_HEAD(&qp->sig_mrs); INIT_LIST_HEAD(&qp->sig_mrs);
@ -352,8 +349,25 @@ _ib_create_qp(struct ib_device *dev, struct ib_pd *pd,
rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP); rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP);
WARN_ONCE(!udata && !caller, "Missing kernel QP owner"); WARN_ONCE(!udata && !caller, "Missing kernel QP owner");
rdma_restrack_set_name(&qp->res, udata ? NULL : caller); rdma_restrack_set_name(&qp->res, udata ? NULL : caller);
ret = dev->ops.create_qp(qp, attr, udata);
if (ret)
goto err_create;
/*
* TODO: The mlx4 internally overwrites send_cq and recv_cq.
* Unfortunately, it is not an easy task to fix that driver.
*/
qp->send_cq = attr->send_cq;
qp->recv_cq = attr->recv_cq;
rdma_restrack_add(&qp->res); rdma_restrack_add(&qp->res);
return qp; return qp;
err_create:
rdma_restrack_put(&qp->res);
kfree(qp);
return ERR_PTR(ret);
} }
struct rdma_dev_addr; struct rdma_dev_addr;

View file

@ -2654,6 +2654,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, get_hw_stats); SET_DEVICE_OP(dev_ops, get_hw_stats);
SET_DEVICE_OP(dev_ops, get_link_layer); SET_DEVICE_OP(dev_ops, get_link_layer);
SET_DEVICE_OP(dev_ops, get_netdev); SET_DEVICE_OP(dev_ops, get_netdev);
SET_DEVICE_OP(dev_ops, get_numa_node);
SET_DEVICE_OP(dev_ops, get_port_immutable); SET_DEVICE_OP(dev_ops, get_port_immutable);
SET_DEVICE_OP(dev_ops, get_vector_affinity); SET_DEVICE_OP(dev_ops, get_vector_affinity);
SET_DEVICE_OP(dev_ops, get_vf_config); SET_DEVICE_OP(dev_ops, get_vf_config);
@ -2710,6 +2711,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_OBJ_SIZE(dev_ops, ib_cq); SET_OBJ_SIZE(dev_ops, ib_cq);
SET_OBJ_SIZE(dev_ops, ib_mw); SET_OBJ_SIZE(dev_ops, ib_mw);
SET_OBJ_SIZE(dev_ops, ib_pd); SET_OBJ_SIZE(dev_ops, ib_pd);
SET_OBJ_SIZE(dev_ops, ib_qp);
SET_OBJ_SIZE(dev_ops, ib_rwq_ind_table); SET_OBJ_SIZE(dev_ops, ib_rwq_ind_table);
SET_OBJ_SIZE(dev_ops, ib_srq); SET_OBJ_SIZE(dev_ops, ib_srq);
SET_OBJ_SIZE(dev_ops, ib_ucontext); SET_OBJ_SIZE(dev_ops, ib_ucontext);

View file

@ -343,7 +343,7 @@ void rdma_restrack_del(struct rdma_restrack_entry *res)
rt = &dev->res[res->type]; rt = &dev->res[res->type];
old = xa_erase(&rt->xa, res->id); old = xa_erase(&rt->xa, res->id);
if (res->type == RDMA_RESTRACK_MR || res->type == RDMA_RESTRACK_QP) if (res->type == RDMA_RESTRACK_MR)
return; return;
WARN_ON(old != res); WARN_ON(old != res);

View file

@ -1963,9 +1963,13 @@ int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
rdma_rw_cleanup_mrs(qp); rdma_rw_cleanup_mrs(qp);
rdma_counter_unbind_qp(qp, true); rdma_counter_unbind_qp(qp, true);
rdma_restrack_del(&qp->res);
ret = qp->device->ops.destroy_qp(qp, udata); ret = qp->device->ops.destroy_qp(qp, udata);
if (!ret) { if (ret) {
if (sec)
ib_destroy_qp_security_abort(sec);
return ret;
}
if (alt_path_sgid_attr) if (alt_path_sgid_attr)
rdma_put_gid_attr(alt_path_sgid_attr); rdma_put_gid_attr(alt_path_sgid_attr);
if (av_sgid_attr) if (av_sgid_attr)
@ -1982,11 +1986,9 @@ int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
atomic_dec(&ind_tbl->usecnt); atomic_dec(&ind_tbl->usecnt);
if (sec) if (sec)
ib_destroy_qp_security_end(sec); ib_destroy_qp_security_end(sec);
} else {
if (sec)
ib_destroy_qp_security_abort(sec);
}
rdma_restrack_del(&qp->res);
kfree(qp);
return ret; return ret;
} }
EXPORT_SYMBOL(ib_destroy_qp_user); EXPORT_SYMBOL(ib_destroy_qp_user);

View file

@ -815,7 +815,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) { if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
rc = bnxt_re_destroy_gsi_sqp(qp); rc = bnxt_re_destroy_gsi_sqp(qp);
if (rc) if (rc)
goto sh_fail; return rc;
} }
mutex_lock(&rdev->qp_lock); mutex_lock(&rdev->qp_lock);
@ -826,10 +826,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
ib_umem_release(qp->rumem); ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem); ib_umem_release(qp->sumem);
kfree(qp);
return 0; return 0;
sh_fail:
return rc;
} }
static u8 __from_ib_qp_type(enum ib_qp_type type) static u8 __from_ib_qp_type(enum ib_qp_type type)
@ -1402,27 +1399,22 @@ static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
return rc; return rc;
} }
struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct ib_pd *ib_pd = ib_qp->pd;
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev; struct bnxt_re_dev *rdev = pd->rdev;
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
struct bnxt_re_qp *qp; struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
int rc; int rc;
rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr); rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
if (!rc) { if (!rc) {
rc = -EINVAL; rc = -EINVAL;
goto exit; goto fail;
} }
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp) {
rc = -ENOMEM;
goto exit;
}
qp->rdev = rdev; qp->rdev = rdev;
rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata); rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
if (rc) if (rc)
@ -1465,16 +1457,14 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
mutex_unlock(&rdev->qp_lock); mutex_unlock(&rdev->qp_lock);
atomic_inc(&rdev->qp_count); atomic_inc(&rdev->qp_count);
return &qp->ib_qp; return 0;
qp_destroy: qp_destroy:
bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
free_umem: free_umem:
ib_umem_release(qp->rumem); ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem); ib_umem_release(qp->sumem);
fail: fail:
kfree(qp); return rc;
exit:
return ERR_PTR(rc);
} }
static u8 __from_ib_qp_state(enum ib_qp_state state) static u8 __from_ib_qp_state(enum ib_qp_state state)

View file

@ -78,9 +78,9 @@ struct bnxt_re_srq {
}; };
struct bnxt_re_qp { struct bnxt_re_qp {
struct ib_qp ib_qp;
struct list_head list; struct list_head list;
struct bnxt_re_dev *rdev; struct bnxt_re_dev *rdev;
struct ib_qp ib_qp;
spinlock_t sq_lock; /* protect sq */ spinlock_t sq_lock; /* protect sq */
spinlock_t rq_lock; /* protect rq */ spinlock_t rq_lock; /* protect rq */
struct bnxt_qplib_qp qplib_qp; struct bnxt_qplib_qp qplib_qp;
@ -179,8 +179,7 @@ int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata); int bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int bnxt_re_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *recv_wr, int bnxt_re_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *recv_wr,
const struct ib_recv_wr **bad_recv_wr); const struct ib_recv_wr **bad_recv_wr);
struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd, int bnxt_re_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata); struct ib_udata *udata);
int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_udata *udata); int qp_attr_mask, struct ib_udata *udata);

View file

@ -709,6 +709,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah), INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq), INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd), INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
INIT_RDMA_OBJ_SIZE(ib_qp, bnxt_re_qp, ib_qp),
INIT_RDMA_OBJ_SIZE(ib_srq, bnxt_re_srq, ib_srq), INIT_RDMA_OBJ_SIZE(ib_srq, bnxt_re_srq, ib_srq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx), INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
}; };

View file

@ -989,8 +989,7 @@ int c4iw_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata);
int c4iw_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *attrs, int c4iw_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *attrs,
struct ib_udata *udata); struct ib_udata *udata);
int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata); int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata);
struct ib_qp *c4iw_create_qp(struct ib_pd *pd, int c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
struct ib_qp_init_attr *attrs,
struct ib_udata *udata); struct ib_udata *udata);
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata); int attr_mask, struct ib_udata *udata);

View file

@ -499,6 +499,7 @@ static const struct ib_device_ops c4iw_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_cq, c4iw_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_cq, c4iw_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_mw, c4iw_mw, ibmw), INIT_RDMA_OBJ_SIZE(ib_mw, c4iw_mw, ibmw),
INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_qp, c4iw_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_srq, c4iw_srq, ibsrq), INIT_RDMA_OBJ_SIZE(ib_srq, c4iw_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, c4iw_ucontext, ibucontext), INIT_RDMA_OBJ_SIZE(ib_ucontext, c4iw_ucontext, ibucontext),
}; };

View file

@ -2103,16 +2103,15 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq); ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
c4iw_put_wr_wait(qhp->wr_waitp); c4iw_put_wr_wait(qhp->wr_waitp);
kfree(qhp);
return 0; return 0;
} }
struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, int c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct ib_pd *pd = qp->pd;
struct c4iw_dev *rhp; struct c4iw_dev *rhp;
struct c4iw_qp *qhp; struct c4iw_qp *qhp = to_c4iw_qp(qp);
struct c4iw_pd *php; struct c4iw_pd *php;
struct c4iw_cq *schp; struct c4iw_cq *schp;
struct c4iw_cq *rchp; struct c4iw_cq *rchp;
@ -2124,44 +2123,36 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm; struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL; struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
pr_debug("ib_pd %p\n", pd);
if (attrs->qp_type != IB_QPT_RC || attrs->create_flags) if (attrs->qp_type != IB_QPT_RC || attrs->create_flags)
return ERR_PTR(-EOPNOTSUPP); return -EOPNOTSUPP;
php = to_c4iw_pd(pd); php = to_c4iw_pd(pd);
rhp = php->rhp; rhp = php->rhp;
schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid); schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid); rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
if (!schp || !rchp) if (!schp || !rchp)
return ERR_PTR(-EINVAL); return -EINVAL;
if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE) if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
return ERR_PTR(-EINVAL); return -EINVAL;
if (!attrs->srq) { if (!attrs->srq) {
if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size) if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
return ERR_PTR(-E2BIG); return -E2BIG;
rqsize = attrs->cap.max_recv_wr + 1; rqsize = attrs->cap.max_recv_wr + 1;
if (rqsize < 8) if (rqsize < 8)
rqsize = 8; rqsize = 8;
} }
if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size) if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
return ERR_PTR(-E2BIG); return -E2BIG;
sqsize = attrs->cap.max_send_wr + 1; sqsize = attrs->cap.max_send_wr + 1;
if (sqsize < 8) if (sqsize < 8)
sqsize = 8; sqsize = 8;
qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
if (!qhp)
return ERR_PTR(-ENOMEM);
qhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL); qhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
if (!qhp->wr_waitp) { if (!qhp->wr_waitp)
ret = -ENOMEM; return -ENOMEM;
goto err_free_qhp;
}
qhp->wq.sq.size = sqsize; qhp->wq.sq.size = sqsize;
qhp->wq.sq.memsize = qhp->wq.sq.memsize =
@ -2339,7 +2330,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize, qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size, attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
qhp->wq.rq.memsize, attrs->cap.max_recv_wr); qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
return &qhp->ibqp; return 0;
err_free_ma_sync_key: err_free_ma_sync_key:
kfree(ma_sync_key_mm); kfree(ma_sync_key_mm);
err_free_rq_db_key: err_free_rq_db_key:
@ -2359,9 +2350,7 @@ err_destroy_qp:
ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq); ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq);
err_free_wr_wait: err_free_wr_wait:
c4iw_put_wr_wait(qhp->wr_waitp); c4iw_put_wr_wait(qhp->wr_waitp);
err_free_qhp: return ret;
kfree(qhp);
return ERR_PTR(ret);
} }
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,

View file

@ -132,8 +132,7 @@ int efa_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata); int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
struct ib_qp *efa_create_qp(struct ib_pd *ibpd, int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata); struct ib_udata *udata);
int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,

View file

@ -271,6 +271,7 @@ static const struct ib_device_ops efa_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, efa_ah, ibah), INIT_RDMA_OBJ_SIZE(ib_ah, efa_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, efa_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_cq, efa_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, efa_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_pd, efa_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_qp, efa_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_ucontext, efa_ucontext, ibucontext), INIT_RDMA_OBJ_SIZE(ib_ucontext, efa_ucontext, ibucontext),
}; };

View file

@ -450,7 +450,6 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
qp->rq_size, DMA_TO_DEVICE); qp->rq_size, DMA_TO_DEVICE);
} }
kfree(qp);
return 0; return 0;
} }
@ -609,17 +608,16 @@ static int efa_qp_validate_attr(struct efa_dev *dev,
return 0; return 0;
} }
struct ib_qp *efa_create_qp(struct ib_pd *ibpd, int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct efa_com_create_qp_params create_qp_params = {}; struct efa_com_create_qp_params create_qp_params = {};
struct efa_com_create_qp_result create_qp_resp; struct efa_com_create_qp_result create_qp_resp;
struct efa_dev *dev = to_edev(ibpd->device); struct efa_dev *dev = to_edev(ibqp->device);
struct efa_ibv_create_qp_resp resp = {}; struct efa_ibv_create_qp_resp resp = {};
struct efa_ibv_create_qp cmd = {}; struct efa_ibv_create_qp cmd = {};
struct efa_qp *qp = to_eqp(ibqp);
struct efa_ucontext *ucontext; struct efa_ucontext *ucontext;
struct efa_qp *qp;
int err; int err;
ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext, ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
@ -664,14 +662,8 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
goto err_out; goto err_out;
} }
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp) {
err = -ENOMEM;
goto err_out;
}
create_qp_params.uarn = ucontext->uarn; create_qp_params.uarn = ucontext->uarn;
create_qp_params.pd = to_epd(ibpd)->pdn; create_qp_params.pd = to_epd(ibqp->pd)->pdn;
if (init_attr->qp_type == IB_QPT_UD) { if (init_attr->qp_type == IB_QPT_UD) {
create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD; create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD;
@ -682,7 +674,7 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
"Unsupported qp type %d driver qp type %d\n", "Unsupported qp type %d driver qp type %d\n",
init_attr->qp_type, cmd.driver_qp_type); init_attr->qp_type, cmd.driver_qp_type);
err = -EOPNOTSUPP; err = -EOPNOTSUPP;
goto err_free_qp; goto err_out;
} }
ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n", ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n",
@ -700,7 +692,7 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
qp->rq_size, DMA_TO_DEVICE); qp->rq_size, DMA_TO_DEVICE);
if (!qp->rq_cpu_addr) { if (!qp->rq_cpu_addr) {
err = -ENOMEM; err = -ENOMEM;
goto err_free_qp; goto err_out;
} }
ibdev_dbg(&dev->ibdev, ibdev_dbg(&dev->ibdev,
@ -746,7 +738,7 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num); ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num);
return &qp->ibqp; return 0;
err_remove_mmap_entries: err_remove_mmap_entries:
efa_qp_user_mmap_entries_remove(qp); efa_qp_user_mmap_entries_remove(qp);
@ -756,11 +748,9 @@ err_free_mapped:
if (qp->rq_size) if (qp->rq_size)
efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr, efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
qp->rq_size, DMA_TO_DEVICE); qp->rq_size, DMA_TO_DEVICE);
err_free_qp:
kfree(qp);
err_out: err_out:
atomic64_inc(&dev->stats.create_qp_err); atomic64_inc(&dev->stats.create_qp_err);
return ERR_PTR(err); return err;
} }
static const struct { static const struct {

View file

@ -1216,8 +1216,7 @@ int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata); int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata); int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd, int hns_roce_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *init_attr,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata); struct ib_udata *udata);
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata); int attr_mask, struct ib_udata *udata);

View file

@ -454,6 +454,7 @@ static const struct ib_device_ops hns_roce_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah), INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq), INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_qp, hns_roce_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext), INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
}; };

View file

@ -959,8 +959,6 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
struct ib_device *ibdev = &hr_dev->ib_dev; struct ib_device *ibdev = &hr_dev->ib_dev;
int ret; int ret;
hr_qp->ibqp.qp_type = init_attr->qp_type;
if (init_attr->cap.max_inline_data > hr_dev->caps.max_sq_inline) if (init_attr->cap.max_inline_data > hr_dev->caps.max_sq_inline)
init_attr->cap.max_inline_data = hr_dev->caps.max_sq_inline; init_attr->cap.max_inline_data = hr_dev->caps.max_sq_inline;
@ -1121,8 +1119,6 @@ void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
free_qp_buf(hr_dev, hr_qp); free_qp_buf(hr_dev, hr_qp);
free_kernel_wrid(hr_qp); free_kernel_wrid(hr_qp);
free_qp_db(hr_dev, hr_qp, udata); free_qp_db(hr_dev, hr_qp, udata);
kfree(hr_qp);
} }
static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type, static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
@ -1154,22 +1150,18 @@ out:
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct ib_device *ibdev = pd ? pd->device : init_attr->xrcd->device; struct ib_device *ibdev = qp->device;
struct hns_roce_dev *hr_dev = to_hr_dev(ibdev); struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
struct hns_roce_qp *hr_qp; struct hns_roce_qp *hr_qp = to_hr_qp(qp);
struct ib_pd *pd = qp->pd;
int ret; int ret;
ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata); ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata);
if (ret) if (ret)
return ERR_PTR(ret); return ret;
hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
if (!hr_qp)
return ERR_PTR(-ENOMEM);
if (init_attr->qp_type == IB_QPT_XRC_TGT) if (init_attr->qp_type == IB_QPT_XRC_TGT)
hr_qp->xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn; hr_qp->xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn;
@ -1180,15 +1172,11 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
} }
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp); ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp);
if (ret) { if (ret)
ibdev_err(ibdev, "Create QP type 0x%x failed(%d)\n", ibdev_err(ibdev, "Create QP type 0x%x failed(%d)\n",
init_attr->qp_type, ret); init_attr->qp_type, ret);
kfree(hr_qp); return ret;
return ERR_PTR(ret);
}
return &hr_qp->ibqp;
} }
int to_hr_qp_type(int qp_type) int to_hr_qp_type(int qp_type)

View file

@ -1141,10 +1141,7 @@ void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
iwqp->kqp.dma_mem.va, iwqp->kqp.dma_mem.pa); iwqp->kqp.dma_mem.va, iwqp->kqp.dma_mem.pa);
iwqp->kqp.dma_mem.va = NULL; iwqp->kqp.dma_mem.va = NULL;
kfree(iwqp->kqp.sq_wrid_mem); kfree(iwqp->kqp.sq_wrid_mem);
iwqp->kqp.sq_wrid_mem = NULL;
kfree(iwqp->kqp.rq_wrid_mem); kfree(iwqp->kqp.rq_wrid_mem);
iwqp->kqp.rq_wrid_mem = NULL;
kfree(iwqp);
} }
/** /**

View file

@ -792,18 +792,19 @@ static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
/** /**
* irdma_create_qp - create qp * irdma_create_qp - create qp
* @ibpd: ptr of pd * @ibqp: ptr of qp
* @init_attr: attributes for qp * @init_attr: attributes for qp
* @udata: user data for create qp * @udata: user data for create qp
*/ */
static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd, static int irdma_create_qp(struct ib_qp *ibqp,
struct ib_qp_init_attr *init_attr, struct ib_qp_init_attr *init_attr,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct ib_pd *ibpd = ibqp->pd;
struct irdma_pd *iwpd = to_iwpd(ibpd); struct irdma_pd *iwpd = to_iwpd(ibpd);
struct irdma_device *iwdev = to_iwdev(ibpd->device); struct irdma_device *iwdev = to_iwdev(ibpd->device);
struct irdma_pci_f *rf = iwdev->rf; struct irdma_pci_f *rf = iwdev->rf;
struct irdma_qp *iwqp; struct irdma_qp *iwqp = to_iwqp(ibqp);
struct irdma_create_qp_req req; struct irdma_create_qp_req req;
struct irdma_create_qp_resp uresp = {}; struct irdma_create_qp_resp uresp = {};
u32 qp_num = 0; u32 qp_num = 0;
@ -820,7 +821,7 @@ static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
err_code = irdma_validate_qp_attrs(init_attr, iwdev); err_code = irdma_validate_qp_attrs(init_attr, iwdev);
if (err_code) if (err_code)
return ERR_PTR(err_code); return err_code;
sq_size = init_attr->cap.max_send_wr; sq_size = init_attr->cap.max_send_wr;
rq_size = init_attr->cap.max_recv_wr; rq_size = init_attr->cap.max_recv_wr;
@ -833,10 +834,6 @@ static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge; init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data; init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
iwqp = kzalloc(sizeof(*iwqp), GFP_KERNEL);
if (!iwqp)
return ERR_PTR(-ENOMEM);
qp = &iwqp->sc_qp; qp = &iwqp->sc_qp;
qp->qp_uk.back_qp = iwqp; qp->qp_uk.back_qp = iwqp;
qp->qp_uk.lock = &iwqp->lock; qp->qp_uk.lock = &iwqp->lock;
@ -849,10 +846,8 @@ static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
iwqp->q2_ctx_mem.size, iwqp->q2_ctx_mem.size,
&iwqp->q2_ctx_mem.pa, &iwqp->q2_ctx_mem.pa,
GFP_KERNEL); GFP_KERNEL);
if (!iwqp->q2_ctx_mem.va) { if (!iwqp->q2_ctx_mem.va)
err_code = -ENOMEM; return -ENOMEM;
goto error;
}
init_info.q2 = iwqp->q2_ctx_mem.va; init_info.q2 = iwqp->q2_ctx_mem.va;
init_info.q2_pa = iwqp->q2_ctx_mem.pa; init_info.q2_pa = iwqp->q2_ctx_mem.pa;
@ -1001,17 +996,16 @@ static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
if (err_code) { if (err_code) {
ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n"); ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n");
irdma_destroy_qp(&iwqp->ibqp, udata); irdma_destroy_qp(&iwqp->ibqp, udata);
return ERR_PTR(err_code); return err_code;
} }
} }
init_completion(&iwqp->free_qp); init_completion(&iwqp->free_qp);
return &iwqp->ibqp; return 0;
error: error:
irdma_free_qp_rsrc(iwqp); irdma_free_qp_rsrc(iwqp);
return err_code;
return ERR_PTR(err_code);
} }
static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp) static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
@ -4406,6 +4400,7 @@ static const struct ib_device_ops irdma_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah), INIT_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_mw, irdma_mr, ibmw), INIT_RDMA_OBJ_SIZE(ib_mw, irdma_mr, ibmw),
INIT_RDMA_OBJ_SIZE(ib_qp, irdma_qp, ibqp),
}; };
/** /**

View file

@ -2577,6 +2577,7 @@ static const struct ib_device_ops mlx4_ib_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah), INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_qp, mlx4_ib_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq), INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext), INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
}; };

View file

@ -792,8 +792,7 @@ void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr); const struct ib_recv_wr **bad_wr);
struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, int mlx4_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata); struct ib_udata *udata);
int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata); int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
void mlx4_ib_drain_sq(struct ib_qp *qp); void mlx4_ib_drain_sq(struct ib_qp *qp);

View file

@ -1578,24 +1578,19 @@ static int _mlx4_ib_create_qp(struct ib_pd *pd, struct mlx4_ib_qp *qp,
return 0; return 0;
} }
struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, int mlx4_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
struct ib_qp_init_attr *init_attr, struct ib_udata *udata)
struct ib_udata *udata) { {
struct ib_device *device = pd ? pd->device : init_attr->xrcd->device; struct ib_device *device = ibqp->device;
struct mlx4_ib_dev *dev = to_mdev(device); struct mlx4_ib_dev *dev = to_mdev(device);
struct mlx4_ib_qp *qp; struct mlx4_ib_qp *qp = to_mqp(ibqp);
struct ib_pd *pd = ibqp->pd;
int ret; int ret;
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
mutex_init(&qp->mutex); mutex_init(&qp->mutex);
ret = _mlx4_ib_create_qp(pd, qp, init_attr, udata); ret = _mlx4_ib_create_qp(pd, qp, init_attr, udata);
if (ret) { if (ret)
kfree(qp); return ret;
return ERR_PTR(ret);
}
if (init_attr->qp_type == IB_QPT_GSI && if (init_attr->qp_type == IB_QPT_GSI &&
!(init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI)) { !(init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI)) {
@ -1618,7 +1613,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
init_attr->create_flags &= ~MLX4_IB_QP_CREATE_ROCE_V2_GSI; init_attr->create_flags &= ~MLX4_IB_QP_CREATE_ROCE_V2_GSI;
} }
} }
return &qp->ibqp; return 0;
} }
static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
@ -1646,8 +1641,6 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
} }
kfree(mqp->sqp); kfree(mqp->sqp);
kfree(mqp);
return 0; return 0;
} }

View file

@ -193,8 +193,6 @@ int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp)
kfree(gsi->outstanding_wrs); kfree(gsi->outstanding_wrs);
kfree(gsi->tx_qps); kfree(gsi->tx_qps);
kfree(mqp);
return 0; return 0;
} }

View file

@ -3805,6 +3805,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs), INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs),
INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_qp, mlx5_ib_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq), INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext), INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
}; };

View file

@ -1219,8 +1219,7 @@ int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr); const struct ib_recv_wr **bad_wr);
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp); int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp); void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, int mlx5_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata); struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata); int attr_mask, struct ib_udata *udata);

View file

@ -3114,7 +3114,6 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
} }
kfree(mqp->dct.in); kfree(mqp->dct.in);
kfree(mqp);
return 0; return 0;
} }
@ -3152,25 +3151,23 @@ static int check_ucmd_data(struct mlx5_ib_dev *dev,
return ret ? 0 : -EINVAL; return ret ? 0 : -EINVAL;
} }
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr, int mlx5_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct mlx5_create_qp_params params = {}; struct mlx5_create_qp_params params = {};
struct mlx5_ib_dev *dev; struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
struct mlx5_ib_qp *qp; struct mlx5_ib_qp *qp = to_mqp(ibqp);
struct ib_pd *pd = ibqp->pd;
enum ib_qp_type type; enum ib_qp_type type;
int err; int err;
dev = pd ? to_mdev(pd->device) :
to_mdev(to_mxrcd(attr->xrcd)->ibxrcd.device);
err = check_qp_type(dev, attr, &type); err = check_qp_type(dev, attr, &type);
if (err) if (err)
return ERR_PTR(err); return err;
err = check_valid_flow(dev, pd, attr, udata); err = check_valid_flow(dev, pd, attr, udata);
if (err) if (err)
return ERR_PTR(err); return err;
params.udata = udata; params.udata = udata;
params.uidx = MLX5_IB_DEFAULT_UIDX; params.uidx = MLX5_IB_DEFAULT_UIDX;
@ -3180,49 +3177,43 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
if (udata) { if (udata) {
err = process_udata_size(dev, &params); err = process_udata_size(dev, &params);
if (err) if (err)
return ERR_PTR(err); return err;
err = check_ucmd_data(dev, &params); err = check_ucmd_data(dev, &params);
if (err) if (err)
return ERR_PTR(err); return err;
params.ucmd = kzalloc(params.ucmd_size, GFP_KERNEL); params.ucmd = kzalloc(params.ucmd_size, GFP_KERNEL);
if (!params.ucmd) if (!params.ucmd)
return ERR_PTR(-ENOMEM); return -ENOMEM;
err = ib_copy_from_udata(params.ucmd, udata, params.inlen); err = ib_copy_from_udata(params.ucmd, udata, params.inlen);
if (err) if (err)
goto free_ucmd; goto free_ucmd;
} }
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp) {
err = -ENOMEM;
goto free_ucmd;
}
mutex_init(&qp->mutex); mutex_init(&qp->mutex);
qp->type = type; qp->type = type;
if (udata) { if (udata) {
err = process_vendor_flags(dev, qp, params.ucmd, attr); err = process_vendor_flags(dev, qp, params.ucmd, attr);
if (err) if (err)
goto free_qp; goto free_ucmd;
err = get_qp_uidx(qp, &params); err = get_qp_uidx(qp, &params);
if (err) if (err)
goto free_qp; goto free_ucmd;
} }
err = process_create_flags(dev, qp, attr); err = process_create_flags(dev, qp, attr);
if (err) if (err)
goto free_qp; goto free_ucmd;
err = check_qp_attr(dev, qp, attr); err = check_qp_attr(dev, qp, attr);
if (err) if (err)
goto free_qp; goto free_ucmd;
err = create_qp(dev, pd, qp, &params); err = create_qp(dev, pd, qp, &params);
if (err) if (err)
goto free_qp; goto free_ucmd;
kfree(params.ucmd); kfree(params.ucmd);
params.ucmd = NULL; params.ucmd = NULL;
@ -3237,7 +3228,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
if (err) if (err)
goto destroy_qp; goto destroy_qp;
return &qp->ibqp; return 0;
destroy_qp: destroy_qp:
switch (qp->type) { switch (qp->type) {
@ -3248,22 +3239,12 @@ destroy_qp:
mlx5_ib_destroy_gsi(qp); mlx5_ib_destroy_gsi(qp);
break; break;
default: default:
/*
* These lines below are temp solution till QP allocation
* will be moved to be under IB/core responsiblity.
*/
qp->ibqp.send_cq = attr->send_cq;
qp->ibqp.recv_cq = attr->recv_cq;
qp->ibqp.pd = pd;
destroy_qp_common(dev, qp, udata); destroy_qp_common(dev, qp, udata);
} }
qp = NULL;
free_qp:
kfree(qp);
free_ucmd: free_ucmd:
kfree(params.ucmd); kfree(params.ucmd);
return ERR_PTR(err); return err;
} }
int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
@ -3278,9 +3259,6 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
return mlx5_ib_destroy_dct(mqp); return mlx5_ib_destroy_dct(mqp);
destroy_qp_common(dev, mqp, udata); destroy_qp_common(dev, mqp, udata);
kfree(mqp);
return 0; return 0;
} }

View file

@ -459,52 +459,45 @@ static int mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
return 0; return 0;
} }
static struct ib_qp *mthca_create_qp(struct ib_pd *pd, static int mthca_create_qp(struct ib_qp *ibqp,
struct ib_qp_init_attr *init_attr, struct ib_qp_init_attr *init_attr,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct mthca_ucontext *context = rdma_udata_to_drv_context( struct mthca_ucontext *context = rdma_udata_to_drv_context(
udata, struct mthca_ucontext, ibucontext); udata, struct mthca_ucontext, ibucontext);
struct mthca_create_qp ucmd; struct mthca_create_qp ucmd;
struct mthca_qp *qp; struct mthca_qp *qp = to_mqp(ibqp);
struct mthca_dev *dev = to_mdev(ibqp->device);
int err; int err;
if (init_attr->create_flags) if (init_attr->create_flags)
return ERR_PTR(-EOPNOTSUPP); return -EOPNOTSUPP;
switch (init_attr->qp_type) { switch (init_attr->qp_type) {
case IB_QPT_RC: case IB_QPT_RC:
case IB_QPT_UC: case IB_QPT_UC:
case IB_QPT_UD: case IB_QPT_UD:
{ {
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
if (udata) { if (udata) {
if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
kfree(qp); return -EFAULT;
return ERR_PTR(-EFAULT);
}
err = mthca_map_user_db(to_mdev(pd->device), &context->uar, err = mthca_map_user_db(dev, &context->uar,
context->db_tab, context->db_tab,
ucmd.sq_db_index, ucmd.sq_db_page); ucmd.sq_db_index,
if (err) { ucmd.sq_db_page);
kfree(qp); if (err)
return ERR_PTR(err); return err;
}
err = mthca_map_user_db(to_mdev(pd->device), &context->uar, err = mthca_map_user_db(dev, &context->uar,
context->db_tab, context->db_tab,
ucmd.rq_db_index, ucmd.rq_db_page); ucmd.rq_db_index,
ucmd.rq_db_page);
if (err) { if (err) {
mthca_unmap_user_db(to_mdev(pd->device), mthca_unmap_user_db(dev, &context->uar,
&context->uar,
context->db_tab, context->db_tab,
ucmd.sq_db_index); ucmd.sq_db_index);
kfree(qp); return err;
return ERR_PTR(err);
} }
qp->mr.ibmr.lkey = ucmd.lkey; qp->mr.ibmr.lkey = ucmd.lkey;
@ -512,20 +505,16 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
qp->rq.db_index = ucmd.rq_db_index; qp->rq.db_index = ucmd.rq_db_index;
} }
err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd), err = mthca_alloc_qp(dev, to_mpd(ibqp->pd),
to_mcq(init_attr->send_cq), to_mcq(init_attr->send_cq),
to_mcq(init_attr->recv_cq), to_mcq(init_attr->recv_cq),
init_attr->qp_type, init_attr->sq_sig_type, init_attr->qp_type, init_attr->sq_sig_type,
&init_attr->cap, qp, udata); &init_attr->cap, qp, udata);
if (err && udata) { if (err && udata) {
mthca_unmap_user_db(to_mdev(pd->device), mthca_unmap_user_db(dev, &context->uar, context->db_tab,
&context->uar,
context->db_tab,
ucmd.sq_db_index); ucmd.sq_db_index);
mthca_unmap_user_db(to_mdev(pd->device), mthca_unmap_user_db(dev, &context->uar, context->db_tab,
&context->uar,
context->db_tab,
ucmd.rq_db_index); ucmd.rq_db_index);
} }
@ -535,34 +524,28 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
case IB_QPT_SMI: case IB_QPT_SMI:
case IB_QPT_GSI: case IB_QPT_GSI:
{ {
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
qp->sqp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL); qp->sqp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
if (!qp->sqp) { if (!qp->sqp)
kfree(qp); return -ENOMEM;
return ERR_PTR(-ENOMEM);
}
qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1; qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;
err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd), err = mthca_alloc_sqp(dev, to_mpd(ibqp->pd),
to_mcq(init_attr->send_cq), to_mcq(init_attr->send_cq),
to_mcq(init_attr->recv_cq), to_mcq(init_attr->recv_cq),
init_attr->sq_sig_type, &init_attr->cap, init_attr->sq_sig_type, &init_attr->cap,
qp->ibqp.qp_num, init_attr->port_num, qp->ibqp.qp_num, init_attr->port_num, qp,
qp, udata); udata);
break; break;
} }
default: default:
/* Don't support raw QPs */ /* Don't support raw QPs */
return ERR_PTR(-EOPNOTSUPP); return -EOPNOTSUPP;
} }
if (err) { if (err) {
kfree(qp->sqp); kfree(qp->sqp);
kfree(qp); return err;
return ERR_PTR(err);
} }
init_attr->cap.max_send_wr = qp->sq.max; init_attr->cap.max_send_wr = qp->sq.max;
@ -571,7 +554,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
init_attr->cap.max_recv_sge = qp->rq.max_gs; init_attr->cap.max_recv_sge = qp->rq.max_gs;
init_attr->cap.max_inline_data = qp->max_inline_data; init_attr->cap.max_inline_data = qp->max_inline_data;
return &qp->ibqp; return 0;
} }
static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
@ -594,7 +577,6 @@ static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
} }
mthca_free_qp(to_mdev(qp->device), to_mqp(qp)); mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
kfree(to_mqp(qp)->sqp); kfree(to_mqp(qp)->sqp);
kfree(to_mqp(qp));
return 0; return 0;
} }
@ -1121,6 +1103,7 @@ static const struct ib_device_ops mthca_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, mthca_ah, ibah), INIT_RDMA_OBJ_SIZE(ib_ah, mthca_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, mthca_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_cq, mthca_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, mthca_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_pd, mthca_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_qp, mthca_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_ucontext, mthca_ucontext, ibucontext), INIT_RDMA_OBJ_SIZE(ib_ucontext, mthca_ucontext, ibucontext),
}; };

View file

@ -185,6 +185,7 @@ static const struct ib_device_ops ocrdma_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, ocrdma_ah, ibah), INIT_RDMA_OBJ_SIZE(ib_ah, ocrdma_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, ocrdma_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_cq, ocrdma_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, ocrdma_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_pd, ocrdma_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_qp, ocrdma_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_ucontext, ocrdma_ucontext, ibucontext), INIT_RDMA_OBJ_SIZE(ib_ucontext, ocrdma_ucontext, ibucontext),
}; };

View file

@ -1288,19 +1288,19 @@ static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
} }
} }
struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd, int ocrdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
struct ib_qp_init_attr *attrs,
struct ib_udata *udata) struct ib_udata *udata)
{ {
int status; int status;
struct ib_pd *ibpd = ibqp->pd;
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
struct ocrdma_qp *qp; struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
struct ocrdma_create_qp_ureq ureq; struct ocrdma_create_qp_ureq ureq;
u16 dpp_credit_lmt, dpp_offset; u16 dpp_credit_lmt, dpp_offset;
if (attrs->create_flags) if (attrs->create_flags)
return ERR_PTR(-EOPNOTSUPP); return -EOPNOTSUPP;
status = ocrdma_check_qp_params(ibpd, dev, attrs, udata); status = ocrdma_check_qp_params(ibpd, dev, attrs, udata);
if (status) if (status)
@ -1309,12 +1309,7 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
memset(&ureq, 0, sizeof(ureq)); memset(&ureq, 0, sizeof(ureq));
if (udata) { if (udata) {
if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
return ERR_PTR(-EFAULT); return -EFAULT;
}
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp) {
status = -ENOMEM;
goto gen_err;
} }
ocrdma_set_qp_init_params(qp, pd, attrs); ocrdma_set_qp_init_params(qp, pd, attrs);
if (udata == NULL) if (udata == NULL)
@ -1349,7 +1344,7 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
ocrdma_store_gsi_qp_cq(dev, attrs); ocrdma_store_gsi_qp_cq(dev, attrs);
qp->ibqp.qp_num = qp->id; qp->ibqp.qp_num = qp->id;
mutex_unlock(&dev->dev_lock); mutex_unlock(&dev->dev_lock);
return &qp->ibqp; return 0;
cpy_err: cpy_err:
ocrdma_del_qpn_map(dev, qp); ocrdma_del_qpn_map(dev, qp);
@ -1359,10 +1354,9 @@ mbx_err:
mutex_unlock(&dev->dev_lock); mutex_unlock(&dev->dev_lock);
kfree(qp->wqe_wr_id_tbl); kfree(qp->wqe_wr_id_tbl);
kfree(qp->rqe_wr_id_tbl); kfree(qp->rqe_wr_id_tbl);
kfree(qp);
pr_err("%s(%d) error=%d\n", __func__, dev->id, status); pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err: gen_err:
return ERR_PTR(status); return status;
} }
int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
@ -1731,7 +1725,6 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
kfree(qp->wqe_wr_id_tbl); kfree(qp->wqe_wr_id_tbl);
kfree(qp->rqe_wr_id_tbl); kfree(qp->rqe_wr_id_tbl);
kfree(qp);
return 0; return 0;
} }

View file

@ -75,9 +75,8 @@ int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
struct ib_qp *ocrdma_create_qp(struct ib_pd *, int ocrdma_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
struct ib_qp_init_attr *attrs, struct ib_udata *udata);
struct ib_udata *);
int _ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr, int _ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
int attr_mask); int attr_mask);
int ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr, int ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,

View file

@ -233,6 +233,7 @@ static const struct ib_device_ops qedr_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah), INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_qp, qedr_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq), INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_xrcd, qedr_xrcd, ibxrcd), INIT_RDMA_OBJ_SIZE(ib_xrcd, qedr_xrcd, ibxrcd),
INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext), INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),

View file

@ -319,20 +319,19 @@ err1:
return rc; return rc;
} }
struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev, int qedr_create_gsi_qp(struct qedr_dev *dev, struct ib_qp_init_attr *attrs,
struct ib_qp_init_attr *attrs,
struct qedr_qp *qp) struct qedr_qp *qp)
{ {
int rc; int rc;
rc = qedr_check_gsi_qp_attrs(dev, attrs); rc = qedr_check_gsi_qp_attrs(dev, attrs);
if (rc) if (rc)
return ERR_PTR(rc); return rc;
rc = qedr_ll2_start(dev, attrs, qp); rc = qedr_ll2_start(dev, attrs, qp);
if (rc) { if (rc) {
DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc); DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
return ERR_PTR(rc); return rc;
} }
/* create QP */ /* create QP */
@ -359,7 +358,7 @@ struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp); DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);
return &qp->ibqp; return 0;
err: err:
kfree(qp->rqe_wr_id); kfree(qp->rqe_wr_id);
@ -368,7 +367,7 @@ err:
if (rc) if (rc)
DP_ERR(dev, "create gsi qp: failed destroy on create\n"); DP_ERR(dev, "create gsi qp: failed destroy on create\n");
return ERR_PTR(-ENOMEM); return -ENOMEM;
} }
int qedr_destroy_gsi_qp(struct qedr_dev *dev) int qedr_destroy_gsi_qp(struct qedr_dev *dev)

View file

@ -50,8 +50,7 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr); const struct ib_recv_wr **bad_wr);
int qedr_gsi_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, int qedr_gsi_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr); const struct ib_send_wr **bad_wr);
struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev, int qedr_create_gsi_qp(struct qedr_dev *dev, struct ib_qp_init_attr *attrs,
struct ib_qp_init_attr *attrs,
struct qedr_qp *qp); struct qedr_qp *qp);
void qedr_store_gsi_qp_cq(struct qedr_dev *dev, void qedr_store_gsi_qp_cq(struct qedr_dev *dev,
struct qedr_qp *qp, struct ib_qp_init_attr *attrs); struct qedr_qp *qp, struct ib_qp_init_attr *attrs);

View file

@ -2239,34 +2239,30 @@ static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
return 0; return 0;
} }
struct ib_qp *qedr_create_qp(struct ib_pd *ibpd, int qedr_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
struct ib_qp_init_attr *attrs,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct qedr_xrcd *xrcd = NULL; struct qedr_xrcd *xrcd = NULL;
struct qedr_pd *pd = NULL; struct ib_pd *ibpd = ibqp->pd;
struct qedr_dev *dev; struct qedr_pd *pd = get_qedr_pd(ibpd);
struct qedr_qp *qp; struct qedr_dev *dev = get_qedr_dev(ibqp->device);
struct ib_qp *ibqp; struct qedr_qp *qp = get_qedr_qp(ibqp);
int rc = 0; int rc = 0;
if (attrs->create_flags) if (attrs->create_flags)
return ERR_PTR(-EOPNOTSUPP); return -EOPNOTSUPP;
if (attrs->qp_type == IB_QPT_XRC_TGT) { if (attrs->qp_type == IB_QPT_XRC_TGT)
xrcd = get_qedr_xrcd(attrs->xrcd); xrcd = get_qedr_xrcd(attrs->xrcd);
dev = get_qedr_dev(xrcd->ibxrcd.device); else
} else {
pd = get_qedr_pd(ibpd); pd = get_qedr_pd(ibpd);
dev = get_qedr_dev(ibpd->device);
}
DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n", DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
udata ? "user library" : "kernel", pd); udata ? "user library" : "kernel", pd);
rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata); rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata);
if (rc) if (rc)
return ERR_PTR(rc); return rc;
DP_DEBUG(dev, QEDR_MSG_QP, DP_DEBUG(dev, QEDR_MSG_QP,
"create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n", "create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
@ -2276,20 +2272,10 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
get_qedr_cq(attrs->recv_cq), get_qedr_cq(attrs->recv_cq),
attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0); attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0);
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp) {
DP_ERR(dev, "create qp: failed allocating memory\n");
return ERR_PTR(-ENOMEM);
}
qedr_set_common_qp_params(dev, qp, pd, attrs); qedr_set_common_qp_params(dev, qp, pd, attrs);
if (attrs->qp_type == IB_QPT_GSI) { if (attrs->qp_type == IB_QPT_GSI)
ibqp = qedr_create_gsi_qp(dev, attrs, qp); return qedr_create_gsi_qp(dev, attrs, qp);
if (IS_ERR(ibqp))
kfree(qp);
return ibqp;
}
if (udata || xrcd) if (udata || xrcd)
rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs); rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
@ -2297,7 +2283,7 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs); rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
if (rc) if (rc)
goto out_free_qp; return rc;
qp->ibqp.qp_num = qp->qp_id; qp->ibqp.qp_num = qp->qp_id;
@ -2307,14 +2293,11 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
goto out_free_qp_resources; goto out_free_qp_resources;
} }
return &qp->ibqp; return 0;
out_free_qp_resources: out_free_qp_resources:
qedr_free_qp_resources(dev, qp, udata); qedr_free_qp_resources(dev, qp, udata);
out_free_qp: return -EFAULT;
kfree(qp);
return ERR_PTR(-EFAULT);
} }
static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state) static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
@ -2874,8 +2857,6 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
if (rdma_protocol_iwarp(&dev->ibdev, 1)) if (rdma_protocol_iwarp(&dev->ibdev, 1))
qedr_iw_qp_rem_ref(&qp->ibqp); qedr_iw_qp_rem_ref(&qp->ibqp);
else
kfree(qp);
return 0; return 0;
} }

View file

@ -56,8 +56,8 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs, int qedr_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
struct ib_udata *); struct ib_udata *udata);
int qedr_modify_qp(struct ib_qp *, struct ib_qp_attr *attr, int qedr_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata); int attr_mask, struct ib_udata *udata);
int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr, int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr,

View file

@ -360,6 +360,7 @@ static const struct ib_device_ops usnic_dev_ops = {
.reg_user_mr = usnic_ib_reg_mr, .reg_user_mr = usnic_ib_reg_mr,
INIT_RDMA_OBJ_SIZE(ib_pd, usnic_ib_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_pd, usnic_ib_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_cq, usnic_ib_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_cq, usnic_ib_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_qp, usnic_ib_qp_grp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_ucontext, usnic_ib_ucontext, ibucontext), INIT_RDMA_OBJ_SIZE(ib_ucontext, usnic_ib_ucontext, ibucontext),
}; };

View file

@ -665,13 +665,12 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
return 0; return 0;
} }
struct usnic_ib_qp_grp * int usnic_ib_qp_grp_create(struct usnic_ib_qp_grp *qp_grp,
usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf, struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
struct usnic_ib_pd *pd, struct usnic_ib_pd *pd,
struct usnic_vnic_res_spec *res_spec, struct usnic_vnic_res_spec *res_spec,
struct usnic_transport_spec *transport_spec) struct usnic_transport_spec *transport_spec)
{ {
struct usnic_ib_qp_grp *qp_grp;
int err; int err;
enum usnic_transport_type transport = transport_spec->trans_type; enum usnic_transport_type transport = transport_spec->trans_type;
struct usnic_ib_qp_grp_flow *qp_flow; struct usnic_ib_qp_grp_flow *qp_flow;
@ -684,20 +683,15 @@ usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
usnic_err("Spec does not meet minimum req for transport %d\n", usnic_err("Spec does not meet minimum req for transport %d\n",
transport); transport);
log_spec(res_spec); log_spec(res_spec);
return ERR_PTR(err); return err;
} }
qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
if (!qp_grp)
return NULL;
qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec, qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
qp_grp); qp_grp);
if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) { if (IS_ERR_OR_NULL(qp_grp->res_chunk_list))
err = qp_grp->res_chunk_list ? return qp_grp->res_chunk_list ?
PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM; PTR_ERR(qp_grp->res_chunk_list) :
goto out_free_qp_grp; -ENOMEM;
}
err = qp_grp_and_vf_bind(vf, pd, qp_grp); err = qp_grp_and_vf_bind(vf, pd, qp_grp);
if (err) if (err)
@ -724,7 +718,7 @@ usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
usnic_ib_sysfs_qpn_add(qp_grp); usnic_ib_sysfs_qpn_add(qp_grp);
return qp_grp; return 0;
out_release_flow: out_release_flow:
release_and_remove_flow(qp_flow); release_and_remove_flow(qp_flow);
@ -732,10 +726,7 @@ out_qp_grp_vf_unbind:
qp_grp_and_vf_unbind(qp_grp); qp_grp_and_vf_unbind(qp_grp);
out_free_res: out_free_res:
free_qp_grp_res(qp_grp->res_chunk_list); free_qp_grp_res(qp_grp->res_chunk_list);
out_free_qp_grp: return err;
kfree(qp_grp);
return ERR_PTR(err);
} }
void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp) void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
@ -748,7 +739,6 @@ void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
usnic_ib_sysfs_qpn_remove(qp_grp); usnic_ib_sysfs_qpn_remove(qp_grp);
qp_grp_and_vf_unbind(qp_grp); qp_grp_and_vf_unbind(qp_grp);
free_qp_grp_res(qp_grp->res_chunk_list); free_qp_grp_res(qp_grp->res_chunk_list);
kfree(qp_grp);
} }
struct usnic_vnic_res_chunk* struct usnic_vnic_res_chunk*

View file

@ -89,8 +89,8 @@ extern const struct usnic_vnic_res_spec min_transport_spec[USNIC_TRANSPORT_MAX];
const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state); const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state);
int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz); int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz);
int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz); int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz);
struct usnic_ib_qp_grp * int usnic_ib_qp_grp_create(struct usnic_ib_qp_grp *qp,
usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf, struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
struct usnic_ib_pd *pd, struct usnic_ib_pd *pd,
struct usnic_vnic_res_spec *res_spec, struct usnic_vnic_res_spec *res_spec,
struct usnic_transport_spec *trans_spec); struct usnic_transport_spec *trans_spec);

View file

@ -168,30 +168,31 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
return 0; return 0;
} }
static struct usnic_ib_qp_grp* static int
find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev, find_free_vf_and_create_qp_grp(struct ib_qp *qp,
struct usnic_ib_pd *pd,
struct usnic_transport_spec *trans_spec, struct usnic_transport_spec *trans_spec,
struct usnic_vnic_res_spec *res_spec) struct usnic_vnic_res_spec *res_spec)
{ {
struct usnic_ib_dev *us_ibdev = to_usdev(qp->device);
struct usnic_ib_pd *pd = to_upd(qp->pd);
struct usnic_ib_vf *vf; struct usnic_ib_vf *vf;
struct usnic_vnic *vnic; struct usnic_vnic *vnic;
struct usnic_ib_qp_grp *qp_grp; struct usnic_ib_qp_grp *qp_grp = to_uqp_grp(qp);
struct device *dev, **dev_list; struct device *dev, **dev_list;
int i; int i, ret;
BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock)); BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));
if (list_empty(&us_ibdev->vf_dev_list)) { if (list_empty(&us_ibdev->vf_dev_list)) {
usnic_info("No vfs to allocate\n"); usnic_info("No vfs to allocate\n");
return NULL; return -ENOMEM;
} }
if (usnic_ib_share_vf) { if (usnic_ib_share_vf) {
/* Try to find resouces on a used vf which is in pd */ /* Try to find resouces on a used vf which is in pd */
dev_list = usnic_uiom_get_dev_list(pd->umem_pd); dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
if (IS_ERR(dev_list)) if (IS_ERR(dev_list))
return ERR_CAST(dev_list); return PTR_ERR(dev_list);
for (i = 0; dev_list[i]; i++) { for (i = 0; dev_list[i]; i++) {
dev = dev_list[i]; dev = dev_list[i];
vf = dev_get_drvdata(dev); vf = dev_get_drvdata(dev);
@ -202,9 +203,9 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
dev_name(&us_ibdev->ib_dev.dev), dev_name(&us_ibdev->ib_dev.dev),
pci_name(usnic_vnic_get_pdev( pci_name(usnic_vnic_get_pdev(
vnic))); vnic)));
qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, ret = usnic_ib_qp_grp_create(qp_grp,
vf, pd, us_ibdev->ufdev,
res_spec, vf, pd, res_spec,
trans_spec); trans_spec);
spin_unlock(&vf->lock); spin_unlock(&vf->lock);
@ -223,8 +224,8 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
vnic = vf->vnic; vnic = vf->vnic;
if (vf->qp_grp_ref_cnt == 0 && if (vf->qp_grp_ref_cnt == 0 &&
usnic_vnic_check_room(vnic, res_spec) == 0) { usnic_vnic_check_room(vnic, res_spec) == 0) {
qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf, ret = usnic_ib_qp_grp_create(qp_grp, us_ibdev->ufdev,
pd, res_spec, vf, pd, res_spec,
trans_spec); trans_spec);
spin_unlock(&vf->lock); spin_unlock(&vf->lock);
@ -235,16 +236,15 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
usnic_info("No free qp grp found on %s\n", usnic_info("No free qp grp found on %s\n",
dev_name(&us_ibdev->ib_dev.dev)); dev_name(&us_ibdev->ib_dev.dev));
return ERR_PTR(-ENOMEM); return -ENOMEM;
qp_grp_check: qp_grp_check:
if (IS_ERR_OR_NULL(qp_grp)) { if (ret) {
usnic_err("Failed to allocate qp_grp\n"); usnic_err("Failed to allocate qp_grp\n");
if (usnic_ib_share_vf) if (usnic_ib_share_vf)
usnic_uiom_free_dev_list(dev_list); usnic_uiom_free_dev_list(dev_list);
return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
} }
return qp_grp; return ret;
} }
static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp) static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
@ -458,13 +458,12 @@ int usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
return 0; return 0;
} }
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd, int usnic_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata) struct ib_udata *udata)
{ {
int err; int err;
struct usnic_ib_dev *us_ibdev; struct usnic_ib_dev *us_ibdev;
struct usnic_ib_qp_grp *qp_grp; struct usnic_ib_qp_grp *qp_grp = to_uqp_grp(ibqp);
struct usnic_ib_ucontext *ucontext = rdma_udata_to_drv_context( struct usnic_ib_ucontext *ucontext = rdma_udata_to_drv_context(
udata, struct usnic_ib_ucontext, ibucontext); udata, struct usnic_ib_ucontext, ibucontext);
int cq_cnt; int cq_cnt;
@ -474,29 +473,29 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
usnic_dbg("\n"); usnic_dbg("\n");
us_ibdev = to_usdev(pd->device); us_ibdev = to_usdev(ibqp->device);
if (init_attr->create_flags) if (init_attr->create_flags)
return ERR_PTR(-EOPNOTSUPP); return -EOPNOTSUPP;
err = ib_copy_from_udata(&cmd, udata, sizeof(cmd)); err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
if (err) { if (err) {
usnic_err("%s: cannot copy udata for create_qp\n", usnic_err("%s: cannot copy udata for create_qp\n",
dev_name(&us_ibdev->ib_dev.dev)); dev_name(&us_ibdev->ib_dev.dev));
return ERR_PTR(-EINVAL); return -EINVAL;
} }
err = create_qp_validate_user_data(cmd); err = create_qp_validate_user_data(cmd);
if (err) { if (err) {
usnic_err("%s: Failed to validate user data\n", usnic_err("%s: Failed to validate user data\n",
dev_name(&us_ibdev->ib_dev.dev)); dev_name(&us_ibdev->ib_dev.dev));
return ERR_PTR(-EINVAL); return -EINVAL;
} }
if (init_attr->qp_type != IB_QPT_UD) { if (init_attr->qp_type != IB_QPT_UD) {
usnic_err("%s asked to make a non-UD QP: %d\n", usnic_err("%s asked to make a non-UD QP: %d\n",
dev_name(&us_ibdev->ib_dev.dev), init_attr->qp_type); dev_name(&us_ibdev->ib_dev.dev), init_attr->qp_type);
return ERR_PTR(-EOPNOTSUPP); return -EOPNOTSUPP;
} }
trans_spec = cmd.spec; trans_spec = cmd.spec;
@ -504,13 +503,9 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2; cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;
res_spec = min_transport_spec[trans_spec.trans_type]; res_spec = min_transport_spec[trans_spec.trans_type];
usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt); usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd), err = find_free_vf_and_create_qp_grp(ibqp, &trans_spec, &res_spec);
&trans_spec, if (err)
&res_spec);
if (IS_ERR_OR_NULL(qp_grp)) {
err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM;
goto out_release_mutex; goto out_release_mutex;
}
err = usnic_ib_fill_create_qp_resp(qp_grp, udata); err = usnic_ib_fill_create_qp_resp(qp_grp, udata);
if (err) { if (err) {
@ -522,13 +517,13 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
list_add_tail(&qp_grp->link, &ucontext->qp_grp_list); list_add_tail(&qp_grp->link, &ucontext->qp_grp_list);
usnic_ib_log_vf(qp_grp->vf); usnic_ib_log_vf(qp_grp->vf);
mutex_unlock(&us_ibdev->usdev_lock); mutex_unlock(&us_ibdev->usdev_lock);
return &qp_grp->ibqp; return 0;
out_release_qp_grp: out_release_qp_grp:
qp_grp_destroy(qp_grp); qp_grp_destroy(qp_grp);
out_release_mutex: out_release_mutex:
mutex_unlock(&us_ibdev->usdev_lock); mutex_unlock(&us_ibdev->usdev_lock);
return ERR_PTR(err); return err;
} }
int usnic_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) int usnic_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)

View file

@ -50,8 +50,7 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid); union ib_gid *gid);
int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); int usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd, int usnic_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata); struct ib_udata *udata);
int usnic_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata); int usnic_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,

View file

@ -185,6 +185,7 @@ static const struct ib_device_ops pvrdma_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, pvrdma_ah, ibah), INIT_RDMA_OBJ_SIZE(ib_ah, pvrdma_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, pvrdma_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_cq, pvrdma_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, pvrdma_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_pd, pvrdma_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_qp, pvrdma_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_ucontext, pvrdma_ucontext, ibucontext), INIT_RDMA_OBJ_SIZE(ib_ucontext, pvrdma_ucontext, ibucontext),
}; };

View file

@ -182,18 +182,17 @@ static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap,
/** /**
* pvrdma_create_qp - create queue pair * pvrdma_create_qp - create queue pair
* @pd: protection domain * @ibqp: queue pair
* @init_attr: queue pair attributes * @init_attr: queue pair attributes
* @udata: user data * @udata: user data
* *
* @return: the ib_qp pointer on success, otherwise returns an errno. * @return: the 0 on success, otherwise returns an errno.
*/ */
struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, int pvrdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct pvrdma_qp *qp = NULL; struct pvrdma_qp *qp = to_vqp(ibqp);
struct pvrdma_dev *dev = to_vdev(pd->device); struct pvrdma_dev *dev = to_vdev(ibqp->device);
union pvrdma_cmd_req req; union pvrdma_cmd_req req;
union pvrdma_cmd_resp rsp; union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_qp *cmd = &req.create_qp; struct pvrdma_cmd_create_qp *cmd = &req.create_qp;
@ -209,7 +208,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
dev_warn(&dev->pdev->dev, dev_warn(&dev->pdev->dev,
"invalid create queuepair flags %#x\n", "invalid create queuepair flags %#x\n",
init_attr->create_flags); init_attr->create_flags);
return ERR_PTR(-EOPNOTSUPP); return -EOPNOTSUPP;
} }
if (init_attr->qp_type != IB_QPT_RC && if (init_attr->qp_type != IB_QPT_RC &&
@ -217,22 +216,22 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
init_attr->qp_type != IB_QPT_GSI) { init_attr->qp_type != IB_QPT_GSI) {
dev_warn(&dev->pdev->dev, "queuepair type %d not supported\n", dev_warn(&dev->pdev->dev, "queuepair type %d not supported\n",
init_attr->qp_type); init_attr->qp_type);
return ERR_PTR(-EOPNOTSUPP); return -EOPNOTSUPP;
} }
if (is_srq && !dev->dsr->caps.max_srq) { if (is_srq && !dev->dsr->caps.max_srq) {
dev_warn(&dev->pdev->dev, dev_warn(&dev->pdev->dev,
"SRQs not supported by device\n"); "SRQs not supported by device\n");
return ERR_PTR(-EINVAL); return -EINVAL;
} }
if (!atomic_add_unless(&dev->num_qps, 1, dev->dsr->caps.max_qp)) if (!atomic_add_unless(&dev->num_qps, 1, dev->dsr->caps.max_qp))
return ERR_PTR(-ENOMEM); return -ENOMEM;
switch (init_attr->qp_type) { switch (init_attr->qp_type) {
case IB_QPT_GSI: case IB_QPT_GSI:
if (init_attr->port_num == 0 || if (init_attr->port_num == 0 ||
init_attr->port_num > pd->device->phys_port_cnt) { init_attr->port_num > ibqp->device->phys_port_cnt) {
dev_warn(&dev->pdev->dev, "invalid queuepair attrs\n"); dev_warn(&dev->pdev->dev, "invalid queuepair attrs\n");
ret = -EINVAL; ret = -EINVAL;
goto err_qp; goto err_qp;
@ -240,12 +239,6 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
fallthrough; fallthrough;
case IB_QPT_RC: case IB_QPT_RC:
case IB_QPT_UD: case IB_QPT_UD:
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp) {
ret = -ENOMEM;
goto err_qp;
}
spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock); spin_lock_init(&qp->rq.lock);
mutex_init(&qp->mutex); mutex_init(&qp->mutex);
@ -275,8 +268,8 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
if (!is_srq) { if (!is_srq) {
/* set qp->sq.wqe_cnt, shift, buf_size.. */ /* set qp->sq.wqe_cnt, shift, buf_size.. */
qp->rumem = qp->rumem = ib_umem_get(ibqp->device,
ib_umem_get(pd->device, ucmd.rbuf_addr, ucmd.rbuf_addr,
ucmd.rbuf_size, 0); ucmd.rbuf_size, 0);
if (IS_ERR(qp->rumem)) { if (IS_ERR(qp->rumem)) {
ret = PTR_ERR(qp->rumem); ret = PTR_ERR(qp->rumem);
@ -288,7 +281,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
qp->srq = to_vsrq(init_attr->srq); qp->srq = to_vsrq(init_attr->srq);
} }
qp->sumem = ib_umem_get(pd->device, ucmd.sbuf_addr, qp->sumem = ib_umem_get(ibqp->device, ucmd.sbuf_addr,
ucmd.sbuf_size, 0); ucmd.sbuf_size, 0);
if (IS_ERR(qp->sumem)) { if (IS_ERR(qp->sumem)) {
if (!is_srq) if (!is_srq)
@ -306,12 +299,12 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
qp->npages_recv = 0; qp->npages_recv = 0;
qp->npages = qp->npages_send + qp->npages_recv; qp->npages = qp->npages_send + qp->npages_recv;
} else { } else {
ret = pvrdma_set_sq_size(to_vdev(pd->device), ret = pvrdma_set_sq_size(to_vdev(ibqp->device),
&init_attr->cap, qp); &init_attr->cap, qp);
if (ret) if (ret)
goto err_qp; goto err_qp;
ret = pvrdma_set_rq_size(to_vdev(pd->device), ret = pvrdma_set_rq_size(to_vdev(ibqp->device),
&init_attr->cap, qp); &init_attr->cap, qp);
if (ret) if (ret)
goto err_qp; goto err_qp;
@ -362,7 +355,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
memset(cmd, 0, sizeof(*cmd)); memset(cmd, 0, sizeof(*cmd));
cmd->hdr.cmd = PVRDMA_CMD_CREATE_QP; cmd->hdr.cmd = PVRDMA_CMD_CREATE_QP;
cmd->pd_handle = to_vpd(pd)->pd_handle; cmd->pd_handle = to_vpd(ibqp->pd)->pd_handle;
cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle; cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle;
cmd->recv_cq_handle = to_vcq(init_attr->recv_cq)->cq_handle; cmd->recv_cq_handle = to_vcq(init_attr->recv_cq)->cq_handle;
if (is_srq) if (is_srq)
@ -418,11 +411,11 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
dev_warn(&dev->pdev->dev, dev_warn(&dev->pdev->dev,
"failed to copy back udata\n"); "failed to copy back udata\n");
__pvrdma_destroy_qp(dev, qp); __pvrdma_destroy_qp(dev, qp);
return ERR_PTR(-EINVAL); return -EINVAL;
} }
} }
return &qp->ibqp; return 0;
err_pdir: err_pdir:
pvrdma_page_dir_cleanup(dev, &qp->pdir); pvrdma_page_dir_cleanup(dev, &qp->pdir);
@ -430,10 +423,8 @@ err_umem:
ib_umem_release(qp->rumem); ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem); ib_umem_release(qp->sumem);
err_qp: err_qp:
kfree(qp);
atomic_dec(&dev->num_qps); atomic_dec(&dev->num_qps);
return ret;
return ERR_PTR(ret);
} }
static void _pvrdma_free_qp(struct pvrdma_qp *qp) static void _pvrdma_free_qp(struct pvrdma_qp *qp)
@ -454,8 +445,6 @@ static void _pvrdma_free_qp(struct pvrdma_qp *qp)
pvrdma_page_dir_cleanup(dev, &qp->pdir); pvrdma_page_dir_cleanup(dev, &qp->pdir);
kfree(qp);
atomic_dec(&dev->num_qps); atomic_dec(&dev->num_qps);
} }

View file

@ -390,8 +390,7 @@ int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
int pvrdma_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr); int pvrdma_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata); int pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, int pvrdma_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata); struct ib_udata *udata);
int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata); int attr_mask, struct ib_udata *udata);

View file

@ -1058,7 +1058,7 @@ static int alloc_ud_wq_attr(struct rvt_qp *qp, int node)
/** /**
* rvt_create_qp - create a queue pair for a device * rvt_create_qp - create a queue pair for a device
* @ibpd: the protection domain who's device we create the queue pair for * @ibqp: the queue pair
* @init_attr: the attributes of the queue pair * @init_attr: the attributes of the queue pair
* @udata: user data for libibverbs.so * @udata: user data for libibverbs.so
* *
@ -1066,47 +1066,45 @@ static int alloc_ud_wq_attr(struct rvt_qp *qp, int node)
* unique idea of what queue pair numbers mean. For instance there is a reserved * unique idea of what queue pair numbers mean. For instance there is a reserved
* range for PSM. * range for PSM.
* *
* Return: the queue pair on success, otherwise returns an errno. * Return: 0 on success, otherwise returns an errno.
* *
* Called by the ib_create_qp() core verbs function. * Called by the ib_create_qp() core verbs function.
*/ */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct rvt_qp *qp; struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
int err; int ret = -ENOMEM;
struct rvt_swqe *swq = NULL; struct rvt_swqe *swq = NULL;
size_t sz; size_t sz;
size_t sg_list_sz = 0; size_t sg_list_sz = 0;
struct ib_qp *ret = ERR_PTR(-ENOMEM); struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
void *priv = NULL; void *priv = NULL;
size_t sqsize; size_t sqsize;
u8 exclude_prefix = 0; u8 exclude_prefix = 0;
if (!rdi) if (!rdi)
return ERR_PTR(-EINVAL); return -EINVAL;
if (init_attr->create_flags & ~IB_QP_CREATE_NETDEV_USE) if (init_attr->create_flags & ~IB_QP_CREATE_NETDEV_USE)
return ERR_PTR(-EOPNOTSUPP); return -EOPNOTSUPP;
if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge || if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr) init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr)
return ERR_PTR(-EINVAL); return -EINVAL;
/* Check receive queue parameters if no SRQ is specified. */ /* Check receive queue parameters if no SRQ is specified. */
if (!init_attr->srq) { if (!init_attr->srq) {
if (init_attr->cap.max_recv_sge > if (init_attr->cap.max_recv_sge >
rdi->dparms.props.max_recv_sge || rdi->dparms.props.max_recv_sge ||
init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr) init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
return ERR_PTR(-EINVAL); return -EINVAL;
if (init_attr->cap.max_send_sge + if (init_attr->cap.max_send_sge +
init_attr->cap.max_send_wr + init_attr->cap.max_send_wr +
init_attr->cap.max_recv_sge + init_attr->cap.max_recv_sge +
init_attr->cap.max_recv_wr == 0) init_attr->cap.max_recv_wr == 0)
return ERR_PTR(-EINVAL); return -EINVAL;
} }
sqsize = sqsize =
init_attr->cap.max_send_wr + 1 + init_attr->cap.max_send_wr + 1 +
@ -1115,8 +1113,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
case IB_QPT_SMI: case IB_QPT_SMI:
case IB_QPT_GSI: case IB_QPT_GSI:
if (init_attr->port_num == 0 || if (init_attr->port_num == 0 ||
init_attr->port_num > ibpd->device->phys_port_cnt) init_attr->port_num > ibqp->device->phys_port_cnt)
return ERR_PTR(-EINVAL); return -EINVAL;
fallthrough; fallthrough;
case IB_QPT_UC: case IB_QPT_UC:
case IB_QPT_RC: case IB_QPT_RC:
@ -1124,7 +1122,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
sz = struct_size(swq, sg_list, init_attr->cap.max_send_sge); sz = struct_size(swq, sg_list, init_attr->cap.max_send_sge);
swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node); swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
if (!swq) if (!swq)
return ERR_PTR(-ENOMEM); return -ENOMEM;
if (init_attr->srq) { if (init_attr->srq) {
struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq); struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
@ -1135,9 +1133,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
} else if (init_attr->cap.max_recv_sge > 1) } else if (init_attr->cap.max_recv_sge > 1)
sg_list_sz = sizeof(*qp->r_sg_list) * sg_list_sz = sizeof(*qp->r_sg_list) *
(init_attr->cap.max_recv_sge - 1); (init_attr->cap.max_recv_sge - 1);
qp = kzalloc_node(sizeof(*qp), GFP_KERNEL, rdi->dparms.node);
if (!qp)
goto bail_swq;
qp->r_sg_list = qp->r_sg_list =
kzalloc_node(sg_list_sz, GFP_KERNEL, rdi->dparms.node); kzalloc_node(sg_list_sz, GFP_KERNEL, rdi->dparms.node);
if (!qp->r_sg_list) if (!qp->r_sg_list)
@ -1166,7 +1161,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
*/ */
priv = rdi->driver_f.qp_priv_alloc(rdi, qp); priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
if (IS_ERR(priv)) { if (IS_ERR(priv)) {
ret = priv; ret = PTR_ERR(priv);
goto bail_qp; goto bail_qp;
} }
qp->priv = priv; qp->priv = priv;
@ -1180,13 +1175,11 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
qp->r_rq.max_sge = init_attr->cap.max_recv_sge; qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) + sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
sizeof(struct rvt_rwqe); sizeof(struct rvt_rwqe);
err = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz, ret = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz,
rdi->dparms.node, udata); rdi->dparms.node, udata);
if (err) { if (ret)
ret = ERR_PTR(err);
goto bail_driver_priv; goto bail_driver_priv;
} }
}
/* /*
* ib_create_qp() will initialize qp->ibqp * ib_create_qp() will initialize qp->ibqp
@ -1206,40 +1199,35 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
qp->s_max_sge = init_attr->cap.max_send_sge; qp->s_max_sge = init_attr->cap.max_send_sge;
if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR) if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
qp->s_flags = RVT_S_SIGNAL_REQ_WR; qp->s_flags = RVT_S_SIGNAL_REQ_WR;
err = alloc_ud_wq_attr(qp, rdi->dparms.node); ret = alloc_ud_wq_attr(qp, rdi->dparms.node);
if (err) { if (ret)
ret = (ERR_PTR(err));
goto bail_rq_rvt; goto bail_rq_rvt;
}
if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE) if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
exclude_prefix = RVT_AIP_QP_PREFIX; exclude_prefix = RVT_AIP_QP_PREFIX;
err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table, ret = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
init_attr->qp_type, init_attr->qp_type,
init_attr->port_num, init_attr->port_num,
exclude_prefix); exclude_prefix);
if (err < 0) { if (ret < 0)
ret = ERR_PTR(err);
goto bail_rq_wq; goto bail_rq_wq;
}
qp->ibqp.qp_num = err; qp->ibqp.qp_num = ret;
if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE) if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
qp->ibqp.qp_num |= RVT_AIP_QP_BASE; qp->ibqp.qp_num |= RVT_AIP_QP_BASE;
qp->port_num = init_attr->port_num; qp->port_num = init_attr->port_num;
rvt_init_qp(rdi, qp, init_attr->qp_type); rvt_init_qp(rdi, qp, init_attr->qp_type);
if (rdi->driver_f.qp_priv_init) { if (rdi->driver_f.qp_priv_init) {
err = rdi->driver_f.qp_priv_init(rdi, qp, init_attr); ret = rdi->driver_f.qp_priv_init(rdi, qp, init_attr);
if (err) { if (ret)
ret = ERR_PTR(err);
goto bail_rq_wq; goto bail_rq_wq;
} }
}
break; break;
default: default:
/* Don't support raw QPs */ /* Don't support raw QPs */
return ERR_PTR(-EOPNOTSUPP); return -EOPNOTSUPP;
} }
init_attr->cap.max_inline_data = 0; init_attr->cap.max_inline_data = 0;
@ -1252,36 +1240,32 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
if (!qp->r_rq.wq) { if (!qp->r_rq.wq) {
__u64 offset = 0; __u64 offset = 0;
err = ib_copy_to_udata(udata, &offset, ret = ib_copy_to_udata(udata, &offset,
sizeof(offset)); sizeof(offset));
if (err) { if (ret)
ret = ERR_PTR(err);
goto bail_qpn; goto bail_qpn;
}
} else { } else {
u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz; u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
qp->ip = rvt_create_mmap_info(rdi, s, udata, qp->ip = rvt_create_mmap_info(rdi, s, udata,
qp->r_rq.wq); qp->r_rq.wq);
if (IS_ERR(qp->ip)) { if (IS_ERR(qp->ip)) {
ret = ERR_CAST(qp->ip); ret = PTR_ERR(qp->ip);
goto bail_qpn; goto bail_qpn;
} }
err = ib_copy_to_udata(udata, &qp->ip->offset, ret = ib_copy_to_udata(udata, &qp->ip->offset,
sizeof(qp->ip->offset)); sizeof(qp->ip->offset));
if (err) { if (ret)
ret = ERR_PTR(err);
goto bail_ip; goto bail_ip;
} }
}
qp->pid = current->pid; qp->pid = current->pid;
} }
spin_lock(&rdi->n_qps_lock); spin_lock(&rdi->n_qps_lock);
if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) { if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
spin_unlock(&rdi->n_qps_lock); spin_unlock(&rdi->n_qps_lock);
ret = ERR_PTR(-ENOMEM); ret = ENOMEM;
goto bail_ip; goto bail_ip;
} }
@ -1307,9 +1291,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
spin_unlock_irq(&rdi->pending_lock); spin_unlock_irq(&rdi->pending_lock);
} }
ret = &qp->ibqp; return 0;
return ret;
bail_ip: bail_ip:
if (qp->ip) if (qp->ip)
@ -1330,11 +1312,7 @@ bail_driver_priv:
bail_qp: bail_qp:
kfree(qp->s_ack_queue); kfree(qp->s_ack_queue);
kfree(qp->r_sg_list); kfree(qp->r_sg_list);
kfree(qp);
bail_swq:
vfree(swq); vfree(swq);
return ret; return ret;
} }
@ -1769,7 +1747,6 @@ int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
rdma_destroy_ah_attr(&qp->alt_ah_attr); rdma_destroy_ah_attr(&qp->alt_ah_attr);
free_ud_wq_attr(qp); free_ud_wq_attr(qp);
vfree(qp->s_wq); vfree(qp->s_wq);
kfree(qp);
return 0; return 0;
} }

View file

@ -52,8 +52,7 @@
int rvt_driver_qp_init(struct rvt_dev_info *rdi); int rvt_driver_qp_init(struct rvt_dev_info *rdi);
void rvt_qp_exit(struct rvt_dev_info *rdi); void rvt_qp_exit(struct rvt_dev_info *rdi);
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata); struct ib_udata *udata);
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata); int attr_mask, struct ib_udata *udata);

View file

@ -131,6 +131,13 @@ static int rvt_query_device(struct ib_device *ibdev,
return 0; return 0;
} }
static int rvt_get_numa_node(struct ib_device *ibdev)
{
struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
return rdi->dparms.node;
}
static int rvt_modify_device(struct ib_device *device, static int rvt_modify_device(struct ib_device *device,
int device_modify_mask, int device_modify_mask,
struct ib_device_modify *device_modify) struct ib_device_modify *device_modify)
@ -380,6 +387,7 @@ static const struct ib_device_ops rvt_dev_ops = {
.destroy_srq = rvt_destroy_srq, .destroy_srq = rvt_destroy_srq,
.detach_mcast = rvt_detach_mcast, .detach_mcast = rvt_detach_mcast,
.get_dma_mr = rvt_get_dma_mr, .get_dma_mr = rvt_get_dma_mr,
.get_numa_node = rvt_get_numa_node,
.get_port_immutable = rvt_get_port_immutable, .get_port_immutable = rvt_get_port_immutable,
.map_mr_sg = rvt_map_mr_sg, .map_mr_sg = rvt_map_mr_sg,
.mmap = rvt_mmap, .mmap = rvt_mmap,
@ -406,6 +414,7 @@ static const struct ib_device_ops rvt_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, rvt_ah, ibah), INIT_RDMA_OBJ_SIZE(ib_ah, rvt_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, rvt_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_cq, rvt_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, rvt_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_pd, rvt_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_qp, rvt_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_srq, rvt_srq, ibsrq), INIT_RDMA_OBJ_SIZE(ib_srq, rvt_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, rvt_ucontext, ibucontext), INIT_RDMA_OBJ_SIZE(ib_ucontext, rvt_ucontext, ibucontext),
}; };

View file

@ -41,7 +41,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
.size = sizeof(struct rxe_qp), .size = sizeof(struct rxe_qp),
.elem_offset = offsetof(struct rxe_qp, pelem), .elem_offset = offsetof(struct rxe_qp, pelem),
.cleanup = rxe_qp_cleanup, .cleanup = rxe_qp_cleanup,
.flags = RXE_POOL_INDEX, .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_QP_INDEX, .min_index = RXE_MIN_QP_INDEX,
.max_index = RXE_MAX_QP_INDEX, .max_index = RXE_MAX_QP_INDEX,
}, },

View file

@ -391,59 +391,52 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
return err; return err;
} }
static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd, static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
struct ib_qp_init_attr *init,
struct ib_udata *udata) struct ib_udata *udata)
{ {
int err; int err;
struct rxe_dev *rxe = to_rdev(ibpd->device); struct rxe_dev *rxe = to_rdev(ibqp->device);
struct rxe_pd *pd = to_rpd(ibpd); struct rxe_pd *pd = to_rpd(ibqp->pd);
struct rxe_qp *qp; struct rxe_qp *qp = to_rqp(ibqp);
struct rxe_create_qp_resp __user *uresp = NULL; struct rxe_create_qp_resp __user *uresp = NULL;
if (udata) { if (udata) {
if (udata->outlen < sizeof(*uresp)) if (udata->outlen < sizeof(*uresp))
return ERR_PTR(-EINVAL); return -EINVAL;
uresp = udata->outbuf; uresp = udata->outbuf;
} }
if (init->create_flags) if (init->create_flags)
return ERR_PTR(-EOPNOTSUPP); return -EOPNOTSUPP;
err = rxe_qp_chk_init(rxe, init); err = rxe_qp_chk_init(rxe, init);
if (err) if (err)
goto err1; return err;
qp = rxe_alloc(&rxe->qp_pool);
if (!qp) {
err = -ENOMEM;
goto err1;
}
if (udata) { if (udata) {
if (udata->inlen) { if (udata->inlen)
err = -EINVAL; return -EINVAL;
goto err2;
}
qp->is_user = true; qp->is_user = true;
} else { } else {
qp->is_user = false; qp->is_user = false;
} }
rxe_add_index(qp); err = rxe_add_to_pool(&rxe->qp_pool, qp);
err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata);
if (err) if (err)
goto err3; return err;
return &qp->ibqp; rxe_add_index(qp);
err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata);
if (err)
goto qp_init;
err3: return 0;
qp_init:
rxe_drop_index(qp); rxe_drop_index(qp);
err2:
rxe_drop_ref(qp); rxe_drop_ref(qp);
err1: return err;
return ERR_PTR(err);
} }
static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
@ -1145,6 +1138,7 @@ static const struct ib_device_ops rxe_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah), INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_qp, rxe_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq), INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc), INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw), INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),

View file

@ -210,8 +210,8 @@ struct rxe_resp_info {
}; };
struct rxe_qp { struct rxe_qp {
struct rxe_pool_entry pelem;
struct ib_qp ibqp; struct ib_qp ibqp;
struct rxe_pool_entry pelem;
struct ib_qp_attr attr; struct ib_qp_attr attr;
unsigned int valid; unsigned int valid;
unsigned int mtu; unsigned int mtu;

View file

@ -297,6 +297,7 @@ static const struct ib_device_ops siw_device_ops = {
INIT_RDMA_OBJ_SIZE(ib_cq, siw_cq, base_cq), INIT_RDMA_OBJ_SIZE(ib_cq, siw_cq, base_cq),
INIT_RDMA_OBJ_SIZE(ib_pd, siw_pd, base_pd), INIT_RDMA_OBJ_SIZE(ib_pd, siw_pd, base_pd),
INIT_RDMA_OBJ_SIZE(ib_qp, siw_qp, base_qp),
INIT_RDMA_OBJ_SIZE(ib_srq, siw_srq, base_srq), INIT_RDMA_OBJ_SIZE(ib_srq, siw_srq, base_srq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, siw_ucontext, base_ucontext), INIT_RDMA_OBJ_SIZE(ib_ucontext, siw_ucontext, base_ucontext),
}; };

View file

@ -1344,6 +1344,4 @@ void siw_free_qp(struct kref *ref)
siw_put_tx_cpu(qp->tx_cpu); siw_put_tx_cpu(qp->tx_cpu);
atomic_dec(&sdev->num_qp); atomic_dec(&sdev->num_qp);
siw_dbg_qp(qp, "free QP\n");
kfree_rcu(qp, rcu);
} }

View file

@ -285,16 +285,16 @@ siw_mmap_entry_insert(struct siw_ucontext *uctx,
* *
* Create QP of requested size on given device. * Create QP of requested size on given device.
* *
* @pd: Protection Domain * @qp: Queue pait
* @attrs: Initial QP attributes. * @attrs: Initial QP attributes.
* @udata: used to provide QP ID, SQ and RQ size back to user. * @udata: used to provide QP ID, SQ and RQ size back to user.
*/ */
struct ib_qp *siw_create_qp(struct ib_pd *pd, int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
struct ib_qp_init_attr *attrs,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct siw_qp *qp = NULL; struct ib_pd *pd = ibqp->pd;
struct siw_qp *qp = to_siw_qp(ibqp);
struct ib_device *base_dev = pd->device; struct ib_device *base_dev = pd->device;
struct siw_device *sdev = to_siw_dev(base_dev); struct siw_device *sdev = to_siw_dev(base_dev);
struct siw_ucontext *uctx = struct siw_ucontext *uctx =
@ -307,17 +307,16 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
siw_dbg(base_dev, "create new QP\n"); siw_dbg(base_dev, "create new QP\n");
if (attrs->create_flags) if (attrs->create_flags)
return ERR_PTR(-EOPNOTSUPP); return -EOPNOTSUPP;
if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) { if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
siw_dbg(base_dev, "too many QP's\n"); siw_dbg(base_dev, "too many QP's\n");
rv = -ENOMEM; return -ENOMEM;
goto err_out;
} }
if (attrs->qp_type != IB_QPT_RC) { if (attrs->qp_type != IB_QPT_RC) {
siw_dbg(base_dev, "only RC QP's supported\n"); siw_dbg(base_dev, "only RC QP's supported\n");
rv = -EOPNOTSUPP; rv = -EOPNOTSUPP;
goto err_out; goto err_atomic;
} }
if ((attrs->cap.max_send_wr > SIW_MAX_QP_WR) || if ((attrs->cap.max_send_wr > SIW_MAX_QP_WR) ||
(attrs->cap.max_recv_wr > SIW_MAX_QP_WR) || (attrs->cap.max_recv_wr > SIW_MAX_QP_WR) ||
@ -325,13 +324,13 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
(attrs->cap.max_recv_sge > SIW_MAX_SGE)) { (attrs->cap.max_recv_sge > SIW_MAX_SGE)) {
siw_dbg(base_dev, "QP size error\n"); siw_dbg(base_dev, "QP size error\n");
rv = -EINVAL; rv = -EINVAL;
goto err_out; goto err_atomic;
} }
if (attrs->cap.max_inline_data > SIW_MAX_INLINE) { if (attrs->cap.max_inline_data > SIW_MAX_INLINE) {
siw_dbg(base_dev, "max inline send: %d > %d\n", siw_dbg(base_dev, "max inline send: %d > %d\n",
attrs->cap.max_inline_data, (int)SIW_MAX_INLINE); attrs->cap.max_inline_data, (int)SIW_MAX_INLINE);
rv = -EINVAL; rv = -EINVAL;
goto err_out; goto err_atomic;
} }
/* /*
* NOTE: we allow for zero element SQ and RQ WQE's SGL's * NOTE: we allow for zero element SQ and RQ WQE's SGL's
@ -340,19 +339,15 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
if (attrs->cap.max_send_wr + attrs->cap.max_recv_wr == 0) { if (attrs->cap.max_send_wr + attrs->cap.max_recv_wr == 0) {
siw_dbg(base_dev, "QP must have send or receive queue\n"); siw_dbg(base_dev, "QP must have send or receive queue\n");
rv = -EINVAL; rv = -EINVAL;
goto err_out; goto err_atomic;
} }
if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) { if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {
siw_dbg(base_dev, "send CQ or receive CQ invalid\n"); siw_dbg(base_dev, "send CQ or receive CQ invalid\n");
rv = -EINVAL; rv = -EINVAL;
goto err_out; goto err_atomic;
}
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp) {
rv = -ENOMEM;
goto err_out;
} }
init_rwsem(&qp->state_lock); init_rwsem(&qp->state_lock);
spin_lock_init(&qp->sq_lock); spin_lock_init(&qp->sq_lock);
spin_lock_init(&qp->rq_lock); spin_lock_init(&qp->rq_lock);
@ -360,7 +355,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
rv = siw_qp_add(sdev, qp); rv = siw_qp_add(sdev, qp);
if (rv) if (rv)
goto err_out; goto err_atomic;
num_sqe = attrs->cap.max_send_wr; num_sqe = attrs->cap.max_send_wr;
num_rqe = attrs->cap.max_recv_wr; num_rqe = attrs->cap.max_recv_wr;
@ -482,23 +477,20 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
list_add_tail(&qp->devq, &sdev->qp_list); list_add_tail(&qp->devq, &sdev->qp_list);
spin_unlock_irqrestore(&sdev->lock, flags); spin_unlock_irqrestore(&sdev->lock, flags);
return &qp->base_qp; return 0;
err_out_xa: err_out_xa:
xa_erase(&sdev->qp_xa, qp_id(qp)); xa_erase(&sdev->qp_xa, qp_id(qp));
err_out:
if (qp) {
if (uctx) { if (uctx) {
rdma_user_mmap_entry_remove(qp->sq_entry); rdma_user_mmap_entry_remove(qp->sq_entry);
rdma_user_mmap_entry_remove(qp->rq_entry); rdma_user_mmap_entry_remove(qp->rq_entry);
} }
vfree(qp->sendq); vfree(qp->sendq);
vfree(qp->recvq); vfree(qp->recvq);
kfree(qp);
}
atomic_dec(&sdev->num_qp);
return ERR_PTR(rv); err_atomic:
atomic_dec(&sdev->num_qp);
return rv;
} }
/* /*

View file

@ -50,8 +50,7 @@ int siw_query_gid(struct ib_device *base_dev, u32 port, int idx,
union ib_gid *gid); union ib_gid *gid);
int siw_alloc_pd(struct ib_pd *base_pd, struct ib_udata *udata); int siw_alloc_pd(struct ib_pd *base_pd, struct ib_udata *udata);
int siw_dealloc_pd(struct ib_pd *base_pd, struct ib_udata *udata); int siw_dealloc_pd(struct ib_pd *base_pd, struct ib_udata *udata);
struct ib_qp *siw_create_qp(struct ib_pd *base_pd, int siw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attr,
struct ib_qp_init_attr *attr,
struct ib_udata *udata); struct ib_udata *udata);
int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr, int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr); int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);

View file

@ -2269,7 +2269,12 @@ struct iw_cm_conn_param;
struct ib_struct))) struct ib_struct)))
#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \ #define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \
((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp)) ((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
gfp, false))
#define rdma_zalloc_drv_obj_numa(ib_dev, ib_type) \
((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
GFP_KERNEL, true))
#define rdma_zalloc_drv_obj(ib_dev, ib_type) \ #define rdma_zalloc_drv_obj(ib_dev, ib_type) \
rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL) rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
@ -2435,8 +2440,7 @@ struct ib_device_ops {
struct ib_udata *udata); struct ib_udata *udata);
int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr); int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata); int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
struct ib_qp *(*create_qp)(struct ib_pd *pd, int (*create_qp)(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata); struct ib_udata *udata);
int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_udata *udata); int qp_attr_mask, struct ib_udata *udata);
@ -2635,11 +2639,18 @@ struct ib_device_ops {
int (*query_ucontext)(struct ib_ucontext *context, int (*query_ucontext)(struct ib_ucontext *context,
struct uverbs_attr_bundle *attrs); struct uverbs_attr_bundle *attrs);
/*
* Provide NUMA node. This API exists for rdmavt/hfi1 only.
* Everyone else relies on Linux memory management model.
*/
int (*get_numa_node)(struct ib_device *dev);
DECLARE_RDMA_OBJ_SIZE(ib_ah); DECLARE_RDMA_OBJ_SIZE(ib_ah);
DECLARE_RDMA_OBJ_SIZE(ib_counters); DECLARE_RDMA_OBJ_SIZE(ib_counters);
DECLARE_RDMA_OBJ_SIZE(ib_cq); DECLARE_RDMA_OBJ_SIZE(ib_cq);
DECLARE_RDMA_OBJ_SIZE(ib_mw); DECLARE_RDMA_OBJ_SIZE(ib_mw);
DECLARE_RDMA_OBJ_SIZE(ib_pd); DECLARE_RDMA_OBJ_SIZE(ib_pd);
DECLARE_RDMA_OBJ_SIZE(ib_qp);
DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table); DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table);
DECLARE_RDMA_OBJ_SIZE(ib_srq); DECLARE_RDMA_OBJ_SIZE(ib_srq);
DECLARE_RDMA_OBJ_SIZE(ib_ucontext); DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
@ -2746,6 +2757,15 @@ struct ib_device {
u32 lag_flags; u32 lag_flags;
}; };
static inline void *rdma_zalloc_obj(struct ib_device *dev, size_t size,
gfp_t gfp, bool is_numa_aware)
{
if (is_numa_aware && dev->ops.get_numa_node)
return kzalloc_node(size, gfp, dev->ops.get_numa_node(dev));
return kzalloc(size, gfp);
}
struct ib_client_nl_info; struct ib_client_nl_info;
struct ib_client { struct ib_client {
const char *name; const char *name;