
IB: Pass uverbs_attr_bundle down ib_x destroy path

The uverbs_attr_bundle with the ucontext is sent down to the drivers' ib_x
destroy path as ib_udata. The next patch will use the ib_udata to free the
drivers' destroy path from the dependency on 'uobject->context', as was
already done for the create path.

Signed-off-by: Shamir Rabinovitch <shamir.rabinovitch@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Shamir Rabinovitch 2019-03-31 19:10:05 +03:00 committed by Jason Gunthorpe
parent a6a3797df2
commit c4367a2635
73 changed files with 513 additions and 343 deletions
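In short: every ib_x destroy/dealloc callback gains a trailing struct ib_udata * parameter, uverbs call sites forward &attrs->driver_udata, and kernel-internal call sites pass NULL. A minimal sketch of the recurring pattern, using the PD case taken from the hunks below (nothing here goes beyond what the hunks show):

/* Old signature: the driver could only reach the user context
 * through uobject->context. */
void ib_dealloc_pd(struct ib_pd *pd);

/* New signature: the caller hands the udata down explicitly. */
void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);

/* uverbs call site: forward the bundle's driver udata. */
ib_dealloc_pd_user(pd, &attrs->driver_udata);

/* in-kernel call site: no user context, so pass NULL. */
ib_dealloc_pd_user(pd, NULL);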

View file

@@ -128,15 +128,17 @@ static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
  * @comp_vector: HCA completion vectors for this CQ
  * @poll_ctx: context to poll the CQ from.
  * @caller: module owner name.
+ * @udata: Valid user data or NULL for kernel object
  *
  * This is the proper interface to allocate a CQ for in-kernel users. A
  * CQ allocated with this interface will automatically be polled from the
  * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
  * to use this CQ abstraction.
  */
-struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
-                            int nr_cqe, int comp_vector,
-                            enum ib_poll_context poll_ctx, const char *caller)
+struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
+                                 int nr_cqe, int comp_vector,
+                                 enum ib_poll_context poll_ctx,
+                                 const char *caller, struct ib_udata *udata)
 {
         struct ib_cq_init_attr cq_attr = {
                 .cqe            = nr_cqe,
@@ -193,16 +195,17 @@ out_free_wc:
         kfree(cq->wc);
         rdma_restrack_del(&cq->res);
 out_destroy_cq:
-        cq->device->ops.destroy_cq(cq);
+        cq->device->ops.destroy_cq(cq, udata);
         return ERR_PTR(ret);
 }
-EXPORT_SYMBOL(__ib_alloc_cq);
+EXPORT_SYMBOL(__ib_alloc_cq_user);
 
 /**
  * ib_free_cq - free a completion queue
  * @cq: completion queue to free.
+ * @udata: User data or NULL for kernel object
  */
-void ib_free_cq(struct ib_cq *cq)
+void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
 {
         int ret;
 
@@ -225,7 +228,7 @@ void ib_free_cq(struct ib_cq *cq)
         kfree(cq->wc);
         rdma_restrack_del(&cq->res);
 
-        ret = cq->device->ops.destroy_cq(cq);
+        ret = cq->device->ops.destroy_cq(cq, udata);
         WARN_ON_ONCE(ret);
 }
-EXPORT_SYMBOL(ib_free_cq);
+EXPORT_SYMBOL(ib_free_cq_user);

View file

@@ -240,7 +240,7 @@ void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
 void ib_uverbs_event_handler(struct ib_event_handler *handler,
                              struct ib_event *event);
 int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
-                           enum rdma_remove_reason why);
+                           enum rdma_remove_reason why, struct ib_udata *udata);
 
 int uverbs_dealloc_mw(struct ib_mw *mw);
 void ib_uverbs_detach_umcast(struct ib_qp *qp,

View file

@@ -439,7 +439,7 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
         return uobj_alloc_commit(uobj, attrs);
 
 err_copy:
-        ib_dealloc_pd(pd);
+        ib_dealloc_pd_user(pd, &attrs->driver_udata);
         pd = NULL;
 err_alloc:
         kfree(pd);
@@ -643,7 +643,7 @@ err_copy:
 }
 
 err_dealloc_xrcd:
-        ib_dealloc_xrcd(xrcd);
+        ib_dealloc_xrcd(xrcd, &attrs->driver_udata);
 
 err:
         uobj_alloc_abort(&obj->uobject, attrs);
@@ -669,9 +669,8 @@ static int ib_uverbs_close_xrcd(struct uverbs_attr_bundle *attrs)
         return uobj_perform_destroy(UVERBS_OBJECT_XRCD, cmd.xrcd_handle, attrs);
 }
 
-int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject,
-                           struct ib_xrcd *xrcd,
-                           enum rdma_remove_reason why)
+int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
+                           enum rdma_remove_reason why, struct ib_udata *udata)
 {
         struct inode *inode;
         int ret;
@@ -681,7 +680,7 @@ int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject,
         if (inode && !atomic_dec_and_test(&xrcd->usecnt))
                 return 0;
 
-        ret = ib_dealloc_xrcd(xrcd);
+        ret = ib_dealloc_xrcd(xrcd, udata);
 
         if (ib_is_destroy_retryable(ret, why, uobject)) {
                 atomic_inc(&xrcd->usecnt);
@@ -766,7 +765,7 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
         return uobj_alloc_commit(uobj, attrs);
 
 err_copy:
-        ib_dereg_mr(mr);
+        ib_dereg_mr_user(mr, &attrs->driver_udata);
 
 err_put:
         uobj_put_obj_read(pd);
@@ -2965,7 +2964,7 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
         return uobj_alloc_commit(&obj->uevent.uobject, attrs);
 
 err_copy:
-        ib_destroy_wq(wq);
+        ib_destroy_wq(wq, &attrs->driver_udata);
 err_put_cq:
         uobj_put_obj_read(cq);
 err_put_pd:
@@ -3461,7 +3460,7 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
         return uobj_alloc_commit(&obj->uevent.uobject, attrs);
 
 err_copy:
-        ib_destroy_srq(srq);
+        ib_destroy_srq_user(srq, &attrs->driver_udata);
 
 err_put:
         uobj_put_obj_read(pd);

View file

@@ -43,8 +43,9 @@ static int uverbs_free_ah(struct ib_uobject *uobject,
                           enum rdma_remove_reason why,
                           struct uverbs_attr_bundle *attrs)
 {
-        return rdma_destroy_ah((struct ib_ah *)uobject->object,
-                               RDMA_DESTROY_AH_SLEEPABLE);
+        return rdma_destroy_ah_user((struct ib_ah *)uobject->object,
+                                    RDMA_DESTROY_AH_SLEEPABLE,
+                                    &attrs->driver_udata);
 }
 
 static int uverbs_free_flow(struct ib_uobject *uobject,
@@ -97,7 +98,7 @@ static int uverbs_free_qp(struct ib_uobject *uobject,
                 ib_uverbs_detach_umcast(qp, uqp);
         }
 
-        ret = ib_destroy_qp(qp);
+        ret = ib_destroy_qp_user(qp, &attrs->driver_udata);
         if (ib_is_destroy_retryable(ret, why, uobject))
                 return ret;
 
@@ -133,7 +134,7 @@ static int uverbs_free_wq(struct ib_uobject *uobject,
                 container_of(uobject, struct ib_uwq_object, uevent.uobject);
         int ret;
 
-        ret = ib_destroy_wq(wq);
+        ret = ib_destroy_wq(wq, &attrs->driver_udata);
         if (ib_is_destroy_retryable(ret, why, uobject))
                 return ret;
 
@@ -151,7 +152,7 @@ static int uverbs_free_srq(struct ib_uobject *uobject,
         enum ib_srq_type srq_type = srq->srq_type;
         int ret;
 
-        ret = ib_destroy_srq(srq);
+        ret = ib_destroy_srq_user(srq, &attrs->driver_udata);
         if (ib_is_destroy_retryable(ret, why, uobject))
                 return ret;
 
@@ -180,7 +181,7 @@ static int uverbs_free_xrcd(struct ib_uobject *uobject,
                 return ret;
 
         mutex_lock(&uobject->context->ufile->device->xrcd_tree_mutex);
-        ret = ib_uverbs_dealloc_xrcd(uobject, xrcd, why);
+        ret = ib_uverbs_dealloc_xrcd(uobject, xrcd, why, &attrs->driver_udata);
         mutex_unlock(&uobject->context->ufile->device->xrcd_tree_mutex);
 
         return ret;
@@ -197,7 +198,7 @@ static int uverbs_free_pd(struct ib_uobject *uobject,
         if (ret)
                 return ret;
 
-        ib_dealloc_pd(pd);
+        ib_dealloc_pd_user(pd, &attrs->driver_udata);
         return 0;
 }

View file

@@ -44,7 +44,7 @@ static int uverbs_free_cq(struct ib_uobject *uobject,
                 container_of(uobject, struct ib_ucq_object, uobject);
         int ret;
 
-        ret = ib_destroy_cq(cq);
+        ret = ib_destroy_cq_user(cq, &attrs->driver_udata);
         if (ib_is_destroy_retryable(ret, why, uobject))
                 return ret;

View file

@@ -45,7 +45,7 @@ static int uverbs_free_dm(struct ib_uobject *uobject,
         if (ret)
                 return ret;
 
-        return dm->device->ops.dealloc_dm(dm);
+        return dm->device->ops.dealloc_dm(dm, attrs);
 }
 
 static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(

View file

@@ -38,7 +38,8 @@ static int uverbs_free_mr(struct ib_uobject *uobject,
                           enum rdma_remove_reason why,
                           struct uverbs_attr_bundle *attrs)
 {
-        return ib_dereg_mr((struct ib_mr *)uobject->object);
+        return ib_dereg_mr_user((struct ib_mr *)uobject->object,
+                                &attrs->driver_udata);
 }
 
 static int UVERBS_HANDLER(UVERBS_METHOD_ADVISE_MR)(
@@ -147,7 +148,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(
         return 0;
 
 err_dereg:
-        ib_dereg_mr(mr);
+        ib_dereg_mr_user(mr, &attrs->driver_udata);
 
         return ret;
 }

View file

@@ -316,17 +316,18 @@ EXPORT_SYMBOL(__ib_alloc_pd);
 /**
  * ib_dealloc_pd - Deallocates a protection domain.
  * @pd: The protection domain to deallocate.
+ * @udata: Valid user data or NULL for kernel object
  *
  * It is an error to call this function while any resources in the pd still
  * exist. The caller is responsible to synchronously destroy them and
  * guarantee no new allocations will happen.
  */
-void ib_dealloc_pd(struct ib_pd *pd)
+void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
 {
         int ret;
 
         if (pd->__internal_mr) {
-                ret = pd->device->ops.dereg_mr(pd->__internal_mr);
+                ret = pd->device->ops.dereg_mr(pd->__internal_mr, NULL);
                 WARN_ON(ret);
                 pd->__internal_mr = NULL;
         }
@@ -336,10 +337,10 @@ void ib_dealloc_pd(struct ib_pd *pd)
         WARN_ON(atomic_read(&pd->usecnt));
 
         rdma_restrack_del(&pd->res);
-        pd->device->ops.dealloc_pd(pd);
+        pd->device->ops.dealloc_pd(pd, udata);
         kfree(pd);
 }
-EXPORT_SYMBOL(ib_dealloc_pd);
+EXPORT_SYMBOL(ib_dealloc_pd_user);
 
 /* Address handles */
 
@@ -930,7 +931,7 @@ int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
 }
 EXPORT_SYMBOL(rdma_query_ah);
 
-int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
+int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata)
 {
         const struct ib_gid_attr *sgid_attr = ah->sgid_attr;
         struct ib_pd *pd;
@@ -939,7 +940,7 @@ int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
         might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE);
 
         pd = ah->pd;
-        ret = ah->device->ops.destroy_ah(ah, flags);
+        ret = ah->device->ops.destroy_ah(ah, flags, udata);
         if (!ret) {
                 atomic_dec(&pd->usecnt);
                 if (sgid_attr)
@@ -948,7 +949,7 @@ int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
 
         return ret;
 }
-EXPORT_SYMBOL(rdma_destroy_ah);
+EXPORT_SYMBOL(rdma_destroy_ah_user);
 
 /* Shared receive queues */
 
@@ -1003,7 +1004,7 @@ int ib_query_srq(struct ib_srq *srq,
 }
 EXPORT_SYMBOL(ib_query_srq);
 
-int ib_destroy_srq(struct ib_srq *srq)
+int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
 {
         struct ib_pd *pd;
         enum ib_srq_type srq_type;
@@ -1021,7 +1022,7 @@ int ib_destroy_srq(struct ib_srq *srq)
         if (srq_type == IB_SRQT_XRC)
                 xrcd = srq->ext.xrc.xrcd;
 
-        ret = srq->device->ops.destroy_srq(srq);
+        ret = srq->device->ops.destroy_srq(srq, udata);
         if (!ret) {
                 atomic_dec(&pd->usecnt);
                 if (srq_type == IB_SRQT_XRC)
@@ -1032,7 +1033,7 @@ int ib_destroy_srq(struct ib_srq *srq)
 
         return ret;
 }
-EXPORT_SYMBOL(ib_destroy_srq);
+EXPORT_SYMBOL(ib_destroy_srq_user);
 
 /* Queue pairs */
 
@@ -1111,8 +1112,9 @@ struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
 }
 EXPORT_SYMBOL(ib_open_qp);
 
-static struct ib_qp *create_xrc_qp(struct ib_qp *qp,
-                                   struct ib_qp_init_attr *qp_init_attr)
+static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
+                                        struct ib_qp_init_attr *qp_init_attr,
+                                        struct ib_udata *udata)
 {
         struct ib_qp *real_qp = qp;
 
@@ -1134,8 +1136,9 @@ static struct ib_qp *create_xrc_qp(struct ib_qp *qp,
         return qp;
 }
 
-struct ib_qp *ib_create_qp(struct ib_pd *pd,
-                           struct ib_qp_init_attr *qp_init_attr)
+struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
+                                struct ib_qp_init_attr *qp_init_attr,
+                                struct ib_udata *udata)
 {
         struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
         struct ib_qp *qp;
@@ -1176,7 +1179,8 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
         qp->port = 0;
 
         if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
-                struct ib_qp *xrc_qp = create_xrc_qp(qp, qp_init_attr);
+                struct ib_qp *xrc_qp =
+                        create_xrc_qp_user(qp, qp_init_attr, udata);
 
                 if (IS_ERR(xrc_qp)) {
                         ret = PTR_ERR(xrc_qp);
@@ -1230,7 +1234,7 @@ err:
 
         return ERR_PTR(ret);
 }
-EXPORT_SYMBOL(ib_create_qp);
+EXPORT_SYMBOL(ib_create_qp_user);
 
 static const struct {
         int valid;
@@ -1837,7 +1841,7 @@ static int __ib_destroy_shared_qp(struct ib_qp *qp)
         return 0;
 }
 
-int ib_destroy_qp(struct ib_qp *qp)
+int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
 {
         const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr;
         const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr;
@@ -1869,7 +1873,7 @@ int ib_destroy_qp(struct ib_qp *qp)
                 rdma_rw_cleanup_mrs(qp);
 
         rdma_restrack_del(&qp->res);
-        ret = qp->device->ops.destroy_qp(qp);
+        ret = qp->device->ops.destroy_qp(qp, udata);
         if (!ret) {
                 if (alt_path_sgid_attr)
                         rdma_put_gid_attr(alt_path_sgid_attr);
@@ -1894,7 +1898,7 @@ int ib_destroy_qp(struct ib_qp *qp)
 
         return ret;
 }
-EXPORT_SYMBOL(ib_destroy_qp);
+EXPORT_SYMBOL(ib_destroy_qp_user);
 
 /* Completion queues */
 
@@ -1933,15 +1937,15 @@ int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 }
 EXPORT_SYMBOL(rdma_set_cq_moderation);
 
-int ib_destroy_cq(struct ib_cq *cq)
+int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
 {
         if (atomic_read(&cq->usecnt))
                 return -EBUSY;
 
         rdma_restrack_del(&cq->res);
-        return cq->device->ops.destroy_cq(cq);
+        return cq->device->ops.destroy_cq(cq, udata);
 }
-EXPORT_SYMBOL(ib_destroy_cq);
+EXPORT_SYMBOL(ib_destroy_cq_user);
 
 int ib_resize_cq(struct ib_cq *cq, int cqe)
 {
@@ -1952,14 +1956,14 @@ EXPORT_SYMBOL(ib_resize_cq);
 
 /* Memory regions */
 
-int ib_dereg_mr(struct ib_mr *mr)
+int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
 {
         struct ib_pd *pd = mr->pd;
         struct ib_dm *dm = mr->dm;
         int ret;
 
         rdma_restrack_del(&mr->res);
-        ret = mr->device->ops.dereg_mr(mr);
+        ret = mr->device->ops.dereg_mr(mr, udata);
         if (!ret) {
                 atomic_dec(&pd->usecnt);
                 if (dm)
@@ -1968,13 +1972,14 @@ int ib_dereg_mr(struct ib_mr *mr)
 
         return ret;
 }
-EXPORT_SYMBOL(ib_dereg_mr);
+EXPORT_SYMBOL(ib_dereg_mr_user);
 
 /**
  * ib_alloc_mr() - Allocates a memory region
  * @pd:            protection domain associated with the region
  * @mr_type:       memory region type
 * @max_num_sg:    maximum sg entries available for registration.
+ * @udata:         user data or null for kernel objects
  *
  * Notes:
  * Memory registeration page/sg lists must not exceed max_num_sg.
@@ -1982,16 +1987,15 @@ EXPORT_SYMBOL(ib_dereg_mr);
  *      max_num_sg * used_page_size.
  *
  */
-struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
-                          enum ib_mr_type mr_type,
-                          u32 max_num_sg)
+struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type,
+                               u32 max_num_sg, struct ib_udata *udata)
 {
         struct ib_mr *mr;
 
         if (!pd->device->ops.alloc_mr)
                 return ERR_PTR(-EOPNOTSUPP);
 
-        mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg);
+        mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg, udata);
         if (!IS_ERR(mr)) {
                 mr->device = pd->device;
                 mr->pd = pd;
@@ -2005,7 +2009,7 @@ struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
 
         return mr;
 }
-EXPORT_SYMBOL(ib_alloc_mr);
+EXPORT_SYMBOL(ib_alloc_mr_user);
 
 /* "Fast" memory regions */
 
@@ -2151,7 +2155,7 @@ struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller)
 }
 EXPORT_SYMBOL(__ib_alloc_xrcd);
 
-int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
+int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
 {
         struct ib_qp *qp;
         int ret;
@@ -2166,7 +2170,7 @@ int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
                         return ret;
         }
 
-        return xrcd->device->ops.dealloc_xrcd(xrcd);
+        return xrcd->device->ops.dealloc_xrcd(xrcd, udata);
 }
 EXPORT_SYMBOL(ib_dealloc_xrcd);
 
@@ -2210,10 +2214,11 @@ struct ib_wq *ib_create_wq(struct ib_pd *pd,
 EXPORT_SYMBOL(ib_create_wq);
 
 /**
- * ib_destroy_wq - Destroys the specified WQ.
+ * ib_destroy_wq - Destroys the specified user WQ.
  * @wq: The WQ to destroy.
+ * @udata: Valid user data
  */
-int ib_destroy_wq(struct ib_wq *wq)
+int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
 {
         int err;
         struct ib_cq *cq = wq->cq;
@@ -2222,7 +2227,7 @@ int ib_destroy_wq(struct ib_wq *wq)
         if (atomic_read(&wq->usecnt))
                 return -EBUSY;
 
-        err = wq->device->ops.destroy_wq(wq);
+        err = wq->device->ops.destroy_wq(wq, udata);
         if (!err) {
                 atomic_dec(&pd->usecnt);
                 atomic_dec(&cq->usecnt);

View file

@@ -564,7 +564,7 @@ fail:
 }
 
 /* Protection Domains */
-void bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
+void bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
 {
         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
         struct bnxt_re_dev *rdev = pd->rdev;
@@ -635,7 +635,7 @@ fail:
 }
 
 /* Address Handles */
-int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
+int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags, struct ib_udata *udata)
 {
         struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
         struct bnxt_re_dev *rdev = ah->rdev;
@@ -789,7 +789,7 @@ void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
 }
 
 /* Queue Pairs */
-int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
+int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
 {
         struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
         struct bnxt_re_dev *rdev = qp->rdev;
@@ -1327,7 +1327,7 @@ static enum ib_mtu __to_ib_mtu(u32 mtu)
 }
 
 /* Shared Receive Queues */
-int bnxt_re_destroy_srq(struct ib_srq *ib_srq)
+int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
 {
         struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
                                                ib_srq);
@@ -2560,7 +2560,7 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
 }
 
 /* Completion Queues */
-int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
+int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
         int rc;
         struct bnxt_re_cq *cq;
@@ -3382,7 +3382,7 @@ fail:
         return ERR_PTR(rc);
 }
 
-int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
+int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
 {
         struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
         struct bnxt_re_dev *rdev = mr->rdev;
@@ -3428,7 +3428,7 @@ int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
 }
 
 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
-                               u32 max_num_sg)
+                               u32 max_num_sg, struct ib_udata *udata)
 {
         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
         struct bnxt_re_dev *rdev = pd->rdev;

View file

@@ -165,14 +165,14 @@ enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
                                             u8 port_num);
 int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
                      struct ib_udata *udata);
-void bnxt_re_dealloc_pd(struct ib_pd *pd);
+void bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
 struct ib_ah *bnxt_re_create_ah(struct ib_pd *pd,
                                 struct rdma_ah_attr *ah_attr,
                                 u32 flags,
                                 struct ib_udata *udata);
 int bnxt_re_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
 int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
-int bnxt_re_destroy_ah(struct ib_ah *ah, u32 flags);
+int bnxt_re_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
 struct ib_srq *bnxt_re_create_srq(struct ib_pd *pd,
                                   struct ib_srq_init_attr *srq_init_attr,
                                   struct ib_udata *udata);
@@ -180,7 +180,7 @@ int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
                        enum ib_srq_attr_mask srq_attr_mask,
                        struct ib_udata *udata);
 int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
-int bnxt_re_destroy_srq(struct ib_srq *srq);
+int bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
 int bnxt_re_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *recv_wr,
                           const struct ib_recv_wr **bad_recv_wr);
 struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd,
@@ -190,7 +190,7 @@ int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
                       int qp_attr_mask, struct ib_udata *udata);
 int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
                      int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
-int bnxt_re_destroy_qp(struct ib_qp *qp);
+int bnxt_re_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
 int bnxt_re_post_send(struct ib_qp *qp, const struct ib_send_wr *send_wr,
                       const struct ib_send_wr **bad_send_wr);
 int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
@@ -199,7 +199,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
                                 const struct ib_cq_init_attr *attr,
                                 struct ib_ucontext *context,
                                 struct ib_udata *udata);
-int bnxt_re_destroy_cq(struct ib_cq *cq);
+int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
 int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
 int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
@@ -207,8 +207,8 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
 int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
                       unsigned int *sg_offset);
 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type,
-                               u32 max_num_sg);
-int bnxt_re_dereg_mr(struct ib_mr *mr);
+                               u32 max_num_sg, struct ib_udata *udata);
+int bnxt_re_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
 struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
                                struct ib_udata *udata);
 int bnxt_re_dealloc_mw(struct ib_mw *mw);

View file

@@ -88,7 +88,7 @@ static int iwch_alloc_ucontext(struct ib_ucontext *ucontext,
         return 0;
 }
 
-static int iwch_destroy_cq(struct ib_cq *ib_cq)
+static int iwch_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
         struct iwch_cq *chp;
 
@@ -175,7 +175,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
                 mm = kmalloc(sizeof *mm, GFP_KERNEL);
                 if (!mm) {
-                        iwch_destroy_cq(&chp->ibcq);
+                        iwch_destroy_cq(&chp->ibcq, udata);
                         return ERR_PTR(-ENOMEM);
                 }
                 uresp.cqid = chp->cq.cqid;
@@ -201,7 +201,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
                 }
                 if (ib_copy_to_udata(udata, &uresp, resplen)) {
                         kfree(mm);
-                        iwch_destroy_cq(&chp->ibcq);
+                        iwch_destroy_cq(&chp->ibcq, udata);
                         return ERR_PTR(-EFAULT);
                 }
                 insert_mmap(ucontext, mm);
@@ -367,7 +367,7 @@ static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
         return ret;
 }
 
-static void iwch_deallocate_pd(struct ib_pd *pd)
+static void iwch_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata)
 {
         struct iwch_dev *rhp;
         struct iwch_pd *php;
@@ -398,7 +398,7 @@ static int iwch_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context,
                 struct iwch_alloc_pd_resp resp = {.pdid = php->pdid};
 
                 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
-                        iwch_deallocate_pd(&php->ibpd);
+                        iwch_deallocate_pd(&php->ibpd, udata);
                         return -EFAULT;
                 }
         }
@@ -406,7 +406,7 @@ static int iwch_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context,
         return 0;
 }
 
-static int iwch_dereg_mr(struct ib_mr *ib_mr)
+static int iwch_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
 {
         struct iwch_dev *rhp;
         struct iwch_mr *mhp;
@@ -590,7 +590,7 @@ pbl_done:
                          uresp.pbl_addr);
 
                 if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
-                        iwch_dereg_mr(&mhp->ibmr);
+                        iwch_dereg_mr(&mhp->ibmr, udata);
                         err = -EFAULT;
                         goto err;
                 }
@@ -661,9 +661,8 @@ static int iwch_dealloc_mw(struct ib_mw *mw)
         return 0;
 }
 
-static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
-                                   enum ib_mr_type mr_type,
-                                   u32 max_num_sg)
+static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+                                   u32 max_num_sg, struct ib_udata *udata)
 {
         struct iwch_dev *rhp;
         struct iwch_pd *php;
@@ -742,7 +741,7 @@ static int iwch_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
         return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, iwch_set_page);
 }
 
-static int iwch_destroy_qp(struct ib_qp *ib_qp)
+static int iwch_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
 {
         struct iwch_dev *rhp;
         struct iwch_qp *qhp;
@@ -885,14 +884,14 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
 
                 mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
                 if (!mm1) {
-                        iwch_destroy_qp(&qhp->ibqp);
+                        iwch_destroy_qp(&qhp->ibqp, udata);
                         return ERR_PTR(-ENOMEM);
                 }
 
                 mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
                 if (!mm2) {
                         kfree(mm1);
-                        iwch_destroy_qp(&qhp->ibqp);
+                        iwch_destroy_qp(&qhp->ibqp, udata);
                         return ERR_PTR(-ENOMEM);
                 }
 
@@ -909,7 +908,7 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
                 if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
                         kfree(mm1);
                         kfree(mm2);
-                        iwch_destroy_qp(&qhp->ibqp);
+                        iwch_destroy_qp(&qhp->ibqp, udata);
                         return ERR_PTR(-EFAULT);
                 }
                 mm1->key = uresp.key;

View file

@@ -968,7 +968,7 @@ int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
         return !err || err == -ENODATA ? npolled : err;
 }
 
-int c4iw_destroy_cq(struct ib_cq *ib_cq)
+int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
         struct c4iw_cq *chp;
         struct c4iw_ucontext *ucontext;

View file

@@ -979,9 +979,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
 void c4iw_qp_add_ref(struct ib_qp *qp);
 void c4iw_qp_rem_ref(struct ib_qp *qp);
-struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
-                            enum ib_mr_type mr_type,
-                            u32 max_num_sg);
+struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+                            u32 max_num_sg, struct ib_udata *udata);
 int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                    unsigned int *sg_offset);
 int c4iw_dealloc_mw(struct ib_mw *mw);
@@ -992,8 +991,8 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
                                u64 length, u64 virt, int acc,
                                struct ib_udata *udata);
 struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
-int c4iw_dereg_mr(struct ib_mr *ib_mr);
-int c4iw_destroy_cq(struct ib_cq *ib_cq);
+int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
+int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
 struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
                              const struct ib_cq_init_attr *attr,
                              struct ib_ucontext *ib_context,
@@ -1002,11 +1001,11 @@ int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
 int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
                     enum ib_srq_attr_mask srq_attr_mask,
                     struct ib_udata *udata);
-int c4iw_destroy_srq(struct ib_srq *ib_srq);
+int c4iw_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata);
 struct ib_srq *c4iw_create_srq(struct ib_pd *pd,
                                struct ib_srq_init_attr *attrs,
                                struct ib_udata *udata);
-int c4iw_destroy_qp(struct ib_qp *ib_qp);
+int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata);
 struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
                              struct ib_qp_init_attr *attrs,
                              struct ib_udata *udata);

View file

@@ -683,9 +683,8 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
         return 0;
 }
 
-struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
-                            enum ib_mr_type mr_type,
-                            u32 max_num_sg)
+struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+                            u32 max_num_sg, struct ib_udata *udata)
 {
         struct c4iw_dev *rhp;
         struct c4iw_pd *php;
@@ -786,7 +785,7 @@ int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
         return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page);
 }
 
-int c4iw_dereg_mr(struct ib_mr *ib_mr)
+int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
 {
         struct c4iw_dev *rhp;
         struct c4iw_mr *mhp;

View file

@@ -190,7 +190,7 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
         return ret;
 }
 
-static void c4iw_deallocate_pd(struct ib_pd *pd)
+static void c4iw_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata)
 {
         struct c4iw_dev *rhp;
         struct c4iw_pd *php;
@@ -224,7 +224,7 @@ static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context,
                 struct c4iw_alloc_pd_resp uresp = {.pdid = php->pdid};
 
                 if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
-                        c4iw_deallocate_pd(&php->ibpd);
+                        c4iw_deallocate_pd(&php->ibpd, udata);
                         return -EFAULT;
                 }
         }

View file

@@ -2095,7 +2095,7 @@ out:
         return ret;
 }
 
-int c4iw_destroy_qp(struct ib_qp *ib_qp)
+int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
 {
         struct c4iw_dev *rhp;
         struct c4iw_qp *qhp;
@@ -2826,7 +2826,7 @@ err_free_srq:
         return ERR_PTR(ret);
 }
 
-int c4iw_destroy_srq(struct ib_srq *ibsrq)
+int c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 {
         struct c4iw_dev *rhp;
         struct c4iw_srq *srq;

View file

@@ -111,7 +111,7 @@ int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
         return 0;
 }
 
-int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags)
+int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata)
 {
         kfree(to_hr_ah(ah));

View file

@@ -444,14 +444,14 @@ err_cq:
 }
 EXPORT_SYMBOL_GPL(hns_roce_ib_create_cq);
 
-int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
+int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
         struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
         struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
         int ret = 0;
 
         if (hr_dev->hw->destroy_cq) {
-                ret = hr_dev->hw->destroy_cq(ib_cq);
+                ret = hr_dev->hw->destroy_cq(ib_cq, udata);
         } else {
                 hns_roce_free_cq(hr_dev, hr_cq);
                 hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);

View file

@@ -905,7 +905,7 @@ struct hns_roce_hw {
         int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
                          int attr_mask, enum ib_qp_state cur_state,
                          enum ib_qp_state new_state);
-        int (*destroy_qp)(struct ib_qp *ibqp);
+        int (*destroy_qp)(struct ib_qp *ibqp, struct ib_udata *udata);
         int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
                          struct hns_roce_qp *hr_qp);
         int (*post_send)(struct ib_qp *ibqp, const struct ib_send_wr *wr,
@@ -914,8 +914,9 @@ struct hns_roce_hw {
                          const struct ib_recv_wr **bad_recv_wr);
         int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
         int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
-        int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
-        int (*destroy_cq)(struct ib_cq *ibcq);
+        int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
+                        struct ib_udata *udata);
+        int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
         int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
         int (*init_eq)(struct hns_roce_dev *hr_dev);
         void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
@@ -1109,11 +1110,11 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *pd,
                                  u32 flags,
                                  struct ib_udata *udata);
 int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
-int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags);
+int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
 
 int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
                       struct ib_udata *udata);
-void hns_roce_dealloc_pd(struct ib_pd *pd);
+void hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
 
 struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
 struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
@@ -1123,10 +1124,10 @@ int hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length,
                            u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
                            struct ib_udata *udata);
 struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
-                                u32 max_num_sg);
+                                u32 max_num_sg, struct ib_udata *udata);
 int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                        unsigned int *sg_offset);
-int hns_roce_dereg_mr(struct ib_mr *ibmr);
+int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
 int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
                        struct hns_roce_cmd_mailbox *mailbox,
                        unsigned long mpt_index);
@@ -1150,7 +1151,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
 int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
                         enum ib_srq_attr_mask srq_attr_mask,
                         struct ib_udata *udata);
-int hns_roce_destroy_srq(struct ib_srq *ibsrq);
+int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
 
 struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
                                  struct ib_qp_init_attr *init_attr,
@@ -1179,7 +1180,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
                                     struct ib_ucontext *context,
                                     struct ib_udata *udata);
 
-int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq);
+int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
 void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
 
 int hns_roce_db_map_user(struct hns_roce_ucontext *context,

View file

@@ -855,17 +855,17 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 create_lp_qp_failed:
         for (i -= 1; i >= 0; i--) {
                 hr_qp = free_mr->mr_free_qp[i];
-                if (hns_roce_v1_destroy_qp(&hr_qp->ibqp))
+                if (hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL))
                         dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
         }
 
-        hns_roce_dealloc_pd(pd);
+        hns_roce_dealloc_pd(pd, NULL);
 
 alloc_pd_failed:
         kfree(pd);
 
 alloc_mem_failed:
-        if (hns_roce_ib_destroy_cq(cq))
+        if (hns_roce_ib_destroy_cq(cq, NULL))
                 dev_err(dev, "Destroy cq for create_lp_qp failed!\n");
 
         return ret;
@@ -888,17 +888,17 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
                 if (!hr_qp)
                         continue;
 
-                ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp);
+                ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL);
                 if (ret)
                         dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
                                 i, ret);
         }
 
-        ret = hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq);
+        ret = hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
         if (ret)
                 dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret);
 
-        hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd);
+        hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
 }
 
 static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
@@ -1096,7 +1096,7 @@ free_work:
 }
 
 static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
-                                struct hns_roce_mr *mr)
+                                struct hns_roce_mr *mr, struct ib_udata *udata)
 {
         struct device *dev = &hr_dev->pdev->dev;
         struct hns_roce_mr_free_work *mr_work;
@@ -3921,7 +3921,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
         dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn);
 }
 
-int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
+int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 {
         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
@@ -3998,7 +3998,7 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
         return 0;
 }
 
-static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
+static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 {
         struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
         struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);

View file

@@ -1106,6 +1106,6 @@ struct hns_roce_v1_priv {
 int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
 int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
-int hns_roce_v1_destroy_qp(struct ib_qp *ibqp);
+int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
 
 #endif

View file

@@ -4513,7 +4513,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
         return 0;
 }
 
-static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp)
+static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 {
         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);

View file

@@ -1282,14 +1282,14 @@ free_cmd_mbox:
         return ret;
 }
 
-int hns_roce_dereg_mr(struct ib_mr *ibmr)
+int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 {
         struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
         struct hns_roce_mr *mr = to_hr_mr(ibmr);
         int ret = 0;
 
         if (hr_dev->hw->dereg_mr) {
-                ret = hr_dev->hw->dereg_mr(hr_dev, mr);
+                ret = hr_dev->hw->dereg_mr(hr_dev, mr, udata);
         } else {
                 hns_roce_mr_free(hr_dev, mr);
 
@@ -1303,7 +1303,7 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr)
 }
 
 struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
-                                u32 max_num_sg)
+                                u32 max_num_sg, struct ib_udata *udata)
 {
         struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
         struct device *dev = hr_dev->dev;

View file

@@ -86,7 +86,7 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
 }
 EXPORT_SYMBOL_GPL(hns_roce_alloc_pd);
 
-void hns_roce_dealloc_pd(struct ib_pd *pd)
+void hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 {
         hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn);
 }

View file

@@ -423,7 +423,7 @@ err_srq:
         return ERR_PTR(ret);
 }
 
-int hns_roce_destroy_srq(struct ib_srq *ibsrq)
+int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 {
         struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
         struct hns_roce_srq *srq = to_hr_srq(ibsrq);

View file

@@ -3490,7 +3490,8 @@ static void i40iw_qp_disconnect(struct i40iw_qp *iwqp)
         /* Need to free the Last Streaming Mode Message */
         if (iwqp->ietf_mem.va) {
                 if (iwqp->lsmm_mr)
-                        iwibdev->ibdev.ops.dereg_mr(iwqp->lsmm_mr);
+                        iwibdev->ibdev.ops.dereg_mr(iwqp->lsmm_mr,
+                                                    NULL);
                 i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->ietf_mem);
         }
 }

View file

@@ -342,8 +342,9 @@ error:
 /**
  * i40iw_dealloc_pd - deallocate pd
  * @ibpd: ptr of pd to be deallocated
+ * @udata: user data or null for kernel object
  */
-static void i40iw_dealloc_pd(struct ib_pd *ibpd)
+static void i40iw_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
         struct i40iw_pd *iwpd = to_iwpd(ibpd);
         struct i40iw_device *iwdev = to_iwdev(ibpd->device);
@@ -413,7 +414,7 @@ static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
  * i40iw_destroy_qp - destroy qp
  * @ibqp: qp's ib pointer also to get to device's qp address
  */
-static int i40iw_destroy_qp(struct ib_qp *ibqp)
+static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 {
         struct i40iw_qp *iwqp = to_iwqp(ibqp);
 
@@ -744,7 +745,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
                 err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
                 if (err_code) {
                         i40iw_pr_err("copy_to_udata failed\n");
-                        i40iw_destroy_qp(&iwqp->ibqp);
+                        i40iw_destroy_qp(&iwqp->ibqp, udata);
                         /* let the completion of the qp destroy free the qp */
                         return ERR_PTR(err_code);
                 }
@@ -1063,8 +1064,9 @@ void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
 /**
  * i40iw_destroy_cq - destroy cq
  * @ib_cq: cq pointer
+ * @udata: user data or NULL for kernel object
  */
-static int i40iw_destroy_cq(struct ib_cq *ib_cq)
+static int i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
         struct i40iw_cq *iwcq;
         struct i40iw_device *iwdev;
@@ -1601,10 +1603,10 @@ static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr
  * @pd: ibpd pointer
  * @mr_type: memory for stag registrion
 * @max_num_sg: man number of pages
+ * @udata: user data or NULL for kernel objects
  */
-static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd,
-                                    enum ib_mr_type mr_type,
-                                    u32 max_num_sg)
+static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+                                    u32 max_num_sg, struct ib_udata *udata)
 {
         struct i40iw_pd *iwpd = to_iwpd(pd);
         struct i40iw_device *iwdev = to_iwdev(pd->device);
@@ -2038,7 +2040,7 @@ static void i40iw_del_memlist(struct i40iw_mr *iwmr,
  * i40iw_dereg_mr - deregister mr
  * @ib_mr: mr ptr for dereg
  */
-static int i40iw_dereg_mr(struct ib_mr *ib_mr)
+static int i40iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
 {
         struct ib_pd *ibpd = ib_mr->pd;
         struct i40iw_pd *iwpd = to_iwpd(ibpd);

View file

@@ -250,7 +250,7 @@ int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
         return 0;
 }
 
-int mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags)
+int mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata)
 {
         kfree(to_mah(ah));
         return 0;

View file

@@ -485,7 +485,7 @@ out:
         return err;
 }
 
-int mlx4_ib_destroy_cq(struct ib_cq *cq)
+int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
 {
         struct mlx4_ib_dev *dev = to_mdev(cq->device);
         struct mlx4_ib_cq *mcq = to_mcq(cq);

View file

@@ -1411,7 +1411,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
 
         sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
         if (sqp->tx_ring[wire_tx_ix].ah)
-                mlx4_ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0);
+                mlx4_ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0, NULL);
         sqp->tx_ring[wire_tx_ix].ah = ah;
         ib_dma_sync_single_for_cpu(&dev->ib_dev,
                                    sqp->tx_ring[wire_tx_ix].buf.map,
@@ -1450,7 +1450,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
         spin_unlock(&sqp->tx_lock);
         sqp->tx_ring[wire_tx_ix].ah = NULL;
 out:
-        mlx4_ib_destroy_ah(ah, 0);
+        mlx4_ib_destroy_ah(ah, 0, NULL);
         return ret;
 }
 
@@ -1903,7 +1903,8 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
                         switch (wc.opcode) {
                         case IB_WC_SEND:
                                 mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id &
-                                              (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
+                                              (MLX4_NUM_TUNNEL_BUFS - 1)].ah,
+                                              0, NULL);
                                 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
                                         = NULL;
                                 spin_lock(&sqp->tx_lock);
@@ -1932,7 +1933,8 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
                                          ctx->slave, wc.status, wc.wr_id);
                         if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
                                 mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id &
-                                              (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
+                                              (MLX4_NUM_TUNNEL_BUFS - 1)].ah,
+                                              0, NULL);
                                 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
                                         = NULL;
                                 spin_lock(&sqp->tx_lock);

View file

@@ -1195,7 +1195,7 @@ static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
         return 0;
 }
 
-static void mlx4_ib_dealloc_pd(struct ib_pd *pd)
+static void mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 {
         mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
 }
@@ -1243,7 +1243,7 @@ err1:
         return ERR_PTR(err);
 }
 
-static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
+static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
 {
         ib_destroy_cq(to_mxrcd(xrcd)->cq);
         ib_dealloc_pd(to_mxrcd(xrcd)->pd);

View file

@@ -734,13 +734,12 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
 struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				  u64 virt_addr, int access_flags,
 				  struct ib_udata *udata);
-int mlx4_ib_dereg_mr(struct ib_mr *mr);
+int mlx4_ib_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
 struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 			       struct ib_udata *udata);
 int mlx4_ib_dealloc_mw(struct ib_mw *mw);
-struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
-			       enum ib_mr_type mr_type,
-			       u32 max_num_sg);
+struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+			       u32 max_num_sg, struct ib_udata *udata);
 int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 		      unsigned int *sg_offset);
 int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
@@ -749,7 +748,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
 				const struct ib_cq_init_attr *attr,
 				struct ib_ucontext *context,
 				struct ib_udata *udata);
-int mlx4_ib_destroy_cq(struct ib_cq *cq);
+int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
 int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
 int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
 void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
@@ -762,7 +761,7 @@ struct ib_ah *mlx4_ib_create_ah_slave(struct ib_pd *pd,
 				      int slave_sgid_index, u8 *s_mac,
 				      u16 vlan_tag);
 int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
-int mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags);
+int mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
 
 struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
 				  struct ib_srq_init_attr *init_attr,
@@ -770,7 +769,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
 int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
 int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
-int mlx4_ib_destroy_srq(struct ib_srq *srq);
+int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
 void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
 int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 			  const struct ib_recv_wr **bad_wr);
@@ -778,7 +777,7 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
 				struct ib_qp_init_attr *init_attr,
 				struct ib_udata *udata);
-int mlx4_ib_destroy_qp(struct ib_qp *qp);
+int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
 void mlx4_ib_drain_sq(struct ib_qp *qp);
 void mlx4_ib_drain_rq(struct ib_qp *qp);
 int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
@@ -913,7 +912,7 @@ void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port);
 struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
 				struct ib_wq_init_attr *init_attr,
 				struct ib_udata *udata);
-int mlx4_ib_destroy_wq(struct ib_wq *wq);
+int mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
 int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
 		      u32 wq_attr_mask, struct ib_udata *udata);


@@ -595,7 +595,7 @@ mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
 	}
 }
 
-int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
+int mlx4_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 {
 	struct mlx4_ib_mr *mr = to_mmr(ibmr);
 	int ret;
@@ -655,9 +655,8 @@ int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
 	return 0;
 }
 
-struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
-			       enum ib_mr_type mr_type,
-			       u32 max_num_sg)
+struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+			       u32 max_num_sg, struct ib_udata *udata)
 {
 	struct mlx4_ib_dev *dev = to_mdev(pd->device);
 	struct mlx4_ib_mr *mr;


@@ -1626,7 +1626,7 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp)
 	return 0;
 }
 
-int mlx4_ib_destroy_qp(struct ib_qp *qp)
+int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
 {
 	struct mlx4_ib_qp *mqp = to_mqp(qp);
@@ -4244,7 +4244,7 @@ int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr,
 	return err;
 }
 
-int mlx4_ib_destroy_wq(struct ib_wq *ibwq)
+int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
 {
 	struct mlx4_ib_dev *dev = to_mdev(ibwq->device);
 	struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);


@@ -272,7 +272,7 @@ int mlx4_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
 	return 0;
 }
 
-int mlx4_ib_destroy_srq(struct ib_srq *srq)
+int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
 {
 	struct mlx4_ib_dev *dev = to_mdev(srq->device);
 	struct mlx4_ib_srq *msrq = to_msrq(srq);
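All of the mlx4 hunks above make the same mechanical change: each destroy/dealloc verb gains a trailing struct ib_udata *udata parameter while its body is left alone. As a minimal sketch of the resulting shape (the foo_* driver names are hypothetical, not code from this patch):

    /* udata is non-NULL when the object was created through the user verbs
     * path, and NULL when the kernel created the object internally. */
    int foo_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
    {
            struct foo_cq *fcq = to_fcq(cq);

            foo_hw_destroy_cq(to_fdev(cq->device), fcq); /* same on both paths */
            if (udata)
                    foo_cleanup_user_mappings(fcq);      /* user-object-only teardown */
            kfree(fcq);
            return 0;
    }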


@@ -131,7 +131,7 @@ int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
 	return 0;
 }
 
-int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags)
+int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata)
 {
 	kfree(to_mah(ah));
 	return 0;


@@ -996,8 +996,7 @@ err_create:
 	return ERR_PTR(err);
 }
 
-
-int mlx5_ib_destroy_cq(struct ib_cq *cq)
+int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(cq->device);
 	struct mlx5_ib_cq *mcq = to_mcq(cq);


@@ -2314,7 +2314,7 @@ err_free:
 	return ERR_PTR(err);
 }
 
-int mlx5_ib_dealloc_dm(struct ib_dm *ibdm)
+int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
 {
 	struct mlx5_memic *memic = &to_mdev(ibdm->device)->memic;
 	struct mlx5_ib_dm *dm = to_mdm(ibdm);
@@ -2370,7 +2370,7 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
 	return 0;
 }
 
-static void mlx5_ib_dealloc_pd(struct ib_pd *pd)
+static void mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
 	struct mlx5_ib_pd *mpd = to_mpd(pd);
@@ -4590,7 +4590,7 @@ static void destroy_umrc_res(struct mlx5_ib_dev *dev)
 		mlx5_ib_warn(dev, "mr cache cleanup failed\n");
 
 	if (dev->umrc.qp)
-		mlx5_ib_destroy_qp(dev->umrc.qp);
+		mlx5_ib_destroy_qp(dev->umrc.qp, NULL);
 	if (dev->umrc.cq)
 		ib_free_cq(dev->umrc.cq);
 	if (dev->umrc.pd)
@@ -4695,7 +4695,7 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
 	return 0;
 
 error_4:
-	mlx5_ib_destroy_qp(qp);
+	mlx5_ib_destroy_qp(qp, NULL);
 	dev->umrc.qp = NULL;
 
 error_3:
@@ -4837,15 +4837,15 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
 	return 0;
 
 error5:
-	mlx5_ib_destroy_srq(devr->s0);
+	mlx5_ib_destroy_srq(devr->s0, NULL);
 error4:
-	mlx5_ib_dealloc_xrcd(devr->x1);
+	mlx5_ib_dealloc_xrcd(devr->x1, NULL);
 error3:
-	mlx5_ib_dealloc_xrcd(devr->x0);
+	mlx5_ib_dealloc_xrcd(devr->x0, NULL);
 error2:
-	mlx5_ib_destroy_cq(devr->c0);
+	mlx5_ib_destroy_cq(devr->c0, NULL);
 error1:
-	mlx5_ib_dealloc_pd(devr->p0);
+	mlx5_ib_dealloc_pd(devr->p0, NULL);
 error0:
 	kfree(devr->p0);
 	return ret;
@@ -4857,12 +4857,12 @@ static void destroy_dev_resources(struct mlx5_ib_resources *devr)
 		container_of(devr, struct mlx5_ib_dev, devr);
 	int port;
 
-	mlx5_ib_destroy_srq(devr->s1);
-	mlx5_ib_destroy_srq(devr->s0);
-	mlx5_ib_dealloc_xrcd(devr->x0);
-	mlx5_ib_dealloc_xrcd(devr->x1);
-	mlx5_ib_destroy_cq(devr->c0);
-	mlx5_ib_dealloc_pd(devr->p0);
+	mlx5_ib_destroy_srq(devr->s1, NULL);
+	mlx5_ib_destroy_srq(devr->s0, NULL);
+	mlx5_ib_dealloc_xrcd(devr->x0, NULL);
+	mlx5_ib_dealloc_xrcd(devr->x1, NULL);
+	mlx5_ib_destroy_cq(devr->c0, NULL);
+	mlx5_ib_dealloc_pd(devr->p0, NULL);
 	kfree(devr->p0);
 
 	/* Make sure no change P_Key work items are still executing */
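The teardown hunks above also show the caller-side half of the contract: in-kernel users of these verbs have no user context, so they pass NULL for the new argument. A hedged sketch of that convention (names invented for illustration):

    /* Kernel-internal resource teardown: every destroy verb takes NULL
     * because no user-space context is involved. */
    static void foo_free_dev_resources(struct foo_resources *res)
    {
            foo_destroy_srq(res->srq, NULL);
            foo_destroy_cq(res->cq, NULL);
            foo_dealloc_pd(res->pd, NULL);
    }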


@@ -1049,14 +1049,14 @@ void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
 struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
 				u32 flags, struct ib_udata *udata);
 int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
-int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags);
+int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
 struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 				  struct ib_srq_init_attr *init_attr,
 				  struct ib_udata *udata);
 int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
 int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
-int mlx5_ib_destroy_srq(struct ib_srq *srq);
+int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
 int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 			  const struct ib_recv_wr **bad_wr);
 int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
@@ -1068,7 +1068,7 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		      int attr_mask, struct ib_udata *udata);
 int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
 		     struct ib_qp_init_attr *qp_init_attr);
-int mlx5_ib_destroy_qp(struct ib_qp *qp);
+int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
 void mlx5_ib_drain_sq(struct ib_qp *qp);
 void mlx5_ib_drain_rq(struct ib_qp *qp);
 int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
@@ -1085,7 +1085,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 				const struct ib_cq_init_attr *attr,
 				struct ib_ucontext *context,
 				struct ib_udata *udata);
-int mlx5_ib_destroy_cq(struct ib_cq *cq);
+int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
 int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
 int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
 int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
@@ -1112,10 +1112,9 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
 int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 			  u64 length, u64 virt_addr, int access_flags,
 			  struct ib_pd *pd, struct ib_udata *udata);
-int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
-struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
-			       enum ib_mr_type mr_type,
-			       u32 max_num_sg);
+int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
+struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+			       u32 max_num_sg, struct ib_udata *udata);
 int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 		      unsigned int *sg_offset);
 int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
@@ -1126,7 +1125,7 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
 				   struct ib_ucontext *context,
 				   struct ib_udata *udata);
-int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
+int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
 int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
 int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
 int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
@@ -1170,7 +1169,7 @@ int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
 struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
 				struct ib_wq_init_attr *init_attr,
 				struct ib_udata *udata);
-int mlx5_ib_destroy_wq(struct ib_wq *wq);
+int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
 int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
 		      u32 wq_attr_mask, struct ib_udata *udata);
 struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
@@ -1182,7 +1181,7 @@ struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
 			       struct ib_ucontext *context,
 			       struct ib_dm_alloc_attr *attr,
 			       struct uverbs_attr_bundle *attrs);
-int mlx5_ib_dealloc_dm(struct ib_dm *ibdm);
+int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs);
 struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
 				struct ib_dm_mr_attr *attr,
 				struct uverbs_attr_bundle *attrs);
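One wrinkle worth noting in this header: dealloc_dm is an ioctl-path verb, so its destroy hook receives the whole struct uverbs_attr_bundle rather than a bare ib_udata. A sketch of the two declaration shapes side by side (hypothetical foo_* names):

    /* write()-path verbs receive ib_udata; ioctl-path verbs such as
     * device memory (DM) receive the full attribute bundle instead. */
    int foo_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
    int foo_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs);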


@@ -1623,15 +1623,14 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 	kfree(mr);
 }
 
-int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 {
 	dereg_mr(to_mdev(ibmr->device), to_mmr(ibmr));
 	return 0;
 }
 
-struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
-			       enum ib_mr_type mr_type,
-			       u32 max_num_sg)
+struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+			       u32 max_num_sg, struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
 	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);


@@ -2732,7 +2732,7 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
 	return 0;
 }
 
-int mlx5_ib_destroy_qp(struct ib_qp *qp)
+int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
 	struct mlx5_ib_qp *mqp = to_mqp(qp);
@@ -5647,7 +5647,7 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
 	return &xrcd->ibxrcd;
 }
 
-int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
+int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
 	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
@@ -5965,7 +5965,7 @@ err:
 	return ERR_PTR(err);
 }
 
-int mlx5_ib_destroy_wq(struct ib_wq *wq)
+int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(wq->device);
 	struct mlx5_ib_rwq *rwq = to_mrwq(wq);


@@ -387,7 +387,7 @@ out_box:
 	return ret;
 }
 
-int mlx5_ib_destroy_srq(struct ib_srq *srq)
+int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(srq->device);
 	struct mlx5_ib_srq *msrq = to_msrq(srq);


@@ -384,7 +384,7 @@ static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
 	return 0;
 }
 
-static void mthca_dealloc_pd(struct ib_pd *pd)
+static void mthca_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 {
 	mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
 }
@@ -411,7 +411,7 @@ static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
 	return &ah->ibah;
 }
 
-static int mthca_ah_destroy(struct ib_ah *ah, u32 flags)
+static int mthca_ah_destroy(struct ib_ah *ah, u32 flags, struct ib_udata *udata)
 {
 	mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
 	kfree(ah);
@@ -477,7 +477,7 @@ err_free:
 	return ERR_PTR(err);
 }
 
-static int mthca_destroy_srq(struct ib_srq *srq)
+static int mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
 {
 	struct mthca_ucontext *context;
@@ -607,7 +607,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
 	return &qp->ibqp;
 }
 
-static int mthca_destroy_qp(struct ib_qp *qp)
+static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
 {
 	if (qp->uobject) {
 		mthca_unmap_user_db(to_mdev(qp->device),
@@ -827,7 +827,7 @@ out:
 	return ret;
 }
 
-static int mthca_destroy_cq(struct ib_cq *cq)
+static int mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
 {
 	if (cq->uobject) {
 		mthca_unmap_user_db(to_mdev(cq->device),
@@ -974,7 +974,7 @@ err:
 	return ERR_PTR(err);
 }
 
-static int mthca_dereg_mr(struct ib_mr *mr)
+static int mthca_dereg_mr(struct ib_mr *mr, struct ib_udata *udata)
 {
 	struct mthca_mr *mmr = to_mmr(mr);
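Note that the mthca hunks keep their existing qp->uobject / cq->uobject checks for user-object cleanup; the new parameter arrives unused here. Since udata is documented as non-NULL only for user objects, an equivalent test could key off it instead, e.g. (a sketch of the idea, not this patch's code; foo_* names are hypothetical):

    static int foo_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
    {
            /* udata != NULL exactly when the CQ belongs to user space, so
             * the user doorbell unmap need not reach into cq->uobject. */
            if (udata)
                    foo_unmap_user_db(to_fdev(cq->device), to_fcq(cq));
            foo_free_cq(to_fdev(cq->device), to_fcq(cq));
            kfree(to_fcq(cq));
            return 0;
    }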


@@ -3033,7 +3033,8 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt)
 	/* Need to free the Last Streaming Mode Message */
 	if (nesqp->ietf_frame) {
 		if (nesqp->lsmm_mr)
-			nesibdev->ibdev.ops.dereg_mr(nesqp->lsmm_mr);
+			nesibdev->ibdev.ops.dereg_mr(nesqp->lsmm_mr,
+						     NULL);
 		pci_free_consistent(nesdev->pcidev,
 			nesqp->private_data_len + nesqp->ietf_frame_size,
 			nesqp->ietf_frame, nesqp->ietf_frame_pbase);


@@ -52,7 +52,7 @@ atomic_t qps_created;
 atomic_t sw_qps_destroyed;
 
 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
-static int nes_dereg_mr(struct ib_mr *ib_mr);
+static int nes_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
 
 /**
  * nes_alloc_mw
@@ -306,9 +306,8 @@ static int alloc_fast_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
 /*
  * nes_alloc_mr
  */
-static struct ib_mr *nes_alloc_mr(struct ib_pd *ibpd,
-				  enum ib_mr_type mr_type,
-				  u32 max_num_sg)
+static struct ib_mr *nes_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+				  u32 max_num_sg, struct ib_udata *udata)
 {
 	struct nes_pd *nespd = to_nespd(ibpd);
 	struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
@@ -386,7 +385,7 @@ static struct ib_mr *nes_alloc_mr(struct ib_pd *ibpd,
 	return ibmr;
 
 err:
-	nes_dereg_mr(ibmr);
+	nes_dereg_mr(ibmr, udata);
 
 	return ERR_PTR(-ENOMEM);
 }
@@ -700,7 +699,7 @@ static int nes_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
 /**
  * nes_dealloc_pd
  */
-static void nes_dealloc_pd(struct ib_pd *ibpd)
+static void nes_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
 	struct nes_ucontext *nesucontext;
 	struct nes_pd *nespd = to_nespd(ibpd);
@@ -1298,7 +1297,7 @@ static void nes_clean_cq(struct nes_qp *nesqp, struct nes_cq *nescq)
 /**
  * nes_destroy_qp
  */
-static int nes_destroy_qp(struct ib_qp *ibqp)
+static int nes_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 {
 	struct nes_qp *nesqp = to_nesqp(ibqp);
 	struct nes_ucontext *nes_ucontext;
@@ -1626,7 +1625,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
 /**
  * nes_destroy_cq
  */
-static int nes_destroy_cq(struct ib_cq *ib_cq)
+static int nes_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
 	struct nes_cq *nescq;
 	struct nes_device *nesdev;
@@ -2377,7 +2376,7 @@ reg_user_mr_err:
 /**
  * nes_dereg_mr
  */
-static int nes_dereg_mr(struct ib_mr *ib_mr)
+static int nes_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
 {
 	struct nes_mr *nesmr = to_nesmr(ib_mr);
 	struct nes_vnic *nesvnic = to_nesvnic(ib_mr->device);


@@ -219,7 +219,7 @@ av_err:
 	return ERR_PTR(status);
 }
 
-int ocrdma_destroy_ah(struct ib_ah *ibah, u32 flags)
+int ocrdma_destroy_ah(struct ib_ah *ibah, u32 flags, struct ib_udata *udata)
 {
 	struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
 	struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device);


@@ -53,7 +53,7 @@ enum {
 
 struct ib_ah *ocrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
 			       u32 flags, struct ib_udata *udata);
-int ocrdma_destroy_ah(struct ib_ah *ah, u32 flags);
+int ocrdma_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
 int ocrdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
 
 int ocrdma_process_mad(struct ib_device *,


@@ -680,7 +680,7 @@ exit:
 	return status;
 }
 
-void ocrdma_dealloc_pd(struct ib_pd *ibpd)
+void ocrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
@@ -922,7 +922,7 @@ umem_err:
 	return ERR_PTR(status);
 }
 
-int ocrdma_dereg_mr(struct ib_mr *ib_mr)
+int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
 {
 	struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
 	struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
@@ -1076,7 +1076,7 @@ static void ocrdma_flush_cq(struct ocrdma_cq *cq)
 	spin_unlock_irqrestore(&cq->cq_lock, flags);
 }
 
-int ocrdma_destroy_cq(struct ib_cq *ibcq)
+int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 {
 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
 	struct ocrdma_eq *eq = NULL;
@@ -1697,7 +1697,7 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
 	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
 }
 
-int ocrdma_destroy_qp(struct ib_qp *ibqp)
+int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 {
 	struct ocrdma_pd *pd;
 	struct ocrdma_qp *qp;
@@ -1885,7 +1885,7 @@ int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
 	return status;
 }
 
-int ocrdma_destroy_srq(struct ib_srq *ibsrq)
+int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 {
 	int status;
 	struct ocrdma_srq *srq;
@@ -2931,9 +2931,8 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
 	return 0;
 }
 
-struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd,
-			      enum ib_mr_type mr_type,
-			      u32 max_num_sg)
+struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+			      u32 max_num_sg, struct ib_udata *udata)
 {
 	int status;
 	struct ocrdma_mr *mr;


@@ -71,14 +71,14 @@ int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
 
 int ocrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx,
 		    struct ib_udata *udata);
-void ocrdma_dealloc_pd(struct ib_pd *pd);
+void ocrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
 
 struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
 			       const struct ib_cq_init_attr *attr,
 			       struct ib_ucontext *ib_ctx,
 			       struct ib_udata *udata);
 int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
-int ocrdma_destroy_cq(struct ib_cq *);
+int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
 
 struct ib_qp *ocrdma_create_qp(struct ib_pd *,
 			       struct ib_qp_init_attr *attrs,
@@ -90,7 +90,7 @@ int ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
 int ocrdma_query_qp(struct ib_qp *,
 		    struct ib_qp_attr *qp_attr,
 		    int qp_attr_mask, struct ib_qp_init_attr *);
-int ocrdma_destroy_qp(struct ib_qp *);
+int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
 void ocrdma_del_flush_qp(struct ocrdma_qp *qp);
 
 struct ib_srq *ocrdma_create_srq(struct ib_pd *, struct ib_srq_init_attr *,
@@ -98,17 +98,16 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *, struct ib_srq_init_attr *,
 int ocrdma_modify_srq(struct ib_srq *, struct ib_srq_attr *,
 		      enum ib_srq_attr_mask, struct ib_udata *);
 int ocrdma_query_srq(struct ib_srq *, struct ib_srq_attr *);
-int ocrdma_destroy_srq(struct ib_srq *);
+int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
 int ocrdma_post_srq_recv(struct ib_srq *, const struct ib_recv_wr *,
 			 const struct ib_recv_wr **bad_recv_wr);
 
-int ocrdma_dereg_mr(struct ib_mr *);
+int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
 struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *, int acc);
 struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length,
 				 u64 virt, int acc, struct ib_udata *);
-struct ib_mr *ocrdma_alloc_mr(struct ib_pd *pd,
-			      enum ib_mr_type mr_type,
-			      u32 max_num_sg);
+struct ib_mr *ocrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+			      u32 max_num_sg, struct ib_udata *udata);
 int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 		     unsigned int *sg_offset);


@@ -478,7 +478,7 @@ int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
 	return 0;
 }
 
-void qedr_dealloc_pd(struct ib_pd *ibpd)
+void qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
 	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
 	struct qedr_pd *pd = get_qedr_pd(ibpd);
@@ -962,7 +962,7 @@ int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
 #define QEDR_DESTROY_CQ_MAX_ITERATIONS		(10)
 #define QEDR_DESTROY_CQ_ITER_DURATION		(10)
 
-int qedr_destroy_cq(struct ib_cq *ibcq)
+int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 {
 	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
 	struct qed_rdma_destroy_cq_out_params oparams;
@@ -1485,7 +1485,7 @@ err0:
 	return ERR_PTR(-EFAULT);
 }
 
-int qedr_destroy_srq(struct ib_srq *ibsrq)
+int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 {
 	struct qed_rdma_destroy_srq_in_params in_params = {};
 	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
@@ -2488,7 +2488,7 @@ static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
 	return 0;
 }
 
-int qedr_destroy_qp(struct ib_qp *ibqp)
+int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 {
 	struct qedr_qp *qp = get_qedr_qp(ibqp);
 	struct qedr_dev *dev = qp->dev;
@@ -2556,7 +2556,7 @@ struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
 	return &ah->ibah;
 }
 
-int qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
+int qedr_destroy_ah(struct ib_ah *ibah, u32 flags, struct ib_udata *udata)
 {
 	struct qedr_ah *ah = get_qedr_ah(ibah);
@@ -2711,7 +2711,7 @@ err0:
 	return ERR_PTR(rc);
 }
 
-int qedr_dereg_mr(struct ib_mr *ib_mr)
+int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
 {
 	struct qedr_mr *mr = get_qedr_mr(ib_mr);
 	struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
@@ -2803,8 +2803,8 @@ err0:
 	return ERR_PTR(rc);
 }
 
-struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
-			    enum ib_mr_type mr_type, u32 max_num_sg)
+struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+			    u32 max_num_sg, struct ib_udata *udata)
 {
 	struct qedr_mr *mr;


@@ -49,14 +49,14 @@ void qedr_dealloc_ucontext(struct ib_ucontext *uctx);
 int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
 int qedr_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx,
 		  struct ib_udata *udata);
-void qedr_dealloc_pd(struct ib_pd *pd);
+void qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
 
 struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
 			     const struct ib_cq_init_attr *attr,
 			     struct ib_ucontext *ib_ctx,
 			     struct ib_udata *udata);
 int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
-int qedr_destroy_cq(struct ib_cq *);
+int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
 int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
 struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs,
 			     struct ib_udata *);
@@ -64,7 +64,7 @@ int qedr_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
 		   int attr_mask, struct ib_udata *udata);
 int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr,
 		  int qp_attr_mask, struct ib_qp_init_attr *);
-int qedr_destroy_qp(struct ib_qp *ibqp);
+int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
 
 struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
 			       struct ib_srq_init_attr *attr,
@@ -72,14 +72,14 @@ struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
 int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 		    enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
 int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
-int qedr_destroy_srq(struct ib_srq *ibsrq);
+int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
 int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 		       const struct ib_recv_wr **bad_recv_wr);
 struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
 			     u32 flags, struct ib_udata *udata);
-int qedr_destroy_ah(struct ib_ah *ibah, u32 flags);
+int qedr_destroy_ah(struct ib_ah *ibah, u32 flags, struct ib_udata *udata);
 
-int qedr_dereg_mr(struct ib_mr *);
+int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
 struct ib_mr *qedr_get_dma_mr(struct ib_pd *, int acc);
 
 struct ib_mr *qedr_reg_user_mr(struct ib_pd *, u64 start, u64 length,
@@ -89,7 +89,7 @@ int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 		   int sg_nents, unsigned int *sg_offset);
 
 struct ib_mr *qedr_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
-			    u32 max_num_sg);
+			    u32 max_num_sg, struct ib_udata *udata);
 int qedr_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);
 int qedr_post_send(struct ib_qp *, const struct ib_send_wr *,
 		   const struct ib_send_wr **bad_wr);


@@ -461,7 +461,7 @@ int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
 	return 0;
 }
 
-void usnic_ib_dealloc_pd(struct ib_pd *pd)
+void usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 {
 	usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
 }
@@ -539,7 +539,7 @@ out_release_mutex:
 	return ERR_PTR(err);
 }
 
-int usnic_ib_destroy_qp(struct ib_qp *qp)
+int usnic_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
 {
 	struct usnic_ib_qp_grp *qp_grp;
 	struct usnic_ib_vf *vf;
@@ -606,7 +606,7 @@ struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
 	return cq;
 }
 
-int usnic_ib_destroy_cq(struct ib_cq *cq)
+int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
 {
 	usnic_dbg("\n");
 	kfree(cq);
@@ -642,7 +642,7 @@ err_free:
 	return ERR_PTR(err);
 }
 
-int usnic_ib_dereg_mr(struct ib_mr *ibmr)
+int usnic_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 {
 	struct usnic_ib_mr *mr = to_umr(ibmr);
@@ -731,4 +731,3 @@ int usnic_ib_mmap(struct ib_ucontext *context,
 	return -EINVAL;
 }
 
-/* End of ib callbacks section */


@@ -52,22 +52,22 @@ int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
 			u16 *pkey);
 int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
 		      struct ib_udata *udata);
-void usnic_ib_dealloc_pd(struct ib_pd *pd);
+void usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
 struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
 				 struct ib_qp_init_attr *init_attr,
 				 struct ib_udata *udata);
-int usnic_ib_destroy_qp(struct ib_qp *qp);
+int usnic_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
 int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		       int attr_mask, struct ib_udata *udata);
 struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
 				 const struct ib_cq_init_attr *attr,
 				 struct ib_ucontext *context,
 				 struct ib_udata *udata);
-int usnic_ib_destroy_cq(struct ib_cq *cq);
+int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
 struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
 			      u64 virt_addr, int access_flags,
 			      struct ib_udata *udata);
-int usnic_ib_dereg_mr(struct ib_mr *ibmr);
+int usnic_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
 int usnic_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
 void usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext);
 int usnic_ib_mmap(struct ib_ucontext *context,


@@ -210,7 +210,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 		if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) {
 			dev_warn(&dev->pdev->dev,
 				 "failed to copy back udata\n");
-			pvrdma_destroy_cq(&cq->ibcq);
+			pvrdma_destroy_cq(&cq->ibcq, udata);
 			return ERR_PTR(-EINVAL);
 		}
 	}
@@ -245,10 +245,11 @@ static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
 /**
  * pvrdma_destroy_cq - destroy completion queue
  * @cq: the completion queue to destroy.
+ * @udata: user data or null for kernel object
  *
  * @return: 0 for success.
  */
-int pvrdma_destroy_cq(struct ib_cq *cq)
+int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
 {
 	struct pvrdma_cq *vcq = to_vcq(cq);
 	union pvrdma_cmd_req req;
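The pvrdma create-path fixups are worth a second look: when copying the response back to user space fails, the unwind now hands the destroy verb the same udata it was given, so the object is torn down as a user object. The pattern, sketched with hypothetical foo_* names:

    /* Error unwind inside a create verb: forward the caller's udata so the
     * destroy path sees this as a user-created object. */
    if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
            foo_destroy_cq(&cq->ibcq, udata);
            return ERR_PTR(-EINVAL);
    }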


@@ -201,7 +201,7 @@ err_umem:
  * @return: ib_mr pointer on success, otherwise returns an errno.
  */
 struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
-			      u32 max_num_sg)
+			      u32 max_num_sg, struct ib_udata *udata)
 {
 	struct pvrdma_dev *dev = to_vdev(pd->device);
 	struct pvrdma_user_mr *mr;
@@ -272,7 +272,7 @@ freemr:
  *
 * @return: 0 on success.
 */
-int pvrdma_dereg_mr(struct ib_mr *ibmr)
+int pvrdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 {
 	struct pvrdma_user_mr *mr = to_vmr(ibmr);
 	struct pvrdma_dev *dev = to_vdev(ibmr->device);


@@ -446,10 +446,11 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp)
 /**
  * pvrdma_destroy_qp - destroy a queue pair
  * @qp: the queue pair to destroy
+ * @udata: user data or null for kernel object
  *
 * @return: 0 on success.
 */
-int pvrdma_destroy_qp(struct ib_qp *qp)
+int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
 {
 	struct pvrdma_qp *vqp = to_vqp(qp);
 	union pvrdma_cmd_req req;


@@ -204,7 +204,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
 	/* Copy udata back. */
 	if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
 		dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
-		pvrdma_destroy_srq(&srq->ibsrq);
+		pvrdma_destroy_srq(&srq->ibsrq, udata);
 		return ERR_PTR(-EINVAL);
 	}
@@ -246,10 +246,11 @@ static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
 /**
  * pvrdma_destroy_srq - destroy shared receive queue
  * @srq: the shared receive queue to destroy
+ * @udata: user data or null for kernel object
  *
 * @return: 0 for success.
 */
-int pvrdma_destroy_srq(struct ib_srq *srq)
+int pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
 {
 	struct pvrdma_srq *vsrq = to_vsrq(srq);
 	union pvrdma_cmd_req req;


@@ -460,7 +460,7 @@ int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
 		if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) {
 			dev_warn(&dev->pdev->dev,
 				 "failed to copy back protection domain\n");
-			pvrdma_dealloc_pd(&pd->ibpd);
+			pvrdma_dealloc_pd(&pd->ibpd, udata);
 			return -EFAULT;
 		}
 	}
@@ -476,10 +476,11 @@ err:
 /**
  * pvrdma_dealloc_pd - deallocate protection domain
  * @pd: the protection domain to be released
+ * @udata: user data or null for kernel object
  *
 * @return: 0 on success, otherwise errno.
 */
-void pvrdma_dealloc_pd(struct ib_pd *pd)
+void pvrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 {
 	struct pvrdma_dev *dev = to_vdev(pd->device);
 	union pvrdma_cmd_req req = {};
@@ -556,7 +557,7 @@ struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
  *
 * @return: 0 on success.
 */
-int pvrdma_destroy_ah(struct ib_ah *ah, u32 flags)
+int pvrdma_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata)
 {
 	struct pvrdma_dev *dev = to_vdev(ah->device);


@@ -400,26 +400,26 @@ int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
 void pvrdma_dealloc_ucontext(struct ib_ucontext *context);
 int pvrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
 		    struct ib_udata *udata);
-void pvrdma_dealloc_pd(struct ib_pd *ibpd);
+void pvrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
 struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc);
 struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				 u64 virt_addr, int access_flags,
 				 struct ib_udata *udata);
-int pvrdma_dereg_mr(struct ib_mr *mr);
+int pvrdma_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
 struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
-			      u32 max_num_sg);
+			      u32 max_num_sg, struct ib_udata *udata);
 int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 		     int sg_nents, unsigned int *sg_offset);
 struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 			       const struct ib_cq_init_attr *attr,
 			       struct ib_ucontext *context,
 			       struct ib_udata *udata);
-int pvrdma_destroy_cq(struct ib_cq *cq);
+int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
 int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
 int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
 struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
 			       u32 flags, struct ib_udata *udata);
-int pvrdma_destroy_ah(struct ib_ah *ah, u32 flags);
+int pvrdma_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
 struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
 				 struct ib_srq_init_attr *init_attr,
@@ -427,7 +427,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
 int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 		      enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
 int pvrdma_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
-int pvrdma_destroy_srq(struct ib_srq *srq);
+int pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
 struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
 			       struct ib_qp_init_attr *init_attr,
@@ -436,7 +436,7 @@ int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		     int attr_mask, struct ib_udata *udata);
 int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 		    int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
-int pvrdma_destroy_qp(struct ib_qp *qp);
+int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
 int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 		     const struct ib_send_wr **bad_wr);
 int pvrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,


@@ -138,10 +138,12 @@ struct ib_ah *rvt_create_ah(struct ib_pd *pd,
 * rvt_destory_ah - Destory an address handle
  * @ibah: address handle
  * @destroy_flags: destroy address handle flags (see enum rdma_destroy_ah_flags)
+ * @udata: user data or NULL for kernel object
  *
  * Return: 0 on success
  */
-int rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags)
+int rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags,
+		   struct ib_udata *udata)
 {
 	struct rvt_dev_info *dev = ib_to_rvt(ibah->device);
 	struct rvt_ah *ah = ibah_to_rvtah(ibah);


@@ -54,7 +54,8 @@ struct ib_ah *rvt_create_ah(struct ib_pd *pd,
 			    struct rdma_ah_attr *ah_attr,
 			    u32 create_flags,
 			    struct ib_udata *udata);
-int rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags);
+int rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags,
+		   struct ib_udata *udata);
 int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
 int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);


@@ -299,12 +299,13 @@ done:
 /**
  * rvt_destroy_cq - destroy a completion queue
  * @ibcq: the completion queue to destroy.
+ * @udata: user data or NULL for kernel object
  *
  * Called by ib_destroy_cq() in the generic verbs code.
  *
  * Return: always 0
  */
-int rvt_destroy_cq(struct ib_cq *ibcq)
+int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 {
 	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
 	struct rvt_dev_info *rdi = cq->rdi;


@@ -55,7 +55,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
 			    const struct ib_cq_init_attr *attr,
 			    struct ib_ucontext *context,
 			    struct ib_udata *udata);
-int rvt_destroy_cq(struct ib_cq *ibcq);
+int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
 int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
 int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
 int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);


@@ -548,7 +548,7 @@ bool rvt_ss_has_lkey(struct rvt_sge_state *ss, u32 lkey)
  *
  * Returns 0 on success.
  */
-int rvt_dereg_mr(struct ib_mr *ibmr)
+int rvt_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 {
 	struct rvt_mr *mr = to_imr(ibmr);
 	int ret;
@@ -575,9 +575,8 @@ out:
  *
 * Return: the memory region on success, otherwise return an errno.
 */
-struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
-			   enum ib_mr_type mr_type,
-			   u32 max_num_sg)
+struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+			   u32 max_num_sg, struct ib_udata *udata)
 {
 	struct rvt_mr *mr;


@@ -78,10 +78,9 @@ struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc);
 struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 			      u64 virt_addr, int mr_access_flags,
 			      struct ib_udata *udata);
-int rvt_dereg_mr(struct ib_mr *ibmr);
-struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
-			   enum ib_mr_type mr_type,
-			   u32 max_num_sg);
+int rvt_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
+struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+			   u32 max_num_sg, struct ib_udata *udata);
 int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 		  int sg_nents, unsigned int *sg_offset);
 struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,


@@ -93,10 +93,11 @@ bail:
 /**
  * rvt_dealloc_pd - Free PD
  * @ibpd: Free up PD
+ * @udata: Valid user data or NULL for kernel object
  *
 * Return: always 0
 */
-void rvt_dealloc_pd(struct ib_pd *ibpd)
+void rvt_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
 	struct rvt_dev_info *dev = ib_to_rvt(ibpd->device);


@@ -52,6 +52,6 @@
 
 int rvt_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
 		 struct ib_udata *udata);
-void rvt_dealloc_pd(struct ib_pd *ibpd);
+void rvt_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
 
 #endif          /* DEF_RDMAVTPD_H */


@@ -1617,7 +1617,7 @@ inval:
  *
 * Return: 0 on success.
 */
-int rvt_destroy_qp(struct ib_qp *ibqp)
+int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 {
 	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
 	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);


@@ -57,7 +57,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 			    struct ib_udata *udata);
 int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		  int attr_mask, struct ib_udata *udata);
-int rvt_destroy_qp(struct ib_qp *ibqp);
+int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
 int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		 int attr_mask, struct ib_qp_init_attr *init_attr);
 int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,

--- a/drivers/infiniband/sw/rdmavt/srq.c
+++ b/drivers/infiniband/sw/rdmavt/srq.c

@@ -340,7 +340,7 @@ int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
  *
  * Return always 0
  */
-int rvt_destroy_srq(struct ib_srq *ibsrq)
+int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 {
 	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
 	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);

--- a/drivers/infiniband/sw/rdmavt/srq.h
+++ b/drivers/infiniband/sw/rdmavt/srq.h

@@ -57,6 +57,6 @@ int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 		   enum ib_srq_attr_mask attr_mask,
 		   struct ib_udata *udata);
 int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
-int rvt_destroy_srq(struct ib_srq *ibsrq);
+int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
 #endif /* DEF_RVTSRQ_H */
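
For a software device like rdmavt the change is purely mechanical: the destroy verb gains a udata argument the driver is free to ignore. A minimal sketch of the adapted shape (the foo_* names and the container struct are illustrative, not part of this patch):

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct foo_srq {
	struct ib_srq ibsrq;	/* embeds the core object */
	void *wq_buf;		/* driver-private queue memory */
};

/* Matches the new op signature:
 * int (*destroy_srq)(struct ib_srq *, struct ib_udata *).
 * A soft driver has no user mappings to undo, so udata is
 * accepted and deliberately unused. */
static int foo_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct foo_srq *srq = container_of(ibsrq, struct foo_srq, ibsrq);

	kfree(srq->wq_buf);	/* driver bookkeeping, unchanged */
	return 0;
}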

--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c

@@ -185,7 +185,7 @@ static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
 	return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
 }
 
-static void rxe_dealloc_pd(struct ib_pd *ibpd)
+static void rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
 	struct rxe_pd *pd = to_rpd(ibpd);
@@ -242,7 +242,7 @@ static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
 	return 0;
 }
 
-static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
+static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags, struct ib_udata *udata)
 {
 	struct rxe_ah *ah = to_rah(ibah);
@@ -389,7 +389,7 @@ static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
 	return 0;
 }
 
-static int rxe_destroy_srq(struct ib_srq *ibsrq)
+static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 {
 	struct rxe_srq *srq = to_rsrq(ibsrq);
@@ -509,7 +509,7 @@ static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	return 0;
 }
 
-static int rxe_destroy_qp(struct ib_qp *ibqp)
+static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 {
 	struct rxe_qp *qp = to_rqp(ibqp);
@@ -839,7 +839,7 @@ err1:
 	return ERR_PTR(err);
 }
 
-static int rxe_destroy_cq(struct ib_cq *ibcq)
+static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 {
 	struct rxe_cq *cq = to_rcq(ibcq);
@@ -990,7 +990,7 @@ err2:
 	return ERR_PTR(err);
 }
 
-static int rxe_dereg_mr(struct ib_mr *ibmr)
+static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 {
 	struct rxe_mem *mr = to_rmr(ibmr);
@@ -1001,9 +1001,8 @@ static int rxe_dereg_mr(struct ib_mr *ibmr)
 	return 0;
 }
 
-static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
-				  enum ib_mr_type mr_type,
-				  u32 max_num_sg)
+static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+				  u32 max_num_sg, struct ib_udata *udata)
 {
 	struct rxe_dev *rxe = to_rdev(ibpd->device);
 	struct rxe_pd *pd = to_rpd(ibpd);
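
The payoff of this plumbing shows up on the destroy side: once udata reaches the driver, a NULL/non-NULL check is enough to tell a kernel object from a user one, without reaching into uobject->context (which the next patch in the series removes from this path). A hedged sketch, with foo_* as invented stand-ins rather than code from this patch:

#include <linux/io.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct foo_cq {
	struct ib_cq ibcq;
	void __iomem *user_db;	/* doorbell page mapped for user CQs only */
};

static int foo_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct foo_cq *cq = container_of(ibcq, struct foo_cq, ibcq);

	if (udata)			/* non-NULL only for uverbs-created CQs */
		iounmap(cq->user_db);	/* user-specific teardown */
	kfree(cq);
	return 0;
}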

--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h

@@ -2396,13 +2396,13 @@ struct ib_device_ops {
 	void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
 	int (*alloc_pd)(struct ib_pd *pd, struct ib_ucontext *context,
 			struct ib_udata *udata);
-	void (*dealloc_pd)(struct ib_pd *pd);
+	void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
 	struct ib_ah *(*create_ah)(struct ib_pd *pd,
 				   struct rdma_ah_attr *ah_attr, u32 flags,
 				   struct ib_udata *udata);
 	int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
 	int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
-	int (*destroy_ah)(struct ib_ah *ah, u32 flags);
+	int (*destroy_ah)(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
 	struct ib_srq *(*create_srq)(struct ib_pd *pd,
 				     struct ib_srq_init_attr *srq_init_attr,
 				     struct ib_udata *udata);
@@ -2410,7 +2410,7 @@ struct ib_device_ops {
 			  enum ib_srq_attr_mask srq_attr_mask,
 			  struct ib_udata *udata);
 	int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
-	int (*destroy_srq)(struct ib_srq *srq);
+	int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
 	struct ib_qp *(*create_qp)(struct ib_pd *pd,
 				   struct ib_qp_init_attr *qp_init_attr,
 				   struct ib_udata *udata);
@@ -2418,13 +2418,13 @@ struct ib_device_ops {
 			 int qp_attr_mask, struct ib_udata *udata);
 	int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
 			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
-	int (*destroy_qp)(struct ib_qp *qp);
+	int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
 	struct ib_cq *(*create_cq)(struct ib_device *device,
 				   const struct ib_cq_init_attr *attr,
 				   struct ib_ucontext *context,
 				   struct ib_udata *udata);
 	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
-	int (*destroy_cq)(struct ib_cq *cq);
+	int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
 	int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
 	struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
 	struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
@@ -2433,9 +2433,9 @@ struct ib_device_ops {
 	int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length,
 			     u64 virt_addr, int mr_access_flags,
 			     struct ib_pd *pd, struct ib_udata *udata);
-	int (*dereg_mr)(struct ib_mr *mr);
+	int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
 	struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
-				  u32 max_num_sg);
+				  u32 max_num_sg, struct ib_udata *udata);
 	int (*advise_mr)(struct ib_pd *pd,
 			 enum ib_uverbs_advise_mr_advice advice, u32 flags,
 			 struct ib_sge *sg_list, u32 num_sge,
@@ -2458,7 +2458,7 @@ struct ib_device_ops {
 	struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
 				      struct ib_ucontext *ucontext,
 				      struct ib_udata *udata);
-	int (*dealloc_xrcd)(struct ib_xrcd *xrcd);
+	int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
 	struct ib_flow *(*create_flow)(struct ib_qp *qp,
 				       struct ib_flow_attr *flow_attr,
 				       int domain, struct ib_udata *udata);
@@ -2483,7 +2483,7 @@ struct ib_device_ops {
 	struct ib_wq *(*create_wq)(struct ib_pd *pd,
 				   struct ib_wq_init_attr *init_attr,
 				   struct ib_udata *udata);
-	int (*destroy_wq)(struct ib_wq *wq);
+	int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
 	int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
 			 u32 wq_attr_mask, struct ib_udata *udata);
 	struct ib_rwq_ind_table *(*create_rwq_ind_table)(
@@ -2495,7 +2495,7 @@ struct ib_device_ops {
 			       struct ib_ucontext *context,
 			       struct ib_dm_alloc_attr *attr,
 			       struct uverbs_attr_bundle *attrs);
-	int (*dealloc_dm)(struct ib_dm *dm);
+	int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
 	struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
 				   struct ib_dm_mr_attr *attr,
 				   struct uverbs_attr_bundle *attrs);
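
From a driver's point of view the wiring of the ops table is unchanged; only the callback signatures move. A hedged sketch with illustrative foo_* stubs (empty bodies so the fragment stands alone; dealloc_dm, note, takes the full attr bundle rather than plain udata):

#include <rdma/ib_verbs.h>

static void foo_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) { }
static int foo_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) { return 0; }
static int foo_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) { return 0; }
static int foo_dereg_mr(struct ib_mr *mr, struct ib_udata *udata) { return 0; }

static const struct ib_device_ops foo_dev_ops = {
	.dealloc_pd = foo_dealloc_pd,	/* destroy side now carries udata */
	.destroy_qp = foo_destroy_qp,
	.destroy_cq = foo_destroy_cq,
	.dereg_mr = foo_dereg_mr,
};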
@@ -3252,9 +3252,27 @@ enum ib_pd_flags {
 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
 			    const char *caller);
 #define ib_alloc_pd(device, flags) \
 	__ib_alloc_pd((device), (flags), KBUILD_MODNAME)
-void ib_dealloc_pd(struct ib_pd *pd);
+
+/**
+ * ib_dealloc_pd_user - Deallocate kernel/user PD
+ * @pd: The protection domain
+ * @udata: Valid user data or NULL for kernel objects
+ */
+void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
+
+/**
+ * ib_dealloc_pd - Deallocate kernel PD
+ * @pd: The protection domain
+ *
+ * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
+ */
+static inline void ib_dealloc_pd(struct ib_pd *pd)
+{
+	ib_dealloc_pd_user(pd, NULL);
+}
 
 enum rdma_create_ah_flags {
 	/* In a sleepable context */
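
This split keeps every in-kernel caller source-compatible: ULPs keep writing ib_dealloc_pd() and the inline wrapper supplies the NULL udata. A small illustration (foo_ctx is an invented ULP context, not from this patch):

#include <rdma/ib_verbs.h>

struct foo_ctx {
	struct ib_pd *pd;
};

static void foo_ulp_teardown(struct foo_ctx *ctx)
{
	/* Kernel object: expands to ib_dealloc_pd_user(ctx->pd, NULL). */
	ib_dealloc_pd(ctx->pd);
}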
@@ -3367,11 +3385,24 @@ enum rdma_destroy_ah_flags {
 };
 
 /**
- * rdma_destroy_ah - Destroys an address handle.
+ * rdma_destroy_ah_user - Destroys an address handle.
  * @ah: The address handle to destroy.
  * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
+ * @udata: Valid user data or NULL for kernel objects
  */
-int rdma_destroy_ah(struct ib_ah *ah, u32 flags);
+int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
+
+/**
+ * rdma_destroy_ah - Destroys a kernel address handle.
+ * @ah: The address handle to destroy.
+ * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
+ *
+ * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
+ */
+static inline int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
+{
+	return rdma_destroy_ah_user(ah, flags, NULL);
+}
 
 /**
  * ib_create_srq - Creates a SRQ associated with the specified protection
@@ -3415,10 +3446,22 @@ int ib_query_srq(struct ib_srq *srq,
 		 struct ib_srq_attr *srq_attr);
 
 /**
- * ib_destroy_srq - Destroys the specified SRQ.
+ * ib_destroy_srq_user - Destroys the specified SRQ.
  * @srq: The SRQ to destroy.
+ * @udata: Valid user data or NULL for kernel objects
  */
-int ib_destroy_srq(struct ib_srq *srq);
+int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
+
+/**
+ * ib_destroy_srq - Destroys the specified kernel SRQ.
+ * @srq: The SRQ to destroy.
+ *
+ * NOTE: for user srq use ib_destroy_srq_user with valid udata!
+ */
+static inline int ib_destroy_srq(struct ib_srq *srq)
+{
+	return ib_destroy_srq_user(srq, NULL);
+}
 
 /**
  * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
@@ -3438,15 +3481,34 @@ static inline int ib_post_srq_recv(struct ib_srq *srq,
 }
 
 /**
- * ib_create_qp - Creates a QP associated with the specified protection
+ * ib_create_qp_user - Creates a QP associated with the specified protection
  *   domain.
  * @pd: The protection domain associated with the QP.
  * @qp_init_attr: A list of initial attributes required to create the
  *   QP.  If QP creation succeeds, then the attributes are updated to
  *   the actual capabilities of the created QP.
+ * @udata: Valid user data or NULL for kernel objects
  */
-struct ib_qp *ib_create_qp(struct ib_pd *pd,
-			   struct ib_qp_init_attr *qp_init_attr);
+struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
+				struct ib_qp_init_attr *qp_init_attr,
+				struct ib_udata *udata);
+
+/**
+ * ib_create_qp - Creates a kernel QP associated with the specified protection
+ *   domain.
+ * @pd: The protection domain associated with the QP.
+ * @qp_init_attr: A list of initial attributes required to create the
+ *   QP.  If QP creation succeeds, then the attributes are updated to
+ *   the actual capabilities of the created QP.
+ *
+ * NOTE: for user qp use ib_create_qp_user with valid udata!
+ */
+static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
+					 struct ib_qp_init_attr *qp_init_attr)
+{
+	return ib_create_qp_user(pd, qp_init_attr, NULL);
+}
 
 /**
  * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
@@ -3496,8 +3558,20 @@ int ib_query_qp(struct ib_qp *qp,
 /**
- * ib_destroy_qp - Destroys the specified QP.
+ * ib_destroy_qp_user - Destroys the specified QP.
  * @qp: The QP to destroy.
+ * @udata: Valid udata or NULL for kernel objects
  */
-int ib_destroy_qp(struct ib_qp *qp);
+int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
+
+/**
+ * ib_destroy_qp - Destroys the specified kernel QP.
+ * @qp: The QP to destroy.
+ *
+ * NOTE: for user qp use ib_destroy_qp_user with valid udata!
+ */
+static inline int ib_destroy_qp(struct ib_qp *qp)
+{
+	return ib_destroy_qp_user(qp, NULL);
+}
 
 /**
  * ib_open_qp - Obtain a reference to an existing sharable QP.
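
On the uverbs side, the new _user entry points are the ones that receive a real udata, derived from the attr bundle this series threads down the destroy path. A hedged sketch of that call shape only (foo_uobj_cleanup is invented; the actual rdma_core plumbing lands in the follow-up patch):

#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>

/* The bundle's driver_udata carries the ucontext down to the
 * driver's destroy_qp op via ib_destroy_qp_user(). */
static int foo_uobj_cleanup(struct ib_qp *qp,
			    struct uverbs_attr_bundle *attrs)
{
	return ib_destroy_qp_user(qp, &attrs->driver_udata);
}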
@@ -3557,13 +3631,66 @@ static inline int ib_post_recv(struct ib_qp *qp,
 	return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
 }
 
-struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
-			    int nr_cqe, int comp_vector,
-			    enum ib_poll_context poll_ctx, const char *caller);
-#define ib_alloc_cq(device, priv, nr_cqe, comp_vect, poll_ctx) \
-	__ib_alloc_cq((device), (priv), (nr_cqe), (comp_vect), (poll_ctx), KBUILD_MODNAME)
+struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
+				 int nr_cqe, int comp_vector,
+				 enum ib_poll_context poll_ctx,
+				 const char *caller, struct ib_udata *udata);
+
+/**
+ * ib_alloc_cq_user: Allocate kernel/user CQ
+ * @dev: The IB device
+ * @private: Private data attached to the CQE
+ * @nr_cqe: Number of CQEs in the CQ
+ * @comp_vector: Completion vector used for the IRQs
+ * @poll_ctx: Context used for polling the CQ
+ * @udata: Valid user data or NULL for kernel objects
+ */
+static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev,
+					     void *private, int nr_cqe,
+					     int comp_vector,
+					     enum ib_poll_context poll_ctx,
+					     struct ib_udata *udata)
+{
+	return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
+				  KBUILD_MODNAME, udata);
+}
+
+/**
+ * ib_alloc_cq: Allocate kernel CQ
+ * @dev: The IB device
+ * @private: Private data attached to the CQE
+ * @nr_cqe: Number of CQEs in the CQ
+ * @comp_vector: Completion vector used for the IRQs
+ * @poll_ctx: Context used for polling the CQ
+ *
+ * NOTE: for user cq use ib_alloc_cq_user with valid udata!
+ */
+static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
+					int nr_cqe, int comp_vector,
+					enum ib_poll_context poll_ctx)
+{
+	return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
+				NULL);
+}
+
+/**
+ * ib_free_cq_user - Free kernel/user CQ
+ * @cq: The CQ to free
+ * @udata: Valid user data or NULL for kernel objects
+ */
+void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata);
+
+/**
+ * ib_free_cq - Free kernel CQ
+ * @cq: The CQ to free
+ *
+ * NOTE: for user cq use ib_free_cq_user with valid udata!
+ */
+static inline void ib_free_cq(struct ib_cq *cq)
+{
+	ib_free_cq_user(cq, NULL);
+}
 
-void ib_free_cq(struct ib_cq *cq);
 int ib_process_cq_direct(struct ib_cq *cq, int budget);
 
 /**
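
For kernel consumers the CQ convenience API is unchanged in spelling; the wrappers quietly add KBUILD_MODNAME and a NULL udata. A minimal round trip, assuming an invented foo_dev that holds the ib_device pointer:

#include <linux/err.h>
#include <rdma/ib_verbs.h>

struct foo_dev {
	struct ib_device *ibdev;
	struct ib_cq *cq;
};

static int foo_setup_cq(struct foo_dev *fdev)
{
	/* kernel CQ: polled from softirq, udata == NULL via the wrapper */
	fdev->cq = ib_alloc_cq(fdev->ibdev, fdev, 128, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(fdev->cq))
		return PTR_ERR(fdev->cq);
	return 0;
}

static void foo_teardown_cq(struct foo_dev *fdev)
{
	ib_free_cq(fdev->cq);	/* forwards NULL udata to the driver */
}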
@@ -3607,10 +3734,22 @@ int ib_resize_cq(struct ib_cq *cq, int cqe);
 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
 
 /**
- * ib_destroy_cq - Destroys the specified CQ.
+ * ib_destroy_cq_user - Destroys the specified CQ.
  * @cq: The CQ to destroy.
+ * @udata: Valid user data or NULL for kernel objects
  */
-int ib_destroy_cq(struct ib_cq *cq);
+int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
+
+/**
+ * ib_destroy_cq - Destroys the specified kernel CQ.
+ * @cq: The CQ to destroy.
+ *
+ * NOTE: for user cq use ib_destroy_cq_user with valid udata!
+ */
+static inline int ib_destroy_cq(struct ib_cq *cq)
+{
+	return ib_destroy_cq_user(cq, NULL);
+}
 
 /**
  * ib_poll_cq - poll a CQ for completion(s)
@@ -3864,17 +4003,37 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
 }
 
 /**
- * ib_dereg_mr - Deregisters a memory region and removes it from the
+ * ib_dereg_mr_user - Deregisters a memory region and removes it from the
+ *   HCA translation table.
+ * @mr: The memory region to deregister.
+ * @udata: Valid user data or NULL for kernel object
+ *
+ * This function can fail, if the memory region has memory windows bound to it.
+ */
+int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
+
+/**
+ * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
  *   HCA translation table.
  * @mr: The memory region to deregister.
  *
  * This function can fail, if the memory region has memory windows bound to it.
+ *
+ * NOTE: for user mr use ib_dereg_mr_user with valid udata!
  */
-int ib_dereg_mr(struct ib_mr *mr);
+static inline int ib_dereg_mr(struct ib_mr *mr)
+{
+	return ib_dereg_mr_user(mr, NULL);
+}
 
-struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
-			  enum ib_mr_type mr_type,
-			  u32 max_num_sg);
+struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type,
+			       u32 max_num_sg, struct ib_udata *udata);
+
+static inline struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
+					enum ib_mr_type mr_type,
+					u32 max_num_sg)
+{
+	return ib_alloc_mr_user(pd, mr_type, max_num_sg, NULL);
+}
 
 /**
  * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
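
The MR pair follows the same pattern: kernel ULPs keep the two-step alloc/dereg flow below, while uverbs moves to the _user variants. A sketch, with pd assumed to be a PD the caller already owns:

#include <linux/err.h>
#include <rdma/ib_verbs.h>

static int foo_mr_roundtrip(struct ib_pd *pd)
{
	struct ib_mr *mr;

	/* expands to ib_alloc_mr_user(pd, IB_MR_TYPE_MEM_REG, 16, NULL) */
	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* expands to ib_dereg_mr_user(mr, NULL); can fail if memory
	 * windows are still bound to the region */
	return ib_dereg_mr(mr);
}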
@@ -3972,8 +4131,9 @@ struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
 /**
  * ib_dealloc_xrcd - Deallocates an XRC domain.
  * @xrcd: The XRC domain to deallocate.
+ * @udata: Valid user data or NULL for kernel object
  */
-int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
+int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
 
 static inline int ib_check_mr_access(int flags)
 {
@@ -4049,7 +4209,7 @@ struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);
 
 struct ib_wq *ib_create_wq(struct ib_pd *pd,
 			   struct ib_wq_init_attr *init_attr);
-int ib_destroy_wq(struct ib_wq *wq);
+int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
 		 u32 wq_attr_mask);
 struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,