svcrdma: Clean up allocation of svc_rdma_recv_ctxt
The physical device's favored NUMA node ID is available when
allocating a recv_ctxt. Use that value instead of relying on the
assumption that the memory allocation happens to be running on a node
close to the device.

This cleanup eliminates the hack of destroying recv_ctxts that were
not created by the receive CQ thread -- recv_ctxts are now always
allocated on a "good" node.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
parent fe2b401e55
commit c5d68d25bd
2 changed files with 7 additions and 12 deletions
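For context, the pattern this patch adopts is sketched below: query the
device's preferred NUMA node once with ibdev_to_node(), then pass that
node to kmalloc_node() for every per-device allocation. ibdev_to_node(),
kmalloc_node(), and kfree() are the real kernel APIs; struct example_ctxt
and example_alloc() are invented for this illustration and are not part
of the patch.

    #include <linux/slab.h>
    #include <rdma/ib_verbs.h>

    /* Hypothetical per-device context, for illustration only */
    struct example_ctxt {
            void *buf;
    };

    static struct example_ctxt *example_alloc(struct ib_device *dev,
                                              size_t len)
    {
            /* NUMA node closest to the device, not to the calling CPU */
            int node = ibdev_to_node(dev);
            struct example_ctxt *ctxt;

            /* kmalloc_node() asks for memory on @node; plain kmalloc()
             * simply allocates near whichever CPU this thread happens
             * to be running on.
             */
            ctxt = kmalloc_node(sizeof(*ctxt), GFP_KERNEL, node);
            if (!ctxt)
                    return NULL;

            ctxt->buf = kmalloc_node(len, GFP_KERNEL, node);
            if (!ctxt->buf) {
                    kfree(ctxt);
                    return NULL;
            }
            return ctxt;
    }

Allocating on the device's node up front is what makes it safe to cache
and reuse every recv_ctxt, as the later hunks show.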
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
@@ -135,7 +135,6 @@ struct svc_rdma_recv_ctxt {
 	struct ib_sge		rc_recv_sge;
 	void			*rc_recv_buf;
 	struct xdr_stream	rc_stream;
-	bool			rc_temp;
 	u32			rc_byte_len;
 	unsigned int		rc_page_count;
 	u32			rc_inv_rkey;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -125,14 +125,15 @@ static void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma,
 static struct svc_rdma_recv_ctxt *
 svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
 {
+	int node = ibdev_to_node(rdma->sc_cm_id->device);
 	struct svc_rdma_recv_ctxt *ctxt;
 	dma_addr_t addr;
 	void *buffer;
 
-	ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
+	ctxt = kmalloc_node(sizeof(*ctxt), GFP_KERNEL, node);
 	if (!ctxt)
 		goto fail0;
-	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
+	buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node);
 	if (!buffer)
 		goto fail1;
 	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
@@ -155,7 +156,6 @@ svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
 	ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
 	ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
 	ctxt->rc_recv_buf = buffer;
-	ctxt->rc_temp = false;
 	return ctxt;
 
 fail2:
@@ -232,10 +232,7 @@ void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
 	pcl_free(&ctxt->rc_write_pcl);
 	pcl_free(&ctxt->rc_reply_pcl);
 
-	if (!ctxt->rc_temp)
-		llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
-	else
-		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
+	llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
 }
 
 /**
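With rc_temp gone, every context released through svc_rdma_recv_ctxt_put()
goes back on the sc_recv_ctxts free list, which is a kernel llist (a
lock-free singly linked list). A minimal sketch of that recycle pattern
follows, assuming a single consumer as in svcrdma's receive path.
llist_add(), llist_del_first(), llist_entry(), and LLIST_HEAD() are the
real llist API; struct example_node, example_put(), and example_get()
are hypothetical names for this sketch.

    #include <linux/llist.h>
    #include <linux/slab.h>

    struct example_node {
            struct llist_node node;
    };

    static LLIST_HEAD(example_cache);

    /* Put: always recycle -- no rc_temp special case anymore */
    static void example_put(struct example_node *e)
    {
            llist_add(&e->node, &example_cache);
    }

    /* Get: pop a cached entry, or allocate a fresh one */
    static struct example_node *example_get(void)
    {
            struct llist_node *n = llist_del_first(&example_cache);

            if (n)
                    return llist_entry(n, struct example_node, node);
            return kmalloc(sizeof(struct example_node), GFP_KERNEL);
    }

Previously, contexts marked rc_temp were destroyed on put because they
had been allocated on whatever node the posting thread was running on;
now that allocation always targets the device's node, unconditional
recycling is safe.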
@@ -258,7 +255,7 @@ void svc_rdma_release_ctxt(struct svc_xprt *xprt, void *vctxt)
 }
 
 static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma,
-				   unsigned int wanted, bool temp)
+				   unsigned int wanted)
 {
 	const struct ib_recv_wr *bad_wr = NULL;
 	struct svc_rdma_recv_ctxt *ctxt;
@@ -275,7 +272,6 @@ static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma,
 			break;
 
 		trace_svcrdma_post_recv(ctxt);
-		ctxt->rc_temp = temp;
 		ctxt->rc_recv_wr.next = recv_chain;
 		recv_chain = &ctxt->rc_recv_wr;
 		rdma->sc_pending_recvs++;
@@ -309,7 +305,7 @@ err_free:
  */
 bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
 {
-	return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests, true);
+	return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests);
 }
 
 /**
@@ -343,7 +339,7 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
 	 * client reconnects.
	 */
 	if (rdma->sc_pending_recvs < rdma->sc_max_requests)
-		if (!svc_rdma_refresh_recvs(rdma, rdma->sc_recv_batch, false))
+		if (!svc_rdma_refresh_recvs(rdma, rdma->sc_recv_batch))
 			goto dropped;
 
 	/* All wc fields are now known to be valid */