crypto: qat - remove dma_free_coherent() for DH
The function qat_dh_compute_value() allocates memory with
dma_alloc_coherent() if the source or the destination buffers are made
of multiple flat buffers or of a size that is not compatible with the
hardware.
This memory is then freed with dma_free_coherent() in the context of a
tasklet invoked to handle the response for the corresponding request.
According to Documentation/core-api/dma-api-howto.rst, the function
dma_free_coherent() cannot be called in an interrupt context.
Replace allocations with dma_alloc_coherent() in the function
qat_dh_compute_value() with kmalloc() + dma_map_single().
Cc: stable@vger.kernel.org
Fixes: c9839143eb
("crypto: qat - Add DH support")
Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Adam Guerin <adam.guerin@intel.com>
Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
parent
3dfaf0071e
commit
029aa4624a
1 changed file with 34 additions and 49 deletions
|
@ -164,25 +164,20 @@ static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
|
||||||
err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
|
err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
|
||||||
|
|
||||||
if (areq->src) {
|
if (areq->src) {
|
||||||
if (req->src_align)
|
dma_unmap_single(dev, req->in.dh.in.b, req->ctx.dh->p_size,
|
||||||
dma_free_coherent(dev, req->ctx.dh->p_size,
|
DMA_TO_DEVICE);
|
||||||
req->src_align, req->in.dh.in.b);
|
kfree_sensitive(req->src_align);
|
||||||
else
|
|
||||||
dma_unmap_single(dev, req->in.dh.in.b,
|
|
||||||
req->ctx.dh->p_size, DMA_TO_DEVICE);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
areq->dst_len = req->ctx.dh->p_size;
|
areq->dst_len = req->ctx.dh->p_size;
|
||||||
if (req->dst_align) {
|
if (req->dst_align) {
|
||||||
scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
|
scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
|
||||||
areq->dst_len, 1);
|
areq->dst_len, 1);
|
||||||
|
kfree_sensitive(req->dst_align);
|
||||||
|
}
|
||||||
|
|
||||||
dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
|
|
||||||
req->out.dh.r);
|
|
||||||
} else {
|
|
||||||
dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
|
dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
|
||||||
DMA_FROM_DEVICE);
|
DMA_FROM_DEVICE);
|
||||||
}
|
|
||||||
|
|
||||||
dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
|
dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
|
||||||
DMA_TO_DEVICE);
|
DMA_TO_DEVICE);
|
||||||
|
@ -231,6 +226,7 @@ static int qat_dh_compute_value(struct kpp_request *req)
|
||||||
struct icp_qat_fw_pke_request *msg = &qat_req->req;
|
struct icp_qat_fw_pke_request *msg = &qat_req->req;
|
||||||
int ret;
|
int ret;
|
||||||
int n_input_params = 0;
|
int n_input_params = 0;
|
||||||
|
u8 *vaddr;
|
||||||
|
|
||||||
if (unlikely(!ctx->xa))
|
if (unlikely(!ctx->xa))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
@ -287,27 +283,24 @@ static int qat_dh_compute_value(struct kpp_request *req)
|
||||||
*/
|
*/
|
||||||
if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
|
if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
|
||||||
qat_req->src_align = NULL;
|
qat_req->src_align = NULL;
|
||||||
qat_req->in.dh.in.b = dma_map_single(dev,
|
vaddr = sg_virt(req->src);
|
||||||
sg_virt(req->src),
|
|
||||||
req->src_len,
|
|
||||||
DMA_TO_DEVICE);
|
|
||||||
if (unlikely(dma_mapping_error(dev,
|
|
||||||
qat_req->in.dh.in.b)))
|
|
||||||
return ret;
|
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
int shift = ctx->p_size - req->src_len;
|
int shift = ctx->p_size - req->src_len;
|
||||||
|
|
||||||
qat_req->src_align = dma_alloc_coherent(dev,
|
qat_req->src_align = kzalloc(ctx->p_size, GFP_KERNEL);
|
||||||
ctx->p_size,
|
|
||||||
&qat_req->in.dh.in.b,
|
|
||||||
GFP_KERNEL);
|
|
||||||
if (unlikely(!qat_req->src_align))
|
if (unlikely(!qat_req->src_align))
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
scatterwalk_map_and_copy(qat_req->src_align + shift,
|
scatterwalk_map_and_copy(qat_req->src_align + shift,
|
||||||
req->src, 0, req->src_len, 0);
|
req->src, 0, req->src_len, 0);
|
||||||
|
|
||||||
|
vaddr = qat_req->src_align;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
qat_req->in.dh.in.b = dma_map_single(dev, vaddr, ctx->p_size,
|
||||||
|
DMA_TO_DEVICE);
|
||||||
|
if (unlikely(dma_mapping_error(dev, qat_req->in.dh.in.b)))
|
||||||
|
goto unmap_src;
|
||||||
}
|
}
|
||||||
/*
|
/*
|
||||||
* dst can be of any size in valid range, but HW expects it to be the
|
* dst can be of any size in valid range, but HW expects it to be the
|
||||||
|
@ -318,20 +311,18 @@ static int qat_dh_compute_value(struct kpp_request *req)
|
||||||
*/
|
*/
|
||||||
if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
|
if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
|
||||||
qat_req->dst_align = NULL;
|
qat_req->dst_align = NULL;
|
||||||
qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
|
vaddr = sg_virt(req->dst);
|
||||||
req->dst_len,
|
|
||||||
DMA_FROM_DEVICE);
|
|
||||||
|
|
||||||
if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
|
|
||||||
goto unmap_src;
|
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size,
|
qat_req->dst_align = kzalloc(ctx->p_size, GFP_KERNEL);
|
||||||
&qat_req->out.dh.r,
|
|
||||||
GFP_KERNEL);
|
|
||||||
if (unlikely(!qat_req->dst_align))
|
if (unlikely(!qat_req->dst_align))
|
||||||
goto unmap_src;
|
goto unmap_src;
|
||||||
|
|
||||||
|
vaddr = qat_req->dst_align;
|
||||||
}
|
}
|
||||||
|
qat_req->out.dh.r = dma_map_single(dev, vaddr, ctx->p_size,
|
||||||
|
DMA_FROM_DEVICE);
|
||||||
|
if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
|
||||||
|
goto unmap_dst;
|
||||||
|
|
||||||
qat_req->in.dh.in_tab[n_input_params] = 0;
|
qat_req->in.dh.in_tab[n_input_params] = 0;
|
||||||
qat_req->out.dh.out_tab[1] = 0;
|
qat_req->out.dh.out_tab[1] = 0;
|
||||||
|
@ -371,23 +362,17 @@ unmap_in_params:
|
||||||
sizeof(struct qat_dh_input_params),
|
sizeof(struct qat_dh_input_params),
|
||||||
DMA_TO_DEVICE);
|
DMA_TO_DEVICE);
|
||||||
unmap_dst:
|
unmap_dst:
|
||||||
if (qat_req->dst_align)
|
|
||||||
dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
|
|
||||||
qat_req->out.dh.r);
|
|
||||||
else
|
|
||||||
if (!dma_mapping_error(dev, qat_req->out.dh.r))
|
if (!dma_mapping_error(dev, qat_req->out.dh.r))
|
||||||
dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
|
dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
|
||||||
DMA_FROM_DEVICE);
|
DMA_FROM_DEVICE);
|
||||||
|
kfree_sensitive(qat_req->dst_align);
|
||||||
unmap_src:
|
unmap_src:
|
||||||
if (req->src) {
|
if (req->src) {
|
||||||
if (qat_req->src_align)
|
|
||||||
dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
|
|
||||||
qat_req->in.dh.in.b);
|
|
||||||
else
|
|
||||||
if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
|
if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
|
||||||
dma_unmap_single(dev, qat_req->in.dh.in.b,
|
dma_unmap_single(dev, qat_req->in.dh.in.b,
|
||||||
ctx->p_size,
|
ctx->p_size,
|
||||||
DMA_TO_DEVICE);
|
DMA_TO_DEVICE);
|
||||||
|
kfree_sensitive(qat_req->src_align);
|
||||||
}
|
}
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Add table
Reference in a new issue