1
0
Fork 0
mirror of synced 2025-03-06 20:59:54 +01:00
linux/drivers/crypto/intel/qat/qat_common/qat_algs_send.c
Giovanni Cabiddu 203b01001c crypto: qat - fix deadlock in backlog processing
If a request has the flag CRYPTO_TFM_REQ_MAY_BACKLOG set, the function
qat_alg_send_message_maybacklog() enqueues it in a backlog list if
either (1) there is already at least one request in the backlog list, or
(2) the HW ring is nearly full or (3) the enqueue to the HW ring fails.
If an interrupt occurs right before the lock in qat_alg_backlog_req() is
taken and the backlog queue is being emptied, then there is no request
in the HW queues that can trigger a subsequent interrupt that can clear
the backlog queue. In addition, subsequent requests are enqueued to the
backlog list and not sent to the hardware.

Fix it by holding the lock while taking the decision if the request
needs to be included in the backlog queue or not. This synchronizes the
flow with the interrupt handler that drains the backlog queue.

For performance reasons, the logic has been changed to try to enqueue
first without holding the lock.

Fixes: 3868238397 ("crypto: qat - add backlog mechanism")
Reported-by: Mikulas Patocka <mpatocka@redhat.com>
Closes: https://lore.kernel.org/all/af9581e2-58f9-cc19-428f-6f18f1f83d54@redhat.com/T/
Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2023-10-27 18:04:28 +08:00

91 lines
2 KiB
C

// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2022 Intel Corporation */
#include <crypto/algapi.h>
#include "adf_transport.h"
#include "qat_algs_send.h"
#include "qat_crypto.h"
#define ADF_MAX_RETRIES 20
/**
 * qat_alg_send_message_retry() - submit a request, spinning on a full ring
 * @req: crypto request descriptor holding the TX ring and firmware message
 *
 * Attempt to place the firmware message on the HW ring, retrying up to
 * ADF_MAX_RETRIES additional times while the ring reports -EAGAIN (full).
 *
 * Return: -EINPROGRESS if the message was accepted, -ENOSPC if the ring
 * stayed full for every attempt.
 */
static int qat_alg_send_message_retry(struct qat_alg_req *req)
{
	int attempts = 0;
	int ret;

	for (;;) {
		ret = adf_send_message(req->tx_ring, req->fw_req);
		if (ret != -EAGAIN || attempts++ >= ADF_MAX_RETRIES)
			break;
	}

	return ret == -EAGAIN ? -ENOSPC : -EINPROGRESS;
}
void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
{
struct qat_alg_req *req, *tmp;
spin_lock_bh(&backlog->lock);
list_for_each_entry_safe(req, tmp, &backlog->list, list) {
if (adf_send_message(req->tx_ring, req->fw_req)) {
/* The HW ring is full. Do nothing.
* qat_alg_send_backlog() will be invoked again by
* another callback.
*/
break;
}
list_del(&req->list);
crypto_request_complete(req->base, -EINPROGRESS);
}
spin_unlock_bh(&backlog->lock);
}
/**
 * qat_alg_try_enqueue() - opportunistically submit a request to the HW ring
 * @req: crypto request descriptor holding the TX ring and firmware message
 *
 * Refuses to send if any request is already backlogged (to preserve
 * ordering) or if the ring is nearly full; otherwise attempts the send.
 *
 * Return: true if the message was accepted by the ring, false otherwise.
 */
static bool qat_alg_try_enqueue(struct qat_alg_req *req)
{
	/* Backlogged requests must go first to keep submission order */
	if (!list_empty(&req->backlog->list))
		return false;

	/* Leave headroom on a nearly full ring */
	if (adf_ring_nearly_full(req->tx_ring))
		return false;

	return adf_send_message(req->tx_ring, req->fw_req) == 0;
}
/**
 * qat_alg_send_message_maybacklog() - submit a request, backlogging on failure
 * @req: crypto request descriptor holding the TX ring and firmware message
 *
 * Lock-free fast path first; on failure, retry under the backlog lock so
 * the decision to backlog is synchronized with the response-path drain in
 * qat_alg_send_backlog() (avoids the stall where a request sits in the
 * backlog with nothing in flight to trigger a drain).
 *
 * Return: -EINPROGRESS if sent to the ring, -EBUSY if backlogged.
 */
static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
{
	struct qat_instance_backlog *backlog = req->backlog;

	/* Fast path: no lock needed if the ring takes it straight away */
	if (qat_alg_try_enqueue(req))
		return -EINPROGRESS;

	spin_lock_bh(&backlog->lock);
	if (qat_alg_try_enqueue(req)) {
		spin_unlock_bh(&backlog->lock);
		return -EINPROGRESS;
	}
	list_add_tail(&req->list, &backlog->list);
	spin_unlock_bh(&backlog->lock);

	return -EBUSY;
}
/**
 * qat_alg_send_message() - dispatch a request using the caller's policy
 * @req: crypto request descriptor holding the TX ring and firmware message
 *
 * Requests flagged CRYPTO_TFM_REQ_MAY_BACKLOG are queued in the backlog
 * when the ring is busy; all others spin-retry a bounded number of times.
 *
 * Return: -EINPROGRESS, -EBUSY (backlogged) or -ENOSPC (ring full).
 */
int qat_alg_send_message(struct qat_alg_req *req)
{
	if (req->base->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		return qat_alg_send_message_maybacklog(req);

	return qat_alg_send_message_retry(req);
}