1
0
Fork 0
mirror of synced 2025-03-06 20:59:54 +01:00
linux/drivers/infiniband/hw/hfi1/sdma_txreq.h
Brendan Cunningham c9358de193 IB/hfi1: Fix wrong mmu_node used for user SDMA packet after invalidate
The hfi1 user SDMA pinned-page cache will leave a stale cache entry when
the cache-entry's virtual address range is invalidated but that cache
entry is in-use by an outstanding SDMA request.

Subsequent user SDMA requests with buffers in or spanning the virtual
address range of the stale cache entry will result in packets constructed
from the wrong memory, the physical pages pointed to by the stale cache
entry.

To fix this, remove mmu_rb_node cache entries from the mmu_rb_handler
cache independent of the cache entry's refcount. Add 'struct kref
refcount' to struct mmu_rb_node and manage mmu_rb_node lifetime with
kref_get() and kref_put().

mmu_rb_node.refcount makes sdma_mmu_node.refcount redundant. Remove
'atomic_t refcount' from struct sdma_mmu_node and change sdma_mmu_node
code to use mmu_rb_node.refcount.

Move the mmu_rb_handler destructor call after a
wait-for-SDMA-request-completion call so mmu_rb_nodes that need
mmu_rb_handler's workqueue to queue themselves up for destruction from an
interrupt context may do so.

Fixes: f48ad614c1 ("IB/hfi1: Move driver out of staging")
Fixes: 00cbce5cbf ("IB/hfi1: Fix bugs with non-PAGE_SIZE-end multi-iovec user SDMA requests")
Link: https://lore.kernel.org/r/168451393605.3700681.13493776139032178861.stgit@awfm-02.cornelisnetworks.com
Reviewed-by: Dean Luick <dean.luick@cornelisnetworks.com>
Signed-off-by: Brendan Cunningham <bcunningham@cornelisnetworks.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
2023-06-01 14:38:00 -03:00

97 lines
2.5 KiB
C

/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* Copyright(c) 2016 Intel Corporation.
*/
#ifndef HFI1_SDMA_TXREQ_H
#define HFI1_SDMA_TXREQ_H
/* number of descriptors stored inline in struct sdma_txreq; increased for AHG */
#define NUM_DESC 6
/*
* struct sdma_desc - canonical fragment descriptor
*
* This is the descriptor carried in the tx request
* corresponding to each fragment.
*
*/
struct sdma_desc {
	/* private: don't use directly */
	u64 qw[2];
	/* private: opaque per-descriptor context; its reference is released via ctx_put() */
	void *pinning_ctx;
	/*
	 * Release reference to @pinning_ctx. May be called in interrupt
	 * context. Must not sleep.
	 */
	void (*ctx_put)(void *ctx);
};
/**
* struct sdma_txreq - the sdma_txreq structure (one per packet)
* @list: for use by user and by queuing for wait
*
* This is the representation of a packet which consists of some
* number of fragments. Storage is provided within the structure
* for all fragments.
*
* The storage for the descriptors is automatically extended as needed
* when the current allocation is exceeded.
*
* The user (Verbs or PSM) may overload this structure with fields
* specific to their use by putting this struct first in their struct.
* The method of allocation of the overloaded structure is user dependent
*
* The list is the only public field in the structure.
*
*/
/* txreq completion status codes -- presumably delivered through callback_t's int argument; TODO confirm */
#define SDMA_TXREQ_S_OK        0
#define SDMA_TXREQ_S_SENDERROR 1
#define SDMA_TXREQ_S_ABORTED   2
#define SDMA_TXREQ_S_SHUTDOWN  3
/* flags bits (note: 0x0008 is unused) */
#define SDMA_TXREQ_F_URGENT   0x0001
#define SDMA_TXREQ_F_AHG_COPY 0x0002
#define SDMA_TXREQ_F_USE_AHG  0x0004
#define SDMA_TXREQ_F_VIP      0x0010
/* forward declaration so callback_t can take a txreq pointer */
struct sdma_txreq;
/* per-packet completion callback: the completed txreq and an int status */
typedef void (*callback_t)(struct sdma_txreq *, int);
/* opaque here -- only referenced by pointer in struct sdma_txreq */
struct iowait;
struct sdma_txreq {
	/* the only public field -- for use by user and by queuing for wait */
	struct list_head list;
	/* private: descriptor array in use; presumably points at descs[] until
	 * the inline capacity is exceeded -- TODO confirm against sdma.c
	 */
	struct sdma_desc *descp;
	/* private: buffer used when fragments are coalesced */
	void *coalesce_buf;
	/* private: wait structure associated with this request */
	struct iowait *wait;
	/* private: completion callback invoked with a txreq status */
	callback_t complete;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	/* private: sequence number, compiled in only for debug ordering checks */
	u64 sn;
#endif
	/* private: - used in coalesce/pad processing */
	u16 packet_len;
	/* private: - down-counted to trigger last */
	u16 tlen;
	/* private: count of descriptors built so far; nonzero once built
	 * (see sdma_txreq_built())
	 */
	u16 num_desc;
	/* private: capacity of descp before extension is needed -- presumably;
	 * TODO confirm
	 */
	u16 desc_limit;
	/* private: */
	u16 next_descq_idx;
	/* private: */
	u16 coalesce_idx;
	/* private: SDMA_TXREQ_F_* flags */
	u16 flags;
	/* private: inline descriptor storage (NUM_DESC entries) */
	struct sdma_desc descs[NUM_DESC];
};
/*
 * sdma_txreq_built() - report whether @tx has any descriptors built.
 *
 * Returns the request's current descriptor count, which is nonzero
 * exactly when at least one descriptor has been added.
 */
static inline int sdma_txreq_built(struct sdma_txreq *tx)
{
	int built = tx->num_desc;

	return built;
}
#endif /* HFI1_SDMA_TXREQ_H */