wifi: iwlwifi: keep the TSO and workaround pages mapped
Map the pages when allocating them so that we will not need to map each
of the used fragments at a later point. For now the mapping is not used;
this will be changed in a later commit.

Signed-off-by: Benjamin Berg <benjamin.berg@intel.com>
Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
Reviewed-by: Johannes Berg <johannes.berg@intel.com>
Link: https://patch.msgid.link/20240703125541.7ced468fe431.Ibb109867dc680c37fe8d891e9ab9ef64ed5c5d2d@changeid
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
parent 7f5e3038f0
commit adc902cead

3 changed files with 95 additions and 17 deletions
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -603,6 +603,22 @@ struct iwl_tso_hdr_page {
 	u8 *pos;
 };
 
+/*
+ * Note that we put this struct *last* in the page. By doing that, we ensure
+ * that no TB referencing this page can trigger the 32-bit boundary hardware
+ * bug.
+ */
+struct iwl_tso_page_info {
+	dma_addr_t dma_addr;
+	struct page *next;
+	refcount_t use_count;
+};
+
+#define IWL_TSO_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(struct iwl_tso_page_info))
+#define IWL_TSO_PAGE_INFO(addr)	\
+	((struct iwl_tso_page_info *)(((unsigned long)addr & PAGE_MASK) +	\
+				      IWL_TSO_PAGE_DATA_SIZE))
+
 int iwl_pcie_tx_init(struct iwl_trans *trans);
 void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
 int iwl_pcie_tx_stop(struct iwl_trans *trans);
@@ -628,9 +644,19 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
 			      struct iwl_cmd_meta *cmd_meta,
 			      u8 **hdr, unsigned int hdr_room);
 
-void iwl_pcie_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb,
-			    struct iwl_cmd_meta *cmd_meta);
+void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
+			     struct iwl_cmd_meta *cmd_meta);
+
+static inline dma_addr_t iwl_pcie_get_tso_page_phys(void *addr)
+{
+	dma_addr_t res;
+
+	res = IWL_TSO_PAGE_INFO(addr)->dma_addr;
+	res += (unsigned long)addr & ~PAGE_MASK;
+
+	return res;
+}
+
 static inline dma_addr_t
 iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
 {
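To see the layout concretely: the info struct occupies the tail of each page,
any pointer into the page can be masked back to the page base to find it, and
a fragment's bus address falls out of offset arithmetic. A minimal userspace
sketch, with MY_* names standing in for the kernel symbols (illustrative only,
not driver code):

    /* Userspace sketch of the trailing page-info layout; the MY_* names
     * are local stand-ins for PAGE_SIZE/PAGE_MASK and the iwl symbols. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define MY_PAGE_SIZE 4096UL
    #define MY_PAGE_MASK (~(MY_PAGE_SIZE - 1))

    struct my_tso_page_info {
    	uint64_t dma_addr;	/* bus address of the whole page */
    	void *next;		/* chaining pointer, as in the patch */
    	unsigned int use_count;
    };

    #define MY_TSO_PAGE_DATA_SIZE (MY_PAGE_SIZE - sizeof(struct my_tso_page_info))
    #define MY_TSO_PAGE_INFO(addr) \
    	((struct my_tso_page_info *)(((unsigned long)(addr) & MY_PAGE_MASK) + \
    				     MY_TSO_PAGE_DATA_SIZE))

    /* mirrors iwl_pcie_get_tso_page_phys() */
    static uint64_t my_get_tso_page_phys(void *addr)
    {
    	uint64_t res = MY_TSO_PAGE_INFO(addr)->dma_addr;

    	/* add the pointer's offset inside the page */
    	res += (unsigned long)addr & ~MY_PAGE_MASK;
    	return res;
    }

    int main(void)
    {
    	char *page = aligned_alloc(MY_PAGE_SIZE, MY_PAGE_SIZE);

    	if (!page)
    		return 1;
    	MY_TSO_PAGE_INFO(page)->dma_addr = 0xabcd0000;	/* pretend mapping */

    	/* any pointer into the page resolves to the same trailing info */
    	assert(MY_TSO_PAGE_INFO(page) == MY_TSO_PAGE_INFO(page + 1234));
    	/* bus address = page mapping + in-page offset */
    	assert(my_get_tso_page_phys(page + 100) == 0xabcd0000 + 100);

    	free(page);
    	return 0;
    }

Note that the usable area shrinks from PAGE_SIZE - sizeof(void *) to
IWL_TSO_PAGE_DATA_SIZE, which is why the size checks in the hunks below are
updated as well.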
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -19,8 +19,10 @@ static struct page *get_workaround_page(struct iwl_trans *trans,
 					struct sk_buff *skb)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_tso_page_info *info;
 	struct page **page_ptr;
 	struct page *ret;
+	dma_addr_t phys;
 
 	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
 
@@ -28,8 +30,22 @@ static struct page *get_workaround_page(struct iwl_trans *trans,
 	if (!ret)
 		return NULL;
 
+	info = IWL_TSO_PAGE_INFO(page_address(ret));
+
+	/* Create a DMA mapping for the page */
+	phys = dma_map_page_attrs(trans->dev, ret, 0, PAGE_SIZE,
+				  DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+	if (unlikely(dma_mapping_error(trans->dev, phys))) {
+		__free_page(ret);
+		return NULL;
+	}
+
+	/* Store physical address and set use count */
+	info->dma_addr = phys;
+	refcount_set(&info->use_count, 1);
+
 	/* set the chaining pointer to the previous page if there */
-	*(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
+	info->next = *page_ptr;
 	*page_ptr = ret;
 
 	return ret;
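Two things are worth noting about the mapping added above. The page is mapped
once, for its whole lifetime, with DMA_ATTR_SKIP_CPU_SYNC, which tells the DMA
core not to synchronize CPU caches at map and unmap time; since the commit
message says the mapping is not used yet, the later commit that starts using
it would have to take care of any required syncs (this reading follows from
the attribute's documented semantics and is not stated in the patch itself).
And use_count starts at 1: that single reference belongs to the page chain
rooted in skb->cb, so iwl_pcie_free_and_unmap_tso_page(), added in tx.c below,
only unmaps and frees once the last user drops its reference.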
@@ -76,7 +92,7 @@ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
 	 * a new mapping for it so the device will not fail.
 	 */
 
-	if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
+	if (WARN_ON(len > IWL_TSO_PAGE_DATA_SIZE)) {
 		ret = -ENOBUFS;
 		goto unmap;
 	}
@@ -782,7 +798,7 @@ static void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
 			struct sk_buff *skb = txq->entries[idx].skb;
 
 			if (!WARN_ON_ONCE(!skb))
-				iwl_pcie_free_tso_page(trans, skb, cmd_meta);
+				iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
 		}
 		iwl_txq_gen2_free_tfd(trans, txq);
 		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -209,7 +209,21 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
 	spin_unlock(&trans_pcie->reg_lock);
 }
 
-void iwl_pcie_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb,
+static void iwl_pcie_free_and_unmap_tso_page(struct iwl_trans *trans,
+					     struct page *page)
+{
+	struct iwl_tso_page_info *info = IWL_TSO_PAGE_INFO(page_address(page));
+
+	/* Decrease internal use count and unmap/free page if needed */
+	if (refcount_dec_and_test(&info->use_count)) {
+		dma_unmap_page(trans->dev, info->dma_addr, PAGE_SIZE,
+			       DMA_TO_DEVICE);
+
+		__free_page(page);
+	}
+}
+
+void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
 			    struct iwl_cmd_meta *cmd_meta)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -221,10 +235,11 @@ void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
 	*page_ptr = NULL;
 
 	while (next) {
+		struct iwl_tso_page_info *info;
 		struct page *tmp = next;
 
-		next = *(void **)((u8 *)page_address(next) + PAGE_SIZE -
-				  sizeof(void *));
+		info = IWL_TSO_PAGE_INFO(page_address(next));
+		next = info->next;
 
 		/* Unmap the scatter gather list that is on the last page */
 		if (!next && cmd_meta->sg_offset) {
@@ -236,7 +251,7 @@ void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
 			dma_unmap_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0);
 		}
 
-		__free_page(tmp);
+		iwl_pcie_free_and_unmap_tso_page(trans, tmp);
 	}
 }
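The ownership model that iwl_pcie_free_and_unmap_tso_page() completes: the
allocator's cached page holds one reference (refcount_set(..., 1)), each skb
that uses the page takes another (the refcount_inc() in iwl_pcie_get_page_hdr()
below), and whichever side drops last unmaps and frees. A compressed userspace
sketch of that protocol, with local stand-ins rather than the kernel's
refcount_t API:

    #include <assert.h>
    #include <stdatomic.h>

    /* stand-ins for refcount_set()/refcount_inc()/refcount_dec_and_test() */
    static void my_refcount_set(atomic_int *r, int n) { atomic_store(r, n); }
    static void my_refcount_inc(atomic_int *r) { atomic_fetch_add(r, 1); }
    static int my_refcount_dec_and_test(atomic_int *r)
    {
    	return atomic_fetch_sub(r, 1) == 1;	/* true for the last reference */
    }

    int main(void)
    {
    	atomic_int use_count;

    	my_refcount_set(&use_count, 1);	/* page cached by the allocator */
    	my_refcount_inc(&use_count);	/* one skb starts referencing it */

    	/* skb freed: page stays alive for the allocator cache */
    	assert(!my_refcount_dec_and_test(&use_count));
    	/* cache drops its reference: now unmap and free the page */
    	assert(my_refcount_dec_and_test(&use_count));
    	return 0;
    }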
@@ -381,7 +396,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
 			if (WARN_ON_ONCE(!skb))
 				continue;
 
-			iwl_pcie_free_tso_page(trans, skb, cmd_meta);
+			iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
 		}
 		iwl_txq_free_tfd(trans, txq);
 		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
@@ -1722,7 +1737,9 @@ static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans,
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->txqs.tso_hdr_page);
+	struct iwl_tso_page_info *info;
 	struct page **page_ptr;
+	dma_addr_t phys;
 	void *ret;
 
 	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
@@ -1743,23 +1760,42 @@ static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans,
 	 *
 	 * (see also get_workaround_page() in tx-gen2.c)
 	 */
-	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
-			   sizeof(void *))
+	if (((unsigned long)p->pos & ~PAGE_MASK) + len < IWL_TSO_PAGE_DATA_SIZE) {
+		info = IWL_TSO_PAGE_INFO(page_address(p->page));
 		goto out;
+	}
 
 	/* We don't have enough room on this page, get a new one. */
-	__free_page(p->page);
+	iwl_pcie_free_and_unmap_tso_page(trans, p->page);
 
 alloc:
 	p->page = alloc_page(GFP_ATOMIC);
 	if (!p->page)
 		return NULL;
 	p->pos = page_address(p->page);
 
+	info = IWL_TSO_PAGE_INFO(page_address(p->page));
+
 	/* set the chaining pointer to NULL */
-	*(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
+	info->next = NULL;
+
+	/* Create a DMA mapping for the page */
+	phys = dma_map_page_attrs(trans->dev, p->page, 0, PAGE_SIZE,
+				  DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+	if (unlikely(dma_mapping_error(trans->dev, phys))) {
+		__free_page(p->page);
+		p->page = NULL;
+
+		return NULL;
+	}
+
+	/* Store physical address and set use count */
+	info->dma_addr = phys;
+	refcount_set(&info->use_count, 1);
+
 out:
 	*page_ptr = p->page;
-	get_page(p->page);
+	/* Return an internal reference for the caller */
+	refcount_inc(&info->use_count);
 	ret = p->pos;
 	p->pos += len;
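As a worked instance of the new room check above, assume 4 KiB pages and a
24-byte struct iwl_tso_page_info (8-byte dma_addr_t and pointer plus a padded
4-byte refcount_t; both figures are build-dependent assumptions). Then
IWL_TSO_PAGE_DATA_SIZE = 4096 - 24 = 4072: with p->pos at in-page offset 4000,
a 64-byte header still fits (4000 + 64 = 4064 < 4072), while a 128-byte one
spills (4000 + 128 = 4128 >= 4072) and takes the free-and-realloc path.
Working on the in-page offset of p->pos, rather than comparing raw pointers as
the old check did, is what lets the test pair directly with
IWL_TSO_PAGE_DATA_SIZE.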
@@ -2330,7 +2366,7 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 				    read_ptr, txq->read_ptr, txq_id))
 			continue;
 
-		iwl_pcie_free_tso_page(trans, skb, cmd_meta);
+		iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
 
 		__skb_queue_tail(skbs, skb);