
dma-debug: store a phys_addr_t in struct dma_debug_entry

dma-debug goes to great lengths to split incoming physical addresses into
a PFN and offset to store them in struct dma_debug_entry, just to
recombine those for all meaningful uses.  Just store a phys_addr_t
instead.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Christoph Hellwig 2024-10-06 09:20:01 +02:00
parent 7543c3e3b9
commit 9d4f645a1f
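
Every hunk below is against kernel/dma/debug.c. As a minimal userspace sketch of the round trip the commit message describes (not kernel code; the 4 KiB PAGE_SHIFT and the helper macros are stand-in assumptions), splitting a physical address into a PFN plus in-page offset and recombining them is an identity, so storing the phys_addr_t directly loses nothing:

/* Minimal userspace sketch, not kernel code: assumes 4 KiB pages. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

#define PAGE_SHIFT      12                      /* stand-in for the kernel's value */
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PFN_PHYS(pfn)   ((phys_addr_t)(pfn) << PAGE_SHIFT)
#define PHYS_PFN(addr)  ((unsigned long)((addr) >> PAGE_SHIFT))

/* Old representation: two fields that only ever get recombined. */
struct old_entry { unsigned long pfn; size_t offset; };

/* New representation: the physical address itself. */
struct new_entry { phys_addr_t paddr; };

int main(void)
{
        phys_addr_t addr = 0x12345678abcULL;

        struct old_entry o = { .pfn = PHYS_PFN(addr), .offset = addr & (PAGE_SIZE - 1) };
        struct new_entry n = { .paddr = addr };

        /* The split/recombine round trip is an identity ... */
        assert(PFN_PHYS(o.pfn) + o.offset == addr);
        /* ... so storing the address directly loses nothing. */
        assert(n.paddr == addr);

        printf("old: pfn=%#lx offset=%#zx  new: paddr=%#llx\n",
               o.pfn, o.offset, (unsigned long long)n.paddr);
        return 0;
}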

@@ -59,8 +59,7 @@ enum map_err_types {
  * @direction: enum dma_data_direction
  * @sg_call_ents: 'nents' from dma_map_sg
  * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
- * @pfn: page frame of the start address
- * @offset: offset of mapping relative to pfn
+ * @paddr: physical start address of the mapping
  * @map_err_type: track whether dma_mapping_error() was checked
  * @stack_len: number of backtrace entries in @stack_entries
  * @stack_entries: stack of backtrace history
@@ -74,8 +73,7 @@ struct dma_debug_entry {
         int                direction;
         int                sg_call_ents;
         int                sg_mapped_ents;
-        unsigned long      pfn;
-        size_t             offset;
+        phys_addr_t        paddr;
         enum map_err_types map_err_type;
 #ifdef CONFIG_STACKTRACE
         unsigned int       stack_len;
@@ -389,14 +387,6 @@ static void hash_bucket_del(struct dma_debug_entry *entry)
         list_del(&entry->list);
 }
 
-static unsigned long long phys_addr(struct dma_debug_entry *entry)
-{
-        if (entry->type == dma_debug_resource)
-                return __pfn_to_phys(entry->pfn) + entry->offset;
-
-        return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
-}
-
 /*
  * For each mapping (initial cacheline in the case of
  * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
@@ -428,8 +418,8 @@ static DEFINE_SPINLOCK(radix_lock);
 
 static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
 {
-        return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
-                (entry->offset >> L1_CACHE_SHIFT);
+        return ((entry->paddr >> PAGE_SHIFT) << CACHELINE_PER_PAGE_SHIFT) +
+                (offset_in_page(entry->paddr) >> L1_CACHE_SHIFT);
 }
 
 static int active_cacheline_read_overlap(phys_addr_t cln)
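
The new to_cacheline_number() is a direct transliteration of the old one: with pfn and offset derived from the same physical address, and with the usual definition CACHELINE_PER_PAGE_SHIFT = PAGE_SHIFT - L1_CACHE_SHIFT, both expressions reduce to paddr >> L1_CACHE_SHIFT. A small userspace check of that equivalence (the shift values below are assumptions for illustration, not taken from any particular config):

/* Userspace check that the old and new cacheline-number formulas agree.
 * PAGE_SHIFT/L1_CACHE_SHIFT are assumed values; CACHELINE_PER_PAGE_SHIFT
 * is modeled as PAGE_SHIFT - L1_CACHE_SHIFT. */
#include <assert.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;

#define PAGE_SHIFT               12
#define PAGE_SIZE                (1UL << PAGE_SHIFT)
#define L1_CACHE_SHIFT           6
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define offset_in_page(p)        ((p) & (PAGE_SIZE - 1))

static phys_addr_t cln_old(unsigned long pfn, size_t offset)
{
        return ((phys_addr_t)pfn << CACHELINE_PER_PAGE_SHIFT) +
                (offset >> L1_CACHE_SHIFT);
}

static phys_addr_t cln_new(phys_addr_t paddr)
{
        return ((paddr >> PAGE_SHIFT) << CACHELINE_PER_PAGE_SHIFT) +
                (offset_in_page(paddr) >> L1_CACHE_SHIFT);
}

int main(void)
{
        phys_addr_t samples[] = { 0x0, 0x40, 0x1000, 0x12345fc0, 0xabcdef123ULL };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                phys_addr_t p = samples[i];

                /* pfn/offset as the old entry layout would have stored them */
                assert(cln_old(p >> PAGE_SHIFT, offset_in_page(p)) == cln_new(p));
                /* both are just the global cacheline index of the address */
                assert(cln_new(p) == p >> L1_CACHE_SHIFT);
        }
        return 0;
}

Keeping the result identical matters because the cacheline number is the key into the radix tree (guarded by radix_lock above) that dma-debug uses for overlap tracking.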
@@ -538,11 +528,11 @@ void debug_dma_dump_mappings(struct device *dev)
                 if (!dev || dev == entry->dev) {
                         cln = to_cacheline_number(entry);
                         dev_info(entry->dev,
-                                 "%s idx %d P=%llx N=%lx D=%llx L=%llx cln=%pa %s %s\n",
+                                 "%s idx %d P=%pa D=%llx L=%llx cln=%pa %s %s\n",
                                  type2name[entry->type], idx,
-                                 phys_addr(entry), entry->pfn,
-                                 entry->dev_addr, entry->size,
-                                 &cln, dir2name[entry->direction],
+                                 &entry->paddr, entry->dev_addr,
+                                 entry->size, &cln,
+                                 dir2name[entry->direction],
                                  maperr2str[entry->map_err_type]);
                 }
         }
@@ -569,13 +559,13 @@ static int dump_show(struct seq_file *seq, void *v)
                 list_for_each_entry(entry, &bucket->list, list) {
                         cln = to_cacheline_number(entry);
                         seq_printf(seq,
-                                   "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx cln=%pa %s %s\n",
+                                   "%s %s %s idx %d P=%pa D=%llx L=%llx cln=%pa %s %s\n",
                                    dev_driver_string(entry->dev),
                                    dev_name(entry->dev),
                                    type2name[entry->type], idx,
-                                   phys_addr(entry), entry->pfn,
-                                   entry->dev_addr, entry->size,
-                                   &cln, dir2name[entry->direction],
+                                   &entry->paddr, entry->dev_addr,
+                                   entry->size, &cln,
+                                   dir2name[entry->direction],
                                    maperr2str[entry->map_err_type]);
                 }
                 spin_unlock_irqrestore(&bucket->lock, flags);
@@ -1003,16 +993,16 @@ static void check_unmap(struct dma_debug_entry *ref)
                            "[mapped as %s] [unmapped as %s]\n",
                            ref->dev_addr, ref->size,
                            type2name[entry->type], type2name[ref->type]);
-        } else if ((entry->type == dma_debug_coherent) &&
-                   (phys_addr(ref) != phys_addr(entry))) {
+        } else if (entry->type == dma_debug_coherent &&
+                   ref->paddr != entry->paddr) {
                 err_printk(ref->dev, entry, "device driver frees "
                            "DMA memory with different CPU address "
                            "[device address=0x%016llx] [size=%llu bytes] "
-                           "[cpu alloc address=0x%016llx] "
-                           "[cpu free address=0x%016llx]",
+                           "[cpu alloc address=0x%pa] "
+                           "[cpu free address=0x%pa]",
                            ref->dev_addr, ref->size,
-                           phys_addr(entry),
-                           phys_addr(ref));
+                           &entry->paddr,
+                           &ref->paddr);
         }
 
         if (ref->sg_call_ents && ref->type == dma_debug_sg &&
@@ -1231,8 +1221,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 
         entry->dev       = dev;
         entry->type      = dma_debug_single;
-        entry->pfn       = page_to_pfn(page);
-        entry->offset    = offset;
+        entry->paddr     = page_to_phys(page);
         entry->dev_addr  = dma_addr;
         entry->size      = size;
         entry->direction = direction;
@@ -1327,8 +1316,7 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 
                 entry->type           = dma_debug_sg;
                 entry->dev            = dev;
-                entry->pfn            = page_to_pfn(sg_page(s));
-                entry->offset         = s->offset;
+                entry->paddr          = sg_phys(s);
                 entry->size           = sg_dma_len(s);
                 entry->dev_addr       = sg_dma_address(s);
                 entry->direction      = direction;
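
sg_phys() already folds the page and the offset into one physical address (in the kernel it is defined as page_to_phys(sg_page(sg)) + sg->offset), which is why a single sg_phys(s) can replace the stored pfn/offset pair here and in the unmap and sync paths below. A sketch of that identity, using simplified stand-in types rather than the real scatterlist API:

/* Sketch of why sg_phys() can replace the pfn/offset pair.  The structs
 * and macros below are simplified stand-ins, not the kernel's. */
#include <assert.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;
#define PAGE_SHIFT 12

struct fake_page { unsigned long pfn; };
struct fake_sg { struct fake_page *page; unsigned int offset; };

#define page_to_phys(pg)        ((phys_addr_t)(pg)->pfn << PAGE_SHIFT)
#define page_to_pfn(pg)         ((pg)->pfn)
#define PFN_PHYS(pfn)           ((phys_addr_t)(pfn) << PAGE_SHIFT)

static phys_addr_t fake_sg_phys(const struct fake_sg *sg)
{
        return page_to_phys(sg->page) + sg->offset;
}

int main(void)
{
        struct fake_page pg = { .pfn = 0x1234 };
        struct fake_sg s = { .page = &pg, .offset = 0x80 };

        /* Old entry: pfn = page_to_pfn(sg_page(s)), offset = s->offset.
         * Recombining those gives exactly what sg_phys() returns. */
        assert(PFN_PHYS(page_to_pfn(s.page)) + s.offset == fake_sg_phys(&s));
        return 0;
}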
@@ -1374,8 +1362,7 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                 struct dma_debug_entry ref = {
                         .type           = dma_debug_sg,
                         .dev            = dev,
-                        .pfn            = page_to_pfn(sg_page(s)),
-                        .offset         = s->offset,
+                        .paddr          = sg_phys(s),
                         .dev_addr       = sg_dma_address(s),
                         .size           = sg_dma_len(s),
                         .direction      = dir,
@@ -1414,16 +1401,12 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
 
         entry->type      = dma_debug_coherent;
         entry->dev       = dev;
-        entry->offset    = offset_in_page(virt);
+        entry->paddr     = page_to_phys((is_vmalloc_addr(virt) ?
+                        vmalloc_to_page(virt) : virt_to_page(virt)));
         entry->size      = size;
         entry->dev_addr  = dma_addr;
         entry->direction = DMA_BIDIRECTIONAL;
 
-        if (is_vmalloc_addr(virt))
-                entry->pfn = vmalloc_to_pfn(virt);
-        else
-                entry->pfn = page_to_pfn(virt_to_page(virt));
-
         add_dma_entry(entry, attrs);
 }
 
@@ -1433,7 +1416,6 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
         struct dma_debug_entry ref = {
                 .type           = dma_debug_coherent,
                 .dev            = dev,
-                .offset         = offset_in_page(virt),
                 .dev_addr       = dma_addr,
                 .size           = size,
                 .direction      = DMA_BIDIRECTIONAL,
@@ -1443,10 +1425,8 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
         if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
                 return;
 
-        if (is_vmalloc_addr(virt))
-                ref.pfn = vmalloc_to_pfn(virt);
-        else
-                ref.pfn = page_to_pfn(virt_to_page(virt));
+        ref.paddr = page_to_phys((is_vmalloc_addr(virt) ?
+                        vmalloc_to_page(virt) : virt_to_page(virt)));
 
         if (unlikely(dma_debug_disabled()))
                 return;
@@ -1469,8 +1449,7 @@ void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
 
         entry->type      = dma_debug_resource;
         entry->dev       = dev;
-        entry->pfn       = PHYS_PFN(addr);
-        entry->offset    = offset_in_page(addr);
+        entry->paddr     = addr;
         entry->size      = size;
         entry->dev_addr  = dma_addr;
         entry->direction = direction;
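
For dma_map_resource() the caller already hands in a phys_addr_t, so the old PHYS_PFN()/offset_in_page() split was pure bookkeeping; the removed phys_addr() helper had to special-case this entry type with __pfn_to_phys() because resource addresses (MMIO ranges, for example) are not necessarily backed by struct page. A quick userspace check that the split recombines losslessly, with stand-in macros assuming 4 KiB pages:

/* Userspace check: splitting a resource address with PHYS_PFN() and
 * offset_in_page() and recombining gives the original value, so storing
 * addr directly in entry->paddr is lossless.  Macros are stand-ins. */
#include <assert.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;

#define PAGE_SHIFT              12
#define PAGE_SIZE               (1UL << PAGE_SHIFT)
#define PHYS_PFN(x)             ((unsigned long)((x) >> PAGE_SHIFT))
#define PFN_PHYS(x)             ((phys_addr_t)(x) << PAGE_SHIFT)
#define offset_in_page(p)       ((p) & (PAGE_SIZE - 1))

int main(void)
{
        phys_addr_t addr = 0xfed00404ULL;       /* arbitrary MMIO-style address */

        /* old: entry->pfn = PHYS_PFN(addr); entry->offset = offset_in_page(addr);
         * the removed phys_addr() helper then rebuilt the address from them: */
        assert(PFN_PHYS(PHYS_PFN(addr)) + offset_in_page(addr) == addr);
        return 0;
}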
@@ -1547,8 +1526,7 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                 struct dma_debug_entry ref = {
                         .type           = dma_debug_sg,
                         .dev            = dev,
-                        .pfn            = page_to_pfn(sg_page(s)),
-                        .offset         = s->offset,
+                        .paddr          = sg_phys(s),
                         .dev_addr       = sg_dma_address(s),
                         .size           = sg_dma_len(s),
                         .direction      = direction,
@@ -1579,8 +1557,7 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                 struct dma_debug_entry ref = {
                         .type           = dma_debug_sg,
                         .dev            = dev,
-                        .pfn            = page_to_pfn(sg_page(s)),
-                        .offset         = s->offset,
+                        .paddr          = sg_phys(sg),
                         .dev_addr       = sg_dma_address(s),
                         .size           = sg_dma_len(s),
                         .direction      = direction,