dma-mapping: introduce dma_get_merge_boundary()
This patch adds a new DMA API, dma_get_merge_boundary(). The function returns the DMA merge boundary if the DMA layer is able to merge segments, and 0 otherwise. The patch also adds a new dma_map_ops pointer, ->get_merge_boundary(), through which backends report that boundary.

Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Reviewed-by: Simon Horman <horms+renesas@verge.net.au>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 38c38cb732
commit 6ba99411b8
3 changed files with 25 additions and 0 deletions
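Not part of the patch itself: a minimal caller-side sketch of the new API, assuming a driver that already holds the struct device it performs DMA against. The function name and the dev_info() messages are illustrative only.

/* Hypothetical caller-side sketch (not from this patch). */
#include <linux/device.h>
#include <linux/dma-mapping.h>

static void example_report_merge_boundary(struct device *dev)
{
	unsigned long boundary = dma_get_merge_boundary(dev);

	if (!boundary) {
		/* 0 means the DMA layer cannot merge segments for this device. */
		dev_info(dev, "DMA segment merging not available\n");
		return;
	}

	/* Non-zero: the boundary at which segments may be merged. */
	dev_info(dev, "DMA merge boundary: %#lx\n", boundary);
}

Because the core falls back to 0 whenever the backend does not implement the callback (see the kernel/dma/mapping.c hunk below), callers can treat "no merging" as the default behaviour.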
Documentation/DMA-API.txt:

@@ -204,6 +204,14 @@ Returns the maximum size of a mapping for the device. The size parameter
 of the mapping functions like dma_map_single(), dma_map_page() and
 others should not be larger than the returned value.
 
+::
+
+	unsigned long
+	dma_get_merge_boundary(struct device *dev);
+
+Returns the DMA merge boundary. If the device cannot merge any of the DMA
+address segments, the function returns 0.
+
 Part Id - Streaming DMA mappings
 --------------------------------
 
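To make the documented return values concrete, here is one plausible consumer, sketched under the assumption of a block driver that owns a request_queue: the returned mask is handed to the block layer as a virt_boundary limit so that the segments it builds remain mergeable by the DMA layer. The helper name and the surrounding driver are hypothetical; blk_queue_virt_boundary() is the existing block-layer setter for that limit.

/* Hypothetical consumer sketch (not from this patch). */
#include <linux/blkdev.h>
#include <linux/dma-mapping.h>

static void example_setup_queue_limits(struct request_queue *q,
				       struct device *dma_dev)
{
	unsigned long boundary = dma_get_merge_boundary(dma_dev);

	/*
	 * Non-zero: segments that respect this boundary mask can be merged
	 * by the DMA layer, so constrain the queue accordingly.
	 * Zero: no merging is possible; apply no extra restriction.
	 */
	if (boundary)
		blk_queue_virt_boundary(q, boundary);
}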
include/linux/dma-mapping.h:

@@ -131,6 +131,7 @@ struct dma_map_ops {
 	int (*dma_supported)(struct device *dev, u64 mask);
 	u64 (*get_required_mask)(struct device *dev);
 	size_t (*max_mapping_size)(struct device *dev);
+	unsigned long (*get_merge_boundary)(struct device *dev);
 };
 
 #define DMA_MAPPING_ERROR	(~(dma_addr_t)0)
@@ -467,6 +468,7 @@ int dma_set_mask(struct device *dev, u64 mask);
 int dma_set_coherent_mask(struct device *dev, u64 mask);
 u64 dma_get_required_mask(struct device *dev);
 size_t dma_max_mapping_size(struct device *dev);
+unsigned long dma_get_merge_boundary(struct device *dev);
 #else /* CONFIG_HAS_DMA */
 static inline dma_addr_t dma_map_page_attrs(struct device *dev,
 		struct page *page, size_t offset, size_t size,
@@ -572,6 +574,10 @@ static inline size_t dma_max_mapping_size(struct device *dev)
 {
 	return 0;
 }
+static inline unsigned long dma_get_merge_boundary(struct device *dev)
+{
+	return 0;
+}
 #endif /* CONFIG_HAS_DMA */
 
 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
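On the backend side, a hedged sketch of how a dma_map_ops provider might wire up the new callback; the ops table, the function name, and the PAGE_SIZE-granule boundary are illustrative assumptions, not code from this series.

/*
 * Hypothetical backend sketch (not from this patch): a provider that can
 * merge segments at page granularity reports a mask of PAGE_SIZE - 1.
 */
#include <linux/dma-mapping.h>
#include <linux/mm.h>

static unsigned long example_dma_get_merge_boundary(struct device *dev)
{
	/* Segments may be merged when they tile page-sized granules. */
	return PAGE_SIZE - 1;
}

static const struct dma_map_ops example_dma_ops = {
	/* ... .map_page, .map_sg and the other callbacks would go here ... */
	.get_merge_boundary	= example_dma_get_merge_boundary,
};

Expressing the boundary as a mask (granule size minus one) keeps the convention used by the core code below: 0 means no merging, any other value can be consumed directly as an alignment mask.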
kernel/dma/mapping.c:

@@ -407,3 +407,14 @@ size_t dma_max_mapping_size(struct device *dev)
 	return size;
 }
 EXPORT_SYMBOL_GPL(dma_max_mapping_size);
+
+unsigned long dma_get_merge_boundary(struct device *dev)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (!ops || !ops->get_merge_boundary)
+		return 0;	/* can't merge */
+
+	return ops->get_merge_boundary(dev);
+}
+EXPORT_SYMBOL_GPL(dma_get_merge_boundary);