Revert "dmaengine: virt-dma: don't always free descriptor upon completion"
This reverts commit b9855f03d5.
The patch breaks existing DMA use cases. For example, an audio SoC dmaengine
client never releases its channel, so virt-dma keeps caching completed
descriptors and eventually exhausts system memory.
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent 0ec9ebc706
commit 8c8fe97b2b

2 changed files with 7 additions and 25 deletions
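
For context, a minimal sketch of the usage pattern that exposed the leak, assuming a
typical audio-style client that requests its channel once and keeps it for the lifetime
of the device (the function stream_start_stop and its parameters are hypothetical; the
dmaengine calls are the standard ones used by PCM drivers). With the reverted patch in
place, descriptors submitted with DMA_CTRL_ACK were parked on desc_allocated when
terminated instead of being freed, and were only reclaimed in vchan_free_chan_resources(),
which this kind of client never reaches.

	#include <linux/dmaengine.h>

	/*
	 * Hypothetical audio-style client (illustration only).  The channel is
	 * requested once at probe time and never released, so with the reverted
	 * patch every terminated, ACKed descriptor stays cached on
	 * vc->desc_allocated and memory use grows with each start/stop cycle.
	 */
	static int stream_start_stop(struct dma_chan *chan, dma_addr_t buf,
				     size_t len, size_t period)
	{
		struct dma_async_tx_descriptor *txd;

		/* one cyclic descriptor per stream start */
		txd = dmaengine_prep_dma_cyclic(chan, buf, len, period,
						DMA_MEM_TO_DEV,
						DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!txd)
			return -ENOMEM;

		dmaengine_submit(txd);
		dma_async_issue_pending(chan);

		/* ... stream runs, then userspace stops it ... */
		dmaengine_terminate_all(chan);

		/*
		 * dma_release_channel() is never called by this kind of client,
		 * so without the revert the descriptor above is never freed.
		 */
		return 0;
	}
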
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_lock_irqsave(&vc->lock, flags);
 	cookie = dma_cookie_assign(tx);
 
-	list_move_tail(&vd->node, &vc->desc_submitted);
+	list_add_tail(&vd->node, &vc->desc_submitted);
 	spin_unlock_irqrestore(&vc->lock, flags);
 
 	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
@@ -83,10 +83,8 @@ static void vchan_complete(unsigned long arg)
 		cb_data = vd->tx.callback_param;
 
 		list_del(&vd->node);
-		if (async_tx_test_ack(&vd->tx))
-			list_add(&vd->node, &vc->desc_allocated);
-		else
-			vc->desc_free(vd);
+
+		vc->desc_free(vd);
 
 		if (cb)
 			cb(cb_data);
@@ -98,13 +96,9 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
 	while (!list_empty(head)) {
 		struct virt_dma_desc *vd = list_first_entry(head,
 			struct virt_dma_desc, node);
-		if (async_tx_test_ack(&vd->tx)) {
-			list_move_tail(&vd->node, &vc->desc_allocated);
-		} else {
-			dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
-			list_del(&vd->node);
-			vc->desc_free(vd);
-		}
+		list_del(&vd->node);
+		dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
+		vc->desc_free(vd);
 	}
 }
 EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -114,7 +108,6 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
 	dma_cookie_init(&vc->chan);
 
 	spin_lock_init(&vc->lock);
-	INIT_LIST_HEAD(&vc->desc_allocated);
 	INIT_LIST_HEAD(&vc->desc_submitted);
 	INIT_LIST_HEAD(&vc->desc_issued);
 	INIT_LIST_HEAD(&vc->desc_completed);
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -29,7 +29,6 @@ struct virt_dma_chan {
 	spinlock_t lock;
 
 	/* protected by vc.lock */
-	struct list_head desc_allocated;
 	struct list_head desc_submitted;
 	struct list_head desc_issued;
 	struct list_head desc_completed;
@@ -56,16 +55,11 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
 	struct virt_dma_desc *vd, unsigned long tx_flags)
 {
 	extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
-	unsigned long flags;
 
 	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
 	vd->tx.flags = tx_flags;
 	vd->tx.tx_submit = vchan_tx_submit;
 
-	spin_lock_irqsave(&vc->lock, flags);
-	list_add_tail(&vd->node, &vc->desc_allocated);
-	spin_unlock_irqrestore(&vc->lock, flags);
-
 	return &vd->tx;
 }
 
@@ -128,8 +122,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
 }
 
 /**
- * vchan_get_all_descriptors - obtain all allocated, submitted and issued
- * descriptors
+ * vchan_get_all_descriptors - obtain all submitted and issued descriptors
  * vc: virtual channel to get descriptors from
  * head: list of descriptors found
  *
@@ -141,7 +134,6 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
 static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
 	struct list_head *head)
 {
-	list_splice_tail_init(&vc->desc_allocated, head);
 	list_splice_tail_init(&vc->desc_submitted, head);
 	list_splice_tail_init(&vc->desc_issued, head);
 	list_splice_tail_init(&vc->desc_completed, head);
@@ -149,14 +141,11 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
 
 static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
 {
-	struct virt_dma_desc *vd;
 	unsigned long flags;
 	LIST_HEAD(head);
 
 	spin_lock_irqsave(&vc->lock, flags);
 	vchan_get_all_descriptors(vc, &head);
-	list_for_each_entry(vd, &head, node)
-		async_tx_clear_ack(&vd->tx);
 	spin_unlock_irqrestore(&vc->lock, flags);
 
 	vchan_dma_desc_free_list(vc, &head);