
dmaengine fixes for v5.17

Bunch of driver fixes for:
  - ptdma error handling in init
  - lock fix in at_hdmac
  - error path and error num fix for sh dma
  - pm balance fix for stm32
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEE+vs47OPLdNbVcHzyfBQHDyUjg0cFAmISSYgACgkQfBQHDyUj
 g0dQ7Q//RsXEFQuqGJRmWTcjaZAKZAHbn0euwCsBxgNUv8mJBcuIHlr2NLRp/Fp+
 I2gI6mBy0bTdcn1UTzN1yw2IFNMs3+NX6wOeHX+zwQYtZwFzaOvbxoJDPZCn/8/C
 saRSs6SBzgD2a7tpL56vyO2Fsap+eB3Q2OuTlowvfDv/rcfwMp4R3qbFCQgOQZCx
 tYq1QIQ/cmsFl0DvLyovski2jk2LTmmN+1B/OvKV1G3RvCZuDl0koOBvc8CrpIfu
 fSgOxhu49qPO9YW4EXALHX9gcQabfdBTvslnqeSepfJsGmpig1qA6nKQOQm2xf/s
 5C6RMeWg10P1TttB24Gced+hEDHOm82fGkJVNyamvU/xOQuYVC97scq0p6TSH6ow
 QdNYHvDqKyAOSfBz1SiyP22PpXh++s/n3Ta+a+4AUuupzTpLIogH/Q2a0hCYo/rB
 UumMY4olGo5nQmCKvHMSp0qeQSWKPlXggjbeqDuSc0QuBQqF3IO9am65HbcS+Weg
 4Wy3bXlE/WJ5RJwInZsCQNMhjYMJKc/NPkuv9deQmkWM2muX75NUGmYm4nnDdSWg
 EAC1V7S4poGdn2MCBl7Ifpu0hSd112a98NIFtoGNwlY5g18m+6/9D/v6bRFm8NTn
 E3ZFFKwUBcrNnq2JVD3CJJRCIwjpRalTph9an25lNfmm/sxgbv0=
 =5uHl
 -----END PGP SIGNATURE-----

Merge tag 'dmaengine-fix-5.17' into next

This merges the dmaengine/dmaengine-fix-5.17 tag into next, as it is already
merged in mainline and resolves dependencies between patches.
Vinod Koul committed 2022-03-10 10:12:25 +05:30
commit 60c10db9b1
5 changed files with 25 additions and 13 deletions
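
Several of these fixes (ptdma and stm32-dmamux in particular) are about goto-based
cleanup in init/probe paths: each failure point must jump to a label that undoes
exactly the steps that have already succeeded, in reverse order. Below is a minimal
standalone sketch of that pattern, not code from the patches; all names
(fake_core_init, create_pool, request_fake_irq, and so on) are made up for
illustration.

/* Build with: cc -Wall sketch.c */
#include <stdlib.h>
#include <stdio.h>

/* Stand-ins for a driver's real setup steps. */
static void *create_pool(void)      { return malloc(64); }
static void *alloc_queue(void)      { return malloc(128); }
static int   request_fake_irq(void) { return 0; /* 0 = success */ }

static int fake_core_init(void)
{
        void *pool, *queue;
        int ret = -1;

        pool = create_pool();                   /* step 1 */
        if (!pool)
                return ret;

        queue = alloc_queue();                  /* step 2 */
        if (!queue)
                goto e_destroy_pool;            /* undo step 1 only */

        if (request_fake_irq())                 /* step 3 */
                goto e_free_queue;              /* undo step 2, then step 1 */

        printf("init ok\n");
        return 0;

        /*
         * Cleanup labels mirror the setup steps in reverse order and fall
         * through, so jumping to the right label undoes exactly what was
         * already done -- the property the ptdma fix restores.
         */
e_free_queue:
        free(queue);
e_destroy_pool:
        free(pool);
        return ret;
}

int main(void)
{
        return fake_core_init() ? EXIT_FAILURE : EXIT_SUCCESS;
}

The ptdma hunks below restore this property by renaming the labels after what
they free (e_free_irq, e_free_dma, e_destroy_pool) and pointing each goto at
the label matching the work done so far.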


@@ -1681,8 +1681,10 @@ static void at_xdmac_tasklet(struct tasklet_struct *t)
                 __func__, atchan->irq_status);
 
         if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) &&
-            !(atchan->irq_status & error_mask))
+            !(atchan->irq_status & error_mask)) {
+                spin_unlock_irq(&atchan->lock);
                 return;
+        }
 
         if (atchan->irq_status & error_mask)
                 at_xdmac_handle_error(atchan);
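
The hunk above makes sure the channel lock taken at the top of at_xdmac_tasklet()
is also dropped on the early-return path. A rough userspace analogue of the rule,
using a pthread mutex in place of the driver's spinlock (illustrative only, not
kernel code):

/* Build with: cc -Wall -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int irq_status;         /* set elsewhere in a real driver */
#define STATUS_DONE  0x1
#define STATUS_ERROR 0x2

static void fake_tasklet(void)
{
        pthread_mutex_lock(&chan_lock);

        if (!(irq_status & (STATUS_DONE | STATUS_ERROR))) {
                /* Early return: unlock first, or the next lock attempt blocks forever. */
                pthread_mutex_unlock(&chan_lock);
                return;
        }

        if (irq_status & STATUS_ERROR)
                printf("handle error\n");
        else
                printf("handle completion\n");

        pthread_mutex_unlock(&chan_lock);
}

int main(void)
{
        fake_tasklet();                 /* irq_status == 0: early-return path */
        irq_status = STATUS_DONE;
        fake_tasklet();                 /* normal completion path */
        return 0;
}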


@@ -207,7 +207,7 @@ int pt_core_init(struct pt_device *pt)
         if (!cmd_q->qbase) {
                 dev_err(dev, "unable to allocate command queue\n");
                 ret = -ENOMEM;
-                goto e_dma_alloc;
+                goto e_destroy_pool;
         }
 
         cmd_q->qidx = 0;
@@ -229,8 +229,10 @@ int pt_core_init(struct pt_device *pt)
         /* Request an irq */
         ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt);
-        if (ret)
-                goto e_pool;
+        if (ret) {
+                dev_err(dev, "unable to allocate an IRQ\n");
+                goto e_free_dma;
+        }
 
         /* Update the device registers with queue information. */
         cmd_q->qcontrol &= ~CMD_Q_SIZE;
@@ -250,21 +252,20 @@ int pt_core_init(struct pt_device *pt)
         /* Register the DMA engine support */
         ret = pt_dmaengine_register(pt);
         if (ret)
-                goto e_dmaengine;
+                goto e_free_irq;
 
         /* Set up debugfs entries */
         ptdma_debugfs_setup(pt);
 
         return 0;
 
-e_dmaengine:
+e_free_irq:
         free_irq(pt->pt_irq, pt);
 
-e_dma_alloc:
+e_free_dma:
         dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma);
 
-e_pool:
-        dev_err(dev, "unable to allocate an IRQ\n");
+e_destroy_pool:
         dma_pool_destroy(pt->cmd_q.dma_pool);
 
         return ret;


@@ -1868,8 +1868,13 @@ static int rcar_dmac_probe(struct platform_device *pdev)
         dmac->dev = &pdev->dev;
         platform_set_drvdata(pdev, dmac);
-        dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
-        dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
+        ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
+        if (ret)
+                return ret;
+
+        ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
+        if (ret)
+                return ret;
 
         ret = rcar_dmac_parse_of(&pdev->dev, dmac);
         if (ret < 0)


@@ -115,8 +115,10 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
                 ret = pm_runtime_get(schan->dev);
 
                 spin_unlock_irq(&schan->chan_lock);
-                if (ret < 0)
+                if (ret < 0) {
                         dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
+                        pm_runtime_put(schan->dev);
+                }
 
                 pm_runtime_barrier(schan->dev);
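
The shdma hunk above rebalances the runtime-PM usage count: pm_runtime_get()
raises the count even when the resume it triggers fails, so the error branch
must drop it again. A small standalone analogue of that contract (all names
invented for illustration):

#include <stdio.h>

static int usage_count;

static int fake_resume(void) { return -1; /* pretend the resume step failed */ }

/* Like pm_runtime_get(): bumps the count regardless of the resume outcome. */
static int fake_get(void)
{
        usage_count++;
        return fake_resume();
}

static void fake_put(void)
{
        usage_count--;
}

int main(void)
{
        int ret = fake_get();

        if (ret < 0) {
                fprintf(stderr, "get failed: %d\n", ret);
                fake_put();             /* rebalance, as the fix does */
        }
        printf("usage_count = %d (should be 0)\n", usage_count);
        return 0;
}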


@@ -292,10 +292,12 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
         ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
                                      &stm32_dmamux->dmarouter);
         if (ret)
-                goto err_clk;
+                goto pm_disable;
 
         return 0;
 
+pm_disable:
+        pm_runtime_disable(&pdev->dev);
 err_clk:
         clk_disable_unprepare(stm32_dmamux->clk);