Merge tag 'dmaengine-fix-5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine fixes from Vinod Koul:
 "One core fix for DMA_INTERRUPT and rest driver fixes.

  Core:

   - Revert verification of DMA_INTERRUPT capability as that was
     incorrect

  Bunch of driver fixes for:

   - ti: refcount and put_device leak

   - qcom_bam: runtime pm overflow

   - idxd: force wq context cleanup and call idxd_enable_system_pasid()
     on success

   - dw-axi-dmac: RMW on channel suspend register

   - imx-sdma: restart cyclic channel when enabled

   - at_xdma: error handling for at_xdmac_alloc_desc

   - pl330: lockdep warning

   - lgm: error handling path in probe

   - allwinner: Fix min/max typo in binding"

* tag 'dmaengine-fix-5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine:
  dt-bindings: dma: allwinner,sun50i-a64-dma: Fix min/max typo
  dmaengine: lgm: Fix an error handling path in intel_ldma_probe()
  dmaengine: pl330: Fix lockdep warning about non-static key
  dmaengine: idxd: Only call idxd_enable_system_pasid() if succeeded in enabling SVA feature
  dmaengine: at_xdma: handle errors of at_xdmac_alloc_desc() correctly
  dmaengine: imx-sdma: only restart cyclic channel when enabled
  dmaengine: dw-axi-dmac: Fix RMW on channel suspend register
  dmaengine: idxd: force wq context cleanup on device disable path
  dmaengine: qcom: bam_dma: fix runtime PM underflow
  dmaengine: imx-sdma: Allow imx8m for imx7 FW revs
  dmaengine: Revert "dmaengine: add verification of DMA_INTERRUPT capability for dmatest"
  dmaengine: ti: Add missing put_device in ti_dra7_xbar_route_allocate
  dmaengine: ti: Fix refcount leak in ti_dra7_xbar_route_allocate
Linus Torvalds 2022-07-10 11:23:01 -07:00
commit 952c53cd35
11 changed files with 43 additions and 56 deletions

--- a/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml

@@ -67,7 +67,7 @@ if:
     then:
       properties:
         clocks:
-          maxItems: 2
+          minItems: 2
 
       required:
         - clock-names

--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c

@@ -1900,6 +1900,11 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
     for (i = 0; i < init_nr_desc_per_channel; i++) {
         desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
         if (!desc) {
+            if (i == 0) {
+                dev_warn(chan2dev(chan),
+                         "can't allocate any descriptors\n");
+                return -EIO;
+            }
             dev_warn(chan2dev(chan),
                      "only %d descriptors have been allocated\n", i);
             break;
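
The shape of the fix: a partially filled descriptor pool is still usable, so only a completely failed first allocation is treated as a hard error. A stand-alone user-space sketch of that pattern (illustrative only, not the driver code; alloc_desc and alloc_pool are hypothetical names):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for at_xdmac_alloc_desc(): may fail at any point. */
static void *alloc_desc(void)
{
    return malloc(64);
}

/* Allocate up to 'want' descriptors; fail hard only if none could be made. */
static int alloc_pool(void **pool, int want)
{
    int i;

    for (i = 0; i < want; i++) {
        pool[i] = alloc_desc();
        if (!pool[i]) {
            if (i == 0) {
                fprintf(stderr, "can't allocate any descriptors\n");
                return -1;    /* mirrors the new return -EIO */
            }
            fprintf(stderr, "only %d descriptors allocated\n", i);
            break;    /* a partial pool is still usable */
        }
    }
    return i;
}

int main(void)
{
    void *pool[8];
    int n = alloc_pool(pool, 8);

    printf("got %d descriptors\n", n);
    while (n-- > 0)
        free(pool[n]);
    return 0;
}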

--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c

@@ -675,16 +675,10 @@ static int dmatest_func(void *data)
     /*
      * src and dst buffers are freed by ourselves below
      */
-    if (params->polled) {
+    if (params->polled)
         flags = DMA_CTRL_ACK;
-    } else {
-        if (dma_has_cap(DMA_INTERRUPT, dev->cap_mask)) {
-            flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
-        } else {
-            pr_err("Channel does not support interrupt!\n");
-            goto err_pq_array;
-        }
-    }
+    else
+        flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 
     ktime = ktime_get();
     while (!(kthread_should_stop() ||
@@ -912,7 +906,6 @@ error_unmap_continue:
     runtime = ktime_to_us(ktime);
 
     ret = 0;
-err_pq_array:
     kfree(dma_pq);
 err_srcs_array:
     kfree(srcs);

--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c

@@ -1164,8 +1164,9 @@ static int dma_chan_pause(struct dma_chan *dchan)
               BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
         axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
     } else {
-        val = BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
-              BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
+        val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
+        val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
+               BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
         axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
     }
 
@@ -1190,12 +1191,13 @@ static inline void axi_chan_resume(struct axi_dma_chan *chan)
 {
     u32 val;
 
-    val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
     if (chan->chip->dw->hdata->reg_map_8_channels) {
+        val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
         val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
         val |=  (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
         axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
     } else {
+        val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
         val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
         val |=  (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
         axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
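
The underlying bug class is a blind write to a shared per-channel register where a read-modify-write was needed, which clobbers the suspend bits of every other channel. A minimal user-space sketch of the difference, with a plain variable standing in for DMAC_CHSUSPREG (illustrative only, not the driver code):

#include <stdio.h>
#include <stdint.h>

static uint32_t chsuspreg;    /* stand-in for the shared suspend register */

/* Buggy: overwrites the whole register, dropping other channels' bits. */
static void suspend_blind(int ch)
{
    chsuspreg = 1u << ch;
}

/* Fixed: read-modify-write preserves bits set for other channels. */
static void suspend_rmw(int ch)
{
    uint32_t val = chsuspreg;    /* read   */
    val |= 1u << ch;             /* modify */
    chsuspreg = val;             /* write  */
}

int main(void)
{
    chsuspreg = 0;
    suspend_blind(0);
    suspend_blind(1);    /* channel 0's bit is lost */
    printf("blind: %#x\n", (unsigned)chsuspreg);

    chsuspreg = 0;
    suspend_rmw(0);
    suspend_rmw(1);      /* both bits survive */
    printf("rmw:   %#x\n", (unsigned)chsuspreg);
    return 0;
}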

--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c

@@ -716,10 +716,7 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
         struct idxd_wq *wq = idxd->wqs[i];
 
         mutex_lock(&wq->wq_lock);
-        if (wq->state == IDXD_WQ_ENABLED) {
-            idxd_wq_disable_cleanup(wq);
-            wq->state = IDXD_WQ_DISABLED;
-        }
+        idxd_wq_disable_cleanup(wq);
         idxd_wq_device_reset_cleanup(wq);
         mutex_unlock(&wq->wq_lock);
     }

--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c

@@ -512,15 +512,16 @@ static int idxd_probe(struct idxd_device *idxd)
     dev_dbg(dev, "IDXD reset complete\n");
 
     if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
-        if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA))
+        if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
             dev_warn(dev, "Unable to turn on user SVA feature.\n");
-        else
+        } else {
             set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
 
-        if (idxd_enable_system_pasid(idxd))
-            dev_warn(dev, "No in-kernel DMA with PASID.\n");
-        else
-            set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
+            if (idxd_enable_system_pasid(idxd))
+                dev_warn(dev, "No in-kernel DMA with PASID.\n");
+            else
+                set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
+        }
     } else if (!sva) {
         dev_warn(dev, "User forced SVA off via module param.\n");
     }
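
The fix nests the PASID setup under the branch where enabling the SVA feature actually succeeded, instead of running it unconditionally. A tiny sketch of that control-flow change (illustrative only; enable_sva and enable_pasid are hypothetical stand-ins):

#include <stdio.h>

static int enable_sva(void)   { return -1; }    /* simulate SVA enable failing */
static int enable_pasid(void) { return 0; }

int main(void)
{
    if (enable_sva()) {
        fprintf(stderr, "Unable to turn on user SVA feature.\n");
    } else {
        /* Only meaningful once SVA is on, so it now lives in this
         * branch; before the fix it ran unconditionally. */
        if (enable_pasid())
            fprintf(stderr, "No in-kernel DMA with PASID.\n");
        else
            puts("PASID enabled");
    }
    return 0;
}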

--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c

@@ -891,7 +891,7 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
      * SDMA stops cyclic channel when DMA request triggers a channel and no SDMA
      * owned buffer is available (i.e. BD_DONE was set too late).
      */
-    if (!is_sdma_channel_enabled(sdmac->sdma, sdmac->channel)) {
+    if (sdmac->desc && !is_sdma_channel_enabled(sdmac->sdma, sdmac->channel)) {
         dev_warn(sdmac->sdma->dev, "restart cyclic channel %d\n", sdmac->channel);
         sdma_enable_channel(sdmac->sdma, sdmac->channel);
     }
@@ -2346,7 +2346,7 @@ MODULE_DESCRIPTION("i.MX SDMA driver");
 #if IS_ENABLED(CONFIG_SOC_IMX6Q)
 MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
 #endif
-#if IS_ENABLED(CONFIG_SOC_IMX7D)
+#if IS_ENABLED(CONFIG_SOC_IMX7D) || IS_ENABLED(CONFIG_SOC_IMX8M)
 MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
 #endif
 MODULE_LICENSE("GPL");

--- a/drivers/dma/lgm/lgm-dma.c
+++ b/drivers/dma/lgm/lgm-dma.c

@@ -1593,11 +1593,12 @@ static int intel_ldma_probe(struct platform_device *pdev)
     d->core_clk = devm_clk_get_optional(dev, NULL);
     if (IS_ERR(d->core_clk))
         return PTR_ERR(d->core_clk);
-    clk_prepare_enable(d->core_clk);
 
     d->rst = devm_reset_control_get_optional(dev, NULL);
     if (IS_ERR(d->rst))
         return PTR_ERR(d->rst);
+
+    clk_prepare_enable(d->core_clk);
     reset_control_deassert(d->rst);
 
     ret = devm_add_action_or_reset(dev, ldma_clk_disable, d);
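
The probe fix is purely an ordering change: no side effect (enabling the clock) happens until every fallible devm_*_get() has succeeded, so an early return leaves nothing to undo. The general rule sketched in isolation (illustrative only; get_clk and get_reset are hypothetical stand-ins):

#include <stdio.h>

static int get_clk(void)   { return 0; }     /* 0 = success */
static int get_reset(void) { return -1; }    /* simulate failure */

static int probe(void)
{
    int ret;

    ret = get_clk();
    if (ret)
        return ret;

    ret = get_reset();
    if (ret)
        return ret;    /* nothing enabled yet, nothing leaks */

    /* Side effects only after every fallible acquisition succeeded. */
    puts("clk enabled, reset deasserted");
    return 0;
}

int main(void)
{
    printf("probe: %d\n", probe());
    return 0;
}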

--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c

@@ -2589,7 +2589,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
 
     /* If the DMAC pool is empty, alloc new */
     if (!desc) {
-        DEFINE_SPINLOCK(lock);
+        static DEFINE_SPINLOCK(lock);
         LIST_HEAD(pool);
 
         if (!add_desc(&pool, &lock, GFP_ATOMIC, 1))
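
lockdep warns here because an on-stack DEFINE_SPINLOCK has no static lock-class key; making the lock static also means all callers share one lock object rather than each getting a fresh one. A user-space analogue with pthreads shows why a per-call lock object is meaningless (illustrative only; a mutex stands in for the spinlock):

#include <pthread.h>
#include <stdio.h>

static long counter;
static void (*bump)(void);

/* Buggy pattern: a lock created fresh on every call protects nothing,
 * because each caller locks its own private object. (In the kernel,
 * lockdep additionally complains that an on-stack lock has no static
 * lock-class key, which is what the pl330 fix addresses.) */
static void bump_broken(void)
{
    pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;    /* per call! */

    pthread_mutex_lock(&lock);
    counter++;
    pthread_mutex_unlock(&lock);
}

/* Fixed pattern: one lock with static storage duration, shared by all. */
static void bump_fixed(void)
{
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    pthread_mutex_lock(&lock);
    counter++;
    pthread_mutex_unlock(&lock);
}

static void *worker(void *unused)
{
    (void)unused;
    for (int i = 0; i < 100000; i++)
        bump();
    return NULL;
}

static long run(void (*fn)(void))
{
    pthread_t a, b;

    counter = 0;
    bump = fn;
    pthread_create(&a, NULL, worker, NULL);
    pthread_create(&b, NULL, worker, NULL);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    return counter;
}

int main(void)
{
    printf("broken lock: %ld (may be < 200000)\n", run(bump_broken));
    printf("static lock: %ld (always 200000)\n", run(bump_fixed));
    return 0;
}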

--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c

@@ -558,14 +558,6 @@ static int bam_alloc_chan(struct dma_chan *chan)
     return 0;
 }
 
-static int bam_pm_runtime_get_sync(struct device *dev)
-{
-    if (pm_runtime_enabled(dev))
-        return pm_runtime_get_sync(dev);
-
-    return 0;
-}
-
 /**
  * bam_free_chan - Frees dma resources associated with specific channel
  * @chan: specified channel
@@ -581,7 +573,7 @@ static void bam_free_chan(struct dma_chan *chan)
     unsigned long flags;
     int ret;
 
-    ret = bam_pm_runtime_get_sync(bdev->dev);
+    ret = pm_runtime_get_sync(bdev->dev);
     if (ret < 0)
         return;
 
@@ -784,7 +776,7 @@ static int bam_pause(struct dma_chan *chan)
     unsigned long flag;
     int ret;
 
-    ret = bam_pm_runtime_get_sync(bdev->dev);
+    ret = pm_runtime_get_sync(bdev->dev);
     if (ret < 0)
         return ret;
 
@@ -810,7 +802,7 @@ static int bam_resume(struct dma_chan *chan)
     unsigned long flag;
     int ret;
 
-    ret = bam_pm_runtime_get_sync(bdev->dev);
+    ret = pm_runtime_get_sync(bdev->dev);
     if (ret < 0)
         return ret;
 
@@ -919,7 +911,7 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
     if (srcs & P_IRQ)
         tasklet_schedule(&bdev->task);
 
-    ret = bam_pm_runtime_get_sync(bdev->dev);
+    ret = pm_runtime_get_sync(bdev->dev);
     if (ret < 0)
         return IRQ_NONE;
 
@@ -1037,7 +1029,7 @@ static void bam_start_dma(struct bam_chan *bchan)
     if (!vd)
         return;
 
-    ret = bam_pm_runtime_get_sync(bdev->dev);
+    ret = pm_runtime_get_sync(bdev->dev);
     if (ret < 0)
         return;
 
@@ -1374,11 +1366,6 @@ static int bam_dma_probe(struct platform_device *pdev)
     if (ret)
         goto err_unregister_dma;
 
-    if (!bdev->bamclk) {
-        pm_runtime_disable(&pdev->dev);
-        return 0;
-    }
-
     pm_runtime_irq_safe(&pdev->dev);
     pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY);
     pm_runtime_use_autosuspend(&pdev->dev);
@@ -1462,10 +1449,8 @@ static int __maybe_unused bam_dma_suspend(struct device *dev)
 {
     struct bam_device *bdev = dev_get_drvdata(dev);
 
-    if (bdev->bamclk) {
-        pm_runtime_force_suspend(dev);
-        clk_unprepare(bdev->bamclk);
-    }
+    pm_runtime_force_suspend(dev);
+    clk_unprepare(bdev->bamclk);
 
     return 0;
 }
@@ -1475,13 +1460,11 @@ static int __maybe_unused bam_dma_resume(struct device *dev)
     struct bam_device *bdev = dev_get_drvdata(dev);
     int ret;
 
-    if (bdev->bamclk) {
-        ret = clk_prepare(bdev->bamclk);
-        if (ret)
-            return ret;
+    ret = clk_prepare(bdev->bamclk);
+    if (ret)
+        return ret;
 
-        pm_runtime_force_resume(dev);
-    }
+    pm_runtime_force_resume(dev);
 
     return 0;
 }
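
The underflow came from an asymmetry: the old bam_pm_runtime_get_sync() wrapper skipped the get when runtime PM was disabled (no clock), while the matching put paths still decremented the usage count, so it could go negative. The fix keeps runtime PM enabled unconditionally so get and put always pair up. A toy counter shows the failure mode (illustrative only, not the kernel's runtime PM code):

#include <stdio.h>

static int usage_count;
static int pm_enabled = 0;    /* like a clock-less BAM before the fix */

static void get(void)
{
    if (pm_enabled)    /* old bam_pm_runtime_get_sync() skipped this... */
        usage_count++;
}

static void put(void)
{
    usage_count--;     /* ...but the put side always ran */
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        get();
        put();
    }
    printf("usage count: %d (underflows when get is conditional)\n",
           usage_count);
    return 0;
}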

--- a/drivers/dma/ti/dma-crossbar.c
+++ b/drivers/dma/ti/dma-crossbar.c

@@ -245,6 +245,7 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
     if (dma_spec->args[0] >= xbar->xbar_requests) {
         dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
             dma_spec->args[0]);
+        put_device(&pdev->dev);
         return ERR_PTR(-EINVAL);
     }
 
@@ -252,12 +253,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
     dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
     if (!dma_spec->np) {
         dev_err(&pdev->dev, "Can't get DMA master\n");
+        put_device(&pdev->dev);
         return ERR_PTR(-EINVAL);
     }
 
     map = kzalloc(sizeof(*map), GFP_KERNEL);
     if (!map) {
         of_node_put(dma_spec->np);
+        put_device(&pdev->dev);
         return ERR_PTR(-ENOMEM);
     }
 
@@ -268,6 +271,8 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
         mutex_unlock(&xbar->mutex);
         dev_err(&pdev->dev, "Run out of free DMA requests\n");
         kfree(map);
+        of_node_put(dma_spec->np);
+        put_device(&pdev->dev);
         return ERR_PTR(-ENOMEM);
     }
     set_bit(map->xbar_out, xbar->dma_inuse);
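
Each early return in ti_dra7_xbar_route_allocate() now releases exactly what was acquired before it: first the device reference, then additionally the OF node reference, then additionally the map allocation. The same unwind discipline in a stand-alone sketch (illustrative only; get_ref and put_ref are hypothetical stand-ins for get_device()/put_device() and of_parse_phandle()/of_node_put()):

#include <stdio.h>
#include <stdlib.h>

static int refs;    /* live references; should be 0 after any failure */

static void get_ref(const char *what) { refs++; printf("get %s\n", what); }
static void put_ref(const char *what) { refs--; printf("put %s\n", what); }

/* Every early exit must release exactly what was acquired before it. */
static int route_allocate(int fail_at)
{
    get_ref("device");               /* like get_device(&pdev->dev)   */
    if (fail_at == 1) {
        put_ref("device");
        return -1;
    }

    get_ref("of-node");              /* like of_parse_phandle()       */
    if (fail_at == 2) {
        put_ref("of-node");
        put_ref("device");
        return -1;
    }

    void *map = malloc(32);
    if (!map || fail_at == 3) {
        free(map);
        put_ref("of-node");
        put_ref("device");
        return -1;
    }

    /* Success: the real driver keeps these references for the life of
     * the route; this demo releases them just to end balanced. */
    free(map);
    put_ref("of-node");
    put_ref("device");
    return 0;
}

int main(void)
{
    for (int f = 1; f <= 3; f++) {
        route_allocate(f);
        printf("after failure %d: %d refs leaked\n\n", f, refs);
    }
    return 0;
}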