From 5ba846b1ee0792f5a596b9b0b86d6e8cdebfab06 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 18 Mar 2019 18:39:30 +0300 Subject: dmaengine: idma64: Use actual device for DMA transfers Intel IOMMU, when enabled, tries to find the domain of the device, assuming it's a PCI one, during DMA operations, such as mapping or unmapping. Since we are splitting the actual PCI device to couple of children via MFD framework (see drivers/mfd/intel-lpss.c for details), the DMA device appears to be a platform one, and thus not an actual one that performs DMA. In a such situation IOMMU can't find or allocate a proper domain for its operations. As a result, all DMA operations are failed. In order to fix this, supply parent of the platform device to the DMA engine framework and fix filter functions accordingly. We may rely on the fact that parent is a real PCI device, because no other configuration is present in the wild. Signed-off-by: Andy Shevchenko Acked-by: Mark Brown Acked-by: Greg Kroah-Hartman [for tty parts] Signed-off-by: Vinod Koul --- drivers/dma/idma64.c | 6 ++++-- drivers/dma/idma64.h | 2 ++ drivers/spi/spi-pxa2xx.c | 7 +------ drivers/tty/serial/8250/8250_dw.c | 4 ++-- 4 files changed, 9 insertions(+), 10 deletions(-) diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index 0baf9797cc09..83796a33dc16 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c @@ -592,7 +592,7 @@ static int idma64_probe(struct idma64_chip *chip) idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; - idma64->dma.dev = chip->dev; + idma64->dma.dev = chip->sysdev; dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK); @@ -632,6 +632,7 @@ static int idma64_platform_probe(struct platform_device *pdev) { struct idma64_chip *chip; struct device *dev = &pdev->dev; + struct device *sysdev = dev->parent; struct resource *mem; int ret; @@ -648,11 +649,12 @@ static int idma64_platform_probe(struct platform_device *pdev) if (IS_ERR(chip->regs)) return PTR_ERR(chip->regs); - ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64)); if (ret) return ret; chip->dev = dev; + chip->sysdev = sysdev; ret = idma64_probe(chip); if (ret) diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h index 6b816878e5e7..baa32e1425de 100644 --- a/drivers/dma/idma64.h +++ b/drivers/dma/idma64.h @@ -216,12 +216,14 @@ static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value) /** * struct idma64_chip - representation of iDMA 64-bit controller hardware * @dev: struct device of the DMA controller + * @sysdev: struct device of the physical device that does DMA * @irq: irq line * @regs: memory mapped I/O space * @idma64: struct idma64 that is filed by idma64_probe() */ struct idma64_chip { struct device *dev; + struct device *sysdev; int irq; void __iomem *regs; struct idma64 *idma64; diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index b6ddba833d02..5ea70f7d12e7 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -1487,12 +1487,7 @@ static int pxa2xx_spi_get_port_id(struct acpi_device *adev) static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param) { - struct device *dev = param; - - if (dev != chan->device->dev->parent) - return false; - - return true; + return param == chan->device->dev; } #endif /* CONFIG_PCI */ diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c 
index d31b975dd3fd..284e8d052fc3 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -365,7 +365,7 @@ static bool dw8250_fallback_dma_filter(struct dma_chan *chan, void *param) static bool dw8250_idma_filter(struct dma_chan *chan, void *param) { - return param == chan->device->dev->parent; + return param == chan->device->dev; } /* @@ -434,7 +434,7 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data) data->uart_16550_compatible = true; } - /* Platforms with iDMA */ + /* Platforms with iDMA 64-bit */ if (platform_get_resource_byname(to_platform_device(p->dev), IORESOURCE_MEM, "lpss_priv")) { data->dma.rx_param = p->dev->parent; -- cgit v1.2.3 From f6ed6491d565c336a360471e0c29228e34f4380e Mon Sep 17 00:00:00 2001 From: Sameer Pujar Date: Wed, 13 Mar 2019 17:02:36 +0530 Subject: dmaengine: tegra210-adma: use devm_clk_*() helpers adma driver is using pm_clk_*() interface for managing clock resources. With this it is observed that clocks remain ON always. This happens on Tegra devices which use BPMP co-processor to manage clock resources, where clocks are enabled during prepare phase. This is necessary because clocks to BPMP are always blocking. When pm_clk_*() interface is used on such Tegra devices, clock prepare count is not balanced till remove call happens for the driver and hence clocks are seen ON always. Thus this patch replaces pm_clk_*() with devm_clk_*() framework. Suggested-by: Mohan Kumar D Reviewed-by: Jonathan Hunter Signed-off-by: Sameer Pujar Signed-off-by: Vinod Koul --- drivers/dma/tegra210-adma.c | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index 5ec0dd97b397..650cd9c6974e 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include @@ -141,6 +140,7 @@ struct tegra_adma { struct dma_device dma_dev; struct device *dev; void __iomem *base_addr; + struct clk *ahub_clk; unsigned int nr_channels; unsigned long rx_requests_reserved; unsigned long tx_requests_reserved; @@ -637,8 +637,9 @@ static int tegra_adma_runtime_suspend(struct device *dev) struct tegra_adma *tdma = dev_get_drvdata(dev); tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD); + clk_disable_unprepare(tdma->ahub_clk); - return pm_clk_suspend(dev); + return 0; } static int tegra_adma_runtime_resume(struct device *dev) @@ -646,10 +647,11 @@ static int tegra_adma_runtime_resume(struct device *dev) struct tegra_adma *tdma = dev_get_drvdata(dev); int ret; - ret = pm_clk_resume(dev); - if (ret) + ret = clk_prepare_enable(tdma->ahub_clk); + if (ret) { + dev_err(dev, "ahub clk_enable failed: %d\n", ret); return ret; - + } tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd); return 0; @@ -693,13 +695,11 @@ static int tegra_adma_probe(struct platform_device *pdev) if (IS_ERR(tdma->base_addr)) return PTR_ERR(tdma->base_addr); - ret = pm_clk_create(&pdev->dev); - if (ret) - return ret; - - ret = of_pm_clk_add_clk(&pdev->dev, "d_audio"); - if (ret) - goto clk_destroy; + tdma->ahub_clk = devm_clk_get(&pdev->dev, "d_audio"); + if (IS_ERR(tdma->ahub_clk)) { + dev_err(&pdev->dev, "Error: Missing ahub controller clock\n"); + return PTR_ERR(tdma->ahub_clk); + } pm_runtime_enable(&pdev->dev); @@ -776,8 +776,6 @@ rpm_put: pm_runtime_put_sync(&pdev->dev); rpm_disable: pm_runtime_disable(&pdev->dev); -clk_destroy: - pm_clk_destroy(&pdev->dev); return ret; } @@ -794,7 +792,6 @@ static 
int tegra_adma_remove(struct platform_device *pdev) pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - pm_clk_destroy(&pdev->dev); return 0; } -- cgit v1.2.3 From 74fca241e6a0c0a4c33d84f64708e4f88ee73402 Mon Sep 17 00:00:00 2001 From: Sameer Pujar Date: Wed, 13 Mar 2019 17:02:37 +0530 Subject: dmaengine: tegra210-adma: update system sleep callbacks If the driver is active till late suspend, where runtime PM cannot run, force suspend is essential in such case to put the device in low power state. Thus pm_runtime_force_suspend and pm_runtime_force_resume are used as system sleep callbacks during system wide PM transitions. Late system sleep callbacks are used to ensure, for instance, that the sound core has suspended any on-going activity, including stopping the ADMA if active, before we attempt to suspend the ADMA. Suggested-by: Jonathan Hunter Signed-off-by: Sameer Pujar Acked-by: Jon Hunter Signed-off-by: Vinod Koul --- drivers/dma/tegra210-adma.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index 650cd9c6974e..253d3128fec3 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -796,17 +796,11 @@ static int tegra_adma_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int tegra_adma_pm_suspend(struct device *dev) -{ - return pm_runtime_suspended(dev) == false; -} -#endif - static const struct dev_pm_ops tegra_adma_dev_pm_ops = { SET_RUNTIME_PM_OPS(tegra_adma_runtime_suspend, tegra_adma_runtime_resume, NULL) - SET_SYSTEM_SLEEP_PM_OPS(tegra_adma_pm_suspend, NULL) + SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) }; static struct platform_driver tegra_admac_driver = { -- cgit v1.2.3 From b45aef3aef63610e75fa67c7ae8b838304bdad3e Mon Sep 17 00:00:00 2001 From: Katsuhiro Suzuki Date: Sun, 17 Mar 2019 19:03:06 +0900 Subject: dmaengine: pl330: introduce debugfs interface This patch adds debugfs interface to show the relationship between DMA threads (hardware resource for transferring data) and DMA channel ID of DMA slave. Typically, PL330 has many slaves than number of DMA threads. So sometimes PL330 cannot allocate DMA threads for all slaves even if a user specify DMA channel ID in devicetree. This interface will be useful for checking that DMA threads are allocated or not. Below is an output sample: $ sudo cat /sys/kernel/debug/ff1f0000.dmac PL330 physical channels: THREAD: CHANNEL: -------- ----- 0 8 1 9 2 11 3 12 4 14 5 15 6 10 7 -- Signed-off-by: Katsuhiro Suzuki Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index eec79fdf27a5..c72f6fd79c43 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -11,6 +11,7 @@ * (at your option) any later version. 
*/ +#include #include #include #include @@ -2896,6 +2897,55 @@ static irqreturn_t pl330_irq_handler(int irq, void *data) BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) +#ifdef CONFIG_DEBUG_FS +static int pl330_debugfs_show(struct seq_file *s, void *data) +{ + struct pl330_dmac *pl330 = s->private; + int chans, pchs, ch, pr; + + chans = pl330->pcfg.num_chan; + pchs = pl330->num_peripherals; + + seq_puts(s, "PL330 physical channels:\n"); + seq_puts(s, "THREAD:\t\tCHANNEL:\n"); + seq_puts(s, "--------\t-----\n"); + for (ch = 0; ch < chans; ch++) { + struct pl330_thread *thrd = &pl330->channels[ch]; + int found = -1; + + for (pr = 0; pr < pchs; pr++) { + struct dma_pl330_chan *pch = &pl330->peripherals[pr]; + + if (!pch->thread || thrd->id != pch->thread->id) + continue; + + found = pr; + } + + seq_printf(s, "%d\t\t", thrd->id); + if (found == -1) + seq_puts(s, "--\n"); + else + seq_printf(s, "%d\n", found); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(pl330_debugfs); + +static inline void init_pl330_debugfs(struct pl330_dmac *pl330) +{ + debugfs_create_file(dev_name(pl330->ddma.dev), + S_IFREG | 0444, NULL, pl330, + &pl330_debugfs_fops); +} +#else +static inline void init_pl330_debugfs(struct pl330_dmac *pl330) +{ +} +#endif + /* * Runtime PM callbacks are provided by amba/bus.c driver. * @@ -3082,6 +3132,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) dev_err(&adev->dev, "unable to set the seg size\n"); + init_pl330_debugfs(pl330); dev_info(&adev->dev, "Loaded driver for PL330 DMAC-%x\n", adev->periphid); dev_info(&adev->dev, -- cgit v1.2.3 From 921234e0c5d77b510ccc22d9fcfa844f20de970a Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Fri, 8 Mar 2019 15:02:35 +0200 Subject: dmaengine: axi-dmac: Split too large segments The axi-dmac driver currently rejects transfers with segments that are larger than what the hardware can handle. Re-work the driver so that these large segments are split into multiple segments instead where each segment is smaller or equal to the maximum segment size. This allows the driver to handle transfers with segments of arbitrary size. 
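For illustration, the splitting arithmetic can be shown outside the driver. The stand-alone C sketch below (not driver code; the period_len, max_length and align_mask values are made-up examples) follows the same steps as the new axi_dmac_fill_linear_sg() helper: compute the smallest number of segments that fits under the hardware limit, size them equally, round that size up to the bus alignment, and emit whatever remains as a shorter tail segment.

#include <stdio.h>

/* Round-up division, equivalent to the kernel's DIV_ROUND_UP(). */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int period_len = 10000; /* hypothetical transfer length in bytes */
	unsigned int max_length = 4095;  /* hypothetical controller limit */
	unsigned int align_mask = 3;     /* 4-byte bus alignment */
	unsigned int num_segments, segment_size, len, i = 0;

	/* Split into multiple equally sized segments if necessary. */
	num_segments = DIV_ROUND_UP(period_len, max_length);
	segment_size = DIV_ROUND_UP(period_len, num_segments);
	/* Take care of alignment: round the segment size up to the bus width. */
	segment_size = ((segment_size - 1) | align_mask) + 1;

	for (len = period_len; len > segment_size; len -= segment_size)
		printf("segment %u: %u bytes\n", i++, segment_size);
	printf("segment %u: %u bytes (tail)\n", i, len);

	return 0;
}

With the example values this yields 3336 + 3336 + 3328 bytes: every segment stays below the 4095-byte limit and keeps the 4-byte alignment.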
Signed-off-by: Lars-Peter Clausen Signed-off-by: Bogdan Togorean Signed-off-by: Alexandru Ardelean Signed-off-by: Vinod Koul --- drivers/dma/dma-axi-dmac.c | 81 ++++++++++++++++++++++++++++++++++------------ 1 file changed, 61 insertions(+), 20 deletions(-) diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index ffc0adc2f6ce..0fe3a931d8d5 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -166,7 +166,7 @@ static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan) static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len) { - if (len == 0 || len > chan->max_length) + if (len == 0) return false; if ((len & chan->align_mask) != 0) /* Not aligned */ return false; @@ -379,6 +379,49 @@ static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs) return desc; } +static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan, + enum dma_transfer_direction direction, dma_addr_t addr, + unsigned int num_periods, unsigned int period_len, + struct axi_dmac_sg *sg) +{ + unsigned int num_segments, i; + unsigned int segment_size; + unsigned int len; + + /* Split into multiple equally sized segments if necessary */ + num_segments = DIV_ROUND_UP(period_len, chan->max_length); + segment_size = DIV_ROUND_UP(period_len, num_segments); + /* Take care of alignment */ + segment_size = ((segment_size - 1) | chan->align_mask) + 1; + + for (i = 0; i < num_periods; i++) { + len = period_len; + + while (len > segment_size) { + if (direction == DMA_DEV_TO_MEM) + sg->dest_addr = addr; + else + sg->src_addr = addr; + sg->x_len = segment_size; + sg->y_len = 1; + sg++; + addr += segment_size; + len -= segment_size; + } + + if (direction == DMA_DEV_TO_MEM) + sg->dest_addr = addr; + else + sg->src_addr = addr; + sg->x_len = len; + sg->y_len = 1; + sg++; + addr += len; + } + + return sg; +} + static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg( struct dma_chan *c, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, @@ -386,16 +429,24 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg( { struct axi_dmac_chan *chan = to_axi_dmac_chan(c); struct axi_dmac_desc *desc; + struct axi_dmac_sg *dsg; struct scatterlist *sg; + unsigned int num_sgs; unsigned int i; if (direction != chan->direction) return NULL; - desc = axi_dmac_alloc_desc(sg_len); + num_sgs = 0; + for_each_sg(sgl, sg, sg_len, i) + num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length); + + desc = axi_dmac_alloc_desc(num_sgs); if (!desc) return NULL; + dsg = desc->sg; + for_each_sg(sgl, sg, sg_len, i) { if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) || !axi_dmac_check_len(chan, sg_dma_len(sg))) { @@ -403,12 +454,8 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg( return NULL; } - if (direction == DMA_DEV_TO_MEM) - desc->sg[i].dest_addr = sg_dma_address(sg); - else - desc->sg[i].src_addr = sg_dma_address(sg); - desc->sg[i].x_len = sg_dma_len(sg); - desc->sg[i].y_len = 1; + dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1, + sg_dma_len(sg), dsg); } desc->cyclic = false; @@ -423,7 +470,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic( { struct axi_dmac_chan *chan = to_axi_dmac_chan(c); struct axi_dmac_desc *desc; - unsigned int num_periods, i; + unsigned int num_periods, num_segments; if (direction != chan->direction) return NULL; @@ -436,20 +483,14 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic( return NULL; num_periods = buf_len / period_len; + 
num_segments = DIV_ROUND_UP(period_len, chan->max_length); - desc = axi_dmac_alloc_desc(num_periods); + desc = axi_dmac_alloc_desc(num_periods * num_segments); if (!desc) return NULL; - for (i = 0; i < num_periods; i++) { - if (direction == DMA_DEV_TO_MEM) - desc->sg[i].dest_addr = buf_addr; - else - desc->sg[i].src_addr = buf_addr; - desc->sg[i].x_len = period_len; - desc->sg[i].y_len = 1; - buf_addr += period_len; - } + axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods, + period_len, desc->sg); desc->cyclic = true; @@ -647,7 +688,7 @@ static int axi_dmac_probe(struct platform_device *pdev) of_node_put(of_channels); pdev->dev.dma_parms = &dmac->dma_parms; - dma_set_max_seg_size(&pdev->dev, dmac->chan.max_length); + dma_set_max_seg_size(&pdev->dev, UINT_MAX); dma_dev = &dmac->dma_dev; dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); -- cgit v1.2.3 From 2ff25c1c3279b09b7f4ad1a06be5dc3b55d29027 Mon Sep 17 00:00:00 2001 From: Jean-Nicolas Graux Date: Mon, 4 Mar 2019 16:03:58 +0100 Subject: dmaengine: pl08x: be fair when re-assigning physical channel Current way we find a waiting virtual channel for the next transfer at the time one physical channel becomes free is not really fair. More in details, in case there is more than one channel waiting at a time, by just going through the arrays of memcpy and slave channels and stopping as soon as state match waiting state, channels with high indexes can be penalized. Whenever dma engine is substantially overloaded so that we constantly get several channels waiting, channels with highest indexes might not be served for a substantial time which in the worse case, might hang task that wait for dma transfer to complete. This patch makes physical channel re-assignment more fair by storing time in jiffies when a channel is put in waiting state. Whenever a physical channel has to be re-assigned, this time is used to select channel that is waiting for the longest time. Signed-off-by: Jean-Nicolas Graux Reviewed-by: Linus Walleij Reviewed-by: Nicolas Guion Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index fc8c2bab563c..8cfc753ad4b0 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -254,6 +254,7 @@ enum pl08x_dma_chan_state { * @slave: whether this channel is a device (slave) or for memcpy * @signal: the physical DMA request signal which this channel is using * @mux_use: count of descriptors using this DMA request signal setting + * @waiting_at: time in jiffies when this channel moved to waiting state */ struct pl08x_dma_chan { struct virt_dma_chan vc; @@ -267,6 +268,7 @@ struct pl08x_dma_chan { bool slave; int signal; unsigned mux_use; + unsigned long waiting_at; }; /** @@ -875,6 +877,7 @@ static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan) if (!ch) { dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name); plchan->state = PL08X_CHAN_WAITING; + plchan->waiting_at = jiffies; return; } @@ -913,22 +916,29 @@ static void pl08x_phy_free(struct pl08x_dma_chan *plchan) { struct pl08x_driver_data *pl08x = plchan->host; struct pl08x_dma_chan *p, *next; - + unsigned long waiting_at; retry: next = NULL; + waiting_at = jiffies; - /* Find a waiting virtual channel for the next transfer. */ + /* + * Find a waiting virtual channel for the next transfer. 
+ * To be fair, time when each channel reached waiting state is compared + * to select channel that is waiting for the longest time. + */ list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node) - if (p->state == PL08X_CHAN_WAITING) { + if (p->state == PL08X_CHAN_WAITING && + p->waiting_at <= waiting_at) { next = p; - break; + waiting_at = p->waiting_at; } if (!next && pl08x->has_slave) { list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node) - if (p->state == PL08X_CHAN_WAITING) { + if (p->state == PL08X_CHAN_WAITING && + p->waiting_at <= waiting_at) { next = p; - break; + waiting_at = p->waiting_at; } } -- cgit v1.2.3 From f177a43121949b2c0ff7b0f775949e4cedafd721 Mon Sep 17 00:00:00 2001 From: Jeff Xie Date: Wed, 20 Mar 2019 00:45:19 +0800 Subject: dmaengine: xgene-dma: move spin_lock_bh to spin_lock in tasklet It is unnecessary to call spin_lock_bh in a tasklet. Signed-off-by: Jeff Xie Signed-off-by: Vinod Koul --- drivers/dma/xgene-dma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index eafd6c4b90fe..002323661d07 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -703,7 +703,7 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan) INIT_LIST_HEAD(&ld_completed); - spin_lock_bh(&chan->lock); + spin_lock(&chan->lock); /* Clean already completed and acked descriptors */ xgene_dma_clean_completed_descriptor(chan); @@ -772,7 +772,7 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan) */ xgene_chan_xfer_ld_pending(chan); - spin_unlock_bh(&chan->lock); + spin_unlock(&chan->lock); /* Run the callback for each descriptor, in order */ list_for_each_entry_safe(desc_sw, _desc_sw, &ld_completed, node) { -- cgit v1.2.3 From 23b846396b9501f9500cd8fb033a866f8c01f1a6 Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Thu, 28 Feb 2019 14:39:43 +0200 Subject: dmaengine: axi-dmac: extend support for ZynqMP arch The AXI DMAC driver is currently supported also on the Xilinx ZynqMP architecture. This change allows this driver to be enabled & used on it as well. Signed-off-by: Michael Hennerich Signed-off-by: Alexandru Ardelean Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 0b1dfb5bf2d9..eaf78f4e07ce 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -99,7 +99,7 @@ config AT_XDMAC config AXI_DMAC tristate "Analog Devices AXI-DMAC DMA support" - depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_SOCFPGA || COMPILE_TEST + depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_SOCFPGA || COMPILE_TEST select DMA_ENGINE select DMA_VIRTUAL_CHANNELS help -- cgit v1.2.3 From e2c114c06da2d9ffad5b16690abf008d6696f689 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Wed, 3 Apr 2019 12:23:57 +0200 Subject: dmaengine: at_xdmac: remove BUG_ON macro in tasklet Even if this case shouldn't happen when controller is properly programmed, it's still better to avoid dumping a kernel Oops for this. As the sequence may happen only for debugging purposes, log the error and just finish the tasklet call. 
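The resulting check follows a common defensive pattern for tasklets: validate the state under the channel lock and, when the head descriptor is unexpectedly inactive, log it, drop the lock and return rather than crash the machine. A condensed sketch of that shape is below (surrounding descriptor handling elided; it already uses the plain spin_unlock() that a later patch in this series, "remove a stray bottom half unlock", settles on):

	spin_lock(&atchan->lock);
	desc = list_first_entry(&atchan->xfers_list,
				struct at_xdmac_desc, xfer_node);
	if (!desc->active_xfer) {
		/* Log and bail out instead of BUG_ON(): keep the system alive. */
		dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
		spin_unlock(&atchan->lock);
		return;
	}
	/* ... normal descriptor completion handling continues here ... */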
Signed-off-by: Nicolas Ferre Acked-by: Ludovic Desroches Signed-off-by: Vinod Koul --- drivers/dma/at_xdmac.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index fe69dccfa0c0..37a269420435 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1606,7 +1606,11 @@ static void at_xdmac_tasklet(unsigned long data) struct at_xdmac_desc, xfer_node); dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); - BUG_ON(!desc->active_xfer); + if (!desc->active_xfer) { + dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting"); + spin_unlock_bh(&atchan->lock); + return; + } txd = &desc->tx_dma_desc; -- cgit v1.2.3 From 223a4f4cfe93de2fce47a8f1f719cf4d0da4e3e6 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Wed, 3 Apr 2019 12:23:58 +0200 Subject: dmaengine: at_xdmac: enhance channel errors handling in tasklet Complement the identification of errors with stopping the channel and dumping the descriptor that led to the error case. Signed-off-by: Nicolas Ferre Acked-by: Ludovic Desroches Signed-off-by: Vinod Koul --- drivers/dma/at_xdmac.c | 48 ++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 42 insertions(+), 6 deletions(-) diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 37a269420435..1dd7edaefbdc 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1575,6 +1575,46 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan) dmaengine_desc_get_callback_invoke(txd, NULL); } +static void at_xdmac_handle_error(struct at_xdmac_chan *atchan) +{ + struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); + struct at_xdmac_desc *bad_desc; + + /* + * The descriptor currently at the head of the active list is + * broken. Since we don't have any way to report errors, we'll + * just have to scream loudly and try to continue with other + * descriptors queued (if any). 
+ */ + if (atchan->irq_status & AT_XDMAC_CIS_RBEIS) + dev_err(chan2dev(&atchan->chan), "read bus error!!!"); + if (atchan->irq_status & AT_XDMAC_CIS_WBEIS) + dev_err(chan2dev(&atchan->chan), "write bus error!!!"); + if (atchan->irq_status & AT_XDMAC_CIS_ROIS) + dev_err(chan2dev(&atchan->chan), "request overflow error!!!"); + + spin_lock_bh(&atchan->lock); + + /* Channel must be disabled first as it's not done automatically */ + at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); + while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask) + cpu_relax(); + + bad_desc = list_first_entry(&atchan->xfers_list, + struct at_xdmac_desc, + xfer_node); + + spin_unlock_bh(&atchan->lock); + + /* Print bad descriptor's details if needed */ + dev_dbg(chan2dev(&atchan->chan), + "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", + __func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da, + bad_desc->lld.mbr_ubc); + + /* Then continue with usual descriptor management */ +} + static void at_xdmac_tasklet(unsigned long data) { struct at_xdmac_chan *atchan = (struct at_xdmac_chan *)data; @@ -1594,12 +1634,8 @@ static void at_xdmac_tasklet(unsigned long data) || (atchan->irq_status & error_mask)) { struct dma_async_tx_descriptor *txd; - if (atchan->irq_status & AT_XDMAC_CIS_RBEIS) - dev_err(chan2dev(&atchan->chan), "read bus error!!!"); - if (atchan->irq_status & AT_XDMAC_CIS_WBEIS) - dev_err(chan2dev(&atchan->chan), "write bus error!!!"); - if (atchan->irq_status & AT_XDMAC_CIS_ROIS) - dev_err(chan2dev(&atchan->chan), "request overflow error!!!"); + if (atchan->irq_status & error_mask) + at_xdmac_handle_error(atchan); spin_lock(&atchan->lock); desc = list_first_entry(&atchan->xfers_list, -- cgit v1.2.3 From 38a829a389e33d571a3c599fa762d88b3d1f72c4 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Wed, 3 Apr 2019 12:23:59 +0200 Subject: dmaengine: at_xdmac: only monitor overflow errors for peripheral xfer The overflow error flag (ROI: Request Overflow Error) is only relevant for the case when the channel handles a peripheral synchronized transfer. Not in the case of memory to memory transfer where there is no hardware request signal. Remove the use of this interrupt source in such a case. It's based on the first descriptor which holds the configuration for the whole linked list transfer. Signed-off-by: Nicolas Ferre Acked-by: Ludovic Desroches Signed-off-by: Vinod Koul --- drivers/dma/at_xdmac.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 1dd7edaefbdc..06cbe54e4c30 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -308,6 +308,11 @@ static inline int at_xdmac_csize(u32 maxburst) return csize; }; +static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg) +{ + return cfg & AT_XDMAC_CC_TYPE_PER_TRAN; +} + static inline u8 at_xdmac_get_dwidth(u32 cfg) { return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET; @@ -389,7 +394,13 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan, at_xdmac_chan_read(atchan, AT_XDMAC_CUBC)); at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff); - reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE | AT_XDMAC_CIE_ROIE; + reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE; + /* + * Request Overflow Error is only for peripheral synchronized transfers + */ + if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg)) + reg |= AT_XDMAC_CIE_ROIE; + /* * There is no end of list when doing cyclic dma, we need to get * an interrupt after each periods. 
-- cgit v1.2.3 From 56009f0d2f54e4ce4305d65ce589eee6c22ac25f Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 26 Mar 2019 15:06:28 +0200 Subject: dmaengine: axi-dmac: Infer synthesis configuration parameters hardware Some synthesis time configuration parameters of the DMA controller can be inferred from the hardware itself. Use this information as it is more reliably than the information specified in the devicetree which might be outdated if the HDL project got changed. Deprecate the devicetree properties that can be inferred from the hardware itself. Signed-off-by: Lars-Peter Clausen Signed-off-by: Alexandru Ardelean Signed-off-by: Vinod Koul --- .../devicetree/bindings/dma/adi,axi-dmac.txt | 4 +-- drivers/dma/dma-axi-dmac.c | 32 ++++++++++++++-------- 2 files changed, 22 insertions(+), 14 deletions(-) diff --git a/Documentation/devicetree/bindings/dma/adi,axi-dmac.txt b/Documentation/devicetree/bindings/dma/adi,axi-dmac.txt index 47cb1d14b690..b38ee732efa9 100644 --- a/Documentation/devicetree/bindings/dma/adi,axi-dmac.txt +++ b/Documentation/devicetree/bindings/dma/adi,axi-dmac.txt @@ -18,7 +18,6 @@ Required properties for adi,channels sub-node: Required channel sub-node properties: - reg: Which channel this node refers to. - - adi,length-width: Width of the DMA transfer length register. - adi,source-bus-width, adi,destination-bus-width: Width of the source or destination bus in bits. - adi,source-bus-type, @@ -28,7 +27,8 @@ Required channel sub-node properties: 1 (AXI_DMAC_TYPE_AXI_STREAM): Streaming AXI interface 2 (AXI_DMAC_TYPE_AXI_FIFO): FIFO interface -Optional channel properties: +Deprecated optional channel properties: + - adi,length-width: Width of the DMA transfer length register. - adi,cyclic: Must be set if the channel supports hardware cyclic DMA transfers. - adi,2d: Must be set if the channel supports hardware 2D DMA transfers. 
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index 0fe3a931d8d5..eecb367b4f3e 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -618,15 +618,6 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan, return ret; chan->dest_width = val / 8; - ret = of_property_read_u32(of_chan, "adi,length-width", &val); - if (ret) - return ret; - - if (val >= 32) - chan->max_length = UINT_MAX; - else - chan->max_length = (1ULL << val) - 1; - chan->align_mask = max(chan->dest_width, chan->src_width) - 1; if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan)) @@ -638,12 +629,27 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan, else chan->direction = DMA_DEV_TO_DEV; - chan->hw_cyclic = of_property_read_bool(of_chan, "adi,cyclic"); - chan->hw_2d = of_property_read_bool(of_chan, "adi,2d"); - return 0; } +static void axi_dmac_detect_caps(struct axi_dmac *dmac) +{ + struct axi_dmac_chan *chan = &dmac->chan; + + axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC); + if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC) + chan->hw_cyclic = true; + + axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1); + if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1) + chan->hw_2d = true; + + axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff); + chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH); + if (chan->max_length != UINT_MAX) + chan->max_length++; +} + static int axi_dmac_probe(struct platform_device *pdev) { struct device_node *of_channels, *of_chan; @@ -716,6 +722,8 @@ static int axi_dmac_probe(struct platform_device *pdev) if (ret < 0) return ret; + axi_dmac_detect_caps(dmac); + axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00); ret = dma_async_device_register(dma_dev); -- cgit v1.2.3 From 648865a79d8ee3d1aa64aab5eb2a9d12eeed14f9 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Tue, 26 Mar 2019 16:05:20 +0200 Subject: dmaengine: axi-dmac: Don't check the number of frames for alignment In 2D transfers (for the AXI DMAC), the number of frames (numf) represents Y_LENGTH, and the length of a frame is X_LENGTH. 2D transfers are useful for video transfers where screen resolutions ( X * Y ) are typically aligned for X, but not for Y. There is no requirement for Y_LENGTH to be aligned to the bus-width (or anything), and this is also true for AXI DMAC. Checking the Y_LENGTH for alignment causes false errors when initiating DMA transfers. This change fixes this by checking only that the Y_LENGTH is non-zero. 
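From the client side the consequence is that a video-style interleaved transfer only needs each line (X_LENGTH, sgl[0].size) to respect the bus alignment; the line count (Y_LENGTH, numf) may be any non-zero value. A hedged sketch of such a request is below; it assumes an already-requested channel chan and a DMA-mapped destination address dst, the resolution numbers are arbitrary, and error handling plus template lifetime management are omitted:

	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *txd;

	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->dir = DMA_DEV_TO_MEM;
	xt->dst_start = dst;          /* dma_addr_t of the frame buffer */
	xt->dst_inc = true;
	xt->numf = 1080;              /* lines per frame: no alignment requirement */
	xt->frame_size = 1;           /* one data_chunk describes a whole line */
	xt->sgl[0].size = 1920 * 2;   /* bytes per line: must be bus aligned */
	xt->sgl[0].icg = 0;

	txd = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);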
Fixes: 0e3b67b348b8 ("dmaengine: Add support for the Analog Devices AXI-DMAC DMA controller") Signed-off-by: Alexandru Ardelean Signed-off-by: Vinod Koul --- drivers/dma/dma-axi-dmac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index eecb367b4f3e..cdbef59a9542 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -526,7 +526,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved( if (chan->hw_2d) { if (!axi_dmac_check_len(chan, xt->sgl[0].size) || - !axi_dmac_check_len(chan, xt->numf)) + xt->numf == 0) return NULL; if (xt->sgl[0].size + dst_icg > chan->max_length || xt->sgl[0].size + src_icg > chan->max_length) -- cgit v1.2.3 From 9a05045d2a681d36c80aafc08d9d0b63d4ddbc66 Mon Sep 17 00:00:00 2001 From: Dragos Bogdan Date: Tue, 26 Mar 2019 16:07:56 +0200 Subject: dmaengine: axi-dmac: Enable DMA_INTERLEAVE capability Since device_prep_interleaved_dma() is already implemented, the DMA_INTERLEAVE capability should be set. Signed-off-by: Dragos Bogdan Signed-off-by: Alexandru Ardelean Signed-off-by: Vinod Koul --- drivers/dma/dma-axi-dmac.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index cdbef59a9542..f32fdf21edbd 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -699,6 +699,7 @@ static int axi_dmac_probe(struct platform_device *pdev) dma_dev = &dmac->dma_dev; dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask); + dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask); dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources; dma_dev->device_tx_status = dma_cookie_status; dma_dev->device_issue_pending = axi_dmac_issue_pending; -- cgit v1.2.3 From 2da254cc7908105a60a6bb219d18e8dced03dcb9 Mon Sep 17 00:00:00 2001 From: Sugar Zhang Date: Wed, 3 Apr 2019 19:06:22 +0800 Subject: dmaengine: pl330: _stop: clear interrupt status This patch kill instructs the DMAC to immediately terminate execution of a thread. and then clear the interrupt status, at last, stop generating interrupts for DMA_SEV. to guarantee the next dma start is clean. otherwise, one interrupt maybe leave to next start and make some mistake. we can reporduce the problem as follows: DMASEV: modify the event-interrupt resource, and if the INTEN sets function as interrupt, the DMAC will set irq HIGH to generate interrupt. write INTCLR to clear interrupt. DMA EXECUTING INSTRUCTS DMA TERMINATE | | | | ... _stop | | | spin_lock_irqsave DMASEV | | | | mask INTEN | | | DMAKILL | | | spin_unlock_irqrestore in above case, a interrupt was left, and if we unmask INTEN, the DMAC will set irq HIGH to generate interrupt. to fix this, do as follows: DMA EXECUTING INSTRUCTS DMA TERMINATE | | | | ... 
_stop | | | spin_lock_irqsave DMASEV | | | | DMAKILL | | | clear INTCLR | mask INTEN | | | spin_unlock_irqrestore Signed-off-by: Sugar Zhang Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index c72f6fd79c43..6e6837214210 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -967,6 +967,7 @@ static void _stop(struct pl330_thread *thrd) { void __iomem *regs = thrd->dmac->base; u8 insn[6] = {0, 0, 0, 0, 0, 0}; + u32 inten = readl(regs + INTEN); if (_state(thrd) == PL330_STATE_FAULT_COMPLETING) UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING); @@ -979,10 +980,13 @@ static void _stop(struct pl330_thread *thrd) _emit_KILL(0, insn); - /* Stop generating interrupts for SEV */ - writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN); - _execute_DBGINSN(thrd, insn, is_manager(thrd)); + + /* clear the event */ + if (inten & (1 << thrd->ev)) + writel(1 << thrd->ev, regs + INTCLR); + /* Stop generating interrupts for SEV */ + writel(inten & ~(1 << thrd->ev), regs + INTEN); } /* Start doing req 'idx' of thread 'thrd' */ -- cgit v1.2.3 From c7266d26dc12bc056c0a7ffdbd4f4df41a8c25da Mon Sep 17 00:00:00 2001 From: Michal Suchanek Date: Thu, 4 Apr 2019 19:25:03 +0200 Subject: dmaengine: bcm2835: Drop duplicate capability setting. Signed-off-by: Michal Suchanek Acked-by: Stefan Wahren Signed-off-by: Vinod Koul --- drivers/dma/bcm2835-dma.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index ec8a291d62ba..e38b19dd2895 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -891,7 +891,6 @@ static int bcm2835_dma_probe(struct platform_device *pdev) dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask); dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); - dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask); od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources; od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources; -- cgit v1.2.3 From ffcfc20f7489ecc1b5fbde68dae14f72c47330ab Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 9 Apr 2019 18:02:19 +0300 Subject: dmaengine: idma64: Move driver name to the header There are two drivers that are relying on the iDMA 64-bit driver name to match. Instead of duplicating string in both of them, dedicate a header file and share it between users. 
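The reason the string must match at all is platform-bus binding: intel-lpss creates the DMA engine as an MFD child cell, and a cell is bound to a platform driver by name. Below is a minimal sketch of the two sides sharing the new header; the cell initialiser is a simplified stand-in for the one in intel-lpss.c and omits its other fields:

#include <linux/mfd/core.h>
#include <linux/platform_device.h>
#include <linux/dma/idma64.h>	/* provides: #define LPSS_IDMA64_DRIVER_NAME "idma64" */

/* MFD side: the child device is created with this name ... */
static const struct mfd_cell intel_lpss_idma64_cell = {
	.name = LPSS_IDMA64_DRIVER_NAME,
};

/* ... DMA side: the platform driver binds because its name is the same string. */
static struct platform_driver idma64_platform_driver = {
	.driver = {
		.name = LPSS_IDMA64_DRIVER_NAME,
	},
};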
Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/idma64.c | 9 ++++----- drivers/mfd/intel-lpss.c | 4 ++-- include/linux/dma/idma64.h | 14 ++++++++++++++ 3 files changed, 20 insertions(+), 7 deletions(-) create mode 100644 include/linux/dma/idma64.h diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index 83796a33dc16..07fd4f25cdd8 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c @@ -19,10 +19,9 @@ #include #include -#include "idma64.h" +#include -/* Platform driver name */ -#define DRV_NAME "idma64" +#include "idma64.h" /* For now we support only two channels */ #define IDMA64_NR_CHAN 2 @@ -699,7 +698,7 @@ static struct platform_driver idma64_platform_driver = { .probe = idma64_platform_probe, .remove = idma64_platform_remove, .driver = { - .name = DRV_NAME, + .name = LPSS_IDMA64_DRIVER_NAME, .pm = &idma64_dev_pm_ops, }, }; @@ -709,4 +708,4 @@ module_platform_driver(idma64_platform_driver); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("iDMA64 core driver"); MODULE_AUTHOR("Andy Shevchenko "); -MODULE_ALIAS("platform:" DRV_NAME); +MODULE_ALIAS("platform:" LPSS_IDMA64_DRIVER_NAME); diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c index 50bffc3382d7..45221e092ecf 100644 --- a/drivers/mfd/intel-lpss.c +++ b/drivers/mfd/intel-lpss.c @@ -28,6 +28,8 @@ #include #include +#include + #include "intel-lpss.h" #define LPSS_DEV_OFFSET 0x000 @@ -96,8 +98,6 @@ static const struct resource intel_lpss_idma64_resources[] = { DEFINE_RES_IRQ(0), }; -#define LPSS_IDMA64_DRIVER_NAME "idma64" - /* * Cells needs to be ordered so that the iDMA is created first. This is * because we need to be sure the DMA is available when the host controller diff --git a/include/linux/dma/idma64.h b/include/linux/dma/idma64.h new file mode 100644 index 000000000000..621cfae60554 --- /dev/null +++ b/include/linux/dma/idma64.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Definitions for the Intel integrated DMA 64-bit + * + * Copyright (C) 2019 Intel Corporation + */ + +#ifndef __LINUX_DMA_IDMA64_H__ +#define __LINUX_DMA_IDMA64_H__ + +/* Platform driver name */ +#define LPSS_IDMA64_DRIVER_NAME "idma64" + +#endif /* __LINUX_DMA_IDMA64_H__ */ -- cgit v1.2.3 From 9e1630b809ec2a6bf3248441563669fcd1e02646 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 15 Apr 2019 16:30:28 +0100 Subject: dmaengine: xgene-dma: fix spelling mistake "descripto" -> "descriptor" There is a spelling mistake in a chan_dbg message, fix it. Signed-off-by: Colin Ian King Signed-off-by: Vinod Koul --- drivers/dma/xgene-dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index 002323661d07..8d174dc5dccd 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -797,7 +797,7 @@ static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan) return -ENOMEM; } - chan_dbg(chan, "Allocate descripto pool\n"); + chan_dbg(chan, "Allocate descriptor pool\n"); return 1; } -- cgit v1.2.3 From 941acd566b1807b291bbdca31cc5158e26ffcf83 Mon Sep 17 00:00:00 2001 From: "Angus Ainslie (Purism)" Date: Fri, 29 Mar 2019 08:21:29 -0700 Subject: dmaengine: imx-sdma: Only check ratio on parts that support 1:1 On imx8mq B0 chip, AHB/SDMA clock ratio 2:1 can't be supported, since SDMA clock ratio has to be increased to 250Mhz, AHB can't reach to 500Mhz, so use 1:1 instead. To limit this change to the imx8mq for now this patch also adds an im8mq-sdma compatible string. 
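The gating uses the usual per-SoC match-data pattern: each compatible string carries a pointer to a struct sdma_driver_data, and the new check_ratio flag is consulted before the 1:1 clock-ratio path is taken. Condensed from the diff below (existing entries and surrounding structures elided):

static struct sdma_driver_data sdma_imx8mq = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx7d,
	.check_ratio = 1,	/* only the i.MX8MQ opts in for now */
};

static const struct of_device_id sdma_dt_ids[] = {
	{ .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, },
	/* ... existing i.MX25 through i.MX7D entries ... */
	{ /* sentinel */ }
};

/* In sdma_init(): only parts that declare check_ratio use the 1:1 fast path. */
if (sdma->drvdata->check_ratio &&
    clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg))
	sdma->clk_ratio = 1;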
Signed-off-by: Angus Ainslie (Purism) Acked-by: Robin Gong Signed-off-by: Vinod Koul --- drivers/dma/imx-sdma.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 5f3c1378b90e..99d9f431ae2c 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -419,6 +419,7 @@ struct sdma_driver_data { int chnenbl0; int num_events; struct sdma_script_start_addrs *script_addrs; + bool check_ratio; }; struct sdma_engine { @@ -557,6 +558,13 @@ static struct sdma_driver_data sdma_imx7d = { .script_addrs = &sdma_script_imx7d, }; +static struct sdma_driver_data sdma_imx8mq = { + .chnenbl0 = SDMA_CHNENBL0_IMX35, + .num_events = 48, + .script_addrs = &sdma_script_imx7d, + .check_ratio = 1, +}; + static const struct platform_device_id sdma_devtypes[] = { { .name = "imx25-sdma", @@ -579,6 +587,9 @@ static const struct platform_device_id sdma_devtypes[] = { }, { .name = "imx7d-sdma", .driver_data = (unsigned long)&sdma_imx7d, + }, { + .name = "imx8mq-sdma", + .driver_data = (unsigned long)&sdma_imx8mq, }, { /* sentinel */ } @@ -593,6 +604,7 @@ static const struct of_device_id sdma_dt_ids[] = { { .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, }, { .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, }, { .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, }, + { .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sdma_dt_ids); @@ -1852,7 +1864,8 @@ static int sdma_init(struct sdma_engine *sdma) if (ret) goto disable_clk_ipg; - if (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg)) + if (sdma->drvdata->check_ratio && + (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg))) sdma->clk_ratio = 1; /* Be sure SDMA has not started yet */ -- cgit v1.2.3 From 8a6061c34a54a997db1ded6d89b7db6cbe3f359e Mon Sep 17 00:00:00 2001 From: Hiroyuki Yokoyama Date: Wed, 10 Apr 2019 20:26:57 +0200 Subject: dmaengine: rcar-dmac: Update copyright information MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update copyright and string for Gen3. Signed-off-by: Hiroyuki Yokoyama Signed-off-by: Niklas Söderlund Reviewed-by: Simon Horman Signed-off-by: Vinod Koul --- drivers/dma/sh/rcar-dmac.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 2b4f25698169..580ca1454fe8 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Renesas R-Car Gen2 DMA Controller Driver + * Renesas R-Car Gen2/Gen3 DMA Controller Driver * - * Copyright (C) 2014 Renesas Electronics Inc. + * Copyright (C) 2014-2019 Renesas Electronics Inc. * * Author: Laurent Pinchart */ -- cgit v1.2.3 From f4fd2ec08f17b34f1c7c18414d5fc882efd51e83 Mon Sep 17 00:00:00 2001 From: Fabien Dessenne Date: Wed, 24 Apr 2019 11:21:25 +0200 Subject: dmaengine: stm32-dma: use platform_get_irq() platform_get_resource(pdev, IORESOURCE_IRQ) is not recommended for requesting IRQ's resources, as they can be not ready yet. Using platform_get_irq() instead is preferred for getting IRQ even if it was not retrieved earlier. 
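Together with the signedness follow-up later in this series, the per-channel IRQ lookup ends up in the canonical shape sketched below: a signed variable receives the platform_get_irq() return value, -EPROBE_DEFER is passed on without a noisy error message, and only a validated number is stored in chan->irq. This is a condensed sketch of that end state, not an exact copy of either diff:

	ret = platform_get_irq(pdev, i);
	if (ret < 0) {
		/* -EPROBE_DEFER just means "try again later", so keep quiet. */
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
		goto err_unregister;
	}
	chan->irq = ret;

	ret = devm_request_irq(&pdev->dev, chan->irq, stm32_dma_chan_irq, 0,
			       dev_name(chan2dev(chan)), chan);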
Signed-off-by: Fabien Dessenne Reviewed-by: Pierre-Yves MORDRET Signed-off-by: Vinod Koul --- drivers/dma/stm32-dma.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index ba239b529fa9..33068185c0fe 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -1302,13 +1302,14 @@ static int stm32_dma_probe(struct platform_device *pdev) for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) { chan = &dmadev->chan[i]; - res = platform_get_resource(pdev, IORESOURCE_IRQ, i); - if (!res) { - ret = -EINVAL; - dev_err(&pdev->dev, "No irq resource for chan %d\n", i); + chan->irq = platform_get_irq(pdev, i); + if (chan->irq < 0) { + ret = chan->irq; + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, + "No irq resource for chan %d\n", i); goto err_unregister; } - chan->irq = res->start; ret = devm_request_irq(&pdev->dev, chan->irq, stm32_dma_chan_irq, 0, dev_name(chan2dev(chan)), chan); -- cgit v1.2.3 From c6504be53972cd57326196eb2a18b2b182c26c5c Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 26 Apr 2019 22:30:27 +0530 Subject: dmaengine: stm32-dma: Fix unsigned variable compared with zero Commit f4fd2ec08f17: ("dmaengine: stm32-dma: use platform_get_irq()") used unsigned variable irq to store the results and check later for negative errors, so update the code to use signed variable for this Fixes: f4fd2ec08f17 ("dmaengine: stm32-dma: use platform_get_irq()") Reported-by: kbuild test robot Reported-by: Julia Lawall Acked-by: Julia Lawall Signed-off-by: Vinod Koul --- drivers/dma/stm32-dma.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 33068185c0fe..dde796686736 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -1303,13 +1303,15 @@ static int stm32_dma_probe(struct platform_device *pdev) for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) { chan = &dmadev->chan[i]; chan->irq = platform_get_irq(pdev, i); - if (chan->irq < 0) { - ret = chan->irq; + ret = platform_get_irq(pdev, i); + if (ret < 0) { if (ret != -EPROBE_DEFER) dev_err(&pdev->dev, "No irq resource for chan %d\n", i); goto err_unregister; } + chan->irq = ret; + ret = devm_request_irq(&pdev->dev, chan->irq, stm32_dma_chan_irq, 0, dev_name(chan2dev(chan)), chan); -- cgit v1.2.3 From 95d47fb71d185ccd6b0f97397cfb1569f4761ba1 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Sun, 28 Apr 2019 12:00:24 +0800 Subject: dmaengine: bcm-sba-raid: Use dev_get_drvdata() Using dev_get_drvdata directly. Cc: Vinod Koul Cc: dmaengine@vger.kernel.org Signed-off-by: Kefeng Wang Signed-off-by: Vinod Koul --- drivers/dma/bcm-sba-raid.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c index 72878ac5c78d..fa81d0177765 100644 --- a/drivers/dma/bcm-sba-raid.c +++ b/drivers/dma/bcm-sba-raid.c @@ -1459,8 +1459,7 @@ static void sba_receive_message(struct mbox_client *cl, void *msg) static int sba_debugfs_stats_show(struct seq_file *file, void *offset) { - struct platform_device *pdev = to_platform_device(file->private); - struct sba_device *sba = platform_get_drvdata(pdev); + struct sba_device *sba = dev_get_drvdata(file->private); /* Write stats in file */ sba_write_stats_in_seqfile(sba, file); -- cgit v1.2.3 From 66c30aa679172df624cd68b0e1cfe757f6d1bc72 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Sun, 28 Apr 2019 12:00:25 +0800 Subject: dmaengine: nbpfaxi: Use dev_get_drvdata() Using dev_get_drvdata directly. 
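Both of these cleanups rely on platform_set_drvdata() being a thin wrapper that stores the pointer on the embedded struct device, so dev_get_drvdata() hands back the very same value without the detour through to_platform_device(). The resulting runtime-suspend callback, condensed from the diff below:

static int nbpf_runtime_suspend(struct device *dev)
{
	/* Same pointer as platform_get_drvdata(to_platform_device(dev)). */
	struct nbpf_device *nbpf = dev_get_drvdata(dev);

	clk_disable_unprepare(nbpf->clk);
	return 0;
}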
Cc: Vinod Koul Cc: dmaengine@vger.kernel.org Signed-off-by: Kefeng Wang Signed-off-by: Vinod Koul --- drivers/dma/nbpfaxi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index a67b292190f4..594409a6e975 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c @@ -1491,14 +1491,14 @@ MODULE_DEVICE_TABLE(platform, nbpf_ids); #ifdef CONFIG_PM static int nbpf_runtime_suspend(struct device *dev) { - struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev)); + struct nbpf_device *nbpf = dev_get_drvdata(dev); clk_disable_unprepare(nbpf->clk); return 0; } static int nbpf_runtime_resume(struct device *dev) { - struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev)); + struct nbpf_device *nbpf = dev_get_drvdata(dev); return clk_prepare_enable(nbpf->clk); } #endif -- cgit v1.2.3 From 2a4885abf5fbc2dde4be91a11b00cbce2c923f47 Mon Sep 17 00:00:00 2001 From: Arnaud Pouliquen Date: Thu, 2 May 2019 11:28:42 +0200 Subject: dmaengine: stm32-dma: fix residue calculation in stm32-dma In double buffer mode, during residue calculation, the DMA can automatically switch to the next transfer. Indeed the CT bit that gives position in the double buffer can has been updated by the hardware, during calculation. In this case the SxNDTR register value can not be trusted. If a transition is detected we consider that the DMA has switched to the beginning of next sg. Signed-off-by: Arnaud Pouliquen Signed-off-by: Pierre-Yves MORDRET Signed-off-by: Vinod Koul --- drivers/dma/stm32-dma.c | 90 ++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 77 insertions(+), 13 deletions(-) diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index dde796686736..88d9c6c4389f 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -1042,33 +1042,97 @@ static u32 stm32_dma_get_remaining_bytes(struct stm32_dma_chan *chan) return ndtr << width; } +/** + * stm32_dma_is_current_sg - check that expected sg_req is currently transferred + * @chan: dma channel + * + * This function called when IRQ are disable, checks that the hardware has not + * switched on the next transfer in double buffer mode. The test is done by + * comparing the next_sg memory address with the hardware related register + * (based on CT bit value). + * + * Returns true if expected current transfer is still running or double + * buffer mode is not activated. 
+ */ +static bool stm32_dma_is_current_sg(struct stm32_dma_chan *chan) +{ + struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); + struct stm32_dma_sg_req *sg_req; + u32 dma_scr, dma_smar, id; + + id = chan->id; + dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id)); + + if (!(dma_scr & STM32_DMA_SCR_DBM)) + return true; + + sg_req = &chan->desc->sg_req[chan->next_sg]; + + if (dma_scr & STM32_DMA_SCR_CT) { + dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)); + return (dma_smar == sg_req->chan_reg.dma_sm0ar); + } + + dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)); + + return (dma_smar == sg_req->chan_reg.dma_sm1ar); +} + static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan, struct stm32_dma_desc *desc, u32 next_sg) { u32 modulo, burst_size; - u32 residue = 0; + u32 residue; + u32 n_sg = next_sg; + struct stm32_dma_sg_req *sg_req = &chan->desc->sg_req[chan->next_sg]; int i; /* - * In cyclic mode, for the last period, residue = remaining bytes from - * NDTR + * Calculate the residue means compute the descriptors + * information: + * - the sg_req currently transferred + * - the Hardware remaining position in this sg (NDTR bits field). + * + * A race condition may occur if DMA is running in cyclic or double + * buffer mode, since the DMA register are automatically reloaded at end + * of period transfer. The hardware may have switched to the next + * transfer (CT bit updated) just before the position (SxNDTR reg) is + * read. + * In this case the SxNDTR reg could (or not) correspond to the new + * transfer position, and not the expected one. + * The strategy implemented in the stm32 driver is to: + * - read the SxNDTR register + * - crosscheck that hardware is still in current transfer. + * In case of switch, we can assume that the DMA is at the beginning of + * the next transfer. So we approximate the residue in consequence, by + * pointing on the beginning of next transfer. + * + * This race condition doesn't apply for none cyclic mode, as double + * buffer is not used. In such situation registers are updated by the + * software. */ - if (chan->desc->cyclic && next_sg == 0) { - residue = stm32_dma_get_remaining_bytes(chan); - goto end; + + residue = stm32_dma_get_remaining_bytes(chan); + + if (!stm32_dma_is_current_sg(chan)) { + n_sg++; + if (n_sg == chan->desc->num_sgs) + n_sg = 0; + residue = sg_req->len; } /* - * For all other periods in cyclic mode, and in sg mode, - * residue = remaining bytes from NDTR + remaining periods/sg to be - * transferred + * In cyclic mode, for the last period, residue = remaining bytes + * from NDTR, + * else for all other periods in cyclic mode, and in sg mode, + * residue = remaining bytes from NDTR + remaining + * periods/sg to be transferred */ - for (i = next_sg; i < desc->num_sgs; i++) - residue += desc->sg_req[i].len; - residue += stm32_dma_get_remaining_bytes(chan); + if (!chan->desc->cyclic || n_sg != 0) + for (i = n_sg; i < desc->num_sgs; i++) + residue += desc->sg_req[i].len; -end: if (!chan->mem_burst) return residue; -- cgit v1.2.3 From 32685552fdc8a28014f9159a05587e8049fead70 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Sat, 4 May 2019 11:52:23 +0200 Subject: dmaengine: fsl-edma: Fix typo in Vybrid name Fix typo in comment for Vybrid SoC family. 
Signed-off-by: Krzysztof Kozlowski Signed-off-by: Vinod Koul --- drivers/dma/fsl-edma-common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h index b435d8e1e3a1..c53f76eeb4d3 100644 --- a/drivers/dma/fsl-edma-common.h +++ b/drivers/dma/fsl-edma-common.h @@ -136,7 +136,7 @@ struct fsl_edma_desc { }; enum edma_version { - v1, /* 32ch, Vybdir, mpc57x, etc */ + v1, /* 32ch, Vybrid, mpc57x, etc */ v2, /* 64ch Coldfire */ }; -- cgit v1.2.3 From e095189a54102d53daa07c528fefd4f4d65c8b04 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Sat, 4 May 2019 11:52:25 +0200 Subject: dmaengine: fsl-edma: Adjust indentation Fix indentation and remove unneeded space after 'return' keyword. This fixes checkpatch warning: WARNING: Statements should start on a tabstop Signed-off-by: Krzysztof Kozlowski Signed-off-by: Vinod Koul --- drivers/dma/fsl-edma.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index 75e8a7ba3a22..d641ef85a634 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -144,21 +144,21 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma fsl_edma_irq_handler, 0, "eDMA", fsl_edma); if (ret) { dev_err(&pdev->dev, "Can't register eDMA IRQ.\n"); - return ret; + return ret; } } else { ret = devm_request_irq(&pdev->dev, fsl_edma->txirq, fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma); if (ret) { dev_err(&pdev->dev, "Can't register eDMA tx IRQ.\n"); - return ret; + return ret; } ret = devm_request_irq(&pdev->dev, fsl_edma->errirq, fsl_edma_err_handler, 0, "eDMA err", fsl_edma); if (ret) { dev_err(&pdev->dev, "Can't register eDMA err IRQ.\n"); - return ret; + return ret; } } -- cgit v1.2.3 From 0b515abb6b7eb08e90bdfc01fc8fbdd112c15d81 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 3 May 2019 16:15:07 +0300 Subject: dmaengine: at_xdmac: remove a stray bottom half unlock We switched this code from spin_lock_bh() to vanilla spin_lock() but there was one stray spin_unlock_bh() that was overlooked. This patch converts it to spin_unlock() as well. Fixes: d8570d018f69 ("dmaengine: at_xdmac: move spin_lock_bh to spin_lock in tasklet") Signed-off-by: Dan Carpenter Signed-off-by: Vinod Koul --- drivers/dma/at_xdmac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 06cbe54e4c30..e4ae2ee46d3f 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1655,7 +1655,7 @@ static void at_xdmac_tasklet(unsigned long data) dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); if (!desc->active_xfer) { dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting"); - spin_unlock_bh(&atchan->lock); + spin_unlock(&atchan->lock); return; } -- cgit v1.2.3 From ded1f3db4cd64bcd8f2b4f89aa66164a142b2895 Mon Sep 17 00:00:00 2001 From: Sameer Pujar Date: Thu, 2 May 2019 18:25:12 +0530 Subject: dmaengine: tegra210-adma: prepare for supporting newer Tegra chips This is a preparatory patch to add support for Tegra186 and Tegra194 chips. Following changes are necessary to make driver code generic. 
* chip_data structure is enhanced to have chip specific details and following are the additions to the structure * Offset addresses for ADMA global and channel registers * Offset values for Tx and Rx channel selection * Maximum supported Tx and Rx channels * Tx and Rx channel request mask * ADMA channel register space size * Make use of above chip_data to generalise the driver code Support for Tegra186 and Tegra194 will be added in subsequent patches of the series. Signed-off-by: Sameer Pujar Reviewed-by: Jon Hunter Signed-off-by: Vinod Koul --- drivers/dma/tegra210-adma.c | 91 ++++++++++++++++++++++++++++----------------- 1 file changed, 57 insertions(+), 34 deletions(-) diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index 253d3128fec3..9aee015609bd 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -36,10 +36,6 @@ #define ADMA_CH_INT_CLEAR 0x1c #define ADMA_CH_CTRL 0x24 -#define ADMA_CH_CTRL_TX_REQ(val) (((val) & 0xf) << 28) -#define ADMA_CH_CTRL_TX_REQ_MAX 10 -#define ADMA_CH_CTRL_RX_REQ(val) (((val) & 0xf) << 24) -#define ADMA_CH_CTRL_RX_REQ_MAX 10 #define ADMA_CH_CTRL_DIR(val) (((val) & 0xf) << 12) #define ADMA_CH_CTRL_DIR_AHUB2MEM 2 #define ADMA_CH_CTRL_DIR_MEM2AHUB 4 @@ -57,8 +53,8 @@ #define ADMA_CH_FIFO_CTRL 0x2c #define ADMA_CH_FIFO_CTRL_OVRFW_THRES(val) (((val) & 0xf) << 24) #define ADMA_CH_FIFO_CTRL_STARV_THRES(val) (((val) & 0xf) << 16) -#define ADMA_CH_FIFO_CTRL_TX_SIZE(val) (((val) & 0xf) << 8) -#define ADMA_CH_FIFO_CTRL_RX_SIZE(val) ((val) & 0xf) +#define ADMA_CH_FIFO_CTRL_TX_FIFO_SIZE_SHIFT 8 +#define ADMA_CH_FIFO_CTRL_RX_FIFO_SIZE_SHIFT 0 #define ADMA_CH_LOWER_SRC_ADDR 0x34 #define ADMA_CH_LOWER_TRG_ADDR 0x3c @@ -68,25 +64,38 @@ #define ADMA_CH_XFER_STATUS 0x54 #define ADMA_CH_XFER_STATUS_COUNT_MASK 0xffff -#define ADMA_GLOBAL_CMD 0xc00 -#define ADMA_GLOBAL_SOFT_RESET 0xc04 -#define ADMA_GLOBAL_INT_CLEAR 0xc20 -#define ADMA_GLOBAL_CTRL 0xc24 - -#define ADMA_CH_REG_OFFSET(a) (a * 0x80) +#define ADMA_GLOBAL_CMD 0x00 +#define ADMA_GLOBAL_SOFT_RESET 0x04 #define ADMA_CH_FIFO_CTRL_DEFAULT (ADMA_CH_FIFO_CTRL_OVRFW_THRES(1) | \ - ADMA_CH_FIFO_CTRL_STARV_THRES(1) | \ - ADMA_CH_FIFO_CTRL_TX_SIZE(3) | \ - ADMA_CH_FIFO_CTRL_RX_SIZE(3)) + ADMA_CH_FIFO_CTRL_STARV_THRES(1)) + +#define ADMA_CH_REG_FIELD_VAL(val, mask, shift) (((val) & mask) << shift) + struct tegra_adma; /* * struct tegra_adma_chip_data - Tegra chip specific data + * @global_reg_offset: Register offset of DMA global register. + * @global_int_clear: Register offset of DMA global interrupt clear. + * @ch_req_tx_shift: Register offset for AHUB transmit channel select. + * @ch_req_rx_shift: Register offset for AHUB receive channel select. + * @ch_base_offset: Reister offset of DMA channel registers. + * @ch_req_mask: Mask for Tx or Rx channel select. + * @ch_req_max: Maximum number of Tx or Rx channels available. + * @ch_reg_size: Size of DMA channel register space. * @nr_channels: Number of DMA channels available. 
*/ struct tegra_adma_chip_data { - int nr_channels; + unsigned int global_reg_offset; + unsigned int global_int_clear; + unsigned int ch_req_tx_shift; + unsigned int ch_req_rx_shift; + unsigned int ch_base_offset; + unsigned int ch_req_mask; + unsigned int ch_req_max; + unsigned int ch_reg_size; + unsigned int nr_channels; }; /* @@ -148,18 +157,20 @@ struct tegra_adma { /* Used to store global command register state when suspending */ unsigned int global_cmd; + const struct tegra_adma_chip_data *cdata; + /* Last member of the structure */ struct tegra_adma_chan channels[0]; }; static inline void tdma_write(struct tegra_adma *tdma, u32 reg, u32 val) { - writel(val, tdma->base_addr + reg); + writel(val, tdma->base_addr + tdma->cdata->global_reg_offset + reg); } static inline u32 tdma_read(struct tegra_adma *tdma, u32 reg) { - return readl(tdma->base_addr + reg); + return readl(tdma->base_addr + tdma->cdata->global_reg_offset + reg); } static inline void tdma_ch_write(struct tegra_adma_chan *tdc, u32 reg, u32 val) @@ -209,14 +220,16 @@ static int tegra_adma_init(struct tegra_adma *tdma) int ret; /* Clear any interrupts */ - tdma_write(tdma, ADMA_GLOBAL_INT_CLEAR, 0x1); + tdma_write(tdma, tdma->cdata->global_int_clear, 0x1); /* Assert soft reset */ tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1); /* Wait for reset to clear */ ret = readx_poll_timeout(readl, - tdma->base_addr + ADMA_GLOBAL_SOFT_RESET, + tdma->base_addr + + tdma->cdata->global_reg_offset + + ADMA_GLOBAL_SOFT_RESET, status, status == 0, 20, 10000); if (ret) return ret; @@ -236,13 +249,13 @@ static int tegra_adma_request_alloc(struct tegra_adma_chan *tdc, if (tdc->sreq_reserved) return tdc->sreq_dir == direction ? 0 : -EINVAL; + if (sreq_index > tdma->cdata->ch_req_max) { + dev_err(tdma->dev, "invalid DMA request\n"); + return -EINVAL; + } + switch (direction) { case DMA_MEM_TO_DEV: - if (sreq_index > ADMA_CH_CTRL_TX_REQ_MAX) { - dev_err(tdma->dev, "invalid DMA request\n"); - return -EINVAL; - } - if (test_and_set_bit(sreq_index, &tdma->tx_requests_reserved)) { dev_err(tdma->dev, "DMA request reserved\n"); return -EINVAL; @@ -250,11 +263,6 @@ static int tegra_adma_request_alloc(struct tegra_adma_chan *tdc, break; case DMA_DEV_TO_MEM: - if (sreq_index > ADMA_CH_CTRL_RX_REQ_MAX) { - dev_err(tdma->dev, "invalid DMA request\n"); - return -EINVAL; - } - if (test_and_set_bit(sreq_index, &tdma->rx_requests_reserved)) { dev_err(tdma->dev, "DMA request reserved\n"); return -EINVAL; @@ -487,6 +495,7 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc, enum dma_transfer_direction direction) { struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs; + const struct tegra_adma_chip_data *cdata = tdc->tdma->cdata; unsigned int burst_size, adma_dir; if (desc->num_periods > ADMA_CH_CONFIG_MAX_BUFS) @@ -497,7 +506,9 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc, adma_dir = ADMA_CH_CTRL_DIR_MEM2AHUB; burst_size = fls(tdc->sconfig.dst_maxburst); ch_regs->config = ADMA_CH_CONFIG_SRC_BUF(desc->num_periods - 1); - ch_regs->ctrl = ADMA_CH_CTRL_TX_REQ(tdc->sreq_index); + ch_regs->ctrl = ADMA_CH_REG_FIELD_VAL(tdc->sreq_index, + cdata->ch_req_mask, + cdata->ch_req_tx_shift); ch_regs->src_addr = buf_addr; break; @@ -505,7 +516,9 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc, adma_dir = ADMA_CH_CTRL_DIR_AHUB2MEM; burst_size = fls(tdc->sconfig.src_maxburst); ch_regs->config = ADMA_CH_CONFIG_TRG_BUF(desc->num_periods - 1); - ch_regs->ctrl = ADMA_CH_CTRL_RX_REQ(tdc->sreq_index); + ch_regs->ctrl = 
ADMA_CH_REG_FIELD_VAL(tdc->sreq_index, + cdata->ch_req_mask, + cdata->ch_req_rx_shift); ch_regs->trg_addr = buf_addr; break; @@ -658,7 +671,15 @@ static int tegra_adma_runtime_resume(struct device *dev) } static const struct tegra_adma_chip_data tegra210_chip_data = { - .nr_channels = 22, + .global_reg_offset = 0xc00, + .global_int_clear = 0x20, + .ch_req_tx_shift = 28, + .ch_req_rx_shift = 24, + .ch_base_offset = 0, + .ch_req_mask = 0xf, + .ch_req_max = 10, + .ch_reg_size = 0x80, + .nr_channels = 22, }; static const struct of_device_id tegra_adma_of_match[] = { @@ -687,6 +708,7 @@ static int tegra_adma_probe(struct platform_device *pdev) return -ENOMEM; tdma->dev = &pdev->dev; + tdma->cdata = cdata; tdma->nr_channels = cdata->nr_channels; platform_set_drvdata(pdev, tdma); @@ -715,7 +737,8 @@ static int tegra_adma_probe(struct platform_device *pdev) for (i = 0; i < tdma->nr_channels; i++) { struct tegra_adma_chan *tdc = &tdma->channels[i]; - tdc->chan_addr = tdma->base_addr + ADMA_CH_REG_OFFSET(i); + tdc->chan_addr = tdma->base_addr + cdata->ch_base_offset + + (cdata->ch_reg_size * i); tdc->irq = of_irq_get(pdev->dev.of_node, i); if (tdc->irq <= 0) { -- cgit v1.2.3 From c0e74dd25422e32f2e65ee868c8b5c828b8c5b61 Mon Sep 17 00:00:00 2001 From: Sameer Pujar Date: Thu, 2 May 2019 18:25:13 +0530 Subject: Documentation: DT: Add compatibility binding for Tegra186 The Tegra186 Audio DMA controller has updates over the Tegra210 version. Thus add a new compatible string, which can also be used by Tegra194. Signed-off-by: Sameer Pujar Reviewed-by: Jon Hunter Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt b/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt index 2f35b047f772..245d3063715c 100644 --- a/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt +++ b/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt @@ -4,7 +4,9 @@ The Tegra Audio DMA controller that is used for transferring data between system memory and the Audio Processing Engine (APE). Required properties: -- compatible: Must be "nvidia,tegra210-adma". +- compatible: Should contain one of the following: + - "nvidia,tegra210-adma": for Tegra210 + - "nvidia,tegra186-adma": for Tegra186 and Tegra194 - reg: Should contain DMA registers location and length. This should be a single entry that includes all of the per-channel registers in one contiguous bank. -- cgit v1.2.3 From 433de642a76c9e68641ee5a8c7b2720d4e8629a5 Mon Sep 17 00:00:00 2001 From: Sameer Pujar Date: Thu, 2 May 2019 18:25:14 +0530 Subject: dmaengine: tegra210-adma: add support for Tegra186/Tegra194 Add Tegra186-specific macro defines and a chip_data structure for chip-specific information. A new compatible string is added to select the relevant chip details. There is no major change for Tegra194 and hence it can use the same chip data. The bits in the BURST_SIZE field of the ADMA CH_CONFIG register are encoded differently on Tegra186 and Tegra194 compared with Tegra210. On Tegra210 the bits are encoded as follows ... 1 = WORD_1 2 = WORDS_2 3 = WORDS_4 4 = WORDS_8 5 = WORDS_16 Whereas on Tegra186 and Tegra194 the bits are encoded as ... 0 = WORD_1 1 = WORDS_2 2 = WORDS_3 3 = WORDS_4 4 = WORDS_5 ... 15 = WORDS_16 Add helper functions for generating the correct burst size.
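The two encodings described above translate directly into the per-chip helper callbacks added by this patch. As a quick standalone illustration (plain userspace C, with a local fls()-style helper standing in for the kernel's fls(); the shift and limit constants are taken from the patch, everything else is invented for the sketch):

#include <stdio.h>

#define BURST_SIZE_SHIFT	20	/* ADMA_CH_CONFIG_BURST_SIZE_SHIFT in the patch */
#define MAX_BURST_WORDS		16	/* ADMA_CH_CONFIG_MAX_BURST_SIZE in the patch */

/* Local fls()-style helper: position of the most significant set bit, 1-based. */
static unsigned int fls_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

/* Tegra210 encoding: field = fls(words), i.e. 1 word -> 1, 2 -> 2, 4 -> 3, 8 -> 4, 16 -> 5 */
static unsigned int tegra210_burst_field(unsigned int words)
{
	if (!words || words > MAX_BURST_WORDS)
		words = MAX_BURST_WORDS;
	return fls_u32(words) << BURST_SIZE_SHIFT;
}

/* Tegra186/Tegra194 encoding: field = words - 1, i.e. 1 word -> 0, 2 -> 1, ..., 16 -> 15 */
static unsigned int tegra186_burst_field(unsigned int words)
{
	if (!words || words > MAX_BURST_WORDS)
		words = MAX_BURST_WORDS;
	return (words - 1) << BURST_SIZE_SHIFT;
}

int main(void)
{
	unsigned int words;

	for (words = 1; words <= MAX_BURST_WORDS; words *= 2)
		printf("%2u-word burst: Tegra210 field %u, Tegra186/194 field %u\n",
		       words,
		       tegra210_burst_field(words) >> BURST_SIZE_SHIFT,
		       tegra186_burst_field(words) >> BURST_SIZE_SHIFT);
	return 0;
}

Running it shows, for example, that a 16-word burst is programmed as field value 5 on Tegra210 but 15 on Tegra186/Tegra194, and that a non-power-of-two request such as 3 words rounds down on Tegra210 while Tegra186/Tegra194 can express it exactly; this is why the encoding moves behind the adma_get_burst_config() callback rather than staying a shared macro.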
Signed-off-by: Sameer Pujar Reviewed-by: Jon Hunter Signed-off-by: Vinod Koul --- drivers/dma/tegra210-adma.c | 45 +++++++++++++++++++++++++++++++++++++-------- 1 file changed, 37 insertions(+), 8 deletions(-) diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index 9aee015609bd..115ee10f067c 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -45,8 +45,8 @@ #define ADMA_CH_CONFIG 0x28 #define ADMA_CH_CONFIG_SRC_BUF(val) (((val) & 0x7) << 28) #define ADMA_CH_CONFIG_TRG_BUF(val) (((val) & 0x7) << 24) -#define ADMA_CH_CONFIG_BURST_SIZE(val) (((val) & 0x7) << 20) -#define ADMA_CH_CONFIG_BURST_16 5 +#define ADMA_CH_CONFIG_BURST_SIZE_SHIFT 20 +#define ADMA_CH_CONFIG_MAX_BURST_SIZE 16 #define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0xf) #define ADMA_CH_CONFIG_MAX_BUFS 8 @@ -87,6 +87,7 @@ struct tegra_adma; * @nr_channels: Number of DMA channels available. */ struct tegra_adma_chip_data { + unsigned int (*adma_get_burst_config)(unsigned int burst_size); unsigned int global_reg_offset; unsigned int global_int_clear; unsigned int ch_req_tx_shift; @@ -489,6 +490,22 @@ static enum dma_status tegra_adma_tx_status(struct dma_chan *dc, return ret; } +static unsigned int tegra210_adma_get_burst_config(unsigned int burst_size) +{ + if (!burst_size || burst_size > ADMA_CH_CONFIG_MAX_BURST_SIZE) + burst_size = ADMA_CH_CONFIG_MAX_BURST_SIZE; + + return fls(burst_size) << ADMA_CH_CONFIG_BURST_SIZE_SHIFT; +} + +static unsigned int tegra186_adma_get_burst_config(unsigned int burst_size) +{ + if (!burst_size || burst_size > ADMA_CH_CONFIG_MAX_BURST_SIZE) + burst_size = ADMA_CH_CONFIG_MAX_BURST_SIZE; + + return (burst_size - 1) << ADMA_CH_CONFIG_BURST_SIZE_SHIFT; +} + static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc, struct tegra_adma_desc *desc, dma_addr_t buf_addr, @@ -504,7 +521,7 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc, switch (direction) { case DMA_MEM_TO_DEV: adma_dir = ADMA_CH_CTRL_DIR_MEM2AHUB; - burst_size = fls(tdc->sconfig.dst_maxburst); + burst_size = tdc->sconfig.dst_maxburst; ch_regs->config = ADMA_CH_CONFIG_SRC_BUF(desc->num_periods - 1); ch_regs->ctrl = ADMA_CH_REG_FIELD_VAL(tdc->sreq_index, cdata->ch_req_mask, @@ -514,7 +531,7 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc, case DMA_DEV_TO_MEM: adma_dir = ADMA_CH_CTRL_DIR_AHUB2MEM; - burst_size = fls(tdc->sconfig.src_maxburst); + burst_size = tdc->sconfig.src_maxburst; ch_regs->config = ADMA_CH_CONFIG_TRG_BUF(desc->num_periods - 1); ch_regs->ctrl = ADMA_CH_REG_FIELD_VAL(tdc->sreq_index, cdata->ch_req_mask, @@ -527,13 +544,10 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc, return -EINVAL; } - if (!burst_size || burst_size > ADMA_CH_CONFIG_BURST_16) - burst_size = ADMA_CH_CONFIG_BURST_16; - ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir) | ADMA_CH_CTRL_MODE_CONTINUOUS | ADMA_CH_CTRL_FLOWCTRL_EN; - ch_regs->config |= ADMA_CH_CONFIG_BURST_SIZE(burst_size); + ch_regs->config |= cdata->adma_get_burst_config(burst_size); ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1); ch_regs->fifo_ctrl = ADMA_CH_FIFO_CTRL_DEFAULT; ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK; @@ -671,6 +685,7 @@ static int tegra_adma_runtime_resume(struct device *dev) } static const struct tegra_adma_chip_data tegra210_chip_data = { + .adma_get_burst_config = tegra210_adma_get_burst_config, .global_reg_offset = 0xc00, .global_int_clear = 0x20, .ch_req_tx_shift = 28, @@ -682,8 +697,22 @@ static const struct tegra_adma_chip_data 
tegra210_chip_data = { .nr_channels = 22, }; +static const struct tegra_adma_chip_data tegra186_chip_data = { + .adma_get_burst_config = tegra186_adma_get_burst_config, + .global_reg_offset = 0, + .global_int_clear = 0x402c, + .ch_req_tx_shift = 27, + .ch_req_rx_shift = 22, + .ch_base_offset = 0x10000, + .ch_req_mask = 0x1f, + .ch_req_max = 20, + .ch_reg_size = 0x100, + .nr_channels = 32, +}; + static const struct of_device_id tegra_adma_of_match[] = { { .compatible = "nvidia,tegra210-adma", .data = &tegra210_chip_data }, + { .compatible = "nvidia,tegra186-adma", .data = &tegra186_chip_data }, { }, }; MODULE_DEVICE_TABLE(of, tegra_adma_of_match); -- cgit v1.2.3 From 94dc8f4ed47983b7280f488617e49346f89513fd Mon Sep 17 00:00:00 2001 From: Sameer Pujar Date: Thu, 2 May 2019 18:25:15 +0530 Subject: dmaengine: tegra210-adma: add pause/resume support During an audio playback session it is observed that audio goes off after a few seconds of continuous pause and play. No audio is heard even when the playback is resumed. The reason for the above is that the ADMA driver currently does not handle DMA_PAUSE/DMA_RESUME and the relevant callbacks for dma_device are not implemented. This patch implements the device_pause and device_resume callbacks for the device. During pause the TRANSFER_PAUSE bit of the DMA channel control register is set, and the same is cleared during resume. Signed-off-by: Sameer Pujar Reviewed-by: Jon Hunter Signed-off-by: Vinod Koul --- drivers/dma/tegra210-adma.c | 51 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index 115ee10f067c..f26c458d584e 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -30,6 +30,7 @@ #define ADMA_CH_CMD 0x00 #define ADMA_CH_STATUS 0x0c #define ADMA_CH_STATUS_XFER_EN BIT(0) +#define ADMA_CH_STATUS_XFER_PAUSED BIT(1) #define ADMA_CH_INT_STATUS 0x10 #define ADMA_CH_INT_STATUS_XFER_DONE BIT(0) @@ -41,6 +42,7 @@ #define ADMA_CH_CTRL_DIR_MEM2AHUB 4 #define ADMA_CH_CTRL_MODE_CONTINUOUS (2 << 8) #define ADMA_CH_CTRL_FLOWCTRL_EN BIT(1) +#define ADMA_CH_CTRL_XFER_PAUSE_SHIFT 0 #define ADMA_CH_CONFIG 0x28 #define ADMA_CH_CONFIG_SRC_BUF(val) (((val) & 0x7) << 28) @@ -67,6 +69,8 @@ #define ADMA_GLOBAL_CMD 0x00 #define ADMA_GLOBAL_SOFT_RESET 0x04 +#define TEGRA_ADMA_BURST_COMPLETE_TIME 20 + #define ADMA_CH_FIFO_CTRL_DEFAULT (ADMA_CH_FIFO_CTRL_OVRFW_THRES(1) | \ ADMA_CH_FIFO_CTRL_STARV_THRES(1)) @@ -437,6 +441,51 @@ static void tegra_adma_issue_pending(struct dma_chan *dc) spin_unlock_irqrestore(&tdc->vc.lock, flags); } +static bool tegra_adma_is_paused(struct tegra_adma_chan *tdc) +{ + u32 csts; + + csts = tdma_ch_read(tdc, ADMA_CH_STATUS); + csts &= ADMA_CH_STATUS_XFER_PAUSED; + + return csts ?
true : false; +} + +static int tegra_adma_pause(struct dma_chan *dc) +{ + struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc); + struct tegra_adma_desc *desc = tdc->desc; + struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs; + int dcnt = 10; + + ch_regs->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL); + ch_regs->ctrl |= (1 << ADMA_CH_CTRL_XFER_PAUSE_SHIFT); + tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl); + + while (dcnt-- && !tegra_adma_is_paused(tdc)) + udelay(TEGRA_ADMA_BURST_COMPLETE_TIME); + + if (dcnt < 0) { + dev_err(tdc2dev(tdc), "unable to pause DMA channel\n"); + return -EBUSY; + } + + return 0; +} + +static int tegra_adma_resume(struct dma_chan *dc) +{ + struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc); + struct tegra_adma_desc *desc = tdc->desc; + struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs; + + ch_regs->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL); + ch_regs->ctrl &= ~(1 << ADMA_CH_CTRL_XFER_PAUSE_SHIFT); + tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl); + + return 0; +} + static int tegra_adma_terminate_all(struct dma_chan *dc) { struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc); @@ -798,6 +847,8 @@ static int tegra_adma_probe(struct platform_device *pdev) tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; + tdma->dma_dev.device_pause = tegra_adma_pause; + tdma->dma_dev.device_resume = tegra_adma_resume; ret = dma_async_device_register(&tdma->dma_dev); if (ret < 0) { -- cgit v1.2.3 From f030e419501cb95e961e9ed35c493b5d46a04eca Mon Sep 17 00:00:00 2001 From: Sameer Pujar Date: Thu, 2 May 2019 18:25:16 +0530 Subject: dmaengine: tegra210-dma: free dma controller in remove() Following kernel panic is seen during DMA driver unload->load sequence ========================================================================== Unable to handle kernel paging request at virtual address ffffff8001198880 Internal error: Oops: 86000007 [#1] PREEMPT SMP CPU: 0 PID: 5907 Comm: HwBinder:4123_1 Tainted: G C 4.9.128-tegra-g065839f Hardware name: galen (DT) task: ffffffc3590d1a80 task.stack: ffffffc3d0678000 PC is at 0xffffff8001198880 LR is at of_dma_request_slave_channel+0xd8/0x1f8 pc : [] lr : [] pstate: 60400045 sp : ffffffc3d067b710 x29: ffffffc3d067b710 x28: 000000000000002f x27: ffffff800949e000 x26: ffffff800949e750 x25: ffffff800949e000 x24: ffffffbefe817d84 x23: ffffff8009f77cb0 x22: 0000000000000028 x21: ffffffc3ffda49c8 x20: 0000000000000029 x19: 0000000000000001 x18: ffffffffffffffff x17: 0000000000000000 x16: ffffff80082b66a0 x15: ffffff8009e78250 x14: 000000000000000a x13: 0000000000000038 x12: 0101010101010101 x11: 0000000000000030 x10: 0101010101010101 x9 : fffffffffffffffc x8 : 7f7f7f7f7f7f7f7f x7 : 62ff726b6b64622c x6 : 0000000000008064 x5 : 6400000000000000 x4 : ffffffbefe817c44 x3 : ffffffc3ffda3e08 x2 : ffffff8001198880 x1 : ffffffc3d48323c0 x0 : ffffffc3d067b788 Process HwBinder:4123_1 (pid: 5907, stack limit = 0xffffffc3d0678028) Call trace: [] 0xffffff8001198880 [] dma_request_chan+0x50/0x1f0 [] dma_request_slave_channel+0x28/0x40 [] tegra_alt_pcm_open+0x114/0x170 [] soc_pcm_open+0x10c/0x878 [] snd_pcm_open_substream+0xc0/0x170 [] snd_pcm_open+0xc4/0x240 [] snd_pcm_playback_open+0x58/0x80 [] snd_open+0xb4/0x178 [] chrdev_open+0xb8/0x1d0 [] do_dentry_open+0x214/0x318 [] vfs_open+0x58/0x88 [] do_last+0x450/0xde0 [] path_openat+0xa8/0x368 [] do_filp_open+0x8c/0x110 [] do_sys_open+0x164/0x220 [] 
compat_SyS_openat+0x3c/0x50 [] el0_svc_naked+0x34/0x38 ---[ end trace 67e6d544e65b5145 ]--- Kernel panic - not syncing: Fatal exception ========================================================================== In device probe(), of_dma_controller_register() registers the DMA controller. But when the driver is removed, this is not freed. During driver reload this results in a data abort and kernel panic. Add of_dma_controller_free() in the driver remove path to fix the issue. Fixes: f46b195799b5 ("dmaengine: tegra-adma: Add support for Tegra210 ADMA") Signed-off-by: Sameer Pujar Reviewed-by: Jon Hunter Signed-off-by: Vinod Koul --- drivers/dma/tegra210-adma.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index f26c458d584e..953669d35479 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -888,6 +888,7 @@ static int tegra_adma_remove(struct platform_device *pdev) struct tegra_adma *tdma = platform_get_drvdata(pdev); int i; + of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&tdma->dma_dev); for (i = 0; i < tdma->nr_channels; ++i) -- cgit v1.2.3 From f33e7bb3eb922618612a90f0a828c790e8880773 Mon Sep 17 00:00:00 2001 From: Sameer Pujar Date: Thu, 2 May 2019 18:25:17 +0530 Subject: dmaengine: tegra210-adma: restore channel status The status of the ADMA channel registers is not saved and restored during system suspend. If the system enters suspend during active playback, this results in a wrong state of the channel registers during system resume and playback fails to resume properly. Fix this by saving the following channel registers in runtime suspend and restoring them during runtime resume. * ADMA_CH_LOWER_SRC_ADDR * ADMA_CH_LOWER_TRG_ADDR * ADMA_CH_FIFO_CTRL * ADMA_CH_CONFIG * ADMA_CH_CTRL * ADMA_CH_CMD * ADMA_CH_TC Runtime PM calls will be invoked in the system resume path if a playback or capture needs to be resumed. Hence the above changes work fine for the system suspend case.
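Before the diff below, a deliberately tiny model of the pattern it implements may help; the register file here is faked with an array and every name is invented, so it only illustrates the two points that matter: a channel's context is captured only when its CMD register shows it was active, and on resume CMD is written back last so the channel restarts only after the rest of its context is in place.

#include <stdio.h>

enum { REG_CMD, REG_TC, REG_SRC, REG_TRG, REG_CTRL, REG_FIFO, REG_CONFIG, REG_MAX };

static unsigned int hw[REG_MAX];	/* fake channel register file */
static unsigned int saved[REG_MAX];	/* snapshot taken at runtime suspend */

static void ch_suspend(void)
{
	int r;

	saved[REG_CMD] = hw[REG_CMD];
	if (!saved[REG_CMD])		/* channel idle: nothing else to keep */
		return;
	for (r = REG_TC; r < REG_MAX; r++)
		saved[r] = hw[r];
}

static void ch_resume(void)
{
	int r;

	if (!saved[REG_CMD])		/* channel was idle: leave it idle */
		return;
	for (r = REG_TC; r < REG_MAX; r++)	/* restore the context first ... */
		hw[r] = saved[r];
	hw[REG_CMD] = saved[REG_CMD];	/* ... and CMD last, restarting the channel */
}

int main(void)
{
	int r;

	hw[REG_CMD] = 1;
	hw[REG_TC] = 0x1000;
	hw[REG_CTRL] = 0x42;
	ch_suspend();
	for (r = 0; r < REG_MAX; r++)	/* pretend the power domain lost its state */
		hw[r] = 0;
	ch_resume();
	printf("cmd=%u tc=%#x ctrl=%#x\n", hw[REG_CMD], hw[REG_TC], hw[REG_CTRL]);
	return 0;
}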
Fixes: f46b195799b5 ("dmaengine: tegra-adma: Add support for Tegra210 ADMA") Signed-off-by: Sameer Pujar Reviewed-by: Jon Hunter Signed-off-by: Vinod Koul --- drivers/dma/tegra210-adma.c | 46 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 45 insertions(+), 1 deletion(-) diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index 953669d35479..21f6be16d013 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -112,6 +112,7 @@ struct tegra_adma_chan_regs { unsigned int src_addr; unsigned int trg_addr; unsigned int fifo_ctrl; + unsigned int cmd; unsigned int tc; }; @@ -141,6 +142,7 @@ struct tegra_adma_chan { enum dma_transfer_direction sreq_dir; unsigned int sreq_index; bool sreq_reserved; + struct tegra_adma_chan_regs ch_regs; /* Transfer count and position info */ unsigned int tx_buf_count; @@ -711,8 +713,30 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec, static int tegra_adma_runtime_suspend(struct device *dev) { struct tegra_adma *tdma = dev_get_drvdata(dev); + struct tegra_adma_chan_regs *ch_reg; + struct tegra_adma_chan *tdc; + int i; tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD); + if (!tdma->global_cmd) + goto clk_disable; + + for (i = 0; i < tdma->nr_channels; i++) { + tdc = &tdma->channels[i]; + ch_reg = &tdc->ch_regs; + ch_reg->cmd = tdma_ch_read(tdc, ADMA_CH_CMD); + /* skip if channel is not active */ + if (!ch_reg->cmd) + continue; + ch_reg->tc = tdma_ch_read(tdc, ADMA_CH_TC); + ch_reg->src_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_SRC_ADDR); + ch_reg->trg_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_TRG_ADDR); + ch_reg->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL); + ch_reg->fifo_ctrl = tdma_ch_read(tdc, ADMA_CH_FIFO_CTRL); + ch_reg->config = tdma_ch_read(tdc, ADMA_CH_CONFIG); + } + +clk_disable: clk_disable_unprepare(tdma->ahub_clk); return 0; @@ -721,7 +745,9 @@ static int tegra_adma_runtime_suspend(struct device *dev) static int tegra_adma_runtime_resume(struct device *dev) { struct tegra_adma *tdma = dev_get_drvdata(dev); - int ret; + struct tegra_adma_chan_regs *ch_reg; + struct tegra_adma_chan *tdc; + int ret, i; ret = clk_prepare_enable(tdma->ahub_clk); if (ret) { @@ -730,6 +756,24 @@ static int tegra_adma_runtime_resume(struct device *dev) } tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd); + if (!tdma->global_cmd) + return 0; + + for (i = 0; i < tdma->nr_channels; i++) { + tdc = &tdma->channels[i]; + ch_reg = &tdc->ch_regs; + /* skip if channel was not active earlier */ + if (!ch_reg->cmd) + continue; + tdma_ch_write(tdc, ADMA_CH_TC, ch_reg->tc); + tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_reg->src_addr); + tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_reg->trg_addr); + tdma_ch_write(tdc, ADMA_CH_CTRL, ch_reg->ctrl); + tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_reg->fifo_ctrl); + tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_reg->config); + tdma_ch_write(tdc, ADMA_CH_CMD, ch_reg->cmd); + } + return 0; } -- cgit v1.2.3
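Finally, to connect the pause/resume support added earlier in this series to its users: a DMA client (in practice the ALSA dmaengine PCM layer) reaches the new device_pause()/device_resume() callbacks through the generic dmaengine wrappers. A minimal sketch follows, assuming a channel has already been requested and a cyclic descriptor submitted; the helper name example_trigger() is hypothetical.

#include <linux/types.h>
#include <linux/dmaengine.h>

/* Hypothetical helper: pause or resume an already-running cyclic transfer. */
static int example_trigger(struct dma_chan *chan, bool pause)
{
	/*
	 * dmaengine_pause()/dmaengine_resume() call into the DMA driver's
	 * device_pause()/device_resume() callbacks -- for tegra210-adma
	 * that is the TRANSFER_PAUSE bit handling added above.
	 */
	return pause ? dmaengine_pause(chan) : dmaengine_resume(chan);
}

In an ALSA PCM driver this path is typically taken from the .trigger callback on SNDRV_PCM_TRIGGER_PAUSE_PUSH and SNDRV_PCM_TRIGGER_PAUSE_RELEASE, which is exactly the continuous pause-and-play sequence described in the commit message.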