Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig                              |   12
-rw-r--r--  drivers/dma/Makefile                             |    1
-rw-r--r--  drivers/dma/dmaengine.c                          |    7
-rw-r--r--  drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c   |    6
-rw-r--r--  drivers/dma/dw-edma/Kconfig                      |    5
-rw-r--r--  drivers/dma/dw-edma/dw-edma-core.c               |  196
-rw-r--r--  drivers/dma/dw-edma/dw-edma-core.h               |   10
-rw-r--r--  drivers/dma/dw-edma/dw-edma-pcie.c               |   56
-rw-r--r--  drivers/dma/dw-edma/dw-edma-v0-core.c            |  100
-rw-r--r--  drivers/dma/dw-edma/dw-edma-v0-core.h            |    1
-rw-r--r--  drivers/dma/dw-edma/dw-edma-v0-debugfs.c         |  374
-rw-r--r--  drivers/dma/dw-edma/dw-edma-v0-debugfs.h         |    5
-rw-r--r--  drivers/dma/idxd/cdev.c                          |    2
-rw-r--r--  drivers/dma/idxd/device.c                        |   16
-rw-r--r--  drivers/dma/imx-sdma.c                           |    4
-rw-r--r--  drivers/dma/lgm/lgm-dma.c                        |   10
-rw-r--r--  drivers/dma/mmp_tdma.c                           |    7
-rw-r--r--  drivers/dma/ptdma/ptdma-dev.c                    |    7
-rw-r--r--  drivers/dma/ptdma/ptdma.h                        |    2
-rw-r--r--  drivers/dma/qcom/gpi.c                           |    1
-rw-r--r--  drivers/dma/s3c24xx-dma.c                        | 1428
-rw-r--r--  drivers/dma/tegra186-gpc-dma.c                   |    1
-rw-r--r--  drivers/dma/tegra210-adma.c                      |    2
-rw-r--r--  drivers/dma/ti/k3-udma.c                         |    5
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c                  |    4
25 files changed, 417 insertions(+), 1845 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 472dc315b889..fb7073fc034f 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -610,18 +610,6 @@ config SPRD_DMA
help
Enable support for the on-chip DMA controller on Spreadtrum platform.
-config S3C24XX_DMAC
- bool "Samsung S3C24XX DMA support"
- depends on ARCH_S3C24XX || COMPILE_TEST
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- help
- Support for the Samsung S3C24XX DMA controller driver. The
- DMA controller is having multiple DMA channels which can be
- configured for different peripherals like audio, UART, SPI.
- The DMA controller can transfer data from memory to peripheral,
- periphal to memory, periphal to periphal and memory to memory.
-
config TXX9_DMAC
tristate "Toshiba TXx9 SoC DMA support"
depends on MACH_TX49XX
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5b55ada052a7..a4fd1ce29510 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -70,7 +70,6 @@ obj-$(CONFIG_STM32_DMA) += stm32-dma.o
obj-$(CONFIG_STM32_DMAMUX) += stm32-dmamux.o
obj-$(CONFIG_STM32_MDMA) += stm32-mdma.o
obj-$(CONFIG_SPRD_DMA) += sprd-dma.o
-obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_TEGRA186_GPC_DMA) += tegra186-gpc-dma.o
obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index ed28be3b1fc7..c24bca210104 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -451,7 +451,8 @@ static int dma_chan_get(struct dma_chan *chan)
/* The channel is already in use, update client count */
if (chan->client_count) {
__module_get(owner);
- goto out;
+ chan->client_count++;
+ return 0;
}
if (!try_module_get(owner))
@@ -470,11 +471,11 @@ static int dma_chan_get(struct dma_chan *chan)
goto err_out;
}
+ chan->client_count++;
+
if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
balance_ref_count(chan);
-out:
- chan->client_count++;
return 0;
err_out:
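
Note: the dmaengine.c hunks above rework dma_chan_get() so that chan->client_count
is incremented before balance_ref_count() runs. Upstream's balance_ref_count()
tops the count up to the global dmaengine_ref_count, so the old "out:" label,
which incremented afterwards, over-counted by one the first time a public
channel was grabbed. A condensed sketch of the resulting flow (simplified,
error paths elided; not the verbatim kernel function):

static int dma_chan_get_sketch(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	/* Already claimed: just take another module ref and count us. */
	if (chan->client_count) {
		__module_get(owner);
		chan->client_count++;
		return 0;
	}

	if (!try_module_get(owner))
		return -ENODEV;

	/* ... device_alloc_chan_resources() handling elided ... */

	chan->client_count++;	/* count this client first ... */

	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		balance_ref_count(chan);	/* ... so this only tops up */

	return 0;
}
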
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index d0de8b032d49..4169e1d7d5ca 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -1016,6 +1016,11 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
/* The bad descriptor currently is in the head of vc list */
vd = vchan_next_desc(&chan->vc);
+ if (!vd) {
+ dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
+ axi_chan_name(chan));
+ goto out;
+ }
/* Remove the completed descriptor from issued list */
list_del(&vd->node);
@@ -1030,6 +1035,7 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
/* Try to restart the controller */
axi_chan_start_first_queued(chan);
+out:
spin_unlock_irqrestore(&chan->vc.lock, flags);
}
diff --git a/drivers/dma/dw-edma/Kconfig b/drivers/dma/dw-edma/Kconfig
index 7ff17b2db6a1..2b6f2679508d 100644
--- a/drivers/dma/dw-edma/Kconfig
+++ b/drivers/dma/dw-edma/Kconfig
@@ -9,11 +9,14 @@ config DW_EDMA
Support the Synopsys DesignWare eDMA controller, normally
implemented on endpoints SoCs.
+if DW_EDMA
+
config DW_EDMA_PCIE
tristate "Synopsys DesignWare eDMA PCIe driver"
depends on PCI && PCI_MSI
- select DW_EDMA
help
Provides a glue-logic between the Synopsys DesignWare
eDMA controller and an endpoint PCIe device. This also serves
as a reference design to whom desires to use this IP.
+
+endif # DW_EDMA
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index c54b24ff5206..1906a836f0aa 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -39,6 +39,17 @@ struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
return container_of(vd, struct dw_edma_desc, vd);
}
+static inline
+u64 dw_edma_get_pci_address(struct dw_edma_chan *chan, phys_addr_t cpu_addr)
+{
+ struct dw_edma_chip *chip = chan->dw->chip;
+
+ if (chip->ops->pci_address)
+ return chip->ops->pci_address(chip->dev, cpu_addr);
+
+ return cpu_addr;
+}
+
static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
{
struct dw_edma_burst *burst;
@@ -197,6 +208,24 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan)
desc->chunks_alloc--;
}
+static void dw_edma_device_caps(struct dma_chan *dchan,
+ struct dma_slave_caps *caps)
+{
+ struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+
+ if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
+ if (chan->dir == EDMA_DIR_READ)
+ caps->directions = BIT(DMA_DEV_TO_MEM);
+ else
+ caps->directions = BIT(DMA_MEM_TO_DEV);
+ } else {
+ if (chan->dir == EDMA_DIR_WRITE)
+ caps->directions = BIT(DMA_DEV_TO_MEM);
+ else
+ caps->directions = BIT(DMA_MEM_TO_DEV);
+ }
+}
+
static int dw_edma_device_config(struct dma_chan *dchan,
struct dma_slave_config *config)
{
@@ -327,11 +356,12 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
{
struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
enum dma_transfer_direction dir = xfer->direction;
- phys_addr_t src_addr, dst_addr;
struct scatterlist *sg = NULL;
struct dw_edma_chunk *chunk;
struct dw_edma_burst *burst;
struct dw_edma_desc *desc;
+ u64 src_addr, dst_addr;
+ size_t fsz = 0;
u32 cnt = 0;
int i;
@@ -381,9 +411,9 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
if (xfer->xfer.sg.len < 1)
return NULL;
} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
- if (!xfer->xfer.il->numf)
+ if (!xfer->xfer.il->numf || xfer->xfer.il->frame_size < 1)
return NULL;
- if (xfer->xfer.il->numf > 0 && xfer->xfer.il->frame_size > 0)
+ if (!xfer->xfer.il->src_inc || !xfer->xfer.il->dst_inc)
return NULL;
} else {
return NULL;
@@ -405,16 +435,19 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
dst_addr = chan->config.dst_addr;
}
+ if (dir == DMA_DEV_TO_MEM)
+ src_addr = dw_edma_get_pci_address(chan, (phys_addr_t)src_addr);
+ else
+ dst_addr = dw_edma_get_pci_address(chan, (phys_addr_t)dst_addr);
+
if (xfer->type == EDMA_XFER_CYCLIC) {
cnt = xfer->xfer.cyclic.cnt;
} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
cnt = xfer->xfer.sg.len;
sg = xfer->xfer.sg.sgl;
} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
- if (xfer->xfer.il->numf > 0)
- cnt = xfer->xfer.il->numf;
- else
- cnt = xfer->xfer.il->frame_size;
+ cnt = xfer->xfer.il->numf * xfer->xfer.il->frame_size;
+ fsz = xfer->xfer.il->frame_size;
}
for (i = 0; i < cnt; i++) {
@@ -436,7 +469,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
else if (xfer->type == EDMA_XFER_SCATTER_GATHER)
burst->sz = sg_dma_len(sg);
else if (xfer->type == EDMA_XFER_INTERLEAVED)
- burst->sz = xfer->xfer.il->sgl[i].size;
+ burst->sz = xfer->xfer.il->sgl[i % fsz].size;
chunk->ll_region.sz += burst->sz;
desc->alloc_sz += burst->sz;
@@ -455,6 +488,8 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
* and destination addresses are increased
* by the same portion (data length)
*/
+ } else if (xfer->type == EDMA_XFER_INTERLEAVED) {
+ burst->dar = dst_addr;
}
} else {
burst->dar = dst_addr;
@@ -470,25 +505,24 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
* and destination addresses are increased
* by the same portion (data length)
*/
+ } else if (xfer->type == EDMA_XFER_INTERLEAVED) {
+ burst->sar = src_addr;
}
}
if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
sg = sg_next(sg);
- } else if (xfer->type == EDMA_XFER_INTERLEAVED &&
- xfer->xfer.il->frame_size > 0) {
+ } else if (xfer->type == EDMA_XFER_INTERLEAVED) {
struct dma_interleaved_template *il = xfer->xfer.il;
- struct data_chunk *dc = &il->sgl[i];
+ struct data_chunk *dc = &il->sgl[i % fsz];
- if (il->src_sgl) {
- src_addr += burst->sz;
+ src_addr += burst->sz;
+ if (il->src_sgl)
src_addr += dmaengine_get_src_icg(il, dc);
- }
- if (il->dst_sgl) {
- dst_addr += burst->sz;
+ dst_addr += burst->sz;
+ if (il->dst_sgl)
dst_addr += dmaengine_get_dst_icg(il, dc);
- }
}
}
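
Note: the interleaved-transfer hunks above change the iteration model. Instead
of treating numf and frame_size as mutually exclusive counters, the loop now
emits numf * frame_size bursts and re-reads the chunk template cyclically via
sgl[i % fsz]. An illustration with hypothetical values:

/* Hypothetical template: numf = 2 frames, frame_size = 3 chunks. */
size_t fsz = il->frame_size;		/* 3 */
u32 cnt = il->numf * fsz;		/* 2 * 3 = 6 bursts */
int i;

for (i = 0; i < cnt; i++) {
	struct data_chunk *dc = &il->sgl[i % fsz];
	/*
	 * i : 0      1      2      3      4      5
	 * dc: sgl[0] sgl[1] sgl[2] sgl[0] sgl[1] sgl[2]
	 *
	 * After each burst both addresses advance by dc->size, plus
	 * the inter-chunk gap on whichever side set src_sgl/dst_sgl.
	 */
}
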
@@ -701,92 +735,76 @@ static void dw_edma_free_chan_resources(struct dma_chan *dchan)
}
}
-static int dw_edma_channel_setup(struct dw_edma *dw, bool write,
- u32 wr_alloc, u32 rd_alloc)
+static int dw_edma_channel_setup(struct dw_edma *dw, u32 wr_alloc, u32 rd_alloc)
{
struct dw_edma_chip *chip = dw->chip;
- struct dw_edma_region *dt_region;
struct device *dev = chip->dev;
struct dw_edma_chan *chan;
struct dw_edma_irq *irq;
struct dma_device *dma;
- u32 alloc, off_alloc;
- u32 i, j, cnt;
- int err = 0;
+ u32 i, ch_cnt;
u32 pos;
- if (write) {
- i = 0;
- cnt = dw->wr_ch_cnt;
- dma = &dw->wr_edma;
- alloc = wr_alloc;
- off_alloc = 0;
- } else {
- i = dw->wr_ch_cnt;
- cnt = dw->rd_ch_cnt;
- dma = &dw->rd_edma;
- alloc = rd_alloc;
- off_alloc = wr_alloc;
- }
+ ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
+ dma = &dw->dma;
INIT_LIST_HEAD(&dma->channels);
- for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) {
+
+ for (i = 0; i < ch_cnt; i++) {
chan = &dw->chan[i];
- dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL);
- if (!dt_region)
- return -ENOMEM;
+ chan->dw = dw;
- chan->vc.chan.private = dt_region;
+ if (i < dw->wr_ch_cnt) {
+ chan->id = i;
+ chan->dir = EDMA_DIR_WRITE;
+ } else {
+ chan->id = i - dw->wr_ch_cnt;
+ chan->dir = EDMA_DIR_READ;
+ }
- chan->dw = dw;
- chan->id = j;
- chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
chan->configured = false;
chan->request = EDMA_REQ_NONE;
chan->status = EDMA_ST_IDLE;
- if (write)
- chan->ll_max = (chip->ll_region_wr[j].sz / EDMA_LL_SZ);
+ if (chan->dir == EDMA_DIR_WRITE)
+ chan->ll_max = (chip->ll_region_wr[chan->id].sz / EDMA_LL_SZ);
else
- chan->ll_max = (chip->ll_region_rd[j].sz / EDMA_LL_SZ);
+ chan->ll_max = (chip->ll_region_rd[chan->id].sz / EDMA_LL_SZ);
chan->ll_max -= 1;
dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
- write ? "write" : "read", j, chan->ll_max);
+ chan->dir == EDMA_DIR_WRITE ? "write" : "read",
+ chan->id, chan->ll_max);
if (dw->nr_irqs == 1)
pos = 0;
+ else if (chan->dir == EDMA_DIR_WRITE)
+ pos = chan->id % wr_alloc;
else
- pos = off_alloc + (j % alloc);
+ pos = wr_alloc + chan->id % rd_alloc;
irq = &dw->irq[pos];
- if (write)
- irq->wr_mask |= BIT(j);
+ if (chan->dir == EDMA_DIR_WRITE)
+ irq->wr_mask |= BIT(chan->id);
else
- irq->rd_mask |= BIT(j);
+ irq->rd_mask |= BIT(chan->id);
irq->dw = dw;
memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));
dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
- write ? "write" : "read", j,
+ chan->dir == EDMA_DIR_WRITE ? "write" : "read", chan->id,
chan->msi.address_hi, chan->msi.address_lo,
chan->msi.data);
chan->vc.desc_free = vchan_free_desc;
- vchan_init(&chan->vc, dma);
+ chan->vc.chan.private = chan->dir == EDMA_DIR_WRITE ?
+ &dw->chip->dt_region_wr[chan->id] :
+ &dw->chip->dt_region_rd[chan->id];
- if (write) {
- dt_region->paddr = chip->dt_region_wr[j].paddr;
- dt_region->vaddr = chip->dt_region_wr[j].vaddr;
- dt_region->sz = chip->dt_region_wr[j].sz;
- } else {
- dt_region->paddr = chip->dt_region_rd[j].paddr;
- dt_region->vaddr = chip->dt_region_rd[j].vaddr;
- dt_region->sz = chip->dt_region_rd[j].sz;
- }
+ vchan_init(&chan->vc, dma);
dw_edma_v0_core_device_config(chan);
}
@@ -797,16 +815,16 @@ static int dw_edma_channel_setup(struct dw_edma *dw, bool write,
dma_cap_set(DMA_CYCLIC, dma->cap_mask);
dma_cap_set(DMA_PRIVATE, dma->cap_mask);
dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
- dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
+ dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
- dma->chancnt = cnt;
/* Set DMA channel callbacks */
dma->dev = chip->dev;
dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources;
dma->device_free_chan_resources = dw_edma_free_chan_resources;
+ dma->device_caps = dw_edma_device_caps;
dma->device_config = dw_edma_device_config;
dma->device_pause = dw_edma_device_pause;
dma->device_resume = dw_edma_device_resume;
@@ -820,9 +838,7 @@ static int dw_edma_channel_setup(struct dw_edma *dw, bool write,
dma_set_max_seg_size(dma->dev, U32_MAX);
/* Register DMA device */
- err = dma_async_device_register(dma);
-
- return err;
+ return dma_async_device_register(dma);
}
static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt)
@@ -893,10 +909,8 @@ static int dw_edma_irq_request(struct dw_edma *dw,
dw_edma_interrupt_read,
IRQF_SHARED, dw->name,
&dw->irq[i]);
- if (err) {
- dw->nr_irqs = i;
- return err;
- }
+ if (err)
+ goto err_irq_free;
if (irq_get_msi_desc(irq))
get_cached_msi_msg(irq, &dw->irq[i].msi);
@@ -905,6 +919,14 @@ static int dw_edma_irq_request(struct dw_edma *dw,
dw->nr_irqs = i;
}
+ return 0;
+
+err_irq_free:
+ for (i--; i >= 0; i--) {
+ irq = chip->ops->irq_vector(dev, i);
+ free_irq(irq, &dw->irq[i]);
+ }
+
return err;
}
@@ -951,7 +973,8 @@ int dw_edma_probe(struct dw_edma_chip *chip)
if (!dw->chan)
return -ENOMEM;
- snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id);
+ snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%s",
+ dev_name(chip->dev));
/* Disable eDMA, only to establish the ideal initial conditions */
dw_edma_v0_core_off(dw);
@@ -961,13 +984,8 @@ int dw_edma_probe(struct dw_edma_chip *chip)
if (err)
return err;
- /* Setup write channels */
- err = dw_edma_channel_setup(dw, true, wr_alloc, rd_alloc);
- if (err)
- goto err_irq_free;
-
- /* Setup read channels */
- err = dw_edma_channel_setup(dw, false, wr_alloc, rd_alloc);
+ /* Setup write/read channels */
+ err = dw_edma_channel_setup(dw, wr_alloc, rd_alloc);
if (err)
goto err_irq_free;
@@ -993,6 +1011,10 @@ int dw_edma_remove(struct dw_edma_chip *chip)
struct dw_edma *dw = chip->dw;
int i;
+ /* Skip removal if no private data found */
+ if (!dw)
+ return -ENODEV;
+
/* Disable eDMA */
dw_edma_v0_core_off(dw);
@@ -1001,23 +1023,13 @@ int dw_edma_remove(struct dw_edma_chip *chip)
free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);
/* Deregister eDMA device */
- dma_async_device_unregister(&dw->wr_edma);
- list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
+ dma_async_device_unregister(&dw->dma);
+ list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
vc.chan.device_node) {
tasklet_kill(&chan->vc.task);
list_del(&chan->vc.chan.device_node);
}
- dma_async_device_unregister(&dw->rd_edma);
- list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
- vc.chan.device_node) {
- tasklet_kill(&chan->vc.task);
- list_del(&chan->vc.chan.device_node);
- }
-
- /* Turn debugfs off */
- dw_edma_v0_core_debugfs_off(dw);
-
return 0;
}
EXPORT_SYMBOL_GPL(dw_edma_remove);
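
Note: with write and read channels now consolidated on a single dma_device,
per-channel direction is reported through the new device_caps hook rather
than two per-device directions masks. A minimal client-side sketch, assuming
chan was obtained via dma_request_chan():

#include <linux/dmaengine.h>

static bool edma_chan_can_dev_to_mem(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	/* Invokes the driver's device_caps hook under the hood. */
	if (dma_get_slave_caps(chan, &caps))
		return false;

	return !!(caps.directions & BIT(DMA_DEV_TO_MEM));
}
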
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
index 85df2d511907..0ab2b6dba880 100644
--- a/drivers/dma/dw-edma/dw-edma-core.h
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -96,12 +96,11 @@ struct dw_edma_irq {
};
struct dw_edma {
- char name[20];
+ char name[32];
- struct dma_device wr_edma;
- u16 wr_ch_cnt;
+ struct dma_device dma;
- struct dma_device rd_edma;
+ u16 wr_ch_cnt;
u16 rd_ch_cnt;
struct dw_edma_irq *irq;
@@ -112,9 +111,6 @@ struct dw_edma {
raw_spinlock_t lock; /* Only for legacy */
struct dw_edma_chip *chip;
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs;
-#endif /* CONFIG_DEBUG_FS */
};
struct dw_edma_sg {
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index d6b5e2463884..2b40f2b44f5e 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -95,8 +95,23 @@ static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr)
return pci_irq_vector(to_pci_dev(dev), nr);
}
+static u64 dw_edma_pcie_address(struct device *dev, phys_addr_t cpu_addr)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_bus_region region;
+ struct resource res = {
+ .flags = IORESOURCE_MEM,
+ .start = cpu_addr,
+ .end = cpu_addr,
+ };
+
+ pcibios_resource_to_bus(pdev->bus, &region, &res);
+ return region.start;
+}
+
static const struct dw_edma_core_ops dw_edma_pcie_core_ops = {
.irq_vector = dw_edma_pcie_irq_vector,
+ .pci_address = dw_edma_pcie_address,
};
static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
@@ -207,7 +222,6 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
/* Data structure initialization */
chip->dev = dev;
- chip->id = pdev->devfn;
chip->mf = vsec_data.mf;
chip->nr_irqs = nr_irqs;
@@ -226,21 +240,21 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
struct dw_edma_block *ll_block = &vsec_data.ll_wr[i];
struct dw_edma_block *dt_block = &vsec_data.dt_wr[i];
- ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar];
- if (!ll_region->vaddr)
+ ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar];
+ if (!ll_region->vaddr.io)
return -ENOMEM;
- ll_region->vaddr += ll_block->off;
- ll_region->paddr = pdev->resource[ll_block->bar].start;
+ ll_region->vaddr.io += ll_block->off;
+ ll_region->paddr = pci_bus_address(pdev, ll_block->bar);
ll_region->paddr += ll_block->off;
ll_region->sz = ll_block->sz;
- dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar];
- if (!dt_region->vaddr)
+ dt_region->vaddr.io = pcim_iomap_table(pdev)[dt_block->bar];
+ if (!dt_region->vaddr.io)
return -ENOMEM;
- dt_region->vaddr += dt_block->off;
- dt_region->paddr = pdev->resource[dt_block->bar].start;
+ dt_region->vaddr.io += dt_block->off;
+ dt_region->paddr = pci_bus_address(pdev, dt_block->bar);
dt_region->paddr += dt_block->off;
dt_region->sz = dt_block->sz;
}
@@ -251,21 +265,21 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
struct dw_edma_block *ll_block = &vsec_data.ll_rd[i];
struct dw_edma_block *dt_block = &vsec_data.dt_rd[i];
- ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar];
- if (!ll_region->vaddr)
+ ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar];
+ if (!ll_region->vaddr.io)
return -ENOMEM;
- ll_region->vaddr += ll_block->off;
- ll_region->paddr = pdev->resource[ll_block->bar].start;
+ ll_region->vaddr.io += ll_block->off;
+ ll_region->paddr = pci_bus_address(pdev, ll_block->bar);
ll_region->paddr += ll_block->off;
ll_region->sz = ll_block->sz;
- dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar];
- if (!dt_region->vaddr)
+ dt_region->vaddr.io = pcim_iomap_table(pdev)[dt_block->bar];
+ if (!dt_region->vaddr.io)
return -ENOMEM;
- dt_region->vaddr += dt_block->off;
- dt_region->paddr = pdev->resource[dt_block->bar].start;
+ dt_region->vaddr.io += dt_block->off;
+ dt_region->paddr = pci_bus_address(pdev, dt_block->bar);
dt_region->paddr += dt_block->off;
dt_region->sz = dt_block->sz;
}
@@ -289,24 +303,24 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.ll_wr[i].bar,
vsec_data.ll_wr[i].off, chip->ll_region_wr[i].sz,
- chip->ll_region_wr[i].vaddr, &chip->ll_region_wr[i].paddr);
+ chip->ll_region_wr[i].vaddr.io, &chip->ll_region_wr[i].paddr);
pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.dt_wr[i].bar,
vsec_data.dt_wr[i].off, chip->dt_region_wr[i].sz,
- chip->dt_region_wr[i].vaddr, &chip->dt_region_wr[i].paddr);
+ chip->dt_region_wr[i].vaddr.io, &chip->dt_region_wr[i].paddr);
}
for (i = 0; i < chip->ll_rd_cnt; i++) {
pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.ll_rd[i].bar,
vsec_data.ll_rd[i].off, chip->ll_region_rd[i].sz,
- chip->ll_region_rd[i].vaddr, &chip->ll_region_rd[i].paddr);
+ chip->ll_region_rd[i].vaddr.io, &chip->ll_region_rd[i].paddr);
pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.dt_rd[i].bar,
vsec_data.dt_rd[i].off, chip->dt_region_rd[i].sz,
- chip->dt_region_rd[i].vaddr, &chip->dt_region_rd[i].paddr);
+ chip->dt_region_rd[i].vaddr.io, &chip->dt_region_rd[i].paddr);
}
pci_dbg(pdev, "Nr. IRQs:\t%u\n", chip->nr_irqs);
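
Note: a recurring theme in the PCIe glue above is the switch from CPU physical
addresses (pdev->resource[bar].start) to PCI bus addresses: the values end up
in linked-list pointers that the eDMA engine dereferences from the bus side,
so any host-bridge translation must be applied first. A short sketch of the
two views of a BAR (hypothetical helper):

#include <linux/pci.h>

static void show_bar_views(struct pci_dev *pdev, int bar)
{
	resource_size_t cpu = pci_resource_start(pdev, bar); /* CPU view    */
	dma_addr_t bus = pci_bus_address(pdev, bar);         /* device view */

	/* Identical on most x86 hosts; they differ behind windowed host
	 * bridges, which is exactly when resource[bar].start would have
	 * programmed a wrong linked-list pointer.
	 */
	pci_dbg(pdev, "BAR%d: cpu=%pa bus=%pad\n", bar, &cpu, &bus);
}
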
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index 77e6cfe52e0a..72e79a0c0a4e 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -8,6 +8,8 @@
#include <linux/bitfield.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "dw-edma-v0-regs.h"
@@ -53,8 +55,6 @@ static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
SET_32(dw, rd_##name, value); \
} while (0)
-#ifdef CONFIG_64BIT
-
#define SET_64(dw, name, value) \
writeq(value, &(__dw_regs(dw)->name))
@@ -80,8 +80,6 @@ static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
SET_64(dw, rd_##name, value); \
} while (0)
-#endif /* CONFIG_64BIT */
-
#define SET_COMPAT(dw, name, value) \
writel(value, &(__dw_regs(dw)->type.unroll.name))
@@ -161,11 +159,6 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
#define GET_CH_32(dw, dir, ch, name) \
readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))
-#define SET_LL_32(ll, value) \
- writel(value, ll)
-
-#ifdef CONFIG_64BIT
-
static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
u64 value, void __iomem *addr)
{
@@ -192,7 +185,7 @@ static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
const void __iomem *addr)
{
- u32 value;
+ u64 value;
if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
u32 viewport_sel;
@@ -222,11 +215,6 @@ static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
#define GET_CH_64(dw, dir, ch, name) \
readq_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))
-#define SET_LL_64(ll, value) \
- writeq(value, ll)
-
-#endif /* CONFIG_64BIT */
-
/* eDMA management callbacks */
void dw_edma_v0_core_off(struct dw_edma *dw)
{
@@ -298,17 +286,53 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
GET_RW_32(dw, dir, int_status));
}
+static void dw_edma_v0_write_ll_data(struct dw_edma_chunk *chunk, int i,
+ u32 control, u32 size, u64 sar, u64 dar)
+{
+ ptrdiff_t ofs = i * sizeof(struct dw_edma_v0_lli);
+
+ if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
+ struct dw_edma_v0_lli *lli = chunk->ll_region.vaddr.mem + ofs;
+
+ lli->control = control;
+ lli->transfer_size = size;
+ lli->sar.reg = sar;
+ lli->dar.reg = dar;
+ } else {
+ struct dw_edma_v0_lli __iomem *lli = chunk->ll_region.vaddr.io + ofs;
+
+ writel(control, &lli->control);
+ writel(size, &lli->transfer_size);
+ writeq(sar, &lli->sar.reg);
+ writeq(dar, &lli->dar.reg);
+ }
+}
+
+static void dw_edma_v0_write_ll_link(struct dw_edma_chunk *chunk,
+ int i, u32 control, u64 pointer)
+{
+ ptrdiff_t ofs = i * sizeof(struct dw_edma_v0_lli);
+
+ if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
+ struct dw_edma_v0_llp *llp = chunk->ll_region.vaddr.mem + ofs;
+
+ llp->control = control;
+ llp->llp.reg = pointer;
+ } else {
+ struct dw_edma_v0_llp __iomem *llp = chunk->ll_region.vaddr.io + ofs;
+
+ writel(control, &llp->control);
+ writeq(pointer, &llp->llp.reg);
+ }
+}
+
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
struct dw_edma_burst *child;
struct dw_edma_chan *chan = chunk->chan;
- struct dw_edma_v0_lli __iomem *lli;
- struct dw_edma_v0_llp __iomem *llp;
u32 control = 0, i = 0;
int j;
- lli = chunk->ll_region.vaddr;
-
if (chunk->cb)
control = DW_EDMA_V0_CB;
@@ -320,41 +344,16 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
control |= DW_EDMA_V0_RIE;
}
- /* Channel control */
- SET_LL_32(&lli[i].control, control);
- /* Transfer size */
- SET_LL_32(&lli[i].transfer_size, child->sz);
- /* SAR */
- #ifdef CONFIG_64BIT
- SET_LL_64(&lli[i].sar.reg, child->sar);
- #else /* CONFIG_64BIT */
- SET_LL_32(&lli[i].sar.lsb, lower_32_bits(child->sar));
- SET_LL_32(&lli[i].sar.msb, upper_32_bits(child->sar));
- #endif /* CONFIG_64BIT */
- /* DAR */
- #ifdef CONFIG_64BIT
- SET_LL_64(&lli[i].dar.reg, child->dar);
- #else /* CONFIG_64BIT */
- SET_LL_32(&lli[i].dar.lsb, lower_32_bits(child->dar));
- SET_LL_32(&lli[i].dar.msb, upper_32_bits(child->dar));
- #endif /* CONFIG_64BIT */
- i++;
+
+ dw_edma_v0_write_ll_data(chunk, i++, control, child->sz,
+ child->sar, child->dar);
}
- llp = (void __iomem *)&lli[i];
control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
if (!chunk->cb)
control |= DW_EDMA_V0_CB;
- /* Channel control */
- SET_LL_32(&llp->control, control);
- /* Linked list */
- #ifdef CONFIG_64BIT
- SET_LL_64(&llp->llp.reg, chunk->ll_region.paddr);
- #else /* CONFIG_64BIT */
- SET_LL_32(&llp->llp.lsb, lower_32_bits(chunk->ll_region.paddr));
- SET_LL_32(&llp->llp.msb, upper_32_bits(chunk->ll_region.paddr));
- #endif /* CONFIG_64BIT */
+ dw_edma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
}
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
@@ -504,8 +503,3 @@ void dw_edma_v0_core_debugfs_on(struct dw_edma *dw)
{
dw_edma_v0_debugfs_on(dw);
}
-
-void dw_edma_v0_core_debugfs_off(struct dw_edma *dw)
-{
- dw_edma_v0_debugfs_off(dw);
-}
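
Note: two things let the v0 core drop its CONFIG_64BIT ladders and SET_LL_*
macros. First, the vaddr union now distinguishes local linked-list memory
(vaddr.mem, written with plain stores) from remote BAR-mapped memory
(vaddr.io, written with MMIO accessors). Second, <linux/io-64-nonatomic-lo-hi.h>
supplies writeq()/readq() on 32-bit builds as two 32-bit accesses, low word
first. Roughly (simplified from the upstream header):

#include <linux/io.h>
#include <linux/kernel.h>

/* Approximation of what lo_hi_writeq() expands to on 32-bit: */
static inline void sketch_lo_hi_writeq(u64 val, void __iomem *addr)
{
	writel(lower_32_bits(val), addr);
	writel(upper_32_bits(val), addr + 4);
}
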
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.h b/drivers/dma/dw-edma/dw-edma-v0-core.h
index 75aec6d31b21..ab96a1f48080 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.h
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.h
@@ -23,6 +23,5 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first);
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan);
/* eDMA debug fs callbacks */
void dw_edma_v0_core_debugfs_on(struct dw_edma *dw);
-void dw_edma_v0_core_debugfs_off(struct dw_edma *dw);
#endif /* _DW_EDMA_V0_CORE_H */
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
index 5226c9014703..0745d9e7d259 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
@@ -13,76 +13,79 @@
#include "dw-edma-v0-regs.h"
#include "dw-edma-core.h"
-#define REGS_ADDR(name) \
- ((void __force *)&regs->name)
-#define REGISTER(name) \
- { #name, REGS_ADDR(name) }
-
-#define WR_REGISTER(name) \
- { #name, REGS_ADDR(wr_##name) }
-#define RD_REGISTER(name) \
- { #name, REGS_ADDR(rd_##name) }
-
-#define WR_REGISTER_LEGACY(name) \
- { #name, REGS_ADDR(type.legacy.wr_##name) }
+#define REGS_ADDR(dw, name) \
+ ({ \
+ struct dw_edma_v0_regs __iomem *__regs = (dw)->chip->reg_base; \
+ \
+ (void __iomem *)&__regs->name; \
+ })
+
+#define REGS_CH_ADDR(dw, name, _dir, _ch) \
+ ({ \
+ struct dw_edma_v0_ch_regs __iomem *__ch_regs; \
+ \
+ if ((dw)->chip->mf == EDMA_MF_EDMA_LEGACY) \
+ __ch_regs = REGS_ADDR(dw, type.legacy.ch); \
+ else if (_dir == EDMA_DIR_READ) \
+ __ch_regs = REGS_ADDR(dw, type.unroll.ch[_ch].rd); \
+ else \
+ __ch_regs = REGS_ADDR(dw, type.unroll.ch[_ch].wr); \
+ \
+ (void __iomem *)&__ch_regs->name; \
+ })
+
+#define REGISTER(dw, name) \
+ { dw, #name, REGS_ADDR(dw, name) }
+
+#define CTX_REGISTER(dw, name, dir, ch) \
+ { dw, #name, REGS_CH_ADDR(dw, name, dir, ch), dir, ch }
+
+#define WR_REGISTER(dw, name) \
+ { dw, #name, REGS_ADDR(dw, wr_##name) }
+#define RD_REGISTER(dw, name) \
+ { dw, #name, REGS_ADDR(dw, rd_##name) }
+
+#define WR_REGISTER_LEGACY(dw, name) \
+ { dw, #name, REGS_ADDR(dw, type.legacy.wr_##name) }
#define RD_REGISTER_LEGACY(name) \
- { #name, REGS_ADDR(type.legacy.rd_##name) }
+ { dw, #name, REGS_ADDR(dw, type.legacy.rd_##name) }
-#define WR_REGISTER_UNROLL(name) \
- { #name, REGS_ADDR(type.unroll.wr_##name) }
-#define RD_REGISTER_UNROLL(name) \
- { #name, REGS_ADDR(type.unroll.rd_##name) }
+#define WR_REGISTER_UNROLL(dw, name) \
+ { dw, #name, REGS_ADDR(dw, type.unroll.wr_##name) }
+#define RD_REGISTER_UNROLL(dw, name) \
+ { dw, #name, REGS_ADDR(dw, type.unroll.rd_##name) }
#define WRITE_STR "write"
#define READ_STR "read"
#define CHANNEL_STR "channel"
#define REGISTERS_STR "registers"
-static struct dw_edma *dw;
-static struct dw_edma_v0_regs __iomem *regs;
-
-static struct {
- void __iomem *start;
- void __iomem *end;
-} lim[2][EDMA_V0_MAX_NR_CH];
-
-struct debugfs_entries {
+struct dw_edma_debugfs_entry {
+ struct dw_edma *dw;
const char *name;
- dma_addr_t *reg;
+ void __iomem *reg;
+ enum dw_edma_dir dir;
+ u16 ch;
};
static int dw_edma_debugfs_u32_get(void *data, u64 *val)
{
- void __iomem *reg = (void __force __iomem *)data;
+ struct dw_edma_debugfs_entry *entry = data;
+ struct dw_edma *dw = entry->dw;
+ void __iomem *reg = entry->reg;
+
if (dw->chip->mf == EDMA_MF_EDMA_LEGACY &&
- reg >= (void __iomem *)&regs->type.legacy.ch) {
- void __iomem *ptr = &regs->type.legacy.ch;
- u32 viewport_sel = 0;
+ reg >= REGS_ADDR(dw, type.legacy.ch)) {
unsigned long flags;
- u16 ch;
-
- for (ch = 0; ch < dw->wr_ch_cnt; ch++)
- if (lim[0][ch].start >= reg && reg < lim[0][ch].end) {
- ptr += (reg - lim[0][ch].start);
- goto legacy_sel_wr;
- }
-
- for (ch = 0; ch < dw->rd_ch_cnt; ch++)
- if (lim[1][ch].start >= reg && reg < lim[1][ch].end) {
- ptr += (reg - lim[1][ch].start);
- goto legacy_sel_rd;
- }
-
- return 0;
-legacy_sel_rd:
- viewport_sel = BIT(31);
-legacy_sel_wr:
- viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
+ u32 viewport_sel;
+
+ viewport_sel = entry->dir == EDMA_DIR_READ ? BIT(31) : 0;
+ viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, entry->ch);
raw_spin_lock_irqsave(&dw->lock, flags);
- writel(viewport_sel, &regs->type.legacy.viewport_sel);
- *val = readl(ptr);
+ writel(viewport_sel, REGS_ADDR(dw, type.legacy.viewport_sel));
+ *val = readl(reg);
raw_spin_unlock_irqrestore(&dw->lock, flags);
} else {
@@ -93,222 +96,197 @@ legacy_sel_wr:
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_edma_debugfs_u32_get, NULL, "0x%08llx\n");
-static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[],
- int nr_entries, struct dentry *dir)
+static void dw_edma_debugfs_create_x32(struct dw_edma *dw,
+ const struct dw_edma_debugfs_entry ini[],
+ int nr_entries, struct dentry *dent)
{
+ struct dw_edma_debugfs_entry *entries;
int i;
+ entries = devm_kcalloc(dw->chip->dev, nr_entries, sizeof(*entries),
+ GFP_KERNEL);
+ if (!entries)
+ return;
+
for (i = 0; i < nr_entries; i++) {
- if (!debugfs_create_file_unsafe(entries[i].name, 0444, dir,
- entries[i].reg, &fops_x32))
- break;
+ entries[i] = ini[i];
+
+ debugfs_create_file_unsafe(entries[i].name, 0444, dent,
+ &entries[i], &fops_x32);
}
}
-static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs,
- struct dentry *dir)
+static void dw_edma_debugfs_regs_ch(struct dw_edma *dw, enum dw_edma_dir dir,
+ u16 ch, struct dentry *dent)
{
- int nr_entries;
- const struct debugfs_entries debugfs_regs[] = {
- REGISTER(ch_control1),
- REGISTER(ch_control2),
- REGISTER(transfer_size),
- REGISTER(sar.lsb),
- REGISTER(sar.msb),
- REGISTER(dar.lsb),
- REGISTER(dar.msb),
- REGISTER(llp.lsb),
- REGISTER(llp.msb),
+ struct dw_edma_debugfs_entry debugfs_regs[] = {
+ CTX_REGISTER(dw, ch_control1, dir, ch),
+ CTX_REGISTER(dw, ch_control2, dir, ch),
+ CTX_REGISTER(dw, transfer_size, dir, ch),
+ CTX_REGISTER(dw, sar.lsb, dir, ch),
+ CTX_REGISTER(dw, sar.msb, dir, ch),
+ CTX_REGISTER(dw, dar.lsb, dir, ch),
+ CTX_REGISTER(dw, dar.msb, dir, ch),
+ CTX_REGISTER(dw, llp.lsb, dir, ch),
+ CTX_REGISTER(dw, llp.msb, dir, ch),
};
+ int nr_entries;
nr_entries = ARRAY_SIZE(debugfs_regs);
- dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, dir);
+ dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, dent);
}
-static void dw_edma_debugfs_regs_wr(struct dentry *dir)
+static noinline_for_stack void
+dw_edma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent)
{
- const struct debugfs_entries debugfs_regs[] = {
+ const struct dw_edma_debugfs_entry debugfs_regs[] = {
/* eDMA global registers */
- WR_REGISTER(engine_en),
- WR_REGISTER(doorbell),
- WR_REGISTER(ch_arb_weight.lsb),
- WR_REGISTER(ch_arb_weight.msb),
+ WR_REGISTER(dw, engine_en),
+ WR_REGISTER(dw, doorbell),
+ WR_REGISTER(dw, ch_arb_weight.lsb),
+ WR_REGISTER(dw, ch_arb_weight.msb),
/* eDMA interrupts registers */
- WR_REGISTER(int_status),
- WR_REGISTER(int_mask),
- WR_REGISTER(int_clear),
- WR_REGISTER(err_status),
- WR_REGISTER(done_imwr.lsb),
- WR_REGISTER(done_imwr.msb),
- WR_REGISTER(abort_imwr.lsb),
- WR_REGISTER(abort_imwr.msb),
- WR_REGISTER(ch01_imwr_data),
- WR_REGISTER(ch23_imwr_data),
- WR_REGISTER(ch45_imwr_data),
- WR_REGISTER(ch67_imwr_data),
- WR_REGISTER(linked_list_err_en),
+ WR_REGISTER(dw, int_status),
+ WR_REGISTER(dw, int_mask),
+ WR_REGISTER(dw, int_clear),
+ WR_REGISTER(dw, err_status),
+ WR_REGISTER(dw, done_imwr.lsb),
+ WR_REGISTER(dw, done_imwr.msb),
+ WR_REGISTER(dw, abort_imwr.lsb),
+ WR_REGISTER(dw, abort_imwr.msb),
+ WR_REGISTER(dw, ch01_imwr_data),
+ WR_REGISTER(dw, ch23_imwr_data),
+ WR_REGISTER(dw, ch45_imwr_data),
+ WR_REGISTER(dw, ch67_imwr_data),
+ WR_REGISTER(dw, linked_list_err_en),
};
- const struct debugfs_entries debugfs_unroll_regs[] = {
+ const struct dw_edma_debugfs_entry debugfs_unroll_regs[] = {
/* eDMA channel context grouping */
- WR_REGISTER_UNROLL(engine_chgroup),
- WR_REGISTER_UNROLL(engine_hshake_cnt.lsb),
- WR_REGISTER_UNROLL(engine_hshake_cnt.msb),
- WR_REGISTER_UNROLL(ch0_pwr_en),
- WR_REGISTER_UNROLL(ch1_pwr_en),
- WR_REGISTER_UNROLL(ch2_pwr_en),
- WR_REGISTER_UNROLL(ch3_pwr_en),
- WR_REGISTER_UNROLL(ch4_pwr_en),
- WR_REGISTER_UNROLL(ch5_pwr_en),
- WR_REGISTER_UNROLL(ch6_pwr_en),
- WR_REGISTER_UNROLL(ch7_pwr_en),
+ WR_REGISTER_UNROLL(dw, engine_chgroup),
+ WR_REGISTER_UNROLL(dw, engine_hshake_cnt.lsb),
+ WR_REGISTER_UNROLL(dw, engine_hshake_cnt.msb),
+ WR_REGISTER_UNROLL(dw, ch0_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch1_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch2_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch3_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch4_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch5_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch6_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch7_pwr_en),
};
- struct dentry *regs_dir, *ch_dir;
+ struct dentry *regs_dent, *ch_dent;
int nr_entries, i;
char name[16];
- regs_dir = debugfs_create_dir(WRITE_STR, dir);
- if (!regs_dir)
- return;
+ regs_dent = debugfs_create_dir(WRITE_STR, dent);
nr_entries = ARRAY_SIZE(debugfs_regs);
- dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
+ dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, regs_dent);
if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
- dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
- regs_dir);
+ dw_edma_debugfs_create_x32(dw, debugfs_unroll_regs, nr_entries,
+ regs_dent);
}
for (i = 0; i < dw->wr_ch_cnt; i++) {
snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);
- ch_dir = debugfs_create_dir(name, regs_dir);
- if (!ch_dir)
- return;
-
- dw_edma_debugfs_regs_ch(&regs->type.unroll.ch[i].wr, ch_dir);
+ ch_dent = debugfs_create_dir(name, regs_dent);
- lim[0][i].start = &regs->type.unroll.ch[i].wr;
- lim[0][i].end = &regs->type.unroll.ch[i].padding_1[0];
+ dw_edma_debugfs_regs_ch(dw, EDMA_DIR_WRITE, i, ch_dent);
}
}
-static void dw_edma_debugfs_regs_rd(struct dentry *dir)
+static noinline_for_stack void dw_edma_debugfs_regs_rd(struct dw_edma *dw,
+ struct dentry *dent)
{
- const struct debugfs_entries debugfs_regs[] = {
+ const struct dw_edma_debugfs_entry debugfs_regs[] = {
/* eDMA global registers */
- RD_REGISTER(engine_en),
- RD_REGISTER(doorbell),
- RD_REGISTER(ch_arb_weight.lsb),
- RD_REGISTER(ch_arb_weight.msb),
+ RD_REGISTER(dw, engine_en),
+ RD_REGISTER(dw, doorbell),
+ RD_REGISTER(dw, ch_arb_weight.lsb),
+ RD_REGISTER(dw, ch_arb_weight.msb),
/* eDMA interrupts registers */
- RD_REGISTER(int_status),
- RD_REGISTER(int_mask),
- RD_REGISTER(int_clear),
- RD_REGISTER(err_status.lsb),
- RD_REGISTER(err_status.msb),
- RD_REGISTER(linked_list_err_en),
- RD_REGISTER(done_imwr.lsb),
- RD_REGISTER(done_imwr.msb),
- RD_REGISTER(abort_imwr.lsb),
- RD_REGISTER(abort_imwr.msb),
- RD_REGISTER(ch01_imwr_data),
- RD_REGISTER(ch23_imwr_data),
- RD_REGISTER(ch45_imwr_data),
- RD_REGISTER(ch67_imwr_data),
+ RD_REGISTER(dw, int_status),
+ RD_REGISTER(dw, int_mask),
+ RD_REGISTER(dw, int_clear),
+ RD_REGISTER(dw, err_status.lsb),
+ RD_REGISTER(dw, err_status.msb),
+ RD_REGISTER(dw, linked_list_err_en),
+ RD_REGISTER(dw, done_imwr.lsb),
+ RD_REGISTER(dw, done_imwr.msb),
+ RD_REGISTER(dw, abort_imwr.lsb),
+ RD_REGISTER(dw, abort_imwr.msb),
+ RD_REGISTER(dw, ch01_imwr_data),
+ RD_REGISTER(dw, ch23_imwr_data),
+ RD_REGISTER(dw, ch45_imwr_data),
+ RD_REGISTER(dw, ch67_imwr_data),
};
- const struct debugfs_entries debugfs_unroll_regs[] = {
+ const struct dw_edma_debugfs_entry debugfs_unroll_regs[] = {
/* eDMA channel context grouping */
- RD_REGISTER_UNROLL(engine_chgroup),
- RD_REGISTER_UNROLL(engine_hshake_cnt.lsb),
- RD_REGISTER_UNROLL(engine_hshake_cnt.msb),
- RD_REGISTER_UNROLL(ch0_pwr_en),
- RD_REGISTER_UNROLL(ch1_pwr_en),
- RD_REGISTER_UNROLL(ch2_pwr_en),
- RD_REGISTER_UNROLL(ch3_pwr_en),
- RD_REGISTER_UNROLL(ch4_pwr_en),
- RD_REGISTER_UNROLL(ch5_pwr_en),
- RD_REGISTER_UNROLL(ch6_pwr_en),
- RD_REGISTER_UNROLL(ch7_pwr_en),
+ RD_REGISTER_UNROLL(dw, engine_chgroup),
+ RD_REGISTER_UNROLL(dw, engine_hshake_cnt.lsb),
+ RD_REGISTER_UNROLL(dw, engine_hshake_cnt.msb),
+ RD_REGISTER_UNROLL(dw, ch0_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch1_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch2_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch3_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch4_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch5_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch6_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch7_pwr_en),
};
- struct dentry *regs_dir, *ch_dir;
+ struct dentry *regs_dent, *ch_dent;
int nr_entries, i;
char name[16];
- regs_dir = debugfs_create_dir(READ_STR, dir);
- if (!regs_dir)
- return;
+ regs_dent = debugfs_create_dir(READ_STR, dent);
nr_entries = ARRAY_SIZE(debugfs_regs);
- dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
+ dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, regs_dent);
if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
- dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
- regs_dir);
+ dw_edma_debugfs_create_x32(dw, debugfs_unroll_regs, nr_entries,
+ regs_dent);
}
for (i = 0; i < dw->rd_ch_cnt; i++) {
snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);
- ch_dir = debugfs_create_dir(name, regs_dir);
- if (!ch_dir)
- return;
-
- dw_edma_debugfs_regs_ch(&regs->type.unroll.ch[i].rd, ch_dir);
+ ch_dent = debugfs_create_dir(name, regs_dent);
- lim[1][i].start = &regs->type.unroll.ch[i].rd;
- lim[1][i].end = &regs->type.unroll.ch[i].padding_2[0];
+ dw_edma_debugfs_regs_ch(dw, EDMA_DIR_READ, i, ch_dent);
}
}
-static void dw_edma_debugfs_regs(void)
+static void dw_edma_debugfs_regs(struct dw_edma *dw)
{
- const struct debugfs_entries debugfs_regs[] = {
- REGISTER(ctrl_data_arb_prior),
- REGISTER(ctrl),
+ const struct dw_edma_debugfs_entry debugfs_regs[] = {
+ REGISTER(dw, ctrl_data_arb_prior),
+ REGISTER(dw, ctrl),
};
- struct dentry *regs_dir;
+ struct dentry *regs_dent;
int nr_entries;
- regs_dir = debugfs_create_dir(REGISTERS_STR, dw->debugfs);
- if (!regs_dir)
- return;
+ regs_dent = debugfs_create_dir(REGISTERS_STR, dw->dma.dbg_dev_root);
nr_entries = ARRAY_SIZE(debugfs_regs);
- dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
+ dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, regs_dent);
- dw_edma_debugfs_regs_wr(regs_dir);
- dw_edma_debugfs_regs_rd(regs_dir);
+ dw_edma_debugfs_regs_wr(dw, regs_dent);
+ dw_edma_debugfs_regs_rd(dw, regs_dent);
}
-void dw_edma_v0_debugfs_on(struct dw_edma *_dw)
+void dw_edma_v0_debugfs_on(struct dw_edma *dw)
{
- dw = _dw;
- if (!dw)
- return;
-
- regs = dw->chip->reg_base;
- if (!regs)
- return;
-
- dw->debugfs = debugfs_create_dir(dw->name, NULL);
- if (!dw->debugfs)
+ if (!debugfs_initialized())
return;
- debugfs_create_u32("mf", 0444, dw->debugfs, &dw->chip->mf);
- debugfs_create_u16("wr_ch_cnt", 0444, dw->debugfs, &dw->wr_ch_cnt);
- debugfs_create_u16("rd_ch_cnt", 0444, dw->debugfs, &dw->rd_ch_cnt);
-
- dw_edma_debugfs_regs();
-}
-
-void dw_edma_v0_debugfs_off(struct dw_edma *_dw)
-{
- dw = _dw;
- if (!dw)
- return;
+ debugfs_create_u32("mf", 0444, dw->dma.dbg_dev_root, &dw->chip->mf);
+ debugfs_create_u16("wr_ch_cnt", 0444, dw->dma.dbg_dev_root, &dw->wr_ch_cnt);
+ debugfs_create_u16("rd_ch_cnt", 0444, dw->dma.dbg_dev_root, &dw->rd_ch_cnt);
- debugfs_remove_recursive(dw->debugfs);
- dw->debugfs = NULL;
+ dw_edma_debugfs_regs(dw);
}
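
Note: the debugfs rework above replaces the file-scoped dw/regs globals and
the lim[] viewport lookup table with per-file entry descriptors, so several
eDMA instances can expose registers at once; directories now hang off
dw->dma.dbg_dev_root, which the dmaengine core tears down automatically
(hence the deleted debugfs_off paths). The underlying pattern, as a generic
sketch:

#include <linux/debugfs.h>
#include <linux/io.h>

struct sketch_entry {
	void __iomem *reg;	/* per-file state, no globals */
};

static int sketch_u32_get(void *data, u64 *val)
{
	struct sketch_entry *e = data;

	*val = readl(e->reg);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(sketch_fops, sketch_u32_get, NULL, "0x%08llx\n");

/* Usage: the entry lives as long as the device (e.g. devm_kcalloc())
 * and is handed to the file instead of a raw register pointer:
 *
 *	debugfs_create_file_unsafe("reg", 0444, parent, entry, &sketch_fops);
 */
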
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
index 3391b86edf5a..fb3342d97d6d 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
@@ -13,15 +13,10 @@
#ifdef CONFIG_DEBUG_FS
void dw_edma_v0_debugfs_on(struct dw_edma *dw);
-void dw_edma_v0_debugfs_off(struct dw_edma *dw);
#else
static inline void dw_edma_v0_debugfs_on(struct dw_edma *dw)
{
}
-
-static inline void dw_edma_v0_debugfs_off(struct dw_edma *dw)
-{
-}
#endif /* CONFIG_DEBUG_FS */
#endif /* _DW_EDMA_V0_DEBUG_FS_H */
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index e13e92609943..674bfefca088 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -201,7 +201,7 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
if (rc < 0)
return rc;
- vma->vm_flags |= VM_DONTCOPY;
+ vm_flags_set(vma, VM_DONTCOPY);
pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
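
Note: the one-liner in idxd_cdev_mmap() tracks a tree-wide mm API change:
vma->vm_flags is no longer written directly, and modifications go through
wrappers that take the VMA. The common conversions, sketched:

#include <linux/mm.h>

static void sketch_mmap_flags(struct vm_area_struct *vma)
{
	vm_flags_set(vma, VM_DONTCOPY);		/* was: vma->vm_flags |= ...  */
	vm_flags_clear(vma, VM_MAYWRITE);	/* was: vma->vm_flags &= ~... */
}
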
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 751675391e8c..5f321f3b4242 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -1172,8 +1172,19 @@ static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
spin_unlock(&ie->list_lock);
list_for_each_entry_safe(desc, itr, &flist, list) {
+ struct dma_async_tx_descriptor *tx;
+
list_del(&desc->list);
ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
+ /*
+ * wq is being disabled. Any remaining descriptors are
+ * likely to be stuck and can be dropped. callback could
+ * point to code that is no longer accessible, for example
+ * if dmatest module has been unloaded.
+ */
+ tx = &desc->txd;
+ tx->callback = NULL;
+ tx->callback_result = NULL;
idxd_dma_complete_txd(desc, ctype, true);
}
}
@@ -1390,8 +1401,7 @@ err_res_alloc:
err_irq:
idxd_wq_unmap_portal(wq);
err_map_portal:
- rc = idxd_wq_disable(wq, false);
- if (rc < 0)
+ if (idxd_wq_disable(wq, false))
dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq)));
err:
return rc;
@@ -1408,11 +1418,11 @@ void drv_disable_wq(struct idxd_wq *wq)
dev_warn(dev, "Clients has claim on wq %d: %d\n",
wq->id, idxd_wq_refcount(wq));
- idxd_wq_free_resources(wq);
idxd_wq_unmap_portal(wq);
idxd_wq_drain(wq);
idxd_wq_free_irq(wq);
idxd_wq_reset(wq);
+ idxd_wq_free_resources(wq);
percpu_ref_exit(&wq->wq_active);
wq->type = IDXD_WQT_NONE;
wq->client_count = 0;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 0005ab059a4f..7a912f90c2a9 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1533,10 +1533,12 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
sdma_config_ownership(sdmac, false, true, false);
if (sdma_load_context(sdmac))
- goto err_desc_out;
+ goto err_bd_out;
return desc;
+err_bd_out:
+ sdma_free_bd(desc);
err_desc_out:
kfree(desc);
err_out:
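
Note: the imx-sdma fix is a classic unwind-ladder repair: when
sdma_load_context() failed after the buffer descriptors had been allocated,
the old jump to err_desc_out skipped sdma_free_bd() and leaked them. The
idiom, condensed (sdma_alloc_bd() is the upstream helper assumed to run
between the two labels):

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		goto err_out;

	if (sdma_alloc_bd(desc))	/* allocates the BD ring */
		goto err_desc_out;

	if (sdma_load_context(sdmac))
		goto err_bd_out;	/* must undo the BD allocation too */

	return desc;

err_bd_out:
	sdma_free_bd(desc);
err_desc_out:
	kfree(desc);
err_out:
	return NULL;
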
diff --git a/drivers/dma/lgm/lgm-dma.c b/drivers/dma/lgm/lgm-dma.c
index 9b9184f964be..1709d159af7e 100644
--- a/drivers/dma/lgm/lgm-dma.c
+++ b/drivers/dma/lgm/lgm-dma.c
@@ -914,7 +914,7 @@ static void ldma_dev_init(struct ldma_dev *d)
}
}
-static int ldma_cfg_init(struct ldma_dev *d)
+static int ldma_parse_dt(struct ldma_dev *d)
{
struct fwnode_handle *fwnode = dev_fwnode(d->dev);
struct ldma_port *p;
@@ -1661,10 +1661,6 @@ static int intel_ldma_probe(struct platform_device *pdev)
p->ldev = d;
}
- ret = ldma_cfg_init(d);
- if (ret)
- return ret;
-
dma_dev->dev = &pdev->dev;
ch_mask = (unsigned long)d->channels_mask;
@@ -1675,6 +1671,10 @@ static int intel_ldma_probe(struct platform_device *pdev)
ldma_dma_init_v3X(j, d);
}
+ ret = ldma_parse_dt(d);
+ if (ret)
+ return ret;
+
dma_dev->device_alloc_chan_resources = ldma_alloc_chan_resources;
dma_dev->device_free_chan_resources = ldma_free_chan_resources;
dma_dev->device_terminate_all = ldma_terminate_all;
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index e956702932aa..d49fa6bc6775 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -15,7 +15,7 @@
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
-#include <linux/platform_data/dma-mmp_tdma.h>
+#include <linux/genalloc.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
@@ -668,10 +668,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&tdev->device.channels);
- if (pdev->dev.of_node)
- pool = of_gen_pool_get(pdev->dev.of_node, "asram", 0);
- else
- pool = sram_get_gpool("asram");
+ pool = of_gen_pool_get(pdev->dev.of_node, "asram", 0);
if (!pool) {
dev_err(&pdev->dev, "asram pool not available\n");
return -ENOMEM;
diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c
index 377da23012ac..a2bf13ff18b6 100644
--- a/drivers/dma/ptdma/ptdma-dev.c
+++ b/drivers/dma/ptdma/ptdma-dev.c
@@ -71,12 +71,13 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd
bool soc = FIELD_GET(DWORD0_SOC, desc->dw0);
u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx];
u32 tail;
+ unsigned long flags;
if (soc) {
desc->dw0 |= FIELD_PREP(DWORD0_IOC, desc->dw0);
desc->dw0 &= ~DWORD0_SOC;
}
- mutex_lock(&cmd_q->q_mutex);
+ spin_lock_irqsave(&cmd_q->q_lock, flags);
/* Copy 32-byte command descriptor to hw queue. */
memcpy(q_desc, desc, 32);
@@ -91,7 +92,7 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd
/* Turn the queue back on using our cached control register */
pt_start_queue(cmd_q);
- mutex_unlock(&cmd_q->q_mutex);
+ spin_unlock_irqrestore(&cmd_q->q_lock, flags);
return 0;
}
@@ -199,7 +200,7 @@ int pt_core_init(struct pt_device *pt)
cmd_q->pt = pt;
cmd_q->dma_pool = dma_pool;
- mutex_init(&cmd_q->q_mutex);
+ spin_lock_init(&cmd_q->q_lock);
/* Page alignment satisfies our needs for N <= 128 */
cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
diff --git a/drivers/dma/ptdma/ptdma.h b/drivers/dma/ptdma/ptdma.h
index d093c43b7d13..21b4bf895200 100644
--- a/drivers/dma/ptdma/ptdma.h
+++ b/drivers/dma/ptdma/ptdma.h
@@ -196,7 +196,7 @@ struct pt_cmd_queue {
struct ptdma_desc *qbase;
/* Aligned queue start address (per requirement) */
- struct mutex q_mutex ____cacheline_aligned;
+ spinlock_t q_lock ____cacheline_aligned;
unsigned int qidx;
unsigned int qsize;
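
Note: the ptdma pair of hunks converts the command-queue mutex to a spinlock.
A mutex may sleep, so it cannot be taken from atomic context; the irqsave
spinlock variant can, which matters once descriptor submission is reachable
from non-sleeping paths. The resulting pattern, sketched against the struct
above:

#include <linux/spinlock.h>

static void sketch_submit(struct pt_cmd_queue *cmd_q, const void *desc)
{
	unsigned long flags;

	/* Safe in atomic context, unlike mutex_lock(): */
	spin_lock_irqsave(&cmd_q->q_lock, flags);
	/* ... copy the 32-byte descriptor, bump qidx, ring doorbell ... */
	spin_unlock_irqrestore(&cmd_q->q_lock, flags);
}
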
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 061add832295..59a36cbf9b5f 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -1756,6 +1756,7 @@ static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE);
if (spi->cmd == SPI_RX) {
tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB);
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
} else if (spi->cmd == SPI_TX) {
tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
} else { /* SPI_DUPLEX */
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
deleted file mode 100644
index a09eeb545f7d..000000000000
--- a/drivers/dma/s3c24xx-dma.c
+++ /dev/null
@@ -1,1428 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * S3C24XX DMA handling
- *
- * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
- *
- * based on amba-pl08x.c
- *
- * Copyright (c) 2006 ARM Ltd.
- * Copyright (c) 2010 ST-Ericsson SA
- *
- * Author: Peter Pearse <peter.pearse@arm.com>
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- *
- * The DMA controllers in S3C24XX SoCs have a varying number of DMA signals
- * that can be routed to any of the 4 to 8 hardware-channels.
- *
- * Therefore on these DMA controllers the number of channels
- * and the number of incoming DMA signals are two totally different things.
- * It is usually not possible to theoretically handle all physical signals,
- * so a multiplexing scheme with possible denial of use is necessary.
- *
- * Open items:
- * - bursts
- */
-
-#include <linux/platform_device.h>
-#include <linux/types.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/slab.h>
-#include <linux/platform_data/dma-s3c24xx.h>
-
-#include "dmaengine.h"
-#include "virt-dma.h"
-
-#define MAX_DMA_CHANNELS 8
-
-#define S3C24XX_DISRC 0x00
-#define S3C24XX_DISRCC 0x04
-#define S3C24XX_DISRCC_INC_INCREMENT 0
-#define S3C24XX_DISRCC_INC_FIXED BIT(0)
-#define S3C24XX_DISRCC_LOC_AHB 0
-#define S3C24XX_DISRCC_LOC_APB BIT(1)
-
-#define S3C24XX_DIDST 0x08
-#define S3C24XX_DIDSTC 0x0c
-#define S3C24XX_DIDSTC_INC_INCREMENT 0
-#define S3C24XX_DIDSTC_INC_FIXED BIT(0)
-#define S3C24XX_DIDSTC_LOC_AHB 0
-#define S3C24XX_DIDSTC_LOC_APB BIT(1)
-#define S3C24XX_DIDSTC_INT_TC0 0
-#define S3C24XX_DIDSTC_INT_RELOAD BIT(2)
-
-#define S3C24XX_DCON 0x10
-
-#define S3C24XX_DCON_TC_MASK 0xfffff
-#define S3C24XX_DCON_DSZ_BYTE (0 << 20)
-#define S3C24XX_DCON_DSZ_HALFWORD (1 << 20)
-#define S3C24XX_DCON_DSZ_WORD (2 << 20)
-#define S3C24XX_DCON_DSZ_MASK (3 << 20)
-#define S3C24XX_DCON_DSZ_SHIFT 20
-#define S3C24XX_DCON_AUTORELOAD 0
-#define S3C24XX_DCON_NORELOAD BIT(22)
-#define S3C24XX_DCON_HWTRIG BIT(23)
-#define S3C24XX_DCON_HWSRC_SHIFT 24
-#define S3C24XX_DCON_SERV_SINGLE 0
-#define S3C24XX_DCON_SERV_WHOLE BIT(27)
-#define S3C24XX_DCON_TSZ_UNIT 0
-#define S3C24XX_DCON_TSZ_BURST4 BIT(28)
-#define S3C24XX_DCON_INT BIT(29)
-#define S3C24XX_DCON_SYNC_PCLK 0
-#define S3C24XX_DCON_SYNC_HCLK BIT(30)
-#define S3C24XX_DCON_DEMAND 0
-#define S3C24XX_DCON_HANDSHAKE BIT(31)
-
-#define S3C24XX_DSTAT 0x14
-#define S3C24XX_DSTAT_STAT_BUSY BIT(20)
-#define S3C24XX_DSTAT_CURRTC_MASK 0xfffff
-
-#define S3C24XX_DMASKTRIG 0x20
-#define S3C24XX_DMASKTRIG_SWTRIG BIT(0)
-#define S3C24XX_DMASKTRIG_ON BIT(1)
-#define S3C24XX_DMASKTRIG_STOP BIT(2)
-
-#define S3C24XX_DMAREQSEL 0x24
-#define S3C24XX_DMAREQSEL_HW BIT(0)
-
-/*
- * S3C2410, S3C2440 and S3C2442 SoCs cannot select any physical channel
- * for a DMA source. Instead only specific channels are valid.
- * All of these SoCs have 4 physical channels and the number of request
- * source bits is 3. Additionally we also need 1 bit to mark the channel
- * as valid.
- * Therefore we separate the chansel element of the channel data into 4
- * parts of 4 bits each, to hold the information if the channel is valid
- * and the hw request source to use.
- *
- * Example:
- * SDI is valid on channels 0, 2 and 3 - with varying hw request sources.
- * For it the chansel field would look like
- *
- * ((BIT(3) | 1) << 3 * 4) | // channel 3, with request source 1
- * ((BIT(3) | 2) << 2 * 4) | // channel 2, with request source 2
- * ((BIT(3) | 2) << 0 * 4) // channel 0, with request source 2
- */
-#define S3C24XX_CHANSEL_WIDTH 4
-#define S3C24XX_CHANSEL_VALID BIT(3)
-#define S3C24XX_CHANSEL_REQ_MASK 7
-
-/*
- * struct soc_data - vendor-specific config parameters for individual SoCs
- * @stride: spacing between the registers of each channel
- * @has_reqsel: does the controller use the newer requestselection mechanism
- * @has_clocks: are controllable dma-clocks present
- */
-struct soc_data {
- int stride;
- bool has_reqsel;
- bool has_clocks;
-};
-
-/*
- * enum s3c24xx_dma_chan_state - holds the virtual channel states
- * @S3C24XX_DMA_CHAN_IDLE: the channel is idle
- * @S3C24XX_DMA_CHAN_RUNNING: the channel has allocated a physical transport
- * channel and is running a transfer on it
- * @S3C24XX_DMA_CHAN_WAITING: the channel is waiting for a physical transport
- * channel to become available (only pertains to memcpy channels)
- */
-enum s3c24xx_dma_chan_state {
- S3C24XX_DMA_CHAN_IDLE,
- S3C24XX_DMA_CHAN_RUNNING,
- S3C24XX_DMA_CHAN_WAITING,
-};
-
-/*
- * struct s3c24xx_sg - structure containing data per sg
- * @src_addr: src address of sg
- * @dst_addr: dst address of sg
- * @len: transfer len in bytes
- * @node: node for txd's dsg_list
- */
-struct s3c24xx_sg {
- dma_addr_t src_addr;
- dma_addr_t dst_addr;
- size_t len;
- struct list_head node;
-};
-
-/*
- * struct s3c24xx_txd - wrapper for struct dma_async_tx_descriptor
- * @vd: virtual DMA descriptor
- * @dsg_list: list of children sg's
- * @at: sg currently being transfered
- * @width: transfer width
- * @disrcc: value for source control register
- * @didstc: value for destination control register
- * @dcon: base value for dcon register
- * @cyclic: indicate cyclic transfer
- */
-struct s3c24xx_txd {
- struct virt_dma_desc vd;
- struct list_head dsg_list;
- struct list_head *at;
- u8 width;
- u32 disrcc;
- u32 didstc;
- u32 dcon;
- bool cyclic;
-};
-
-struct s3c24xx_dma_chan;
-
-/*
- * struct s3c24xx_dma_phy - holder for the physical channels
- * @id: physical index to this channel
- * @valid: does the channel have all required elements
- * @base: virtual memory base (remapped) for the this channel
- * @irq: interrupt for this channel
- * @clk: clock for this channel
- * @lock: a lock to use when altering an instance of this struct
- * @serving: virtual channel currently being served by this physicalchannel
- * @host: a pointer to the host (internal use)
- */
-struct s3c24xx_dma_phy {
- unsigned int id;
- bool valid;
- void __iomem *base;
- int irq;
- struct clk *clk;
- spinlock_t lock;
- struct s3c24xx_dma_chan *serving;
- struct s3c24xx_dma_engine *host;
-};
-
-/*
- * struct s3c24xx_dma_chan - this structure wraps a DMA ENGINE channel
- * @id: the id of the channel
- * @name: name of the channel
- * @vc: wrapped virtual channel
- * @phy: the physical channel utilized by this channel, if there is one
- * @runtime_addr: address for RX/TX according to the runtime config
- * @at: active transaction on this channel
- * @lock: a lock for this channel data
- * @host: a pointer to the host (internal use)
- * @state: whether the channel is idle, running etc
- * @slave: whether this channel is a device (slave) or for memcpy
- */
-struct s3c24xx_dma_chan {
- int id;
- const char *name;
- struct virt_dma_chan vc;
- struct s3c24xx_dma_phy *phy;
- struct dma_slave_config cfg;
- struct s3c24xx_txd *at;
- struct s3c24xx_dma_engine *host;
- enum s3c24xx_dma_chan_state state;
- bool slave;
-};
-
-/*
- * struct s3c24xx_dma_engine - the local state holder for the S3C24XX
- * @pdev: the corresponding platform device
- * @pdata: platform data passed in from the platform/machine
- * @base: virtual memory base (remapped)
- * @slave: slave engine for this instance
- * @memcpy: memcpy engine for this instance
- * @phy_chans: array of data for the physical channels
- */
-struct s3c24xx_dma_engine {
- struct platform_device *pdev;
- const struct s3c24xx_dma_platdata *pdata;
- struct soc_data *sdata;
- void __iomem *base;
- struct dma_device slave;
- struct dma_device memcpy;
- struct s3c24xx_dma_phy *phy_chans;
-};
-
-/*
- * Physical channel handling
- */
-
-/*
- * Check whether a certain channel is busy or not.
- */
-static int s3c24xx_dma_phy_busy(struct s3c24xx_dma_phy *phy)
-{
- unsigned int val = readl(phy->base + S3C24XX_DSTAT);
- return val & S3C24XX_DSTAT_STAT_BUSY;
-}
-
-static bool s3c24xx_dma_phy_valid(struct s3c24xx_dma_chan *s3cchan,
- struct s3c24xx_dma_phy *phy)
-{
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
- struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
- int phyvalid;
-
- /* every phy is valid for memcopy channels */
- if (!s3cchan->slave)
- return true;
-
- /* On newer variants all phys can be used for all virtual channels */
- if (s3cdma->sdata->has_reqsel)
- return true;
-
- phyvalid = (cdata->chansel >> (phy->id * S3C24XX_CHANSEL_WIDTH));
-	return !!(phyvalid & S3C24XX_CHANSEL_VALID);
-}
-
-/*
- * Allocate a physical channel for a virtual channel
- *
- * Try to locate a physical channel to be used for this transfer. If all
- * are taken, return NULL and the requester will have to cope by using
- * some fallback PIO mode or retrying later.
- */
-static
-struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan)
-{
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- struct s3c24xx_dma_phy *phy = NULL;
- unsigned long flags;
- int i;
- int ret;
-
- for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
- phy = &s3cdma->phy_chans[i];
-
- if (!phy->valid)
- continue;
-
- if (!s3c24xx_dma_phy_valid(s3cchan, phy))
- continue;
-
- spin_lock_irqsave(&phy->lock, flags);
-
- if (!phy->serving) {
- phy->serving = s3cchan;
- spin_unlock_irqrestore(&phy->lock, flags);
- break;
- }
-
- spin_unlock_irqrestore(&phy->lock, flags);
- }
-
- /* No physical channel available, cope with it */
- if (i == s3cdma->pdata->num_phy_channels) {
- dev_warn(&s3cdma->pdev->dev, "no phy channel available\n");
- return NULL;
- }
-
- /* start the phy clock */
- if (s3cdma->sdata->has_clocks) {
- ret = clk_enable(phy->clk);
- if (ret) {
- dev_err(&s3cdma->pdev->dev, "could not enable clock for channel %d, err %d\n",
- phy->id, ret);
- phy->serving = NULL;
- return NULL;
- }
- }
-
- return phy;
-}
-
-/*
- * Mark the physical channel as free.
- *
- * This drops the link between the physical and virtual channel.
- */
-static inline void s3c24xx_dma_put_phy(struct s3c24xx_dma_phy *phy)
-{
- struct s3c24xx_dma_engine *s3cdma = phy->host;
-
- if (s3cdma->sdata->has_clocks)
- clk_disable(phy->clk);
-
- phy->serving = NULL;
-}
-
-/*
- * Stops the channel by writing the stop bit.
- * This should not be used for an on-going transfer, but as a method of
- * shutting down a channel (e.g., when it's no longer used) or terminating a
- * transfer.
- */
-static void s3c24xx_dma_terminate_phy(struct s3c24xx_dma_phy *phy)
-{
- writel(S3C24XX_DMASKTRIG_STOP, phy->base + S3C24XX_DMASKTRIG);
-}
-
-/*
- * Virtual channel handling
- */
-
-static inline
-struct s3c24xx_dma_chan *to_s3c24xx_dma_chan(struct dma_chan *chan)
-{
- return container_of(chan, struct s3c24xx_dma_chan, vc.chan);
-}
-
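-/*
- * Return the number of bytes outstanding in the currently active
- * scatterlist entry, derived from the hardware transfer counter.
- */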
-static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan)
-{
- struct s3c24xx_dma_phy *phy = s3cchan->phy;
- struct s3c24xx_txd *txd = s3cchan->at;
- u32 tc = readl(phy->base + S3C24XX_DSTAT) & S3C24XX_DSTAT_CURRTC_MASK;
-
- return tc * txd->width;
-}
-
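-/*
- * Validate and store the runtime slave configuration; memcpy channels
- * do not accept one.
- */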
-static int s3c24xx_dma_set_runtime_config(struct dma_chan *chan,
- struct dma_slave_config *config)
-{
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
- unsigned long flags;
- int ret = 0;
-
- /* Reject definitely invalid configurations */
- if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
- config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
- return -EINVAL;
-
- spin_lock_irqsave(&s3cchan->vc.lock, flags);
-
- if (!s3cchan->slave) {
- ret = -EINVAL;
- goto out;
- }
-
- s3cchan->cfg = *config;
-
-out:
- spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
- return ret;
-}
-
-/*
- * Transfer handling
- */
-
-static inline
-struct s3c24xx_txd *to_s3c24xx_txd(struct dma_async_tx_descriptor *tx)
-{
- return container_of(tx, struct s3c24xx_txd, vd.tx);
-}
-
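-/*
- * Allocate a new descriptor; interrupt-on-completion and no-reload are
- * the default dcon flags for every transfer.
- */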
-static struct s3c24xx_txd *s3c24xx_dma_get_txd(void)
-{
- struct s3c24xx_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
-
- if (txd) {
- INIT_LIST_HEAD(&txd->dsg_list);
- txd->dcon = S3C24XX_DCON_INT | S3C24XX_DCON_NORELOAD;
- }
-
- return txd;
-}
-
-static void s3c24xx_dma_free_txd(struct s3c24xx_txd *txd)
-{
- struct s3c24xx_sg *dsg, *_dsg;
-
- list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
- list_del(&dsg->node);
- kfree(dsg);
- }
-
- kfree(txd);
-}
-
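-/*
- * Program the channel registers for the scatterlist entry at txd->at
- * and (re)trigger the transfer.
- */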
-static void s3c24xx_dma_start_next_sg(struct s3c24xx_dma_chan *s3cchan,
- struct s3c24xx_txd *txd)
-{
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- struct s3c24xx_dma_phy *phy = s3cchan->phy;
- const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
- struct s3c24xx_sg *dsg = list_entry(txd->at, struct s3c24xx_sg, node);
- u32 dcon = txd->dcon;
- u32 val;
-
- /* transfer-size and -count from len and width */
- switch (txd->width) {
- case 1:
- dcon |= S3C24XX_DCON_DSZ_BYTE | dsg->len;
- break;
- case 2:
- dcon |= S3C24XX_DCON_DSZ_HALFWORD | (dsg->len / 2);
- break;
- case 4:
- dcon |= S3C24XX_DCON_DSZ_WORD | (dsg->len / 4);
- break;
- }
-
- if (s3cchan->slave) {
- struct s3c24xx_dma_channel *cdata =
- &pdata->channels[s3cchan->id];
-
- if (s3cdma->sdata->has_reqsel) {
- writel_relaxed((cdata->chansel << 1) |
- S3C24XX_DMAREQSEL_HW,
- phy->base + S3C24XX_DMAREQSEL);
- } else {
- int csel = cdata->chansel >> (phy->id *
- S3C24XX_CHANSEL_WIDTH);
-
- csel &= S3C24XX_CHANSEL_REQ_MASK;
- dcon |= csel << S3C24XX_DCON_HWSRC_SHIFT;
- dcon |= S3C24XX_DCON_HWTRIG;
- }
- } else {
- if (s3cdma->sdata->has_reqsel)
- writel_relaxed(0, phy->base + S3C24XX_DMAREQSEL);
- }
-
- writel_relaxed(dsg->src_addr, phy->base + S3C24XX_DISRC);
- writel_relaxed(txd->disrcc, phy->base + S3C24XX_DISRCC);
- writel_relaxed(dsg->dst_addr, phy->base + S3C24XX_DIDST);
- writel_relaxed(txd->didstc, phy->base + S3C24XX_DIDSTC);
- writel_relaxed(dcon, phy->base + S3C24XX_DCON);
-
- val = readl_relaxed(phy->base + S3C24XX_DMASKTRIG);
- val &= ~S3C24XX_DMASKTRIG_STOP;
- val |= S3C24XX_DMASKTRIG_ON;
-
- /* trigger the dma operation for memcpy transfers */
- if (!s3cchan->slave)
- val |= S3C24XX_DMASKTRIG_SWTRIG;
-
- writel(val, phy->base + S3C24XX_DMASKTRIG);
-}
-
-/*
- * Set the initial DMA register values and start first sg.
- */
-static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan)
-{
- struct s3c24xx_dma_phy *phy = s3cchan->phy;
- struct virt_dma_desc *vd = vchan_next_desc(&s3cchan->vc);
- struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
-
- list_del(&txd->vd.node);
-
- s3cchan->at = txd;
-
- /* Wait for channel inactive */
- while (s3c24xx_dma_phy_busy(phy))
- cpu_relax();
-
- /* point to the first element of the sg list */
- txd->at = txd->dsg_list.next;
- s3c24xx_dma_start_next_sg(s3cchan, txd);
-}
-
-/*
- * Try to allocate a physical channel. When successful, assign it to
- * this virtual channel, and initiate the next descriptor. The
- * virtual channel lock must be held at this point.
- */
-static void s3c24xx_dma_phy_alloc_and_start(struct s3c24xx_dma_chan *s3cchan)
-{
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- struct s3c24xx_dma_phy *phy;
-
- phy = s3c24xx_dma_get_phy(s3cchan);
- if (!phy) {
- dev_dbg(&s3cdma->pdev->dev, "no physical channel available for xfer on %s\n",
- s3cchan->name);
- s3cchan->state = S3C24XX_DMA_CHAN_WAITING;
- return;
- }
-
- dev_dbg(&s3cdma->pdev->dev, "allocated physical channel %d for xfer on %s\n",
- phy->id, s3cchan->name);
-
- s3cchan->phy = phy;
- s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;
-
- s3c24xx_dma_start_next_txd(s3cchan);
-}
-
-static void s3c24xx_dma_phy_reassign_start(struct s3c24xx_dma_phy *phy,
- struct s3c24xx_dma_chan *s3cchan)
-{
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
-
- dev_dbg(&s3cdma->pdev->dev, "reassigned physical channel %d for xfer on %s\n",
- phy->id, s3cchan->name);
-
- /*
- * We do this without taking the lock; we're really only concerned
- * about whether this pointer is NULL or not, and we're guaranteed
- * that this will only be called when it _already_ is non-NULL.
- */
- phy->serving = s3cchan;
- s3cchan->phy = phy;
- s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;
- s3c24xx_dma_start_next_txd(s3cchan);
-}
-
-/*
- * Free a physical DMA channel, potentially reallocating it to another
- * virtual channel if we have any pending.
- */
-static void s3c24xx_dma_phy_free(struct s3c24xx_dma_chan *s3cchan)
-{
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- struct s3c24xx_dma_chan *p, *next;
-
-retry:
- next = NULL;
-
- /* Find a waiting virtual channel for the next transfer. */
- list_for_each_entry(p, &s3cdma->memcpy.channels, vc.chan.device_node)
- if (p->state == S3C24XX_DMA_CHAN_WAITING) {
- next = p;
- break;
- }
-
- if (!next) {
- list_for_each_entry(p, &s3cdma->slave.channels,
- vc.chan.device_node)
- if (p->state == S3C24XX_DMA_CHAN_WAITING &&
- s3c24xx_dma_phy_valid(p, s3cchan->phy)) {
- next = p;
- break;
- }
- }
-
- /* Ensure that the physical channel is stopped */
- s3c24xx_dma_terminate_phy(s3cchan->phy);
-
- if (next) {
- bool success;
-
- /*
- * Eww. We know this isn't going to deadlock
- * but lockdep probably doesn't.
- */
- spin_lock(&next->vc.lock);
- /* Re-check the state now that we have the lock */
- success = next->state == S3C24XX_DMA_CHAN_WAITING;
- if (success)
- s3c24xx_dma_phy_reassign_start(s3cchan->phy, next);
- spin_unlock(&next->vc.lock);
-
- /* If the state changed, try to find another channel */
- if (!success)
- goto retry;
- } else {
- /* No more jobs, so free up the physical channel */
- s3c24xx_dma_put_phy(s3cchan->phy);
- }
-
- s3cchan->phy = NULL;
- s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
-}
-
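-/*
- * Called by the virt-dma layer to free a descriptor; memcpy transfers
- * additionally need their DMA mappings undone.
- */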
-static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd)
-{
- struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan);
-
- if (!s3cchan->slave)
- dma_descriptor_unmap(&vd->tx);
-
- s3c24xx_dma_free_txd(txd);
-}
-
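-/*
- * Per-channel interrupt handler: start the next sg, complete the
- * descriptor or, for cyclic transfers, wrap around to the first period.
- */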
-static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
-{
- struct s3c24xx_dma_phy *phy = data;
- struct s3c24xx_dma_chan *s3cchan = phy->serving;
- struct s3c24xx_txd *txd;
-
- dev_dbg(&phy->host->pdev->dev, "interrupt on channel %d\n", phy->id);
-
- /*
- * Interrupts happen to notify the completion of a transfer and the
- * channel should have moved into its stop state already on its own.
- * Therefore interrupts on channels not bound to a virtual channel
- * should never happen. Nevertheless send a terminate command to the
- * channel if the unlikely case happens.
- */
- if (unlikely(!s3cchan)) {
- dev_err(&phy->host->pdev->dev, "interrupt on unused channel %d\n",
- phy->id);
-
- s3c24xx_dma_terminate_phy(phy);
-
- return IRQ_HANDLED;
- }
-
- spin_lock(&s3cchan->vc.lock);
- txd = s3cchan->at;
- if (txd) {
- /* when more sg's are in this txd, start the next one */
- if (!list_is_last(txd->at, &txd->dsg_list)) {
- txd->at = txd->at->next;
- if (txd->cyclic)
- vchan_cyclic_callback(&txd->vd);
- s3c24xx_dma_start_next_sg(s3cchan, txd);
- } else if (!txd->cyclic) {
- s3cchan->at = NULL;
- vchan_cookie_complete(&txd->vd);
-
- /*
- * And start the next descriptor (if any),
- * otherwise free this channel.
- */
- if (vchan_next_desc(&s3cchan->vc))
- s3c24xx_dma_start_next_txd(s3cchan);
- else
- s3c24xx_dma_phy_free(s3cchan);
- } else {
- vchan_cyclic_callback(&txd->vd);
-
- /* Cyclic: reset at beginning */
- txd->at = txd->dsg_list.next;
- s3c24xx_dma_start_next_sg(s3cchan, txd);
- }
- }
- spin_unlock(&s3cchan->vc.lock);
-
- return IRQ_HANDLED;
-}
-
-/*
- * The DMA ENGINE API
- */
-
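-/*
- * Stop all activity on a channel: release its physical channel and free
- * both the active descriptor and everything still queued.
- */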
-static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
-{
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- LIST_HEAD(head);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&s3cchan->vc.lock, flags);
-
- if (!s3cchan->phy && !s3cchan->at) {
- dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
- s3cchan->id);
- ret = -EINVAL;
- goto unlock;
- }
-
- s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
-
- /* Mark physical channel as free */
- if (s3cchan->phy)
- s3c24xx_dma_phy_free(s3cchan);
-
- /* Dequeue current job */
- if (s3cchan->at) {
- vchan_terminate_vdesc(&s3cchan->at->vd);
- s3cchan->at = NULL;
- }
-
- /* Dequeue jobs not yet fired as well */
-
- vchan_get_all_descriptors(&s3cchan->vc, &head);
-
- spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
-
- vchan_dma_desc_free_list(&s3cchan->vc, &head);
-
- return 0;
-
-unlock:
- spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
-
- return ret;
-}
-
-static void s3c24xx_dma_synchronize(struct dma_chan *chan)
-{
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
-
- vchan_synchronize(&s3cchan->vc);
-}
-
-static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan)
-{
- /* Ensure all queued descriptors are freed */
- vchan_free_chan_resources(to_virt_chan(chan));
-}
-
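-/*
- * Report the status of a cookie and, when a txstate is supplied, the
- * number of bytes the transaction still has left to transfer.
- */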
-static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,
- dma_cookie_t cookie, struct dma_tx_state *txstate)
-{
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
- struct s3c24xx_txd *txd;
- struct s3c24xx_sg *dsg;
- struct virt_dma_desc *vd;
- unsigned long flags;
- enum dma_status ret;
- size_t bytes = 0;
-
- spin_lock_irqsave(&s3cchan->vc.lock, flags);
- ret = dma_cookie_status(chan, cookie, txstate);
-
- /*
- * There's no point calculating the residue if there's
- * no txstate to store the value.
- */
- if (ret == DMA_COMPLETE || !txstate) {
- spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
- return ret;
- }
-
- vd = vchan_find_desc(&s3cchan->vc, cookie);
- if (vd) {
- /* On the issued list, so hasn't been processed yet */
- txd = to_s3c24xx_txd(&vd->tx);
-
- list_for_each_entry(dsg, &txd->dsg_list, node)
- bytes += dsg->len;
- } else {
- /*
- * Currently running, so sum over the pending sg's and
- * the currently active one.
- */
- txd = s3cchan->at;
-
- dsg = list_entry(txd->at, struct s3c24xx_sg, node);
- list_for_each_entry_from(dsg, &txd->dsg_list, node)
- bytes += dsg->len;
-
- bytes += s3c24xx_dma_getbytes_chan(s3cchan);
- }
- spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
-
- /*
-	 * This cookie is not complete yet.
-	 * Report the number of bytes left in the active transaction and
-	 * the queue.
- */
- dma_set_residue(txstate, bytes);
-
- /* Whether waiting or running, we're in progress */
- return ret;
-}
-
-/*
- * Initialize a descriptor to be used by memcpy submit
- */
-static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy(
- struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
- size_t len, unsigned long flags)
-{
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- struct s3c24xx_txd *txd;
- struct s3c24xx_sg *dsg;
- int src_mod, dest_mod;
-
- dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %zu bytes from %s\n",
- len, s3cchan->name);
-
- if ((len & S3C24XX_DCON_TC_MASK) != len) {
-		dev_err(&s3cdma->pdev->dev, "memcpy size %zu too large\n", len);
- return NULL;
- }
-
- txd = s3c24xx_dma_get_txd();
- if (!txd)
- return NULL;
-
- dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
- if (!dsg) {
- s3c24xx_dma_free_txd(txd);
- return NULL;
- }
- list_add_tail(&dsg->node, &txd->dsg_list);
-
- dsg->src_addr = src;
- dsg->dst_addr = dest;
- dsg->len = len;
-
- /*
- * Determine a suitable transfer width.
- * The DMA controller cannot fetch/store information which is not
- * naturally aligned on the bus, i.e., a 4 byte fetch must start at
- * an address divisible by 4 - more generally addr % width must be 0.
- */
- src_mod = src % 4;
- dest_mod = dest % 4;
- switch (len % 4) {
- case 0:
- txd->width = (src_mod == 0 && dest_mod == 0) ? 4 : 1;
- break;
- case 2:
- txd->width = ((src_mod == 2 || src_mod == 0) &&
- (dest_mod == 2 || dest_mod == 0)) ? 2 : 1;
- break;
- default:
- txd->width = 1;
- break;
- }
-
- txd->disrcc = S3C24XX_DISRCC_LOC_AHB | S3C24XX_DISRCC_INC_INCREMENT;
- txd->didstc = S3C24XX_DIDSTC_LOC_AHB | S3C24XX_DIDSTC_INC_INCREMENT;
- txd->dcon |= S3C24XX_DCON_DEMAND | S3C24XX_DCON_SYNC_HCLK |
- S3C24XX_DCON_SERV_WHOLE;
-
- return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
-}
-
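-/*
- * Initialize a cyclic descriptor: the buffer is split into period-sized
- * scatterlist entries which the interrupt handler restarts indefinitely.
- */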
-static struct dma_async_tx_descriptor *s3c24xx_dma_prep_dma_cyclic(
- struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
- enum dma_transfer_direction direction, unsigned long flags)
-{
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
- struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
- struct s3c24xx_txd *txd;
- struct s3c24xx_sg *dsg;
- unsigned sg_len;
- dma_addr_t slave_addr;
- u32 hwcfg = 0;
- int i;
-
- dev_dbg(&s3cdma->pdev->dev,
- "prepare cyclic transaction of %zu bytes with period %zu from %s\n",
- size, period, s3cchan->name);
-
- if (!is_slave_direction(direction)) {
- dev_err(&s3cdma->pdev->dev,
- "direction %d unsupported\n", direction);
- return NULL;
- }
-
- txd = s3c24xx_dma_get_txd();
- if (!txd)
- return NULL;
-
-	txd->cyclic = true;
-
- if (cdata->handshake)
- txd->dcon |= S3C24XX_DCON_HANDSHAKE;
-
- switch (cdata->bus) {
- case S3C24XX_DMA_APB:
- txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
- hwcfg |= S3C24XX_DISRCC_LOC_APB;
- break;
- case S3C24XX_DMA_AHB:
- txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
- hwcfg |= S3C24XX_DISRCC_LOC_AHB;
- break;
- }
-
- /*
-	 * Always assume the peripheral end of the transfer is a fixed
-	 * address in memory.
- */
- hwcfg |= S3C24XX_DISRCC_INC_FIXED;
-
- /*
- * Individual dma operations are requested by the slave,
- * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
- */
- txd->dcon |= S3C24XX_DCON_SERV_SINGLE;
-
- if (direction == DMA_MEM_TO_DEV) {
- txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
- S3C24XX_DISRCC_INC_INCREMENT;
- txd->didstc = hwcfg;
- slave_addr = s3cchan->cfg.dst_addr;
- txd->width = s3cchan->cfg.dst_addr_width;
- } else {
- txd->disrcc = hwcfg;
- txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
- S3C24XX_DIDSTC_INC_INCREMENT;
- slave_addr = s3cchan->cfg.src_addr;
- txd->width = s3cchan->cfg.src_addr_width;
- }
-
- sg_len = size / period;
-
- for (i = 0; i < sg_len; i++) {
- dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
- if (!dsg) {
- s3c24xx_dma_free_txd(txd);
- return NULL;
- }
- list_add_tail(&dsg->node, &txd->dsg_list);
-
- dsg->len = period;
- /* Check last period length */
- if (i == sg_len - 1)
- dsg->len = size - period * i;
- if (direction == DMA_MEM_TO_DEV) {
- dsg->src_addr = addr + period * i;
- dsg->dst_addr = slave_addr;
- } else { /* DMA_DEV_TO_MEM */
- dsg->src_addr = slave_addr;
- dsg->dst_addr = addr + period * i;
- }
- }
-
- return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
-}
-
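-/*
- * Initialize a slave scatter-gather descriptor: one hardware transfer is
- * set up per scatterlist entry, with the peripheral at a fixed address.
- */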
-static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg(
- struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
-{
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
- struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
- struct s3c24xx_txd *txd;
- struct s3c24xx_sg *dsg;
- struct scatterlist *sg;
- dma_addr_t slave_addr;
- u32 hwcfg = 0;
- int tmp;
-
- dev_dbg(&s3cdma->pdev->dev, "prepare transaction of %d bytes from %s\n",
- sg_dma_len(sgl), s3cchan->name);
-
- txd = s3c24xx_dma_get_txd();
- if (!txd)
- return NULL;
-
- if (cdata->handshake)
- txd->dcon |= S3C24XX_DCON_HANDSHAKE;
-
- switch (cdata->bus) {
- case S3C24XX_DMA_APB:
- txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
- hwcfg |= S3C24XX_DISRCC_LOC_APB;
- break;
- case S3C24XX_DMA_AHB:
- txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
- hwcfg |= S3C24XX_DISRCC_LOC_AHB;
- break;
- }
-
- /*
-	 * Always assume the peripheral end of the transfer is a fixed
-	 * address in memory.
- */
- hwcfg |= S3C24XX_DISRCC_INC_FIXED;
-
- /*
- * Individual dma operations are requested by the slave,
- * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
- */
- txd->dcon |= S3C24XX_DCON_SERV_SINGLE;
-
- if (direction == DMA_MEM_TO_DEV) {
- txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
- S3C24XX_DISRCC_INC_INCREMENT;
- txd->didstc = hwcfg;
- slave_addr = s3cchan->cfg.dst_addr;
- txd->width = s3cchan->cfg.dst_addr_width;
- } else if (direction == DMA_DEV_TO_MEM) {
- txd->disrcc = hwcfg;
- txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
- S3C24XX_DIDSTC_INC_INCREMENT;
- slave_addr = s3cchan->cfg.src_addr;
- txd->width = s3cchan->cfg.src_addr_width;
- } else {
- s3c24xx_dma_free_txd(txd);
- dev_err(&s3cdma->pdev->dev,
- "direction %d unsupported\n", direction);
- return NULL;
- }
-
- for_each_sg(sgl, sg, sg_len, tmp) {
- dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
- if (!dsg) {
- s3c24xx_dma_free_txd(txd);
- return NULL;
- }
- list_add_tail(&dsg->node, &txd->dsg_list);
-
- dsg->len = sg_dma_len(sg);
- if (direction == DMA_MEM_TO_DEV) {
- dsg->src_addr = sg_dma_address(sg);
- dsg->dst_addr = slave_addr;
- } else { /* DMA_DEV_TO_MEM */
- dsg->src_addr = slave_addr;
- dsg->dst_addr = sg_dma_address(sg);
- }
- }
-
- return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
-}
-
-/*
- * Queue up all pending descriptors and, if the channel is idle, try to
- * allocate a physical channel and start the first transfer.
- */
-static void s3c24xx_dma_issue_pending(struct dma_chan *chan)
-{
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
- unsigned long flags;
-
- spin_lock_irqsave(&s3cchan->vc.lock, flags);
- if (vchan_issue_pending(&s3cchan->vc)) {
- if (!s3cchan->phy && s3cchan->state != S3C24XX_DMA_CHAN_WAITING)
- s3c24xx_dma_phy_alloc_and_start(s3cchan);
- }
- spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
-}
-
-/*
- * Bringup and teardown
- */
-
-/*
- * Initialise the DMAC memcpy/slave channels.
- * Make a local wrapper to hold required data
- */
-static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma,
- struct dma_device *dmadev, unsigned int channels, bool slave)
-{
- struct s3c24xx_dma_chan *chan;
- int i;
-
- INIT_LIST_HEAD(&dmadev->channels);
-
- /*
-	 * Register as many memcpy channels as we have physical channels;
-	 * we won't always be able to use them all, but the code will have
-	 * to cope with that situation.
- */
- for (i = 0; i < channels; i++) {
- chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL);
- if (!chan)
- return -ENOMEM;
-
- chan->id = i;
- chan->host = s3cdma;
- chan->state = S3C24XX_DMA_CHAN_IDLE;
-
- if (slave) {
- chan->slave = true;
- chan->name = kasprintf(GFP_KERNEL, "slave%d", i);
- if (!chan->name)
- return -ENOMEM;
- } else {
- chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
- if (!chan->name)
- return -ENOMEM;
- }
- dev_dbg(dmadev->dev,
- "initialize virtual channel \"%s\"\n",
- chan->name);
-
- chan->vc.desc_free = s3c24xx_dma_desc_free;
- vchan_init(&chan->vc, dmadev);
- }
- dev_info(dmadev->dev, "initialized %d virtual %s channels\n",
- i, slave ? "slave" : "memcpy");
- return i;
-}
-
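-/*
- * Remove all virtual channels from the dma_device and kill their
- * tasklets.
- */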
-static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev)
-{
- struct s3c24xx_dma_chan *chan = NULL;
- struct s3c24xx_dma_chan *next;
-
- list_for_each_entry_safe(chan,
- next, &dmadev->channels, vc.chan.device_node) {
- list_del(&chan->vc.chan.device_node);
- tasklet_kill(&chan->vc.task);
- }
-}
-
-/* s3c2410, s3c2440 and s3c2442 have a 0x40 stride without separate clocks */
-static struct soc_data soc_s3c2410 = {
- .stride = 0x40,
- .has_reqsel = false,
- .has_clocks = false,
-};
-
-/* s3c2412 and s3c2413 have a 0x40 stride and dmareqsel mechanism */
-static struct soc_data soc_s3c2412 = {
- .stride = 0x40,
- .has_reqsel = true,
- .has_clocks = true,
-};
-
-/* s3c2443 and following have a 0x100 stride and dmareqsel mechanism */
-static struct soc_data soc_s3c2443 = {
- .stride = 0x100,
- .has_reqsel = true,
- .has_clocks = true,
-};
-
-static const struct platform_device_id s3c24xx_dma_driver_ids[] = {
- {
- .name = "s3c2410-dma",
- .driver_data = (kernel_ulong_t)&soc_s3c2410,
- }, {
- .name = "s3c2412-dma",
- .driver_data = (kernel_ulong_t)&soc_s3c2412,
- }, {
- .name = "s3c2443-dma",
- .driver_data = (kernel_ulong_t)&soc_s3c2443,
- },
- { },
-};
-
-static struct soc_data *s3c24xx_dma_get_soc_data(struct platform_device *pdev)
-{
- return (struct soc_data *)
- platform_get_device_id(pdev)->driver_data;
-}
-
-static int s3c24xx_dma_probe(struct platform_device *pdev)
-{
- const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
- struct s3c24xx_dma_engine *s3cdma;
- struct soc_data *sdata;
- struct resource *res;
- int ret;
- int i;
-
- if (!pdata) {
- dev_err(&pdev->dev, "platform data missing\n");
- return -ENODEV;
- }
-
- /* Basic sanity check */
- if (pdata->num_phy_channels > MAX_DMA_CHANNELS) {
- dev_err(&pdev->dev, "too many dma channels %d, max %d\n",
- pdata->num_phy_channels, MAX_DMA_CHANNELS);
- return -EINVAL;
- }
-
- sdata = s3c24xx_dma_get_soc_data(pdev);
- if (!sdata)
- return -EINVAL;
-
- s3cdma = devm_kzalloc(&pdev->dev, sizeof(*s3cdma), GFP_KERNEL);
- if (!s3cdma)
- return -ENOMEM;
-
- s3cdma->pdev = pdev;
- s3cdma->pdata = pdata;
- s3cdma->sdata = sdata;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- s3cdma->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(s3cdma->base))
- return PTR_ERR(s3cdma->base);
-
- s3cdma->phy_chans = devm_kcalloc(&pdev->dev,
- pdata->num_phy_channels,
- sizeof(struct s3c24xx_dma_phy),
- GFP_KERNEL);
- if (!s3cdma->phy_chans)
- return -ENOMEM;
-
- /* acquire irqs and clocks for all physical channels */
- for (i = 0; i < pdata->num_phy_channels; i++) {
- struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
- char clk_name[6];
-
- phy->id = i;
- phy->base = s3cdma->base + (i * sdata->stride);
- phy->host = s3cdma;
-
- phy->irq = platform_get_irq(pdev, i);
- if (phy->irq < 0)
- continue;
-
- ret = devm_request_irq(&pdev->dev, phy->irq, s3c24xx_dma_irq,
- 0, pdev->name, phy);
- if (ret) {
- dev_err(&pdev->dev, "Unable to request irq for channel %d, error %d\n",
- i, ret);
- continue;
- }
-
- if (sdata->has_clocks) {
-			snprintf(clk_name, sizeof(clk_name), "dma.%d", i);
- phy->clk = devm_clk_get(&pdev->dev, clk_name);
-			if (IS_ERR(phy->clk)) {
- dev_err(&pdev->dev, "unable to acquire clock for channel %d, error %lu\n",
- i, PTR_ERR(phy->clk));
- continue;
- }
-
- ret = clk_prepare(phy->clk);
- if (ret) {
- dev_err(&pdev->dev, "clock for phy %d failed, error %d\n",
- i, ret);
- continue;
- }
- }
-
- spin_lock_init(&phy->lock);
- phy->valid = true;
-
- dev_dbg(&pdev->dev, "physical channel %d is %s\n",
- i, s3c24xx_dma_phy_busy(phy) ? "BUSY" : "FREE");
- }
-
- /* Initialize memcpy engine */
- dma_cap_set(DMA_MEMCPY, s3cdma->memcpy.cap_mask);
- dma_cap_set(DMA_PRIVATE, s3cdma->memcpy.cap_mask);
- s3cdma->memcpy.dev = &pdev->dev;
- s3cdma->memcpy.device_free_chan_resources =
- s3c24xx_dma_free_chan_resources;
- s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy;
- s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status;
- s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
- s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config;
- s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all;
- s3cdma->memcpy.device_synchronize = s3c24xx_dma_synchronize;
-
- /* Initialize slave engine for SoC internal dedicated peripherals */
- dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
- dma_cap_set(DMA_CYCLIC, s3cdma->slave.cap_mask);
- dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask);
- s3cdma->slave.dev = &pdev->dev;
- s3cdma->slave.device_free_chan_resources =
- s3c24xx_dma_free_chan_resources;
- s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status;
- s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending;
- s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg;
- s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
- s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config;
- s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all;
- s3cdma->slave.device_synchronize = s3c24xx_dma_synchronize;
- s3cdma->slave.filter.map = pdata->slave_map;
- s3cdma->slave.filter.mapcnt = pdata->slavecnt;
- s3cdma->slave.filter.fn = s3c24xx_dma_filter;
-
- /* Register as many memcpy channels as there are physical channels */
- ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy,
- pdata->num_phy_channels, false);
- if (ret <= 0) {
- dev_warn(&pdev->dev,
- "%s failed to enumerate memcpy channels - %d\n",
- __func__, ret);
- goto err_memcpy;
- }
-
- /* Register slave channels */
- ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->slave,
- pdata->num_channels, true);
- if (ret <= 0) {
- dev_warn(&pdev->dev,
- "%s failed to enumerate slave channels - %d\n",
- __func__, ret);
- goto err_slave;
- }
-
- ret = dma_async_device_register(&s3cdma->memcpy);
- if (ret) {
- dev_warn(&pdev->dev,
- "%s failed to register memcpy as an async device - %d\n",
- __func__, ret);
- goto err_memcpy_reg;
- }
-
- ret = dma_async_device_register(&s3cdma->slave);
- if (ret) {
- dev_warn(&pdev->dev,
- "%s failed to register slave as an async device - %d\n",
- __func__, ret);
- goto err_slave_reg;
- }
-
- platform_set_drvdata(pdev, s3cdma);
- dev_info(&pdev->dev, "Loaded dma driver with %d physical channels\n",
- pdata->num_phy_channels);
-
- return 0;
-
-err_slave_reg:
- dma_async_device_unregister(&s3cdma->memcpy);
-err_memcpy_reg:
- s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
-err_slave:
- s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
-err_memcpy:
- if (sdata->has_clocks)
- for (i = 0; i < pdata->num_phy_channels; i++) {
- struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
- if (phy->valid)
- clk_unprepare(phy->clk);
- }
-
- return ret;
-}
-
-static void s3c24xx_dma_free_irq(struct platform_device *pdev,
- struct s3c24xx_dma_engine *s3cdma)
-{
- int i;
-
- for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
- struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
-
- devm_free_irq(&pdev->dev, phy->irq, phy);
- }
-}
-
-static int s3c24xx_dma_remove(struct platform_device *pdev)
-{
- const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
- struct s3c24xx_dma_engine *s3cdma = platform_get_drvdata(pdev);
- struct soc_data *sdata = s3c24xx_dma_get_soc_data(pdev);
- int i;
-
- dma_async_device_unregister(&s3cdma->slave);
- dma_async_device_unregister(&s3cdma->memcpy);
-
- s3c24xx_dma_free_irq(pdev, s3cdma);
-
- s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
- s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
-
- if (sdata->has_clocks)
- for (i = 0; i < pdata->num_phy_channels; i++) {
- struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
- if (phy->valid)
- clk_unprepare(phy->clk);
- }
-
- return 0;
-}
-
-static struct platform_driver s3c24xx_dma_driver = {
- .driver = {
- .name = "s3c24xx-dma",
- },
- .id_table = s3c24xx_dma_driver_ids,
- .probe = s3c24xx_dma_probe,
- .remove = s3c24xx_dma_remove,
-};
-
-module_platform_driver(s3c24xx_dma_driver);
-
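-/*
- * Filter callback for dma_request_channel(): match a channel of this
- * driver whose id equals the given parameter.
- */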
-bool s3c24xx_dma_filter(struct dma_chan *chan, void *param)
-{
- struct s3c24xx_dma_chan *s3cchan;
-
- if (chan->device->dev->driver != &s3c24xx_dma_driver.driver)
- return false;
-
- s3cchan = to_s3c24xx_dma_chan(chan);
-
- return s3cchan->id == (uintptr_t)param;
-}
-EXPORT_SYMBOL(s3c24xx_dma_filter);
-
-MODULE_DESCRIPTION("S3C24XX DMA Driver");
-MODULE_AUTHOR("Heiko Stuebner");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
index 1d1180db6d4e..8f67f453a492 100644
--- a/drivers/dma/tegra186-gpc-dma.c
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -711,6 +711,7 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
return err;
}
+ vchan_terminate_vdesc(&tdc->dma_desc->vd);
tegra_dma_disable(tdc);
tdc->dma_desc = NULL;
}
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index d1a84483f627..b97004036071 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -221,7 +221,7 @@ static int tegra_adma_init(struct tegra_adma *tdma)
int ret;
/* Clear any interrupts */
- tdma_write(tdma, tdma->cdata->global_int_clear, 0x1);
+ tdma_write(tdma, tdma->cdata->ch_base_offset + tdma->cdata->global_int_clear, 0x1);
/* Assert soft reset */
tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1);
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 5ea1656b8919..7e23a6fdef95 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -763,11 +763,12 @@ static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val)
if (uc->desc->dir == DMA_DEV_TO_MEM) {
udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
- udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+ if (uc->config.ep_type != PSIL_EP_NATIVE)
+ udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
} else {
udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
- if (!uc->bchan)
+ if (!uc->bchan && uc->config.ep_type != PSIL_EP_NATIVE)
udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
}
}
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index a8d23cdf883e..ac09f0e5f58d 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -3143,8 +3143,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
/* Initialize the channels */
for_each_child_of_node(node, child) {
err = xilinx_dma_child_probe(xdev, child);
- if (err < 0)
+ if (err < 0) {
+ of_node_put(child);
goto error;
+ }
}
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {