path: root/drivers/dma
author    Linus Torvalds <torvalds@linux-foundation.org>  2016-12-15 07:42:45 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-12-15 07:42:45 +0300
commit    e3842cbfe0976b014288147b130551d8bf52b96c (patch)
tree      686501f0eb80076240c5f38b34d1acbb105a190b /drivers/dma
parent    4d98ead183a2be77bfea425d5243e32629eaaeb1 (diff)
parent    4625d2a513d60ca9c3e8cae42c8f3d9efc1b4211 (diff)
download  linux-e3842cbfe0976b014288147b130551d8bf52b96c.tar.xz
Merge tag 'dmaengine-4.10-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:
 "Fairly routine update this time around with all changes specific to
  drivers:

   - New driver for STMicroelectronics FDMA

   - Memory-to-memory transfers on dw dmac

   - Support for slave maps on pl08x devices

   - Bunch of driver fixes to use dma_pool_zalloc

   - Bunch of compile and warning fixes spread across drivers"

[ The ST FDMA driver already came in earlier through the remoteproc tree ]

* tag 'dmaengine-4.10-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (68 commits)
  dmaengine: sirf-dma: remove unused 'sdesc'
  dmaengine: pl330: remove unused 'regs'
  dmaengine: s3c24xx: remove unused 'cdata'
  dmaengine: stm32-dma: remove unused 'src_addr'
  dmaengine: stm32-dma: remove unused 'dst_addr'
  dmaengine: stm32-dma: remove unused 'sfcr'
  dmaengine: pch_dma: remove unused 'cookie'
  dmaengine: mic_x100_dma: remove unused 'data'
  dmaengine: img-mdc: remove unused 'prev_phys'
  dmaengine: usb-dmac: remove unused 'uchan'
  dmaengine: ioat: remove unused 'res'
  dmaengine: ioat: remove unused 'ioat_dma'
  dmaengine: ioat: remove unused 'is_raid_device'
  dmaengine: pl330: do not generate unaligned access
  dmaengine: k3dma: move to dma_pool_zalloc
  dmaengine: at_hdmac: move to dma_pool_zalloc
  dmaengine: at_xdmac: don't restore unsaved status
  dmaengine: ioat: set error code on failures
  dmaengine: ioat: set error code on failures
  dmaengine: DW DMAC: add multi-block property to device tree
  ...
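The dma_pool_zalloc conversions called out above all follow the same pattern, visible in the at_hdmac, at_xdmac and k3dma hunks below (pch_dma uses the pci_pool_zalloc counterpart). A minimal sketch of that pattern, assuming a hypothetical descriptor type and helper name that are not part of the patch set:

    #include <linux/dmapool.h>
    #include <linux/list.h>

    /* Hypothetical descriptor type, used only for illustration. */
    struct my_desc {
    	struct list_head tx_list;
    };

    static struct my_desc *alloc_desc(struct dma_pool *pool, gfp_t gfp,
    				  dma_addr_t *phys)
    {
    	struct my_desc *desc;

    	/*
    	 * Old form:
    	 *   desc = dma_pool_alloc(pool, gfp, phys);
    	 *   if (desc)
    	 *           memset(desc, 0, sizeof(*desc));
    	 *
    	 * dma_pool_zalloc() returns already-zeroed pool memory, so the
    	 * explicit memset() in each driver goes away.
    	 */
    	desc = dma_pool_zalloc(pool, gfp, phys);
    	return desc;
    }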
Diffstat (limited to 'drivers/dma')
-rw-r--r-- drivers/dma/Kconfig | 2
-rw-r--r-- drivers/dma/amba-pl08x.c | 11
-rw-r--r-- drivers/dma/at_hdmac.c | 3
-rw-r--r-- drivers/dma/at_xdmac.c | 5
-rw-r--r-- drivers/dma/dmatest.c | 74
-rw-r--r-- drivers/dma/dw/core.c | 2
-rw-r--r-- drivers/dma/dw/platform.c | 18
-rw-r--r-- drivers/dma/dw/regs.h | 3
-rw-r--r-- drivers/dma/edma.c | 3
-rw-r--r-- drivers/dma/fsl_raid.c | 1
-rw-r--r-- drivers/dma/hsu/pci.c | 8
-rw-r--r-- drivers/dma/img-mdc-dma.c | 9
-rw-r--r-- drivers/dma/imx-sdma.c | 13
-rw-r--r-- drivers/dma/ioat/dma.c | 17
-rw-r--r-- drivers/dma/ioat/init.c | 21
-rw-r--r-- drivers/dma/k3dma.c | 3
-rw-r--r-- drivers/dma/mic_x100_dma.c | 2
-rw-r--r-- drivers/dma/mv_xor.c | 190
-rw-r--r-- drivers/dma/mv_xor.h | 1
-rw-r--r-- drivers/dma/nbpfaxi.c | 38
-rw-r--r-- drivers/dma/omap-dma.c | 187
-rw-r--r-- drivers/dma/pch_dma.c | 5
-rw-r--r-- drivers/dma/pl330.c | 23
-rw-r--r-- drivers/dma/pxa_dma.c | 28
-rw-r--r-- drivers/dma/qcom/hidma.c | 173
-rw-r--r-- drivers/dma/qcom/hidma.h | 9
-rw-r--r-- drivers/dma/qcom/hidma_dbg.c | 4
-rw-r--r-- drivers/dma/qcom/hidma_ll.c | 176
-rw-r--r-- drivers/dma/qcom/hidma_mgmt.c | 11
-rw-r--r-- drivers/dma/s3c24xx-dma.c | 5
-rw-r--r-- drivers/dma/sh/usb-dmac.c | 3
-rw-r--r-- drivers/dma/sirf-dma.c | 4
-rw-r--r-- drivers/dma/stm32-dma.c | 6
-rw-r--r-- drivers/dma/zx296702_dma.c | 3
34 files changed, 777 insertions, 284 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 2154ea3c5d1c..263495d0adbd 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -494,7 +494,7 @@ config TEGRA20_APB_DMA
or vice versa. It does not support memory to memory data transfer.
config TEGRA210_ADMA
- bool "NVIDIA Tegra210 ADMA support"
+ tristate "NVIDIA Tegra210 ADMA support"
depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST) && PM_CLK
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 939a7c31f760..0b7c6ce629a6 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1793,6 +1793,13 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
}
EXPORT_SYMBOL_GPL(pl08x_filter_id);
+static bool pl08x_filter_fn(struct dma_chan *chan, void *chan_id)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+
+ return plchan->cd == chan_id;
+}
+
/*
* Just check that the device is there and active
* TODO: turn this bit on/off depending on the number of physical channels
@@ -2307,6 +2314,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
ret = -EINVAL;
goto out_no_platdata;
}
+ } else {
+ pl08x->slave.filter.map = pl08x->pd->slave_map;
+ pl08x->slave.filter.mapcnt = pl08x->pd->slave_map_len;
+ pl08x->slave.filter.fn = pl08x_filter_fn;
}
/* By default, AHB1 only. If dualmaster, from platform */
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index a4c8f80db29d..1baf3404a365 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -111,9 +111,8 @@ static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
struct at_dma *atdma = to_at_dma(chan->device);
dma_addr_t phys;
- desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
+ desc = dma_pool_zalloc(atdma->dma_desc_pool, gfp_flags, &phys);
if (desc) {
- memset(desc, 0, sizeof(struct at_desc));
INIT_LIST_HEAD(&desc->tx_list);
dma_async_tx_descriptor_init(&desc->txd, chan);
/* txd.flags will be overwritten in prep functions */
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b7d7f2d443a1..7d4e0bcda9af 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -221,7 +221,6 @@ struct at_xdmac {
int irq;
struct clk *clk;
u32 save_gim;
- u32 save_gs;
struct dma_pool *at_xdmac_desc_pool;
struct at_xdmac_chan chan[0];
};
@@ -444,9 +443,8 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
dma_addr_t phys;
- desc = dma_pool_alloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
+ desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
if (desc) {
- memset(desc, 0, sizeof(*desc));
INIT_LIST_HEAD(&desc->descs_list);
dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
@@ -1896,7 +1894,6 @@ static int atmel_xdmac_resume(struct device *dev)
}
at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
- at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs);
list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
atchan = to_at_xdmac_chan(chan);
at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index cf76fc6149e5..451f899f74e4 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -164,7 +164,9 @@ struct dmatest_thread {
struct task_struct *task;
struct dma_chan *chan;
u8 **srcs;
+ u8 **usrcs;
u8 **dsts;
+ u8 **udsts;
enum dma_transaction_type type;
bool done;
};
@@ -431,6 +433,7 @@ static int dmatest_func(void *data)
ktime_t comparetime = ktime_set(0, 0);
s64 runtime = 0;
unsigned long long total_len = 0;
+ u8 align = 0;
set_freezable();
@@ -441,20 +444,24 @@ static int dmatest_func(void *data)
params = &info->params;
chan = thread->chan;
dev = chan->device;
- if (thread->type == DMA_MEMCPY)
+ if (thread->type == DMA_MEMCPY) {
+ align = dev->copy_align;
src_cnt = dst_cnt = 1;
- else if (thread->type == DMA_SG)
+ } else if (thread->type == DMA_SG) {
+ align = dev->copy_align;
src_cnt = dst_cnt = sg_buffers;
- else if (thread->type == DMA_XOR) {
+ } else if (thread->type == DMA_XOR) {
/* force odd to ensure dst = src */
src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
dst_cnt = 1;
+ align = dev->xor_align;
} else if (thread->type == DMA_PQ) {
/* force odd to ensure dst = src */
src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
dst_cnt = 2;
+ align = dev->pq_align;
- pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
+ pq_coefs = kmalloc(params->pq_sources + 1, GFP_KERNEL);
if (!pq_coefs)
goto err_thread_type;
@@ -463,23 +470,47 @@ static int dmatest_func(void *data)
} else
goto err_thread_type;
- thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
+ thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
if (!thread->srcs)
goto err_srcs;
+
+ thread->usrcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
+ if (!thread->usrcs)
+ goto err_usrcs;
+
for (i = 0; i < src_cnt; i++) {
- thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
- if (!thread->srcs[i])
+ thread->usrcs[i] = kmalloc(params->buf_size + align,
+ GFP_KERNEL);
+ if (!thread->usrcs[i])
goto err_srcbuf;
+
+ /* align srcs to alignment restriction */
+ if (align)
+ thread->srcs[i] = PTR_ALIGN(thread->usrcs[i], align);
+ else
+ thread->srcs[i] = thread->usrcs[i];
}
thread->srcs[i] = NULL;
- thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
+ thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
if (!thread->dsts)
goto err_dsts;
+
+ thread->udsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
+ if (!thread->udsts)
+ goto err_udsts;
+
for (i = 0; i < dst_cnt; i++) {
- thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
- if (!thread->dsts[i])
+ thread->udsts[i] = kmalloc(params->buf_size + align,
+ GFP_KERNEL);
+ if (!thread->udsts[i])
goto err_dstbuf;
+
+ /* align dsts to alignment restriction */
+ if (align)
+ thread->dsts[i] = PTR_ALIGN(thread->udsts[i], align);
+ else
+ thread->dsts[i] = thread->udsts[i];
}
thread->dsts[i] = NULL;
@@ -498,20 +529,11 @@ static int dmatest_func(void *data)
dma_addr_t srcs[src_cnt];
dma_addr_t *dsts;
unsigned int src_off, dst_off, len;
- u8 align = 0;
struct scatterlist tx_sg[src_cnt];
struct scatterlist rx_sg[src_cnt];
total_tests++;
- /* honor alignment restrictions */
- if (thread->type == DMA_MEMCPY || thread->type == DMA_SG)
- align = dev->copy_align;
- else if (thread->type == DMA_XOR)
- align = dev->xor_align;
- else if (thread->type == DMA_PQ)
- align = dev->pq_align;
-
if (1 << align > params->buf_size) {
pr_err("%u-byte buffer too small for %d-byte alignment\n",
params->buf_size, 1 << align);
@@ -549,7 +571,7 @@ static int dmatest_func(void *data)
filltime = ktime_add(filltime, diff);
}
- um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt,
+ um = dmaengine_get_unmap_data(dev->dev, src_cnt + dst_cnt,
GFP_KERNEL);
if (!um) {
failed_tests++;
@@ -729,13 +751,17 @@ static int dmatest_func(void *data)
ret = 0;
err_dstbuf:
- for (i = 0; thread->dsts[i]; i++)
- kfree(thread->dsts[i]);
+ for (i = 0; thread->udsts[i]; i++)
+ kfree(thread->udsts[i]);
+ kfree(thread->udsts);
+err_udsts:
kfree(thread->dsts);
err_dsts:
err_srcbuf:
- for (i = 0; thread->srcs[i]; i++)
- kfree(thread->srcs[i]);
+ for (i = 0; thread->usrcs[i]; i++)
+ kfree(thread->usrcs[i]);
+ kfree(thread->usrcs);
+err_usrcs:
kfree(thread->srcs);
err_srcs:
kfree(pq_coefs);
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index c2c0a613cb7a..e5adf5d1c34f 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1569,7 +1569,7 @@ int dw_dma_probe(struct dw_dma_chip *chip)
(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
} else {
dwc->block_size = pdata->block_size;
- dwc->nollp = pdata->is_nollp;
+ dwc->nollp = !pdata->multi_block[i];
}
}
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 5bda0eb9f393..b1655e40cfa2 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -102,7 +102,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct dw_dma_platform_data *pdata;
- u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];
+ u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
u32 nr_masters;
u32 nr_channels;
@@ -118,6 +118,8 @@ dw_dma_parse_dt(struct platform_device *pdev)
if (of_property_read_u32(np, "dma-channels", &nr_channels))
return NULL;
+ if (nr_channels > DW_DMA_MAX_NR_CHANNELS)
+ return NULL;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
@@ -129,6 +131,12 @@ dw_dma_parse_dt(struct platform_device *pdev)
if (of_property_read_bool(np, "is_private"))
pdata->is_private = true;
+ /*
+ * All known devices, which use DT for configuration, support
+ * memory-to-memory transfers. So enable it by default.
+ */
+ pdata->is_memcpy = true;
+
if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
pdata->chan_allocation_order = (unsigned char)tmp;
@@ -146,6 +154,14 @@ dw_dma_parse_dt(struct platform_device *pdev)
pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
}
+ if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
+ for (tmp = 0; tmp < nr_channels; tmp++)
+ pdata->multi_block[tmp] = mb[tmp];
+ } else {
+ for (tmp = 0; tmp < nr_channels; tmp++)
+ pdata->multi_block[tmp] = 1;
+ }
+
return pdata;
}
#else
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index f65dd104479f..4e0128c62704 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -12,7 +12,8 @@
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
-#define DW_DMA_MAX_NR_CHANNELS 8
+#include "internal.h"
+
#define DW_DMA_MAX_NR_REQUESTS 16
/* flow controller */
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 77242b37ef87..3879f80a4815 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -2451,6 +2451,9 @@ static int edma_pm_resume(struct device *dev)
int i;
s8 (*queue_priority_mapping)[2];
+ /* re initialize dummy slot to dummy param set */
+ edma_write_slot(ecc, ecc->dummy_slot, &dummy_paramset);
+
queue_priority_mapping = ecc->info->queue_priority_mapping;
/* Event queue priority mapping */
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index db2f9e1653a2..90d29f90acfb 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -881,6 +881,7 @@ static struct of_device_id fsl_re_ids[] = {
{ .compatible = "fsl,raideng-v1.0", },
{}
};
+MODULE_DEVICE_TABLE(of, fsl_re_ids);
static struct platform_driver fsl_re_driver = {
.driver = {
diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
index b51639f045ed..4875fa428e81 100644
--- a/drivers/dma/hsu/pci.c
+++ b/drivers/dma/hsu/pci.c
@@ -77,13 +77,15 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!chip)
return -ENOMEM;
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ return ret;
+
chip->dev = &pdev->dev;
chip->regs = pcim_iomap_table(pdev)[0];
chip->length = pci_resource_len(pdev, 0);
chip->offset = HSU_PCI_CHAN_OFFSET;
- chip->irq = pdev->irq;
-
- pci_enable_msi(pdev);
+ chip->irq = pci_irq_vector(pdev, 0);
ret = hsu_dma_probe(chip);
if (ret)
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index 624f1e1e9c55..54db1411ce73 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -292,7 +292,7 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
struct mdc_dma *mdma = mchan->mdma;
struct mdc_tx_desc *mdesc;
struct mdc_hw_list_desc *curr, *prev = NULL;
- dma_addr_t curr_phys, prev_phys;
+ dma_addr_t curr_phys;
if (!len)
return NULL;
@@ -324,7 +324,6 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
xfer_size);
prev = curr;
- prev_phys = curr_phys;
mdesc->list_len++;
src += xfer_size;
@@ -375,7 +374,7 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
struct mdc_dma *mdma = mchan->mdma;
struct mdc_tx_desc *mdesc;
struct mdc_hw_list_desc *curr, *prev = NULL;
- dma_addr_t curr_phys, prev_phys;
+ dma_addr_t curr_phys;
if (!buf_len && !period_len)
return NULL;
@@ -430,7 +429,6 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
}
prev = curr;
- prev_phys = curr_phys;
mdesc->list_len++;
buf_addr += xfer_size;
@@ -458,7 +456,7 @@ static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
struct mdc_tx_desc *mdesc;
struct scatterlist *sg;
struct mdc_hw_list_desc *curr, *prev = NULL;
- dma_addr_t curr_phys, prev_phys;
+ dma_addr_t curr_phys;
unsigned int i;
if (!sgl)
@@ -509,7 +507,6 @@ static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
}
prev = curr;
- prev_phys = curr_phys;
mdesc->list_len++;
mdesc->list_xfer_size += xfer_size;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index b9629b2bfc05..d1651a50c349 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -298,6 +298,7 @@ struct sdma_engine;
* @event_id1 for channels that use 2 events
* @word_size peripheral access size
* @buf_tail ID of the buffer that was processed
+ * @buf_ptail ID of the previous buffer that was processed
* @num_bd max NUM_BD. number of descriptors currently handling
*/
struct sdma_channel {
@@ -309,6 +310,7 @@ struct sdma_channel {
unsigned int event_id1;
enum dma_slave_buswidth word_size;
unsigned int buf_tail;
+ unsigned int buf_ptail;
unsigned int num_bd;
unsigned int period_len;
struct sdma_buffer_descriptor *bd;
@@ -700,6 +702,8 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
sdmac->chn_real_count = bd->mode.count;
bd->mode.status |= BD_DONE;
bd->mode.count = sdmac->period_len;
+ sdmac->buf_ptail = sdmac->buf_tail;
+ sdmac->buf_tail = (sdmac->buf_tail + 1) % sdmac->num_bd;
/*
* The callback is called from the interrupt context in order
@@ -710,9 +714,6 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
- sdmac->buf_tail++;
- sdmac->buf_tail %= sdmac->num_bd;
-
if (error)
sdmac->status = old_status;
}
@@ -1186,6 +1187,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
sdmac->flags = 0;
sdmac->buf_tail = 0;
+ sdmac->buf_ptail = 0;
+ sdmac->chn_real_count = 0;
dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
sg_len, channel);
@@ -1288,6 +1291,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
sdmac->status = DMA_IN_PROGRESS;
sdmac->buf_tail = 0;
+ sdmac->buf_ptail = 0;
+ sdmac->chn_real_count = 0;
sdmac->period_len = period_len;
sdmac->flags |= IMX_DMA_SG_LOOP;
@@ -1385,7 +1390,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
u32 residue;
if (sdmac->flags & IMX_DMA_SG_LOOP)
- residue = (sdmac->num_bd - sdmac->buf_tail) *
+ residue = (sdmac->num_bd - sdmac->buf_ptail) *
sdmac->period_len - sdmac->chn_real_count;
else
residue = sdmac->chn_count - sdmac->chn_real_count;
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 49386ce04bf5..a371b07a0981 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -39,6 +39,7 @@
#include "../dmaengine.h"
static char *chanerr_str[] = {
+ "DMA Transfer Source Address Error",
"DMA Transfer Destination Address Error",
"Next Descriptor Address Error",
"Descriptor Error",
@@ -66,7 +67,6 @@ static char *chanerr_str[] = {
"Result Guard Tag verification Error",
"Result Application Tag verification Error",
"Result Reference Tag verification Error",
- NULL
};
static void ioat_eh(struct ioatdma_chan *ioat_chan);
@@ -75,13 +75,10 @@ static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
{
int i;
- for (i = 0; i < 32; i++) {
+ for (i = 0; i < ARRAY_SIZE(chanerr_str); i++) {
if ((chanerr >> i) & 1) {
- if (chanerr_str[i]) {
- dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
- i, chanerr_str[i]);
- } else
- break;
+ dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
+ i, chanerr_str[i]);
}
}
}
@@ -341,15 +338,12 @@ ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
{
struct ioat_dma_descriptor *hw;
struct ioat_ring_ent *desc;
- struct ioatdma_device *ioat_dma;
struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
int chunk;
dma_addr_t phys;
u8 *pos;
off_t offs;
- ioat_dma = to_ioatdma_device(chan->device);
-
chunk = idx / IOAT_DESCS_PER_2M;
idx &= (IOAT_DESCS_PER_2M - 1);
offs = idx * IOAT_DESC_SZ;
@@ -614,11 +608,8 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
tx = &desc->txd;
if (tx->cookie) {
- struct dmaengine_result res;
-
dma_cookie_complete(tx);
dma_descriptor_unmap(tx);
- res.result = DMA_TRANS_NOERROR;
dmaengine_desc_get_callback_invoke(tx, NULL);
tx->callback = NULL;
tx->callback_result = NULL;
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 015f7110b96d..90eddd9f07e4 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -340,11 +340,13 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_src)) {
dev_err(dev, "mapping src buffer failed\n");
+ err = -ENOMEM;
goto free_resources;
}
dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dma_dest)) {
dev_err(dev, "mapping dest buffer failed\n");
+ err = -ENOMEM;
goto unmap_src;
}
flags = DMA_PREP_INTERRUPT;
@@ -827,16 +829,20 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
op = IOAT_OP_XOR;
dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, dest_dma))
+ if (dma_mapping_error(dev, dest_dma)) {
+ err = -ENOMEM;
goto free_resources;
+ }
for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
dma_srcs[i] = DMA_ERROR_CODE;
for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_srcs[i]))
+ if (dma_mapping_error(dev, dma_srcs[i])) {
+ err = -ENOMEM;
goto dma_unmap;
+ }
}
tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
IOAT_NUM_SRC_TEST, PAGE_SIZE,
@@ -904,8 +910,10 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_srcs[i]))
+ if (dma_mapping_error(dev, dma_srcs[i])) {
+ err = -ENOMEM;
goto dma_unmap;
+ }
}
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
@@ -957,8 +965,10 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_srcs[i]))
+ if (dma_mapping_error(dev, dma_srcs[i])) {
+ err = -ENOMEM;
goto dma_unmap;
+ }
}
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
@@ -1071,7 +1081,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
struct dma_device *dma;
struct dma_chan *c;
struct ioatdma_chan *ioat_chan;
- bool is_raid_device = false;
int err;
u16 val16;
@@ -1095,7 +1104,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
if (ioat_dma->cap & IOAT_CAP_XOR) {
- is_raid_device = true;
dma->max_xor = 8;
dma_cap_set(DMA_XOR, dma->cap_mask);
@@ -1106,7 +1114,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
}
if (ioat_dma->cap & IOAT_CAP_PQ) {
- is_raid_device = true;
dma->device_prep_dma_pq = ioat_prep_pq;
dma->device_prep_dma_pq_val = ioat_prep_pq_val;
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index aabcb7934b05..01e25c68dd5a 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -458,13 +458,12 @@ static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num,
if (!ds)
return NULL;
- ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
+ ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
if (!ds->desc_hw) {
dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
kfree(ds);
return NULL;
}
- memset(ds->desc_hw, 0, sizeof(struct k3_desc_hw) * num);
ds->desc_num = num;
return ds;
}
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 818255844a3c..5ba5714d0b7c 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -554,9 +554,7 @@ static int mic_dma_init(struct mic_dma_device *mic_dma_dev,
int ret;
for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
- unsigned long data;
ch = &mic_dma_dev->mic_ch[i];
- data = (unsigned long)ch;
ch->ch_num = i;
ch->owner = owner;
spin_lock_init(&ch->cleanup_lock);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 23f75285a4d9..0cb951b743a6 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -68,6 +68,36 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc,
hw_desc->byte_count = byte_count;
}
+/* Populate the descriptor */
+static void mv_xor_config_sg_ll_desc(struct mv_xor_desc_slot *desc,
+ dma_addr_t dma_src, dma_addr_t dma_dst,
+ u32 len, struct mv_xor_desc_slot *prev)
+{
+ struct mv_xor_desc *hw_desc = desc->hw_desc;
+
+ hw_desc->status = XOR_DESC_DMA_OWNED;
+ hw_desc->phy_next_desc = 0;
+ /* Configure for XOR with only one src address -> MEMCPY */
+ hw_desc->desc_command = XOR_DESC_OPERATION_XOR | (0x1 << 0);
+ hw_desc->phy_dest_addr = dma_dst;
+ hw_desc->phy_src_addr[0] = dma_src;
+ hw_desc->byte_count = len;
+
+ if (prev) {
+ struct mv_xor_desc *hw_prev = prev->hw_desc;
+
+ hw_prev->phy_next_desc = desc->async_tx.phys;
+ }
+}
+
+static void mv_xor_desc_config_eod(struct mv_xor_desc_slot *desc)
+{
+ struct mv_xor_desc *hw_desc = desc->hw_desc;
+
+ /* Enable end-of-descriptor interrupt */
+ hw_desc->desc_command |= XOR_DESC_EOD_INT_EN;
+}
+
static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
@@ -228,8 +258,13 @@ mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
node) {
- if (async_tx_test_ack(&iter->async_tx))
+ if (async_tx_test_ack(&iter->async_tx)) {
list_move_tail(&iter->node, &mv_chan->free_slots);
+ if (!list_empty(&iter->sg_tx_list)) {
+ list_splice_tail_init(&iter->sg_tx_list,
+ &mv_chan->free_slots);
+ }
+ }
}
return 0;
}
@@ -244,11 +279,20 @@ mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
/* the client is allowed to attach dependent operations
* until 'ack' is set
*/
- if (!async_tx_test_ack(&desc->async_tx))
+ if (!async_tx_test_ack(&desc->async_tx)) {
/* move this slot to the completed_slots */
list_move_tail(&desc->node, &mv_chan->completed_slots);
- else
+ if (!list_empty(&desc->sg_tx_list)) {
+ list_splice_tail_init(&desc->sg_tx_list,
+ &mv_chan->completed_slots);
+ }
+ } else {
list_move_tail(&desc->node, &mv_chan->free_slots);
+ if (!list_empty(&desc->sg_tx_list)) {
+ list_splice_tail_init(&desc->sg_tx_list,
+ &mv_chan->free_slots);
+ }
+ }
return 0;
}
@@ -450,6 +494,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
dma_async_tx_descriptor_init(&slot->async_tx, chan);
slot->async_tx.tx_submit = mv_xor_tx_submit;
INIT_LIST_HEAD(&slot->node);
+ INIT_LIST_HEAD(&slot->sg_tx_list);
dma_desc = mv_chan->dma_desc_pool;
slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
slot->idx = idx++;
@@ -617,6 +662,132 @@ mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}
+/**
+ * mv_xor_prep_dma_sg - prepare descriptors for a memory sg transaction
+ * @chan: DMA channel
+ * @dst_sg: Destination scatter list
+ * @dst_sg_len: Number of entries in destination scatter list
+ * @src_sg: Source scatter list
+ * @src_sg_len: Number of entries in source scatter list
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+mv_xor_prep_dma_sg(struct dma_chan *chan, struct scatterlist *dst_sg,
+ unsigned int dst_sg_len, struct scatterlist *src_sg,
+ unsigned int src_sg_len, unsigned long flags)
+{
+ struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
+ struct mv_xor_desc_slot *new;
+ struct mv_xor_desc_slot *first = NULL;
+ struct mv_xor_desc_slot *prev = NULL;
+ size_t len, dst_avail, src_avail;
+ dma_addr_t dma_dst, dma_src;
+ int desc_cnt = 0;
+ int ret;
+
+ dev_dbg(mv_chan_to_devp(mv_chan),
+ "%s dst_sg_len: %d src_sg_len: %d flags: %ld\n",
+ __func__, dst_sg_len, src_sg_len, flags);
+
+ dst_avail = sg_dma_len(dst_sg);
+ src_avail = sg_dma_len(src_sg);
+
+ /* Run until we are out of scatterlist entries */
+ while (true) {
+ /* Allocate and populate the descriptor */
+ desc_cnt++;
+ new = mv_chan_alloc_slot(mv_chan);
+ if (!new) {
+ dev_err(mv_chan_to_devp(mv_chan),
+ "Out of descriptors (desc_cnt=%d)!\n",
+ desc_cnt);
+ goto err;
+ }
+
+ len = min_t(size_t, src_avail, dst_avail);
+ len = min_t(size_t, len, MV_XOR_MAX_BYTE_COUNT);
+ if (len == 0)
+ goto fetch;
+
+ if (len < MV_XOR_MIN_BYTE_COUNT) {
+ dev_err(mv_chan_to_devp(mv_chan),
+ "Transfer size of %zu too small!\n", len);
+ goto err;
+ }
+
+ dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
+ dst_avail;
+ dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
+ src_avail;
+
+ /* Check if a new window needs to get added for 'dst' */
+ ret = mv_xor_add_io_win(mv_chan, dma_dst);
+ if (ret)
+ goto err;
+
+ /* Check if a new window needs to get added for 'src' */
+ ret = mv_xor_add_io_win(mv_chan, dma_src);
+ if (ret)
+ goto err;
+
+ /* Populate the descriptor */
+ mv_xor_config_sg_ll_desc(new, dma_src, dma_dst, len, prev);
+ prev = new;
+ dst_avail -= len;
+ src_avail -= len;
+
+ if (!first)
+ first = new;
+ else
+ list_move_tail(&new->node, &first->sg_tx_list);
+
+fetch:
+ /* Fetch the next dst scatterlist entry */
+ if (dst_avail == 0) {
+ if (dst_sg_len == 0)
+ break;
+
+ /* Fetch the next entry: if there are no more: done */
+ dst_sg = sg_next(dst_sg);
+ if (dst_sg == NULL)
+ break;
+
+ dst_sg_len--;
+ dst_avail = sg_dma_len(dst_sg);
+ }
+
+ /* Fetch the next src scatterlist entry */
+ if (src_avail == 0) {
+ if (src_sg_len == 0)
+ break;
+
+ /* Fetch the next entry: if there are no more: done */
+ src_sg = sg_next(src_sg);
+ if (src_sg == NULL)
+ break;
+
+ src_sg_len--;
+ src_avail = sg_dma_len(src_sg);
+ }
+ }
+
+ /* Set the EOD flag in the last descriptor */
+ mv_xor_desc_config_eod(new);
+ first->async_tx.flags = flags;
+
+ return &first->async_tx;
+
+err:
+ /* Cleanup: Move all descriptors back into the free list */
+ spin_lock_bh(&mv_chan->lock);
+ mv_desc_clean_slot(first, mv_chan);
+ spin_unlock_bh(&mv_chan->lock);
+
+ return NULL;
+}
+
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
@@ -1083,6 +1254,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
+ if (dma_has_cap(DMA_SG, dma_dev->cap_mask))
+ dma_dev->device_prep_dma_sg = mv_xor_prep_dma_sg;
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
dma_dev->max_xor = 8;
dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
@@ -1132,10 +1305,11 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
goto err_free_irq;
}
- dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
+ dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s%s)\n",
mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
+ dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "sg " : "",
dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
dma_async_device_register(dma_dev);
@@ -1378,6 +1552,7 @@ static int mv_xor_probe(struct platform_device *pdev)
dma_cap_zero(cap_mask);
dma_cap_set(DMA_MEMCPY, cap_mask);
+ dma_cap_set(DMA_SG, cap_mask);
dma_cap_set(DMA_XOR, cap_mask);
dma_cap_set(DMA_INTERRUPT, cap_mask);
@@ -1455,12 +1630,7 @@ static struct platform_driver mv_xor_driver = {
},
};
-
-static int __init mv_xor_init(void)
-{
- return platform_driver_register(&mv_xor_driver);
-}
-device_initcall(mv_xor_init);
+builtin_platform_driver(mv_xor_driver);
/*
MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index 88eeab222a23..cf921dd6af73 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -148,6 +148,7 @@ struct mv_xor_chan {
*/
struct mv_xor_desc_slot {
struct list_head node;
+ struct list_head sg_tx_list;
enum dma_transaction_type type;
void *hw_desc;
u16 idx;
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 09de71519d37..3f45b9bdf201 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -225,6 +225,8 @@ struct nbpf_channel {
struct nbpf_device {
struct dma_device dma_dev;
void __iomem *base;
+ u32 max_burst_mem_read;
+ u32 max_burst_mem_write;
struct clk *clk;
const struct nbpf_config *config;
unsigned int eirq;
@@ -425,10 +427,33 @@ static void nbpf_chan_configure(struct nbpf_channel *chan)
nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg);
}
-static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size)
+static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size,
+ enum dma_transfer_direction direction)
{
+ int max_burst = nbpf->config->buffer_size * 8;
+
+ if (nbpf->max_burst_mem_read || nbpf->max_burst_mem_write) {
+ switch (direction) {
+ case DMA_MEM_TO_MEM:
+ max_burst = min_not_zero(nbpf->max_burst_mem_read,
+ nbpf->max_burst_mem_write);
+ break;
+ case DMA_MEM_TO_DEV:
+ if (nbpf->max_burst_mem_read)
+ max_burst = nbpf->max_burst_mem_read;
+ break;
+ case DMA_DEV_TO_MEM:
+ if (nbpf->max_burst_mem_write)
+ max_burst = nbpf->max_burst_mem_write;
+ break;
+ case DMA_DEV_TO_DEV:
+ default:
+ break;
+ }
+ }
+
/* Maximum supported bursts depend on the buffer size */
- return min_t(int, __ffs(size), ilog2(nbpf->config->buffer_size * 8));
+ return min_t(int, __ffs(size), ilog2(max_burst));
}
static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
@@ -458,7 +483,7 @@ static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
size = burst;
}
- return nbpf_xfer_ds(nbpf, size);
+ return nbpf_xfer_ds(nbpf, size, DMA_TRANS_NONE);
}
/*
@@ -507,7 +532,7 @@ static int nbpf_prep_one(struct nbpf_link_desc *ldesc,
* transfers we enable the SBE bit and terminate the transfer in our
* .device_pause handler.
*/
- mem_xfer = nbpf_xfer_ds(chan->nbpf, size);
+ mem_xfer = nbpf_xfer_ds(chan->nbpf, size, direction);
switch (direction) {
case DMA_DEV_TO_MEM:
@@ -1313,6 +1338,11 @@ static int nbpf_probe(struct platform_device *pdev)
if (IS_ERR(nbpf->clk))
return PTR_ERR(nbpf->clk);
+ of_property_read_u32(np, "max-burst-mem-read",
+ &nbpf->max_burst_mem_read);
+ of_property_read_u32(np, "max-burst-mem-write",
+ &nbpf->max_burst_mem_write);
+
nbpf->config = cfg;
for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) {
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 7ca27d4b1c54..ac68666cd3f4 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -166,6 +166,9 @@ enum {
CSDP_DST_BURST_16 = 1 << 14,
CSDP_DST_BURST_32 = 2 << 14,
CSDP_DST_BURST_64 = 3 << 14,
+ CSDP_WRITE_NON_POSTED = 0 << 16,
+ CSDP_WRITE_POSTED = 1 << 16,
+ CSDP_WRITE_LAST_NON_POSTED = 2 << 16,
CICR_TOUT_IE = BIT(0), /* OMAP1 only */
CICR_DROP_IE = BIT(1),
@@ -422,7 +425,30 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
c->running = true;
}
-static void omap_dma_stop(struct omap_chan *c)
+static void omap_dma_drain_chan(struct omap_chan *c)
+{
+ int i;
+ u32 val;
+
+ /* Wait for sDMA FIFO to drain */
+ for (i = 0; ; i++) {
+ val = omap_dma_chan_read(c, CCR);
+ if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
+ break;
+
+ if (i > 100)
+ break;
+
+ udelay(5);
+ }
+
+ if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
+ dev_err(c->vc.chan.device->dev,
+ "DMA drain did not complete on lch %d\n",
+ c->dma_ch);
+}
+
+static int omap_dma_stop(struct omap_chan *c)
{
struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
uint32_t val;
@@ -435,7 +461,6 @@ static void omap_dma_stop(struct omap_chan *c)
val = omap_dma_chan_read(c, CCR);
if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
uint32_t sysconfig;
- unsigned i;
sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
@@ -446,27 +471,19 @@ static void omap_dma_stop(struct omap_chan *c)
val &= ~CCR_ENABLE;
omap_dma_chan_write(c, CCR, val);
- /* Wait for sDMA FIFO to drain */
- for (i = 0; ; i++) {
- val = omap_dma_chan_read(c, CCR);
- if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
- break;
-
- if (i > 100)
- break;
-
- udelay(5);
- }
-
- if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
- dev_err(c->vc.chan.device->dev,
- "DMA drain did not complete on lch %d\n",
- c->dma_ch);
+ if (!(c->ccr & CCR_BUFFERING_DISABLE))
+ omap_dma_drain_chan(c);
omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
} else {
+ if (!(val & CCR_ENABLE))
+ return -EINVAL;
+
val &= ~CCR_ENABLE;
omap_dma_chan_write(c, CCR, val);
+
+ if (!(c->ccr & CCR_BUFFERING_DISABLE))
+ omap_dma_drain_chan(c);
}
mb();
@@ -481,8 +498,8 @@ static void omap_dma_stop(struct omap_chan *c)
omap_dma_chan_write(c, CLNK_CTRL, val);
}
-
c->running = false;
+ return 0;
}
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d)
@@ -836,6 +853,8 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
} else {
txstate->residue = 0;
}
+ if (ret == DMA_IN_PROGRESS && c->paused)
+ ret = DMA_PAUSED;
spin_unlock_irqrestore(&c->vc.lock, flags);
return ret;
@@ -865,15 +884,18 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
unsigned i, es, en, frame_bytes;
bool ll_failed = false;
u32 burst;
+ u32 port_window, port_window_bytes;
if (dir == DMA_DEV_TO_MEM) {
dev_addr = c->cfg.src_addr;
dev_width = c->cfg.src_addr_width;
burst = c->cfg.src_maxburst;
+ port_window = c->cfg.src_port_window_size;
} else if (dir == DMA_MEM_TO_DEV) {
dev_addr = c->cfg.dst_addr;
dev_width = c->cfg.dst_addr_width;
burst = c->cfg.dst_maxburst;
+ port_window = c->cfg.dst_port_window_size;
} else {
dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
return NULL;
@@ -894,6 +916,12 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
return NULL;
}
+ /* When the port_window is used, one frame must cover the window */
+ if (port_window) {
+ burst = port_window;
+ port_window_bytes = port_window * es_bytes[es];
+ }
+
/* Now allocate and setup the descriptor. */
d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
if (!d)
@@ -905,11 +933,45 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
d->ccr = c->ccr | CCR_SYNC_FRAME;
if (dir == DMA_DEV_TO_MEM) {
- d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
+
+ d->ccr |= CCR_DST_AMODE_POSTINC;
+ if (port_window) {
+ d->ccr |= CCR_SRC_AMODE_DBLIDX;
+ d->ei = 1;
+ /*
+ * One frame covers the port_window and by configure
+ * the source frame index to be -1 * (port_window - 1)
+ * we instruct the sDMA that after a frame is processed
+ * it should move back to the start of the window.
+ */
+ d->fi = -(port_window_bytes - 1);
+
+ if (port_window_bytes >= 64)
+ d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
+ else if (port_window_bytes >= 32)
+ d->csdp = CSDP_SRC_BURST_32 | CSDP_SRC_PACKED;
+ else if (port_window_bytes >= 16)
+ d->csdp = CSDP_SRC_BURST_16 | CSDP_SRC_PACKED;
+ } else {
+ d->ccr |= CCR_SRC_AMODE_CONSTANT;
+ }
} else {
- d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;
d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
+
+ d->ccr |= CCR_SRC_AMODE_POSTINC;
+ if (port_window) {
+ d->ccr |= CCR_DST_AMODE_DBLIDX;
+
+ if (port_window_bytes >= 64)
+ d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
+ else if (port_window_bytes >= 32)
+ d->csdp = CSDP_DST_BURST_32 | CSDP_DST_PACKED;
+ else if (port_window_bytes >= 16)
+ d->csdp = CSDP_DST_BURST_16 | CSDP_DST_PACKED;
+ } else {
+ d->ccr |= CCR_DST_AMODE_CONSTANT;
+ }
}
d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
@@ -927,6 +989,9 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
d->ccr |= CCR_TRIGGER_SRC;
d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
+
+ if (port_window)
+ d->csdp |= CSDP_WRITE_LAST_NON_POSTED;
}
if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
d->clnk_ctrl = c->dma_ch;
@@ -952,6 +1017,16 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
osg->addr = sg_dma_address(sgent);
osg->en = en;
osg->fn = sg_dma_len(sgent) / frame_bytes;
+ if (port_window && dir == DMA_MEM_TO_DEV) {
+ osg->ei = 1;
+ /*
+ * One frame covers the port_window and by configure
+ * the source frame index to be -1 * (port_window - 1)
+ * we instruct the sDMA that after a frame is processed
+ * it should move back to the start of the window.
+ */
+ osg->fi = -(port_window_bytes - 1);
+ }
if (d->using_ll) {
osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC,
@@ -1247,10 +1322,8 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
omap_dma_stop(c);
}
- if (c->cyclic) {
- c->cyclic = false;
- c->paused = false;
- }
+ c->cyclic = false;
+ c->paused = false;
vchan_get_all_descriptors(&c->vc, &head);
spin_unlock_irqrestore(&c->vc.lock, flags);
@@ -1269,28 +1342,66 @@ static void omap_dma_synchronize(struct dma_chan *chan)
static int omap_dma_pause(struct dma_chan *chan)
{
struct omap_chan *c = to_omap_dma_chan(chan);
+ struct omap_dmadev *od = to_omap_dma_dev(chan->device);
+ unsigned long flags;
+ int ret = -EINVAL;
+ bool can_pause = false;
- /* Pause/Resume only allowed with cyclic mode */
- if (!c->cyclic)
- return -EINVAL;
+ spin_lock_irqsave(&od->irq_lock, flags);
- if (!c->paused) {
- omap_dma_stop(c);
- c->paused = true;
+ if (!c->desc)
+ goto out;
+
+ if (c->cyclic)
+ can_pause = true;
+
+ /*
+ * We do not allow DMA_MEM_TO_DEV transfers to be paused.
+ * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer:
+ * "When a channel is disabled during a transfer, the channel undergoes
+ * an abort, unless it is hardware-source-synchronized …".
+ * A source-synchronised channel is one where the fetching of data is
+ * under control of the device. In other words, a device-to-memory
+ * transfer. So, a destination-synchronised channel (which would be a
+ * memory-to-device transfer) undergoes an abort if the the CCR_ENABLE
+ * bit is cleared.
+ * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel
+ * aborts immediately after completion of current read/write
+ * transactions and then the FIFO is cleaned up." The term "cleaned up"
+ * is not defined. TI recommends to check that RD_ACTIVE and WR_ACTIVE
+ * are both clear _before_ disabling the channel, otherwise data loss
+ * will occur.
+ * The problem is that if the channel is active, then device activity
+ * can result in DMA activity starting between reading those as both
+ * clear and the write to DMA_CCR to clear the enable bit hitting the
+ * hardware. If the DMA hardware can't drain the data in its FIFO to the
+ * destination, then data loss "might" occur (say if we write to an UART
+ * and the UART is not accepting any further data).
+ */
+ else if (c->desc->dir == DMA_DEV_TO_MEM)
+ can_pause = true;
+
+ if (can_pause && !c->paused) {
+ ret = omap_dma_stop(c);
+ if (!ret)
+ c->paused = true;
}
+out:
+ spin_unlock_irqrestore(&od->irq_lock, flags);
- return 0;
+ return ret;
}
static int omap_dma_resume(struct dma_chan *chan)
{
struct omap_chan *c = to_omap_dma_chan(chan);
+ struct omap_dmadev *od = to_omap_dma_dev(chan->device);
+ unsigned long flags;
+ int ret = -EINVAL;
- /* Pause/Resume only allowed with cyclic mode */
- if (!c->cyclic)
- return -EINVAL;
+ spin_lock_irqsave(&od->irq_lock, flags);
- if (c->paused) {
+ if (c->paused && c->desc) {
mb();
/* Restore channel link register */
@@ -1298,9 +1409,11 @@ static int omap_dma_resume(struct dma_chan *chan)
omap_dma_start(c, c->desc);
c->paused = false;
+ ret = 0;
}
+ spin_unlock_irqrestore(&od->irq_lock, flags);
- return 0;
+ return ret;
}
static int omap_dma_chan_init(struct omap_dmadev *od)
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index df95727dc2fb..f9028e9d0dfc 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -417,10 +417,8 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
struct pch_dma_desc *desc = to_pd_desc(txd);
struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
- dma_cookie_t cookie;
spin_lock(&pd_chan->lock);
- cookie = dma_cookie_assign(txd);
if (list_empty(&pd_chan->active_list)) {
list_add_tail(&desc->desc_node, &pd_chan->active_list);
@@ -439,9 +437,8 @@ static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
struct pch_dma *pd = to_pd(chan->device);
dma_addr_t addr;
- desc = pci_pool_alloc(pd->pool, flags, &addr);
+ desc = pci_pool_zalloc(pd->pool, flags, &addr);
if (desc) {
- memset(desc, 0, sizeof(struct pch_dma_desc));
INIT_LIST_HEAD(&desc->tx_list);
dma_async_tx_descriptor_init(&desc->txd, chan);
desc->txd.tx_submit = pd_tx_submit;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 030fe05ed43b..87fd01539fcb 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -570,7 +570,8 @@ static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
buf[0] = CMD_DMAADDH;
buf[0] |= (da << 1);
- *((__le16 *)&buf[1]) = cpu_to_le16(val);
+ buf[1] = val;
+ buf[2] = val >> 8;
PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
da == 1 ? "DA" : "SA", val);
@@ -724,7 +725,10 @@ static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
buf[0] = CMD_DMAMOV;
buf[1] = dst;
- *((__le32 *)&buf[2]) = cpu_to_le32(val);
+ buf[2] = val;
+ buf[3] = val >> 8;
+ buf[4] = val >> 16;
+ buf[5] = val >> 24;
PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);
@@ -899,10 +903,11 @@ static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
buf[0] = CMD_DMAGO;
buf[0] |= (ns << 1);
-
buf[1] = chan & 0x7;
-
- *((__le32 *)&buf[2]) = cpu_to_le32(addr);
+ buf[2] = addr;
+ buf[3] = addr >> 8;
+ buf[4] = addr >> 16;
+ buf[5] = addr >> 24;
return SZ_DMAGO;
}
@@ -1883,11 +1888,8 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330)
static int pl330_add(struct pl330_dmac *pl330)
{
- void __iomem *regs;
int i, ret;
- regs = pl330->base;
-
/* Check if we can handle this DMAC */
if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n",
@@ -2263,6 +2265,11 @@ static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
}
pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
pm_runtime_put_autosuspend(pl330->ddma.dev);
+
+ /* If DMAMOV hasn't finished yet, SAR/DAR can be zero */
+ if (!val)
+ return 0;
+
return val - addr;
}
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 3f56f9ca4482..b53fb618bbf6 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -413,15 +413,6 @@ static inline void pxad_init_debugfs(struct pxad_device *pdev) {}
static inline void pxad_cleanup_debugfs(struct pxad_device *pdev) {}
#endif
-/*
- * In the transition phase where legacy pxa handling is done at the same time as
- * mmp_dma, the DMA physical channel split between the 2 DMA providers is done
- * through legacy_reserved. Legacy code reserves DMA channels by settings
- * corresponding bits in legacy_reserved.
- */
-static u32 legacy_reserved;
-static u32 legacy_unavailable;
-
static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
{
int prio, i;
@@ -442,14 +433,10 @@ static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
for (i = 0; i < pdev->nr_chans; i++) {
if (prio != (i & 0xf) >> 2)
continue;
- if ((i < 32) && (legacy_reserved & BIT(i)))
- continue;
phy = &pdev->phys[i];
if (!phy->vchan) {
phy->vchan = pchan;
found = phy;
- if (i < 32)
- legacy_unavailable |= BIT(i);
goto out_unlock;
}
}
@@ -469,7 +456,6 @@ static void pxad_free_phy(struct pxad_chan *chan)
struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
unsigned long flags;
u32 reg;
- int i;
dev_dbg(&chan->vc.chan.dev->device,
"%s(): freeing\n", __func__);
@@ -483,9 +469,6 @@ static void pxad_free_phy(struct pxad_chan *chan)
}
spin_lock_irqsave(&pdev->phy_lock, flags);
- for (i = 0; i < 32; i++)
- if (chan->phy == &pdev->phys[i])
- legacy_unavailable &= ~BIT(i);
chan->phy->vchan = NULL;
chan->phy = NULL;
spin_unlock_irqrestore(&pdev->phy_lock, flags);
@@ -739,8 +722,6 @@ static irqreturn_t pxad_int_handler(int irq, void *dev_id)
i = __ffs(dint);
dint &= (dint - 1);
phy = &pdev->phys[i];
- if ((i < 32) && (legacy_reserved & BIT(i)))
- continue;
if (pxad_chan_handler(irq, phy) == IRQ_HANDLED)
ret = IRQ_HANDLED;
}
@@ -1522,15 +1503,6 @@ bool pxad_filter_fn(struct dma_chan *chan, void *param)
}
EXPORT_SYMBOL_GPL(pxad_filter_fn);
-int pxad_toggle_reserved_channel(int legacy_channel)
-{
- if (legacy_unavailable & (BIT(legacy_channel)))
- return -EBUSY;
- legacy_reserved ^= BIT(legacy_channel);
- return 0;
-}
-EXPORT_SYMBOL_GPL(pxad_toggle_reserved_channel);
-
module_platform_driver(pxad_driver);
MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver");
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index e244e10a94b5..3c982c96b4b7 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -56,6 +56,7 @@
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
+#include <linux/msi.h>
#include "../dmaengine.h"
#include "hidma.h"
@@ -70,6 +71,7 @@
#define HIDMA_ERR_INFO_SW 0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE 0x0
#define HIDMA_NR_DEFAULT_DESC 10
+#define HIDMA_MSI_INTS 11
static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
@@ -553,6 +555,17 @@ static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
return hidma_ll_inthandler(chirq, lldev);
}
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
+{
+ struct hidma_lldev **lldevp = arg;
+ struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);
+
+ return hidma_ll_inthandler_msi(chirq, *lldevp,
+ 1 << (chirq - dmadev->msi_virqbase));
+}
+#endif
+
static ssize_t hidma_show_values(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -567,8 +580,13 @@ static ssize_t hidma_show_values(struct device *dev,
return strlen(buf);
}
-static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name,
- int mode)
+static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
+{
+ device_remove_file(dev->ddev.dev, dev->chid_attrs);
+}
+
+static struct device_attribute*
+hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
{
struct device_attribute *attrs;
char *name_copy;
@@ -576,18 +594,125 @@ static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name,
attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
GFP_KERNEL);
if (!attrs)
- return -ENOMEM;
+ return NULL;
name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
if (!name_copy)
- return -ENOMEM;
+ return NULL;
attrs->attr.name = name_copy;
attrs->attr.mode = mode;
attrs->show = hidma_show_values;
sysfs_attr_init(&attrs->attr);
- return device_create_file(dev->ddev.dev, attrs);
+ return attrs;
+}
+
+static int hidma_sysfs_init(struct hidma_dev *dev)
+{
+ dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
+ if (!dev->chid_attrs)
+ return -ENOMEM;
+
+ return device_create_file(dev->ddev.dev, dev->chid_attrs);
+}
+
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ struct hidma_dev *dmadev = dev_get_drvdata(dev);
+
+ if (!desc->platform.msi_index) {
+ writel(msg->address_lo, dmadev->dev_evca + 0x118);
+ writel(msg->address_hi, dmadev->dev_evca + 0x11C);
+ writel(msg->data, dmadev->dev_evca + 0x120);
+ }
+}
+#endif
+
+static void hidma_free_msis(struct hidma_dev *dmadev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ struct device *dev = dmadev->ddev.dev;
+ struct msi_desc *desc;
+
+ /* free allocated MSI interrupts above */
+ for_each_msi_entry(desc, dev)
+ devm_free_irq(dev, desc->irq, &dmadev->lldev);
+
+ platform_msi_domain_free_irqs(dev);
+#endif
+}
+
+static int hidma_request_msi(struct hidma_dev *dmadev,
+ struct platform_device *pdev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ int rc;
+ struct msi_desc *desc;
+ struct msi_desc *failed_desc = NULL;
+
+ rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
+ hidma_write_msi_msg);
+ if (rc)
+ return rc;
+
+ for_each_msi_entry(desc, &pdev->dev) {
+ if (!desc->platform.msi_index)
+ dmadev->msi_virqbase = desc->irq;
+
+ rc = devm_request_irq(&pdev->dev, desc->irq,
+ hidma_chirq_handler_msi,
+ 0, "qcom-hidma-msi",
+ &dmadev->lldev);
+ if (rc) {
+ failed_desc = desc;
+ break;
+ }
+ }
+
+ if (rc) {
+ /* free allocated MSI interrupts above */
+ for_each_msi_entry(desc, &pdev->dev) {
+ if (desc == failed_desc)
+ break;
+ devm_free_irq(&pdev->dev, desc->irq,
+ &dmadev->lldev);
+ }
+ } else {
+ /* Add callback to free MSIs on teardown */
+ hidma_ll_setup_irq(dmadev->lldev, true);
+
+ }
+ if (rc)
+ dev_warn(&pdev->dev,
+ "failed to request MSI irq, falling back to wired IRQ\n");
+ return rc;
+#else
+ return -EINVAL;
+#endif
+}
+
+static bool hidma_msi_capable(struct device *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ const char *of_compat;
+ int ret = -EINVAL;
+
+ if (!adev || acpi_disabled) {
+ ret = device_property_read_string(dev, "compatible",
+ &of_compat);
+ if (ret)
+ return false;
+
+ ret = strcmp(of_compat, "qcom,hidma-1.1");
+ } else {
+#ifdef CONFIG_ACPI
+ ret = strcmp(acpi_device_hid(adev), "QCOM8062");
+#endif
+ }
+ return ret == 0;
}
static int hidma_probe(struct platform_device *pdev)
@@ -599,6 +724,7 @@ static int hidma_probe(struct platform_device *pdev)
void __iomem *evca;
void __iomem *trca;
int rc;
+ bool msi;
pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
@@ -660,6 +786,12 @@ static int hidma_probe(struct platform_device *pdev)
dmadev->ddev.device_terminate_all = hidma_terminate_all;
dmadev->ddev.copy_align = 8;
+ /*
+ * Determine the MSI capability of the platform. Old HW doesn't
+ * support MSI.
+ */
+ msi = hidma_msi_capable(&pdev->dev);
+
device_property_read_u32(&pdev->dev, "desc-count",
&dmadev->nr_descriptors);
@@ -688,10 +820,17 @@ static int hidma_probe(struct platform_device *pdev)
goto dmafree;
}
- rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 0,
- "qcom-hidma", dmadev->lldev);
- if (rc)
- goto uninit;
+ platform_set_drvdata(pdev, dmadev);
+ if (msi)
+ rc = hidma_request_msi(dmadev, pdev);
+
+ if (!msi || rc) {
+ hidma_ll_setup_irq(dmadev->lldev, false);
+ rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
+ 0, "qcom-hidma", dmadev->lldev);
+ if (rc)
+ goto uninit;
+ }
INIT_LIST_HEAD(&dmadev->ddev.channels);
rc = hidma_chan_init(dmadev, 0);
@@ -705,14 +844,16 @@ static int hidma_probe(struct platform_device *pdev)
dmadev->irq = chirq;
tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
hidma_debug_init(dmadev);
- hidma_create_sysfs_entry(dmadev, "chid", S_IRUGO);
+ hidma_sysfs_init(dmadev);
dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
- platform_set_drvdata(pdev, dmadev);
pm_runtime_mark_last_busy(dmadev->ddev.dev);
pm_runtime_put_autosuspend(dmadev->ddev.dev);
return 0;
uninit:
+ if (msi)
+ hidma_free_msis(dmadev);
+
hidma_debug_uninit(dmadev);
hidma_ll_uninit(dmadev->lldev);
dmafree:
@@ -730,8 +871,13 @@ static int hidma_remove(struct platform_device *pdev)
pm_runtime_get_sync(dmadev->ddev.dev);
dma_async_device_unregister(&dmadev->ddev);
- devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+ if (!dmadev->lldev->msi_support)
+ devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+ else
+ hidma_free_msis(dmadev);
+
tasklet_kill(&dmadev->task);
+ hidma_sysfs_uninit(dmadev);
hidma_debug_uninit(dmadev);
hidma_ll_uninit(dmadev->lldev);
hidma_free(dmadev);
@@ -746,12 +892,15 @@ static int hidma_remove(struct platform_device *pdev)
#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
{"QCOM8061"},
+ {"QCOM8062"},
{},
};
+MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif
static const struct of_device_id hidma_match[] = {
{.compatible = "qcom,hidma-1.0",},
+ {.compatible = "qcom,hidma-1.1",},
{},
};
MODULE_DEVICE_TABLE(of, hidma_match);
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
index e52e20716303..c7d014235c32 100644
--- a/drivers/dma/qcom/hidma.h
+++ b/drivers/dma/qcom/hidma.h
@@ -46,6 +46,7 @@ struct hidma_tre {
};
struct hidma_lldev {
+ bool msi_support; /* flag indicating MSI support */
bool initialized; /* initialized flag */
u8 trch_state; /* trch_state of the device */
u8 evch_state; /* evch_state of the device */
@@ -58,7 +59,7 @@ struct hidma_lldev {
void __iomem *evca; /* Event Channel address */
struct hidma_tre
**pending_tre_list; /* Pointers to pending TREs */
- s32 pending_tre_count; /* Number of TREs pending */
+ atomic_t pending_tre_count; /* Number of TREs pending */
void *tre_ring; /* TRE ring */
dma_addr_t tre_dma; /* TRE ring to be shared with HW */
@@ -114,6 +115,7 @@ struct hidma_dev {
int irq;
int chidx;
u32 nr_descriptors;
+ int msi_virqbase;
struct hidma_lldev *lldev;
void __iomem *dev_trca;
@@ -128,6 +130,9 @@ struct hidma_dev {
struct dentry *debugfs;
struct dentry *stats;
+ /* sysfs entry for the channel id */
+ struct device_attribute *chid_attrs;
+
/* Task delivering issue_pending */
struct tasklet_struct task;
};
@@ -145,12 +150,14 @@ int hidma_ll_disable(struct hidma_lldev *lldev);
int hidma_ll_enable(struct hidma_lldev *llhndl);
void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
dma_addr_t src, dma_addr_t dest, u32 len, u32 flags);
+void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi);
int hidma_ll_setup(struct hidma_lldev *lldev);
struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
void __iomem *trca, void __iomem *evca,
u8 chidx);
int hidma_ll_uninit(struct hidma_lldev *llhndl);
irqreturn_t hidma_ll_inthandler(int irq, void *arg);
+irqreturn_t hidma_ll_inthandler_msi(int irq, void *arg, int cause);
void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
u8 err_code);
int hidma_debug_init(struct hidma_dev *dmadev);
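
With pending_tre_count now an atomic_t, the low-level code can increment on submission and decrement on completion without relying on the channel lock for the counter itself, clamping at zero if a completion arrives with nothing outstanding. A minimal sketch of that idiom, using illustrative names only:

	#include <linux/atomic.h>
	#include <linux/printk.h>

	static atomic_t pending = ATOMIC_INIT(0);	/* hypothetical counter */

	static void submit_one(void)
	{
		atomic_inc(&pending);			/* descriptor handed to HW */
	}

	static void complete_one(void)
	{
		/* Clamp at zero on a spurious completion, as the driver does. */
		if (atomic_dec_return(&pending) < 0) {
			pr_warn("completion with no pending descriptor\n");
			atomic_set(&pending, 0);
		}
	}
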
diff --git a/drivers/dma/qcom/hidma_dbg.c b/drivers/dma/qcom/hidma_dbg.c
index fa827e5ffd68..3bdcb8056a36 100644
--- a/drivers/dma/qcom/hidma_dbg.c
+++ b/drivers/dma/qcom/hidma_dbg.c
@@ -74,7 +74,8 @@ static void hidma_ll_devstats(struct seq_file *s, void *llhndl)
seq_printf(s, "tre_ring_handle=%pap\n", &lldev->tre_dma);
seq_printf(s, "tre_ring_size = 0x%x\n", lldev->tre_ring_size);
seq_printf(s, "tre_processed_off = 0x%x\n", lldev->tre_processed_off);
- seq_printf(s, "pending_tre_count=%d\n", lldev->pending_tre_count);
+ seq_printf(s, "pending_tre_count=%d\n",
+ atomic_read(&lldev->pending_tre_count));
seq_printf(s, "evca=%p\n", lldev->evca);
seq_printf(s, "evre_ring=%p\n", lldev->evre_ring);
seq_printf(s, "evre_ring_handle=%pap\n", &lldev->evre_dma);
@@ -164,7 +165,6 @@ static const struct file_operations hidma_dma_fops = {
void hidma_debug_uninit(struct hidma_dev *dmadev)
{
debugfs_remove_recursive(dmadev->debugfs);
- debugfs_remove_recursive(dmadev->stats);
}
int hidma_debug_init(struct hidma_dev *dmadev)
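
The hidma_dbg.c hunk drops the second debugfs_remove_recursive(): the stats entry is created beneath the driver's top-level debugfs directory, so removing the parent recursively already tears it down. A small illustration with hypothetical names:

	#include <linux/debugfs.h>

	static struct dentry *my_root;		/* hypothetical entries */
	static struct dentry *my_stats;

	static void my_debug_init(void)
	{
		my_root = debugfs_create_dir("my-dma", NULL);
		my_stats = debugfs_create_dir("stats", my_root);	/* child of my_root */
	}

	static void my_debug_uninit(void)
	{
		debugfs_remove_recursive(my_root);	/* also removes my_stats */
		my_root = NULL;
		my_stats = NULL;
	}
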
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index 3224f24c577b..6645bdf0d151 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -198,13 +198,16 @@ static void hidma_ll_tre_complete(unsigned long arg)
}
}
-static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
- u8 err_info, u8 err_code)
+static int hidma_post_completed(struct hidma_lldev *lldev, u8 err_info,
+ u8 err_code)
{
struct hidma_tre *tre;
unsigned long flags;
+ u32 tre_iterator;
spin_lock_irqsave(&lldev->lock, flags);
+
+ tre_iterator = lldev->tre_processed_off;
tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE];
if (!tre) {
spin_unlock_irqrestore(&lldev->lock, flags);
@@ -218,12 +221,14 @@ static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
* Keep track of pending TREs that SW is expecting to receive
* from HW. We got one now. Decrement our counter.
*/
- lldev->pending_tre_count--;
- if (lldev->pending_tre_count < 0) {
+ if (atomic_dec_return(&lldev->pending_tre_count) < 0) {
dev_warn(lldev->dev, "tre count mismatch on completion");
- lldev->pending_tre_count = 0;
+ atomic_set(&lldev->pending_tre_count, 0);
}
+ HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
+ lldev->tre_ring_size);
+ lldev->tre_processed_off = tre_iterator;
spin_unlock_irqrestore(&lldev->lock, flags);
tre->err_info = err_info;
@@ -245,13 +250,11 @@ static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
{
u32 evre_ring_size = lldev->evre_ring_size;
- u32 tre_ring_size = lldev->tre_ring_size;
u32 err_info, err_code, evre_write_off;
- u32 tre_iterator, evre_iterator;
+ u32 evre_iterator;
u32 num_completed = 0;
evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
- tre_iterator = lldev->tre_processed_off;
evre_iterator = lldev->evre_processed_off;
if ((evre_write_off > evre_ring_size) ||
@@ -274,12 +277,9 @@ static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
err_code =
(cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK;
- if (hidma_post_completed(lldev, tre_iterator, err_info,
- err_code))
+ if (hidma_post_completed(lldev, err_info, err_code))
break;
- HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
- tre_ring_size);
HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE,
evre_ring_size);
@@ -291,21 +291,22 @@ static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
evre_write_off =
readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
num_completed++;
+
+ /*
+ * An error interrupt might have arrived while we are processing
+ * the completed interrupt.
+ */
+ if (!hidma_ll_isenabled(lldev))
+ break;
}
if (num_completed) {
u32 evre_read_off = (lldev->evre_processed_off +
HIDMA_EVRE_SIZE * num_completed);
- u32 tre_read_off = (lldev->tre_processed_off +
- HIDMA_TRE_SIZE * num_completed);
-
evre_read_off = evre_read_off % evre_ring_size;
- tre_read_off = tre_read_off % tre_ring_size;
-
writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG);
/* record the last processed tre offset */
- lldev->tre_processed_off = tre_read_off;
lldev->evre_processed_off = evre_read_off;
}
@@ -315,27 +316,10 @@ static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
u8 err_code)
{
- u32 tre_iterator;
- u32 tre_ring_size = lldev->tre_ring_size;
- int num_completed = 0;
- u32 tre_read_off;
-
- tre_iterator = lldev->tre_processed_off;
- while (lldev->pending_tre_count) {
- if (hidma_post_completed(lldev, tre_iterator, err_info,
- err_code))
+ while (atomic_read(&lldev->pending_tre_count)) {
+ if (hidma_post_completed(lldev, err_info, err_code))
break;
- HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
- tre_ring_size);
- num_completed++;
}
- tre_read_off = (lldev->tre_processed_off +
- HIDMA_TRE_SIZE * num_completed);
-
- tre_read_off = tre_read_off % tre_ring_size;
-
- /* record the last processed tre offset */
- lldev->tre_processed_off = tre_read_off;
}
static int hidma_ll_reset(struct hidma_lldev *lldev)
@@ -412,12 +396,24 @@ static int hidma_ll_reset(struct hidma_lldev *lldev)
* requests traditionally to the destination, this concept does not apply
* here for this HW.
*/
-irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
+static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
{
- struct hidma_lldev *lldev = arg;
- u32 status;
- u32 enable;
- u32 cause;
+ if (cause & HIDMA_ERR_INT_MASK) {
+ dev_err(lldev->dev, "error 0x%x, disabling...\n",
+ cause);
+
+ /* Clear out pending interrupts */
+ writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+
+ /* No further submissions. */
+ hidma_ll_disable(lldev);
+
+ /* Driver completes the txn and intimates the client.*/
+ hidma_cleanup_pending_tre(lldev, 0xFF,
+ HIDMA_EVRE_STATUS_ERROR);
+
+ return;
+ }
/*
* Fine tuned for this HW...
@@ -426,35 +422,28 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
* read and write accessors are used for performance reasons due to
* interrupt delivery guarantees. Do not copy this code blindly and
* expect that to work.
+ *
+ * Try to consume as many EVREs as possible.
*/
+ hidma_handle_tre_completion(lldev);
+
+ /* We consumed TREs or there are pending TREs or EVREs. */
+ writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+}
+
+irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
+{
+ struct hidma_lldev *lldev = arg;
+ u32 status;
+ u32 enable;
+ u32 cause;
+
status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
cause = status & enable;
while (cause) {
- if (cause & HIDMA_ERR_INT_MASK) {
- dev_err(lldev->dev, "error 0x%x, disabling...\n",
- cause);
-
- /* Clear out pending interrupts */
- writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
-
- /* No further submissions. */
- hidma_ll_disable(lldev);
-
- /* Driver completes the txn and intimates the client.*/
- hidma_cleanup_pending_tre(lldev, 0xFF,
- HIDMA_EVRE_STATUS_ERROR);
- goto out;
- }
-
- /*
- * Try to consume as many EVREs as possible.
- */
- hidma_handle_tre_completion(lldev);
-
- /* We consumed TREs or there are pending TREs or EVREs. */
- writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+ hidma_ll_int_handler_internal(lldev, cause);
/*
* Another interrupt might have arrived while we are
@@ -465,7 +454,14 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
cause = status & enable;
}
-out:
+ return IRQ_HANDLED;
+}
+
+irqreturn_t hidma_ll_inthandler_msi(int chirq, void *arg, int cause)
+{
+ struct hidma_lldev *lldev = arg;
+
+ hidma_ll_int_handler_internal(lldev, cause);
return IRQ_HANDLED;
}
@@ -548,7 +544,7 @@ void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
tre->err_code = 0;
tre->err_info = 0;
tre->queued = 1;
- lldev->pending_tre_count++;
+ atomic_inc(&lldev->pending_tre_count);
lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
% lldev->tre_ring_size;
spin_unlock_irqrestore(&lldev->lock, flags);
@@ -564,19 +560,8 @@ int hidma_ll_disable(struct hidma_lldev *lldev)
u32 val;
int ret;
- val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
- lldev->evch_state = HIDMA_CH_STATE(val);
- val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
- lldev->trch_state = HIDMA_CH_STATE(val);
-
- /* already suspended by this OS */
- if ((lldev->trch_state == HIDMA_CH_SUSPENDED) ||
- (lldev->evch_state == HIDMA_CH_SUSPENDED))
- return 0;
-
- /* already stopped by the manager */
- if ((lldev->trch_state == HIDMA_CH_STOPPED) ||
- (lldev->evch_state == HIDMA_CH_STOPPED))
+ /* The channel needs to be in working state */
+ if (!hidma_ll_isenabled(lldev))
return 0;
val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
@@ -654,7 +639,7 @@ int hidma_ll_setup(struct hidma_lldev *lldev)
u32 val;
u32 nr_tres = lldev->nr_tres;
- lldev->pending_tre_count = 0;
+ atomic_set(&lldev->pending_tre_count, 0);
lldev->tre_processed_off = 0;
lldev->evre_processed_off = 0;
lldev->tre_write_offset = 0;
@@ -691,17 +676,36 @@ int hidma_ll_setup(struct hidma_lldev *lldev)
writel(HIDMA_EVRE_SIZE * nr_tres,
lldev->evca + HIDMA_EVCA_RING_LEN_REG);
- /* support IRQ only for now */
+ /* configure interrupts */
+ hidma_ll_setup_irq(lldev, lldev->msi_support);
+
+ rc = hidma_ll_enable(lldev);
+ if (rc)
+ return rc;
+
+ return rc;
+}
+
+void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi)
+{
+ u32 val;
+
+ lldev->msi_support = msi;
+
+ /* disable interrupts again after reset */
+ writel(0, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+ writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+
+ /* support IRQ by default */
val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG);
val &= ~0xF;
- val |= 0x1;
+ if (!lldev->msi_support)
+ val = val | 0x1;
writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG);
/* clear all pending interrupts and enable them */
writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
-
- return hidma_ll_enable(lldev);
}
struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
@@ -816,7 +820,7 @@ int hidma_ll_uninit(struct hidma_lldev *lldev)
tasklet_kill(&lldev->task);
memset(lldev->trepool, 0, required_bytes);
lldev->trepool = NULL;
- lldev->pending_tre_count = 0;
+ atomic_set(&lldev->pending_tre_count, 0);
lldev->tre_write_offset = 0;
rc = hidma_ll_reset(lldev);
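
The interrupt rework in hidma_ll.c routes both delivery paths through one cause handler: the wired-IRQ entry point derives the cause from the status and enable registers and loops until no cause bits remain, while the MSI entry point is handed the cause directly. A bare-bones sketch of that shape, with illustrative names and the register reads stubbed out:

	#include <linux/interrupt.h>
	#include <linux/types.h>

	struct my_ll {
		int chidx;		/* placeholder state; registers omitted */
	};

	/* Shared by the wired-IRQ and MSI paths; body is illustrative only. */
	static void my_handle_cause(struct my_ll *ll, u32 cause)
	{
		/*
		 * On error: clear the cause, disable the channel, fail the
		 * pending descriptors.  Otherwise: consume completions and
		 * clear the handled cause bits.
		 */
	}

	static irqreturn_t my_wired_isr(int irq, void *arg)
	{
		struct my_ll *ll = arg;
		u32 status = 0, enable = 0;	/* real code: readl() of IRQ regs */
		u32 cause = status & enable;

		while (cause) {
			my_handle_cause(ll, cause);
			status = 0;	/* real code: re-read, more work may  */
			enable = 0;	/* have arrived while we were handling */
			cause = status & enable;
		}
		return IRQ_HANDLED;
	}

	/* MSI delivery already identifies the cause, so no register polling loop. */
	static irqreturn_t my_msi_isr(int irq, void *arg, u32 cause)
	{
		my_handle_cause(arg, cause);
		return IRQ_HANDLED;
	}
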
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 82f36e466083..f847d32cc4b5 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -282,6 +282,7 @@ static const struct acpi_device_id hidma_mgmt_acpi_ids[] = {
{"QCOM8060"},
{},
};
+MODULE_DEVICE_TABLE(acpi, hidma_mgmt_acpi_ids);
#endif
static const struct of_device_id hidma_mgmt_match[] = {
@@ -375,8 +376,15 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
ret = PTR_ERR(new_pdev);
goto out;
}
+ of_node_get(child);
+ new_pdev->dev.of_node = child;
of_dma_configure(&new_pdev->dev, child);
-
+ /*
+ * It is assumed that calling of_msi_configure is safe on
+ * platforms with or without MSI support.
+ */
+ of_msi_configure(&new_pdev->dev, child);
+ of_node_put(child);
kfree(res);
res = NULL;
}
@@ -395,7 +403,6 @@ static int __init hidma_mgmt_init(void)
for_each_matching_node(child, hidma_mgmt_match) {
/* device tree based firmware here */
hidma_mgmt_of_populate_channels(child);
- of_node_put(child);
}
#endif
platform_driver_register(&hidma_mgmt_driver);
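
In hidma_mgmt.c, each newly created channel device now pins its DT child node, takes it as dev.of_node, lets the OF core configure MSI from that node, and then drops the temporary reference; the explicit of_node_put() in the hidma_mgmt_init() loop goes away because for_each_matching_node() already releases the previous node as it advances. Roughly, as a hypothetical helper with error handling omitted:

	#include <linux/of.h>
	#include <linux/of_irq.h>
	#include <linux/platform_device.h>

	static void my_attach_child(struct platform_device *new_pdev,
				    struct device_node *child)
	{
		of_node_get(child);		/* keep the node alive while we use it */
		new_pdev->dev.of_node = child;
		/* Safe to call whether or not the platform supports MSI. */
		of_msi_configure(&new_pdev->dev, child);
		of_node_put(child);		/* balance the temporary reference */
	}
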
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 3c579abbabb7..f04c4702d98b 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -289,16 +289,11 @@ static
struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan)
{
struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
- struct s3c24xx_dma_channel *cdata;
struct s3c24xx_dma_phy *phy = NULL;
unsigned long flags;
int i;
int ret;
- if (s3cchan->slave)
- cdata = &pdata->channels[s3cchan->id];
-
for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
phy = &s3cdma->phy_chans[i];
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 06ecdc38cee0..72c649713ace 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -652,7 +652,6 @@ static bool usb_dmac_chan_filter(struct dma_chan *chan, void *arg)
static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
- struct usb_dmac_chan *uchan;
struct dma_chan *chan;
dma_cap_mask_t mask;
@@ -667,8 +666,6 @@ static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
if (!chan)
return NULL;
- uchan = to_usb_dmac_chan(chan);
-
return chan;
}
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 8f62edad51be..a0733ac3edb1 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -1011,7 +1011,6 @@ static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev)
{
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
struct sirfsoc_dma_regs *save = &sdma->regs_save;
- struct sirfsoc_dma_desc *sdesc;
struct sirfsoc_dma_chan *schan;
int ch;
int ret;
@@ -1044,9 +1043,6 @@ static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev)
schan = &sdma->channels[ch];
if (list_empty(&schan->active))
continue;
- sdesc = list_first_entry(&schan->active,
- struct sirfsoc_dma_desc,
- node);
save->ctrl[ch] = readl_relaxed(sdma->base +
ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
}
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 307547f4848d..3688d0873a3e 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -527,13 +527,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
{
struct stm32_dma_chan *chan = devid;
struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
- u32 status, scr, sfcr;
+ u32 status, scr;
spin_lock(&chan->vchan.lock);
status = stm32_dma_irq_status(chan);
scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
- sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
if ((status & STM32_DMA_TCI) && (scr & STM32_DMA_SCR_TCIE)) {
stm32_dma_irq_clear(chan, STM32_DMA_TCI);
@@ -574,15 +573,12 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
int src_bus_width, dst_bus_width;
int src_burst_size, dst_burst_size;
u32 src_maxburst, dst_maxburst;
- dma_addr_t src_addr, dst_addr;
u32 dma_scr = 0;
src_addr_width = chan->dma_sconfig.src_addr_width;
dst_addr_width = chan->dma_sconfig.dst_addr_width;
src_maxburst = chan->dma_sconfig.src_maxburst;
dst_maxburst = chan->dma_sconfig.dst_maxburst;
- src_addr = chan->dma_sconfig.src_addr;
- dst_addr = chan->dma_sconfig.dst_addr;
switch (direction) {
case DMA_MEM_TO_DEV:
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
index 245d759d5ffc..380276d078b2 100644
--- a/drivers/dma/zx296702_dma.c
+++ b/drivers/dma/zx296702_dma.c
@@ -435,13 +435,12 @@ static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num,
if (!ds)
return NULL;
- ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
+ ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
if (!ds->desc_hw) {
dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
kfree(ds);
return NULL;
}
- memset(ds->desc_hw, 0, sizeof(struct zx_desc_hw) * num);
ds->desc_num = num;
return ds;
}
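
The zx296702 hunk is the dma_pool_zalloc() cleanup applied across several drivers in this update: one zeroing allocation replaces dma_pool_alloc() followed by memset(). Minimal sketch with a hypothetical wrapper:

	#include <linux/dmapool.h>
	#include <linux/gfp.h>

	/* Hypothetical wrapper: returns zeroed descriptor memory or NULL. */
	static void *my_alloc_desc(struct dma_pool *pool, dma_addr_t *phys)
	{
		return dma_pool_zalloc(pool, GFP_NOWAIT, phys);
	}
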