Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/at_hdmac.c        |   5
-rw-r--r--  drivers/dma/bcm2835-dma.c     |  95
-rw-r--r--  drivers/dma/dma-axi-dmac.c    |   3
-rw-r--r--  drivers/dma/dma-jz4780.c      |   5
-rw-r--r--  drivers/dma/dmatest.c         | 269
-rw-r--r--  drivers/dma/dw/core.c         |   1
-rw-r--r--  drivers/dma/imx-sdma.c        |  25
-rw-r--r--  drivers/dma/ioat/dma.c        |  12
-rw-r--r--  drivers/dma/ioat/dma.h        |   2
-rw-r--r--  drivers/dma/ioat/hw.h         |   3
-rw-r--r--  drivers/dma/ioat/init.c       |  40
-rw-r--r--  drivers/dma/ioat/registers.h  |  24
-rw-r--r--  drivers/dma/mv_xor.c          |   2
-rw-r--r--  drivers/dma/pl330.c           |   1
-rw-r--r--  drivers/dma/qcom/bam_dma.c    |   4
-rw-r--r--  drivers/dma/qcom/hidma.c      |  19
-rw-r--r--  drivers/dma/qcom/hidma_mgmt.c |   3
-rw-r--r--  drivers/dma/sa11x0-dma.c      |   2
-rw-r--r--  drivers/dma/sprd-dma.c        |  19
-rw-r--r--  drivers/dma/st_fdma.c         |   6
-rw-r--r--  drivers/dma/timb_dma.c        |   4
21 files changed, 295 insertions, 249 deletions
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 01d936c9fe89..a0a9cd76c1d4 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -134,7 +134,6 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
struct at_desc *ret = NULL;
unsigned long flags;
unsigned int i = 0;
- LIST_HEAD(tmp_list);
spin_lock_irqsave(&atchan->lock, flags);
list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
@@ -1387,8 +1386,6 @@ static int atc_pause(struct dma_chan *chan)
int chan_id = atchan->chan_common.chan_id;
unsigned long flags;
- LIST_HEAD(list);
-
dev_vdbg(chan2dev(chan), "%s\n", __func__);
spin_lock_irqsave(&atchan->lock, flags);
@@ -1408,8 +1405,6 @@ static int atc_resume(struct dma_chan *chan)
int chan_id = atchan->chan_common.chan_id;
unsigned long flags;
- LIST_HEAD(list);
-
dev_vdbg(chan2dev(chan), "%s\n", __func__);
if (!atc_chan_is_paused(atchan))
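For context on the three deletions above: LIST_HEAD(), from <linux/list.h>, declares a list head and initializes it to point at itself, roughly equivalent to:

struct list_head list = { .next = &list, .prev = &list };

None of these functions ever touched their local list, so the declarations only cost stack space and an initializer.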
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 1a44c8086d77..ec8a291d62ba 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -2,9 +2,6 @@
/*
* BCM2835 DMA engine support
*
- * This driver only supports cyclic DMA transfers
- * as needed for the I2S module.
- *
* Author: Florian Meier <florian.meier@koalo.de>
* Copyright 2013
*
@@ -42,7 +39,6 @@
struct bcm2835_dmadev {
struct dma_device ddev;
- spinlock_t lock;
void __iomem *base;
struct device_dma_parameters dma_parms;
};
@@ -64,7 +60,6 @@ struct bcm2835_cb_entry {
struct bcm2835_chan {
struct virt_dma_chan vc;
- struct list_head node;
struct dma_slave_config cfg;
unsigned int dreq;
@@ -312,8 +307,7 @@ static struct bcm2835_desc *bcm2835_dma_create_cb_chain(
return NULL;
/* allocate and setup the descriptor. */
- d = kzalloc(sizeof(*d) + frames * sizeof(struct bcm2835_cb_entry),
- gfp);
+ d = kzalloc(struct_size(d, cb_list, frames), gfp);
if (!d)
return NULL;
@@ -406,39 +400,32 @@ static void bcm2835_dma_fill_cb_chain_with_sg(
}
}
-static int bcm2835_dma_abort(void __iomem *chan_base)
+static void bcm2835_dma_abort(struct bcm2835_chan *c)
{
- unsigned long cs;
+ void __iomem *chan_base = c->chan_base;
long int timeout = 10000;
- cs = readl(chan_base + BCM2835_DMA_CS);
- if (!(cs & BCM2835_DMA_ACTIVE))
- return 0;
+ /*
+ * A zero control block address means the channel is idle.
+ * (The ACTIVE flag in the CS register is not a reliable indicator.)
+ */
+ if (!readl(chan_base + BCM2835_DMA_ADDR))
+ return;
/* Write 0 to the active bit - Pause the DMA */
writel(0, chan_base + BCM2835_DMA_CS);
/* Wait for any current AXI transfer to complete */
- while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) {
+ while ((readl(chan_base + BCM2835_DMA_CS) &
+ BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
cpu_relax();
- cs = readl(chan_base + BCM2835_DMA_CS);
- }
- /* We'll un-pause when we set of our next DMA */
+ /* Peripheral might be stuck and fail to signal AXI write responses */
if (!timeout)
- return -ETIMEDOUT;
-
- if (!(cs & BCM2835_DMA_ACTIVE))
- return 0;
+ dev_err(c->vc.chan.device->dev,
+ "failed to complete outstanding writes\n");
- /* Terminate the control block chain */
- writel(0, chan_base + BCM2835_DMA_NEXTCB);
-
- /* Abort the whole DMA */
- writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
- chan_base + BCM2835_DMA_CS);
-
- return 0;
+ writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
}
static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
@@ -476,8 +463,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
spin_lock_irqsave(&c->vc.lock, flags);
- /* Acknowledge interrupt */
- writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);
+ /*
+ * Clear the INT flag to receive further interrupts. Keep the channel
+ * active in case the descriptor is cyclic or in case the client has
+ * already terminated the descriptor and issued a new one. (May happen
+ * if this IRQ handler is threaded.) If the channel is finished, it
+ * will remain idle despite the ACTIVE flag being set.
+ */
+ writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
+ c->chan_base + BCM2835_DMA_CS);
d = c->desc;
@@ -485,11 +479,7 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
if (d->cyclic) {
/* call the cyclic callback */
vchan_cyclic_callback(&d->vd);
-
- /* Keep the DMA engine running */
- writel(BCM2835_DMA_ACTIVE,
- c->chan_base + BCM2835_DMA_CS);
- } else {
+ } else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
vchan_cookie_complete(&c->desc->vd);
bcm2835_dma_start_desc(c);
}
@@ -507,8 +497,12 @@ static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
dev_dbg(dev, "Allocating DMA channel %d\n", c->ch);
+ /*
+ * Control blocks are 256 bit in length and must start at a 256 bit
+ * (32 byte) aligned address (BCM2835 ARM Peripherals, sec. 4.2.1.1).
+ */
c->cb_pool = dma_pool_create(dev_name(dev), dev,
- sizeof(struct bcm2835_dma_cb), 0, 0);
+ sizeof(struct bcm2835_dma_cb), 32, 0);
if (!c->cb_pool) {
dev_err(dev, "unable to allocate descriptor pool\n");
return -ENOMEM;
@@ -777,39 +771,16 @@ static int bcm2835_dma_slave_config(struct dma_chan *chan,
static int bcm2835_dma_terminate_all(struct dma_chan *chan)
{
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
- struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
unsigned long flags;
- int timeout = 10000;
LIST_HEAD(head);
spin_lock_irqsave(&c->vc.lock, flags);
- /* Prevent this channel being scheduled */
- spin_lock(&d->lock);
- list_del_init(&c->node);
- spin_unlock(&d->lock);
-
- /*
- * Stop DMA activity: we assume the callback will not be called
- * after bcm_dma_abort() returns (even if it does, it will see
- * c->desc is NULL and exit.)
- */
+ /* stop DMA activity */
if (c->desc) {
vchan_terminate_vdesc(&c->desc->vd);
c->desc = NULL;
- bcm2835_dma_abort(c->chan_base);
-
- /* Wait for stopping */
- while (--timeout) {
- if (!(readl(c->chan_base + BCM2835_DMA_CS) &
- BCM2835_DMA_ACTIVE))
- break;
-
- cpu_relax();
- }
-
- if (!timeout)
- dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
+ bcm2835_dma_abort(c);
}
vchan_get_all_descriptors(&c->vc, &head);
@@ -837,7 +808,6 @@ static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id,
c->vc.desc_free = bcm2835_dma_desc_free;
vchan_init(&c->vc, &d->ddev);
- INIT_LIST_HEAD(&c->node);
c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
c->ch = chan_id;
@@ -940,7 +910,6 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
od->ddev.dev = &pdev->dev;
INIT_LIST_HEAD(&od->ddev.channels);
- spin_lock_init(&od->lock);
platform_set_drvdata(pdev, od);
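One of the hunks above deserves a usage note: the dma_pool_create() change pins every control block to the 32-byte boundary the DMA engine requires, and allocations from the pool then inherit that alignment. A minimal sketch, with the driver context assumed and the error path abbreviated:

/* Allocate one 32-byte-aligned control block from the pool created in
 * bcm2835_dma_alloc_chan_resources(); cb_phys is what the hardware is
 * handed, so it is the address that must honor sec. 4.2.1.1. */
struct bcm2835_dma_cb *cb;
dma_addr_t cb_phys;

cb = dma_pool_alloc(c->cb_pool, GFP_NOWAIT, &cb_phys);
if (!cb)
        return NULL;
/* ... fill cb->info, cb->src, cb->dst, cb->length, cb->next ... */
dma_pool_free(c->cb_pool, cb, cb_phys);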
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 15b2453d2647..ffc0adc2f6ce 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -367,8 +367,7 @@ static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
struct axi_dmac_desc *desc;
unsigned int i;
- desc = kzalloc(sizeof(struct axi_dmac_desc) +
- sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT);
+ desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
if (!desc)
return NULL;
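This is the same struct_size() conversion applied to several drivers in this series (jz4780, bam_dma, st_fdma, timb_dma below). The helper, from <linux/overflow.h>, computes the size of a structure with a trailing flexible array and saturates to SIZE_MAX if the multiplication overflows, so the allocator fails instead of returning an undersized buffer. A sketch of the pattern, with fixed members elided:

/* A struct ending in a flexible array member: */
struct axi_dmac_desc {
        /* ... fixed members elided ... */
        unsigned int num_sgs;
        struct axi_dmac_sg sg[];
};

/*
 * struct_size(desc, sg, num_sgs) evaluates to
 *   sizeof(*desc) + num_sgs * sizeof(desc->sg[0])
 * except that an overflowing product saturates to SIZE_MAX, which
 * kzalloc() refuses, returning NULL.
 */
desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);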
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index a8b6225faa12..9ce0a386225b 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -838,9 +838,8 @@ static int jz4780_dma_probe(struct platform_device *pdev)
if (!soc_data)
return -EINVAL;
- jzdma = devm_kzalloc(dev, sizeof(*jzdma)
- + sizeof(*jzdma->chan) * soc_data->nb_channels,
- GFP_KERNEL);
+ jzdma = devm_kzalloc(dev, struct_size(jzdma, chan,
+ soc_data->nb_channels), GFP_KERNEL);
if (!jzdma)
return -ENOMEM;
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 2eea4ef72915..50221d467d86 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -200,15 +200,20 @@ struct dmatest_done {
wait_queue_head_t *wait;
};
+struct dmatest_data {
+ u8 **raw;
+ u8 **aligned;
+ unsigned int cnt;
+ unsigned int off;
+};
+
struct dmatest_thread {
struct list_head node;
struct dmatest_info *info;
struct task_struct *task;
struct dma_chan *chan;
- u8 **srcs;
- u8 **usrcs;
- u8 **dsts;
- u8 **udsts;
+ struct dmatest_data src;
+ struct dmatest_data dst;
enum dma_transaction_type type;
wait_queue_head_t done_wait;
struct dmatest_done test_done;
@@ -481,6 +486,53 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
return FIXPT_TO_INT(dmatest_persec(runtime, len >> 10));
}
+static void __dmatest_free_test_data(struct dmatest_data *d, unsigned int cnt)
+{
+ unsigned int i;
+
+ for (i = 0; i < cnt; i++)
+ kfree(d->raw[i]);
+
+ kfree(d->aligned);
+ kfree(d->raw);
+}
+
+static void dmatest_free_test_data(struct dmatest_data *d)
+{
+ __dmatest_free_test_data(d, d->cnt);
+}
+
+static int dmatest_alloc_test_data(struct dmatest_data *d,
+ unsigned int buf_size, u8 align)
+{
+ unsigned int i = 0;
+
+ d->raw = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
+ if (!d->raw)
+ return -ENOMEM;
+
+ d->aligned = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
+ if (!d->aligned)
+ goto err;
+
+ for (i = 0; i < d->cnt; i++) {
+ d->raw[i] = kmalloc(buf_size + align, GFP_KERNEL);
+ if (!d->raw[i])
+ goto err;
+
+ /* align to alignment restriction */
+ if (align)
+ d->aligned[i] = PTR_ALIGN(d->raw[i], align);
+ else
+ d->aligned[i] = d->raw[i];
+ }
+
+ return 0;
+err:
+ __dmatest_free_test_data(d, i);
+ return -ENOMEM;
+}
+
/*
* This function repeatedly tests DMA transfers of various lengths and
* offsets for a given operation type until it is told to exit by
@@ -511,8 +563,9 @@ static int dmatest_func(void *data)
enum dma_ctrl_flags flags;
u8 *pq_coefs = NULL;
int ret;
- int src_cnt;
- int dst_cnt;
+ unsigned int buf_size;
+ struct dmatest_data *src;
+ struct dmatest_data *dst;
int i;
ktime_t ktime, start, diff;
ktime_t filltime = 0;
@@ -535,25 +588,27 @@ static int dmatest_func(void *data)
params = &info->params;
chan = thread->chan;
dev = chan->device;
+ src = &thread->src;
+ dst = &thread->dst;
if (thread->type == DMA_MEMCPY) {
align = params->alignment < 0 ? dev->copy_align :
params->alignment;
- src_cnt = dst_cnt = 1;
+ src->cnt = dst->cnt = 1;
} else if (thread->type == DMA_MEMSET) {
align = params->alignment < 0 ? dev->fill_align :
params->alignment;
- src_cnt = dst_cnt = 1;
+ src->cnt = dst->cnt = 1;
is_memset = true;
} else if (thread->type == DMA_XOR) {
/* force odd to ensure dst = src */
- src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
- dst_cnt = 1;
+ src->cnt = min_odd(params->xor_sources | 1, dev->max_xor);
+ dst->cnt = 1;
align = params->alignment < 0 ? dev->xor_align :
params->alignment;
} else if (thread->type == DMA_PQ) {
/* force odd to ensure dst = src */
- src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
- dst_cnt = 2;
+ src->cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
+ dst->cnt = 2;
align = params->alignment < 0 ? dev->pq_align :
params->alignment;
@@ -561,75 +616,38 @@ static int dmatest_func(void *data)
if (!pq_coefs)
goto err_thread_type;
- for (i = 0; i < src_cnt; i++)
+ for (i = 0; i < src->cnt; i++)
pq_coefs[i] = 1;
} else
goto err_thread_type;
/* Check if buffer count fits into map count variable (u8) */
- if ((src_cnt + dst_cnt) >= 255) {
+ if ((src->cnt + dst->cnt) >= 255) {
pr_err("too many buffers (%d of 255 supported)\n",
- src_cnt + dst_cnt);
+ src->cnt + dst->cnt);
goto err_free_coefs;
}
- if (1 << align > params->buf_size) {
+ buf_size = params->buf_size;
+ if (1 << align > buf_size) {
pr_err("%u-byte buffer too small for %d-byte alignment\n",
- params->buf_size, 1 << align);
+ buf_size, 1 << align);
goto err_free_coefs;
}
- thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
- if (!thread->srcs)
+ if (dmatest_alloc_test_data(src, buf_size, align) < 0)
goto err_free_coefs;
- thread->usrcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
- if (!thread->usrcs)
- goto err_usrcs;
-
- for (i = 0; i < src_cnt; i++) {
- thread->usrcs[i] = kmalloc(params->buf_size + align,
- GFP_KERNEL);
- if (!thread->usrcs[i])
- goto err_srcbuf;
-
- /* align srcs to alignment restriction */
- if (align)
- thread->srcs[i] = PTR_ALIGN(thread->usrcs[i], align);
- else
- thread->srcs[i] = thread->usrcs[i];
- }
- thread->srcs[i] = NULL;
-
- thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
- if (!thread->dsts)
- goto err_dsts;
-
- thread->udsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
- if (!thread->udsts)
- goto err_udsts;
-
- for (i = 0; i < dst_cnt; i++) {
- thread->udsts[i] = kmalloc(params->buf_size + align,
- GFP_KERNEL);
- if (!thread->udsts[i])
- goto err_dstbuf;
-
- /* align dsts to alignment restriction */
- if (align)
- thread->dsts[i] = PTR_ALIGN(thread->udsts[i], align);
- else
- thread->dsts[i] = thread->udsts[i];
- }
- thread->dsts[i] = NULL;
+ if (dmatest_alloc_test_data(dst, buf_size, align) < 0)
+ goto err_src;
set_user_nice(current, 10);
- srcs = kcalloc(src_cnt, sizeof(dma_addr_t), GFP_KERNEL);
+ srcs = kcalloc(src->cnt, sizeof(dma_addr_t), GFP_KERNEL);
if (!srcs)
- goto err_dstbuf;
+ goto err_dst;
- dma_pq = kcalloc(dst_cnt, sizeof(dma_addr_t), GFP_KERNEL);
+ dma_pq = kcalloc(dst->cnt, sizeof(dma_addr_t), GFP_KERNEL);
if (!dma_pq)
goto err_srcs_array;
@@ -644,21 +662,21 @@ static int dmatest_func(void *data)
struct dma_async_tx_descriptor *tx = NULL;
struct dmaengine_unmap_data *um;
dma_addr_t *dsts;
- unsigned int src_off, dst_off, len;
+ unsigned int len;
total_tests++;
if (params->transfer_size) {
- if (params->transfer_size >= params->buf_size) {
+ if (params->transfer_size >= buf_size) {
pr_err("%u-byte transfer size must be lower than %u-buffer size\n",
- params->transfer_size, params->buf_size);
+ params->transfer_size, buf_size);
break;
}
len = params->transfer_size;
} else if (params->norandom) {
- len = params->buf_size;
+ len = buf_size;
} else {
- len = dmatest_random() % params->buf_size + 1;
+ len = dmatest_random() % buf_size + 1;
}
/* Do not alter transfer size explicitly defined by user */
@@ -670,59 +688,59 @@ static int dmatest_func(void *data)
total_len += len;
if (params->norandom) {
- src_off = 0;
- dst_off = 0;
+ src->off = 0;
+ dst->off = 0;
} else {
- src_off = dmatest_random() % (params->buf_size - len + 1);
- dst_off = dmatest_random() % (params->buf_size - len + 1);
+ src->off = dmatest_random() % (buf_size - len + 1);
+ dst->off = dmatest_random() % (buf_size - len + 1);
- src_off = (src_off >> align) << align;
- dst_off = (dst_off >> align) << align;
+ src->off = (src->off >> align) << align;
+ dst->off = (dst->off >> align) << align;
}
if (!params->noverify) {
start = ktime_get();
- dmatest_init_srcs(thread->srcs, src_off, len,
- params->buf_size, is_memset);
- dmatest_init_dsts(thread->dsts, dst_off, len,
- params->buf_size, is_memset);
+ dmatest_init_srcs(src->aligned, src->off, len,
+ buf_size, is_memset);
+ dmatest_init_dsts(dst->aligned, dst->off, len,
+ buf_size, is_memset);
diff = ktime_sub(ktime_get(), start);
filltime = ktime_add(filltime, diff);
}
- um = dmaengine_get_unmap_data(dev->dev, src_cnt + dst_cnt,
+ um = dmaengine_get_unmap_data(dev->dev, src->cnt + dst->cnt,
GFP_KERNEL);
if (!um) {
failed_tests++;
result("unmap data NULL", total_tests,
- src_off, dst_off, len, ret);
+ src->off, dst->off, len, ret);
continue;
}
- um->len = params->buf_size;
- for (i = 0; i < src_cnt; i++) {
- void *buf = thread->srcs[i];
+ um->len = buf_size;
+ for (i = 0; i < src->cnt; i++) {
+ void *buf = src->aligned[i];
struct page *pg = virt_to_page(buf);
unsigned long pg_off = offset_in_page(buf);
um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
um->len, DMA_TO_DEVICE);
- srcs[i] = um->addr[i] + src_off;
+ srcs[i] = um->addr[i] + src->off;
ret = dma_mapping_error(dev->dev, um->addr[i]);
if (ret) {
dmaengine_unmap_put(um);
result("src mapping error", total_tests,
- src_off, dst_off, len, ret);
+ src->off, dst->off, len, ret);
failed_tests++;
continue;
}
um->to_cnt++;
}
/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
- dsts = &um->addr[src_cnt];
- for (i = 0; i < dst_cnt; i++) {
- void *buf = thread->dsts[i];
+ dsts = &um->addr[src->cnt];
+ for (i = 0; i < dst->cnt; i++) {
+ void *buf = dst->aligned[i];
struct page *pg = virt_to_page(buf);
unsigned long pg_off = offset_in_page(buf);
@@ -732,7 +750,7 @@ static int dmatest_func(void *data)
if (ret) {
dmaengine_unmap_put(um);
result("dst mapping error", total_tests,
- src_off, dst_off, len, ret);
+ src->off, dst->off, len, ret);
failed_tests++;
continue;
}
@@ -741,30 +759,30 @@ static int dmatest_func(void *data)
if (thread->type == DMA_MEMCPY)
tx = dev->device_prep_dma_memcpy(chan,
- dsts[0] + dst_off,
+ dsts[0] + dst->off,
srcs[0], len, flags);
else if (thread->type == DMA_MEMSET)
tx = dev->device_prep_dma_memset(chan,
- dsts[0] + dst_off,
- *(thread->srcs[0] + src_off),
+ dsts[0] + dst->off,
+ *(src->aligned[0] + src->off),
len, flags);
else if (thread->type == DMA_XOR)
tx = dev->device_prep_dma_xor(chan,
- dsts[0] + dst_off,
- srcs, src_cnt,
+ dsts[0] + dst->off,
+ srcs, src->cnt,
len, flags);
else if (thread->type == DMA_PQ) {
- for (i = 0; i < dst_cnt; i++)
- dma_pq[i] = dsts[i] + dst_off;
+ for (i = 0; i < dst->cnt; i++)
+ dma_pq[i] = dsts[i] + dst->off;
tx = dev->device_prep_dma_pq(chan, dma_pq, srcs,
- src_cnt, pq_coefs,
+ src->cnt, pq_coefs,
len, flags);
}
if (!tx) {
dmaengine_unmap_put(um);
- result("prep error", total_tests, src_off,
- dst_off, len, ret);
+ result("prep error", total_tests, src->off,
+ dst->off, len, ret);
msleep(100);
failed_tests++;
continue;
@@ -777,8 +795,8 @@ static int dmatest_func(void *data)
if (dma_submit_error(cookie)) {
dmaengine_unmap_put(um);
- result("submit error", total_tests, src_off,
- dst_off, len, ret);
+ result("submit error", total_tests, src->off,
+ dst->off, len, ret);
msleep(100);
failed_tests++;
continue;
@@ -793,58 +811,58 @@ static int dmatest_func(void *data)
dmaengine_unmap_put(um);
if (!done->done) {
- result("test timed out", total_tests, src_off, dst_off,
+ result("test timed out", total_tests, src->off, dst->off,
len, 0);
failed_tests++;
continue;
} else if (status != DMA_COMPLETE) {
result(status == DMA_ERROR ?
"completion error status" :
- "completion busy status", total_tests, src_off,
- dst_off, len, ret);
+ "completion busy status", total_tests, src->off,
+ dst->off, len, ret);
failed_tests++;
continue;
}
if (params->noverify) {
- verbose_result("test passed", total_tests, src_off,
- dst_off, len, 0);
+ verbose_result("test passed", total_tests, src->off,
+ dst->off, len, 0);
continue;
}
start = ktime_get();
pr_debug("%s: verifying source buffer...\n", current->comm);
- error_count = dmatest_verify(thread->srcs, 0, src_off,
+ error_count = dmatest_verify(src->aligned, 0, src->off,
0, PATTERN_SRC, true, is_memset);
- error_count += dmatest_verify(thread->srcs, src_off,
- src_off + len, src_off,
+ error_count += dmatest_verify(src->aligned, src->off,
+ src->off + len, src->off,
PATTERN_SRC | PATTERN_COPY, true, is_memset);
- error_count += dmatest_verify(thread->srcs, src_off + len,
- params->buf_size, src_off + len,
+ error_count += dmatest_verify(src->aligned, src->off + len,
+ buf_size, src->off + len,
PATTERN_SRC, true, is_memset);
pr_debug("%s: verifying dest buffer...\n", current->comm);
- error_count += dmatest_verify(thread->dsts, 0, dst_off,
+ error_count += dmatest_verify(dst->aligned, 0, dst->off,
0, PATTERN_DST, false, is_memset);
- error_count += dmatest_verify(thread->dsts, dst_off,
- dst_off + len, src_off,
+ error_count += dmatest_verify(dst->aligned, dst->off,
+ dst->off + len, src->off,
PATTERN_SRC | PATTERN_COPY, false, is_memset);
- error_count += dmatest_verify(thread->dsts, dst_off + len,
- params->buf_size, dst_off + len,
+ error_count += dmatest_verify(dst->aligned, dst->off + len,
+ buf_size, dst->off + len,
PATTERN_DST, false, is_memset);
diff = ktime_sub(ktime_get(), start);
comparetime = ktime_add(comparetime, diff);
if (error_count) {
- result("data error", total_tests, src_off, dst_off,
+ result("data error", total_tests, src->off, dst->off,
len, error_count);
failed_tests++;
} else {
- verbose_result("test passed", total_tests, src_off,
- dst_off, len, 0);
+ verbose_result("test passed", total_tests, src->off,
+ dst->off, len, 0);
}
}
ktime = ktime_sub(ktime_get(), ktime);
@@ -856,19 +874,10 @@ static int dmatest_func(void *data)
kfree(dma_pq);
err_srcs_array:
kfree(srcs);
-err_dstbuf:
- for (i = 0; thread->udsts[i]; i++)
- kfree(thread->udsts[i]);
- kfree(thread->udsts);
-err_udsts:
- kfree(thread->dsts);
-err_dsts:
-err_srcbuf:
- for (i = 0; thread->usrcs[i]; i++)
- kfree(thread->usrcs[i]);
- kfree(thread->usrcs);
-err_usrcs:
- kfree(thread->srcs);
+err_dst:
+ dmatest_free_test_data(dst);
+err_src:
+ dmatest_free_test_data(src);
err_free_coefs:
kfree(pq_coefs);
err_thread_type:
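The new dmatest_alloc_test_data() keeps two pointers per buffer because of the over-allocate-and-align idiom: kmalloc() guarantees only the slab's minimum alignment, so a stricter requirement is met by allocating extra bytes and rounding the returned pointer up, while the raw pointer is retained for kfree(). A generic sketch of that idiom (not dmatest-specific):

u8 *raw, *aligned;

raw = kmalloc(buf_size + align, GFP_KERNEL);
if (!raw)
        return -ENOMEM;
/* PTR_ALIGN() rounds raw up to the next multiple of align; the align
 * extra bytes guarantee buf_size usable bytes remain past that point. */
aligned = align ? PTR_ALIGN(raw, align) : raw;
/* ... use aligned[0..buf_size-1] ... */
kfree(raw);             /* kfree() must see the pointer kmalloc() returned */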
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 3a1ab52ffae0..21cb2a58dbd2 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1018,7 +1018,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
unsigned long flags;
- LIST_HEAD(list);
dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
dwc->descs_allocated);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index a2b0a0e71168..237a9c165072 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -440,6 +440,8 @@ struct sdma_engine {
unsigned int irq;
dma_addr_t bd0_phys;
struct sdma_buffer_descriptor *bd0;
+ /* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0 */
+ bool clk_ratio;
};
static int sdma_config_write(struct dma_chan *chan,
@@ -662,8 +664,11 @@ static int sdma_run_channel0(struct sdma_engine *sdma)
dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
/* Set bits of CONFIG register with dynamic context switching */
- if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
- writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
+ reg = readl(sdma->regs + SDMA_H_CONFIG);
+ if ((reg & SDMA_H_CONFIG_CSM) == 0) {
+ reg |= SDMA_H_CONFIG_CSM;
+ writel_relaxed(reg, sdma->regs + SDMA_H_CONFIG);
+ }
return ret;
}
@@ -1839,6 +1844,9 @@ static int sdma_init(struct sdma_engine *sdma)
if (ret)
goto disable_clk_ipg;
+ if (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg))
+ sdma->clk_ratio = 1;
+
/* Be sure SDMA has not started yet */
writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
@@ -1879,8 +1887,10 @@ static int sdma_init(struct sdma_engine *sdma)
writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
/* Set bits of CONFIG register but with static context switching */
- /* FIXME: Check whether to set ACR bit depending on clock ratios */
- writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
+ if (sdma->clk_ratio)
+ writel_relaxed(SDMA_H_CONFIG_ACR, sdma->regs + SDMA_H_CONFIG);
+ else
+ writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
@@ -1903,11 +1913,16 @@ disable_clk_ipg:
static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct sdma_engine *sdma = sdmac->sdma;
struct imx_dma_data *data = fn_param;
if (!imx_dma_is_general_purpose(chan))
return false;
+ /* return false if it's not the right device */
+ if (sdma->dev->of_node != data->of_node)
+ return false;
+
sdmac->data = *data;
chan->private = &sdmac->data;
@@ -1935,6 +1950,7 @@ static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
* be set to sdmac->event_id1.
*/
data.dma_request2 = 0;
+ data.of_node = ofdma->of_node;
return dma_request_channel(mask, sdma_filter_fn, &data);
}
@@ -2097,6 +2113,7 @@ static int sdma_probe(struct platform_device *pdev)
sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
sdma->dma_device.device_issue_pending = sdma_issue_pending;
sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
+ sdma->dma_device.copy_align = 2;
dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
platform_set_drvdata(pdev, sdma);
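The sdma_run_channel0() hunk above fixes a read-modify-write hazard: the old code wrote SDMA_H_CONFIG only when the whole register read back as zero, so once sdma_init() starts setting ACR for a 1:1 clock ratio, CSM would never have been enabled. Condensed, the clock-ratio logic this patch adds is (per the ratio comment in the struct above):

/* Sketch: the SDMA core runs either at the AHB rate (1:1) or at half
 * of it (2:1). A 1:1 ratio requires the ACR bit in SDMA_H_CONFIG. */
sdma->clk_ratio = clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg);

writel_relaxed(sdma->clk_ratio ? SDMA_H_CONFIG_ACR : 0,
               sdma->regs + SDMA_H_CONFIG);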
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 23fb2fa04000..f373a139e0c3 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -372,6 +372,7 @@ struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+ struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
struct ioat_ring_ent **ring;
int total_descs = 1 << order;
int i, chunks;
@@ -437,6 +438,17 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
}
ring[i]->hw->next = ring[0]->txd.phys;
+ /* setup descriptor pre-fetching for v3.4 */
+ if (ioat_dma->cap & IOAT_CAP_DPS) {
+ u16 drsctl = IOAT_CHAN_DRSZ_2MB | IOAT_CHAN_DRS_EN;
+
+ if (chunks == 1)
+ drsctl |= IOAT_CHAN_DRS_AUTOWRAP;
+
+ writew(drsctl, ioat_chan->reg_base + IOAT_CHAN_DRSCTL_OFFSET);
+
+ }
+
return ring;
}
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 1ab42ec2b7ff..aaafd0e882b5 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -27,7 +27,7 @@
#include "registers.h"
#include "hw.h"
-#define IOAT_DMA_VERSION "4.00"
+#define IOAT_DMA_VERSION "5.00"
#define IOAT_DMA_DCA_ANY_CPU ~0
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index abcc51b343ce..781c94de8e81 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -66,11 +66,14 @@
#define PCI_DEVICE_ID_INTEL_IOAT_SKX 0x2021
+#define PCI_DEVICE_ID_INTEL_IOAT_ICX 0x0b00
+
#define IOAT_VER_1_2 0x12 /* Version 1.2 */
#define IOAT_VER_2_0 0x20 /* Version 2.0 */
#define IOAT_VER_3_0 0x30 /* Version 3.0 */
#define IOAT_VER_3_2 0x32 /* Version 3.2 */
#define IOAT_VER_3_3 0x33 /* Version 3.3 */
+#define IOAT_VER_3_4 0x34 /* Version 3.4 */
int system_has_dca_enabled(struct pci_dev *pdev);
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 2d810dfcdc48..d41dc9a9ff68 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -119,6 +119,9 @@ static const struct pci_device_id ioat_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },
+ /* I/OAT v3.4 platforms */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_ICX) },
+
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
@@ -135,10 +138,10 @@ static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);
static int ioat_dca_enabled = 1;
module_param(ioat_dca_enabled, int, 0644);
MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
-int ioat_pending_level = 4;
+int ioat_pending_level = 7;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
- "high-water mark for pushing ioat descriptors (default: 4)");
+ "high-water mark for pushing ioat descriptors (default: 7)");
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
sizeof(ioat_interrupt_style), 0644);
@@ -635,6 +638,11 @@ static void ioat_free_chan_resources(struct dma_chan *c)
ioat_stop(ioat_chan);
ioat_reset_hw(ioat_chan);
+ /* Put LTR to idle */
+ if (ioat_dma->version >= IOAT_VER_3_4)
+ writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
+ ioat_chan->reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET);
+
spin_lock_bh(&ioat_chan->cleanup_lock);
spin_lock_bh(&ioat_chan->prep_lock);
descs = ioat_ring_space(ioat_chan);
@@ -724,6 +732,28 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
spin_unlock_bh(&ioat_chan->prep_lock);
spin_unlock_bh(&ioat_chan->cleanup_lock);
+ /* Setting up LTR values for 3.4 or later */
+ if (ioat_chan->ioat_dma->version >= IOAT_VER_3_4) {
+ u32 lat_val;
+
+ lat_val = IOAT_CHAN_LTR_ACTIVE_SNVAL |
+ IOAT_CHAN_LTR_ACTIVE_SNLATSCALE |
+ IOAT_CHAN_LTR_ACTIVE_SNREQMNT;
+ writel(lat_val, ioat_chan->reg_base +
+ IOAT_CHAN_LTR_ACTIVE_OFFSET);
+
+ lat_val = IOAT_CHAN_LTR_IDLE_SNVAL |
+ IOAT_CHAN_LTR_IDLE_SNLATSCALE |
+ IOAT_CHAN_LTR_IDLE_SNREQMNT;
+ writel(lat_val, ioat_chan->reg_base +
+ IOAT_CHAN_LTR_IDLE_OFFSET);
+
+ /* Select to active */
+ writeb(IOAT_CHAN_LTR_SWSEL_ACTIVE,
+ ioat_chan->reg_base +
+ IOAT_CHAN_LTR_SWSEL_OFFSET);
+ }
+
ioat_start_null_desc(ioat_chan);
/* check that we got off the ground */
@@ -1185,6 +1215,10 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
if (err)
return err;
+ if (ioat_dma->cap & IOAT_CAP_DPS)
+ writeb(ioat_pending_level + 1,
+ ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET);
+
return 0;
}
@@ -1350,6 +1384,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, device);
device->version = readb(device->reg_base + IOAT_VER_OFFSET);
+ if (device->version >= IOAT_VER_3_4)
+ ioat_dca_enabled = 0;
if (device->version >= IOAT_VER_3_0) {
if (is_skx_ioat(pdev))
device->version = IOAT_VER_3_2;
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 2f3bbc88ff2a..99c1c24d465d 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -84,6 +84,9 @@
#define IOAT_CAP_PQ 0x00000200
#define IOAT_CAP_DWBES 0x00002000
#define IOAT_CAP_RAID16SS 0x00020000
+#define IOAT_CAP_DPS 0x00800000
+
+#define IOAT_PREFETCH_LIMIT_OFFSET 0x4C /* CHWPREFLMT */
#define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */
@@ -243,4 +246,25 @@
#define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */
+#define IOAT_CHAN_DRSCTL_OFFSET 0xB6
+#define IOAT_CHAN_DRSZ_4KB 0x0000
+#define IOAT_CHAN_DRSZ_8KB 0x0001
+#define IOAT_CHAN_DRSZ_2MB 0x0009
+#define IOAT_CHAN_DRS_EN 0x0100
+#define IOAT_CHAN_DRS_AUTOWRAP 0x0200
+
+#define IOAT_CHAN_LTR_SWSEL_OFFSET 0xBC
+#define IOAT_CHAN_LTR_SWSEL_ACTIVE 0x0
+#define IOAT_CHAN_LTR_SWSEL_IDLE 0x1
+
+#define IOAT_CHAN_LTR_ACTIVE_OFFSET 0xC0
+#define IOAT_CHAN_LTR_ACTIVE_SNVAL 0x0000 /* 0 us */
+#define IOAT_CHAN_LTR_ACTIVE_SNLATSCALE 0x0800 /* 1us scale */
+#define IOAT_CHAN_LTR_ACTIVE_SNREQMNT 0x8000 /* snoop req enable */
+
+#define IOAT_CHAN_LTR_IDLE_OFFSET 0xC4
+#define IOAT_CHAN_LTR_IDLE_SNVAL 0x0258 /* 600 us */
+#define IOAT_CHAN_LTR_IDLE_SNLATSCALE 0x0800 /* 1us scale */
+#define IOAT_CHAN_LTR_IDLE_SNREQMNT 0x8000 /* snoop req enable */
+
#endif /* _IOAT_REGISTERS_H_ */
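A worked decoding of the new LTR constants, assuming the field layout implied by the comments (bits 9:0 latency value, bits 12:10 scale, bit 15 requirement, as in the PCIe LTR message format):

/* Idle: 0x0258 | 0x0800 | 0x8000 = 0x8A58
 *   latency value = 0x258 = 600, scale = 2 (1 us units), requirement set
 *   -> up to 600 us of snoop latency tolerated while the channel idles.
 * Active: 0x0000 | 0x0800 | 0x8000 = 0x8800
 *   latency value = 0 -> no added snoop latency tolerated while busy. */
u32 idle_ltr = IOAT_CHAN_LTR_IDLE_SNVAL |
               IOAT_CHAN_LTR_IDLE_SNLATSCALE |
               IOAT_CHAN_LTR_IDLE_SNREQMNT;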
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 7f595355fb79..fe4a7c71fede 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1059,6 +1059,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
mv_chan->op_in_desc = XOR_MODE_IN_DESC;
dma_dev = &mv_chan->dmadev;
+ dma_dev->dev = &pdev->dev;
mv_chan->xordev = xordev;
/*
@@ -1091,7 +1092,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
dma_dev->device_tx_status = mv_xor_status;
dma_dev->device_issue_pending = mv_xor_issue_pending;
- dma_dev->dev = &pdev->dev;
/* set prep routines based on capability */
if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index cff1b143fff5..eec79fdf27a5 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2267,7 +2267,6 @@ static int pl330_terminate_all(struct dma_chan *chan)
struct dma_pl330_desc *desc;
unsigned long flags;
struct pl330_dmac *pl330 = pch->dmac;
- LIST_HEAD(list);
bool power_down = false;
pm_runtime_get_sync(pl330->ddma.dev);
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 1617715aa6e0..cb860cb53c27 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -636,8 +636,8 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE);
/* allocate enough room to accommodate the number of entries */
- async_desc = kzalloc(sizeof(*async_desc) +
- (num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT);
+ async_desc = kzalloc(struct_size(async_desc, desc, num_alloc),
+ GFP_NOWAIT);
if (!async_desc)
goto err_out;
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 43d4b00b8138..411f91fde734 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -138,24 +138,25 @@ static void hidma_process_completed(struct hidma_chan *mchan)
desc = &mdesc->desc;
last_cookie = desc->cookie;
+ llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
+
spin_lock_irqsave(&mchan->lock, irqflags);
+ if (llstat == DMA_COMPLETE) {
+ mchan->last_success = last_cookie;
+ result.result = DMA_TRANS_NOERROR;
+ } else {
+ result.result = DMA_TRANS_ABORTED;
+ }
+
dma_cookie_complete(desc);
spin_unlock_irqrestore(&mchan->lock, irqflags);
- llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
dmaengine_desc_get_callback(desc, &cb);
dma_run_dependencies(desc);
spin_lock_irqsave(&mchan->lock, irqflags);
list_move(&mdesc->node, &mchan->free);
-
- if (llstat == DMA_COMPLETE) {
- mchan->last_success = last_cookie;
- result.result = DMA_TRANS_NOERROR;
- } else
- result.result = DMA_TRANS_ABORTED;
-
spin_unlock_irqrestore(&mchan->lock, irqflags);
dmaengine_desc_callback_invoke(&cb, &result);
@@ -415,6 +416,7 @@ hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
if (!mdesc)
return NULL;
+ mdesc->desc.flags = flags;
hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
src, dest, len, flags,
HIDMA_TRE_MEMCPY);
@@ -447,6 +449,7 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
if (!mdesc)
return NULL;
+ mdesc->desc.flags = flags;
hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
value, dest, len, flags,
HIDMA_TRE_MEMSET);
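The two one-line `mdesc->desc.flags = flags;` additions matter because the dmaengine core reads the caller's flags back out of the descriptor (e.g. DMA_CTRL_ACK tracking and DMA_PREP_INTERRUPT callback behavior), so a prep callback that drops them breaks clients. A generic client sketch showing where those flags originate (my_done_fn is hypothetical):

struct dma_async_tx_descriptor *tx;
dma_cookie_t cookie;

tx = dmaengine_prep_dma_memcpy(chan, dst_phys, src_phys, len,
                               DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tx)
        return -ENOMEM;
tx->callback = my_done_fn;      /* fires on completion */
cookie = dmaengine_submit(tx);  /* flags now live in tx->flags */
dma_async_issue_pending(chan);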
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index d64edeb6771a..681de12f4c67 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -423,9 +423,8 @@ static int __init hidma_mgmt_init(void)
hidma_mgmt_of_populate_channels(child);
}
#endif
- platform_driver_register(&hidma_mgmt_driver);
+ return platform_driver_register(&hidma_mgmt_driver);
- return 0;
}
module_init(hidma_mgmt_init);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 784d5f1a473b..3fae23768b47 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -705,7 +705,6 @@ static int sa11x0_dma_device_pause(struct dma_chan *chan)
struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
struct sa11x0_dma_phy *p;
- LIST_HEAD(head);
unsigned long flags;
dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
@@ -732,7 +731,6 @@ static int sa11x0_dma_device_resume(struct dma_chan *chan)
struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
struct sa11x0_dma_phy *p;
- LIST_HEAD(head);
unsigned long flags;
dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index e2f016700fcc..48431e2da987 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -580,15 +580,7 @@ static irqreturn_t dma_irq_handle(int irq, void *dev_id)
static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
{
- struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
- int ret;
-
- ret = pm_runtime_get_sync(chan->device->dev);
- if (ret < 0)
- return ret;
-
- schan->dev_id = SPRD_DMA_SOFTWARE_UID;
- return 0;
+ return pm_runtime_get_sync(chan->device->dev);
}
static void sprd_dma_free_chan_resources(struct dma_chan *chan)
@@ -1021,13 +1013,10 @@ static void sprd_dma_free_desc(struct virt_dma_desc *vd)
static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param)
{
struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
- struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
- u32 req = *(u32 *)param;
+ u32 slave_id = *(u32 *)param;
- if (req < sdev->total_chns)
- return req == schan->chn_num + 1;
- else
- return false;
+ schan->dev_id = slave_id;
+ return true;
}
static int sprd_dma_probe(struct platform_device *pdev)
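After this rework the filter no longer validates a hardware id against the channel number; it accepts whichever free channel the core offers and records the slave id on it. A hedged sketch of the request side (the slave_id value is a made-up example; in practice it would come from the client's DT binding):

dma_cap_mask_t mask;
u32 slave_id = 3;               /* hypothetical hardware request line */
struct dma_chan *chan;

dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, &mask);
/* sprd_dma_filter_fn() stores slave_id in schan->dev_id and accepts. */
chan = dma_request_channel(mask, sprd_dma_filter_fn, &slave_id);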
diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c
index 07c20aa2e955..bc7a1de3f29b 100644
--- a/drivers/dma/st_fdma.c
+++ b/drivers/dma/st_fdma.c
@@ -243,8 +243,7 @@ static struct st_fdma_desc *st_fdma_alloc_desc(struct st_fdma_chan *fchan,
struct st_fdma_desc *fdesc;
int i;
- fdesc = kzalloc(sizeof(*fdesc) +
- sizeof(struct st_fdma_sw_node) * sg_len, GFP_NOWAIT);
+ fdesc = kzalloc(struct_size(fdesc, node, sg_len), GFP_NOWAIT);
if (!fdesc)
return NULL;
@@ -294,8 +293,6 @@ static void st_fdma_free_chan_res(struct dma_chan *chan)
struct rproc *rproc = fchan->fdev->slim_rproc->rproc;
unsigned long flags;
- LIST_HEAD(head);
-
dev_dbg(fchan->fdev->dev, "%s: freeing chan:%d\n",
__func__, fchan->vchan.chan.chan_id);
@@ -626,7 +623,6 @@ static void st_fdma_issue_pending(struct dma_chan *chan)
static int st_fdma_pause(struct dma_chan *chan)
{
unsigned long flags;
- LIST_HEAD(head);
struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
int ch_id = fchan->vchan.chan.chan_id;
unsigned long cmd = FDMA_CMD_PAUSE(ch_id);
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index fc0f9c8766a8..afbb1c95b721 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -643,8 +643,8 @@ static int td_probe(struct platform_device *pdev)
DRIVER_NAME))
return -EBUSY;
- td = kzalloc(sizeof(struct timb_dma) +
- sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL);
+ td = kzalloc(struct_size(td, channels, pdata->nr_channels),
+ GFP_KERNEL);
if (!td) {
err = -ENOMEM;
goto err_release_region;