Diffstat (limited to 'drivers/dma/fsl-edma-common.c')
 -rw-r--r-- drivers/dma/fsl-edma-common.c | 307
 1 file changed, 210 insertions(+), 97 deletions(-)
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index a06a1575a2a5..a0f5741abcc4 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -7,6 +7,8 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
#include "fsl-edma-common.h"
@@ -40,14 +42,73 @@
#define EDMA64_ERRH 0x28
#define EDMA64_ERRL 0x2c
-#define EDMA_TCD 0x1000
+void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan)
+{
+ spin_lock(&fsl_chan->vchan.lock);
+
+ if (!fsl_chan->edesc) {
+ /* terminate_all called before */
+ spin_unlock(&fsl_chan->vchan.lock);
+ return;
+ }
+
+ if (!fsl_chan->edesc->iscyclic) {
+ list_del(&fsl_chan->edesc->vdesc.node);
+ vchan_cookie_complete(&fsl_chan->edesc->vdesc);
+ fsl_chan->edesc = NULL;
+ fsl_chan->status = DMA_COMPLETE;
+ fsl_chan->idle = true;
+ } else {
+ vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
+ }
+
+ if (!fsl_chan->edesc)
+ fsl_edma_xfer_desc(fsl_chan);
+
+ spin_unlock(&fsl_chan->vchan.lock);
+}
+
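
[Note] fsl_edma_tx_chan_handler() is the per-channel half of transfer-completion handling; the glue driver's shared interrupt handler is expected to scan the interrupt status word, ack each pending channel, and then call this helper. The standalone sketch below only mimics that bit-scan dispatch (the handler name wiring and the 0x5 status value are made up for illustration, not part of this patch):

#include <stdio.h>

int main(void)
{
	unsigned int intr = 0x5;	/* pretend channels 0 and 2 completed */
	int ch;

	/* one fsl_edma_tx_chan_handler() call per set status bit */
	for (ch = 0; intr; ch++, intr >>= 1)
		if (intr & 1)
			printf("ack CINT %d, run tx_chan_handler(chan %d)\n",
			       ch, ch);
	return 0;
}
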
+static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
+{
+ u32 val, flags;
+
+ flags = fsl_edma_drvflags(fsl_chan);
+ val = edma_readl_chreg(fsl_chan, ch_sbr);
+ /* Remote/local swapped wrongly on iMX8 QM Audio edma */
+ if (flags & FSL_EDMA_DRV_QUIRK_SWAPPED) {
+ if (!fsl_chan->is_rxchan)
+ val |= EDMA_V3_CH_SBR_RD;
+ else
+ val |= EDMA_V3_CH_SBR_WR;
+ } else {
+ if (fsl_chan->is_rxchan)
+ val |= EDMA_V3_CH_SBR_RD;
+ else
+ val |= EDMA_V3_CH_SBR_WR;
+ }
+
+ if (fsl_chan->is_remote)
+ val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR);
+
+ edma_writel_chreg(fsl_chan, val, ch_sbr);
+
+ if (flags & FSL_EDMA_DRV_HAS_CHMUX)
+ edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);
+
+ val = edma_readl_chreg(fsl_chan, ch_csr);
+ val |= EDMA_V3_CH_CSR_ERQ;
+ edma_writel_chreg(fsl_chan, val, ch_csr);
+}
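
[Note] The ch_sbr logic above reduces to a small truth table: the read/write attribute normally follows the transfer direction, the FSL_EDMA_DRV_QUIRK_SWAPPED case (i.MX8 QM audio eDMA) inverts it, and a remote channel clears both bits. A standalone sketch of that decision (direction xor quirk), with strings standing in for the EDMA_V3_CH_SBR_RD/WR register bits:

#include <stdio.h>
#include <stdbool.h>

static const char *sbr_attr(bool is_rxchan, bool swapped, bool is_remote)
{
	if (is_remote)
		return "neither RD nor WR";	/* both bits cleared */
	return (is_rxchan ^ swapped) ? "RD" : "WR";
}

int main(void)
{
	printf("rx, normal:  %s\n", sbr_attr(true, false, false));	/* RD */
	printf("rx, swapped: %s\n", sbr_attr(true, true, false));	/* WR */
	printf("tx, swapped: %s\n", sbr_attr(false, true, false));	/* RD */
	return 0;
}
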
static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
struct edma_regs *regs = &fsl_chan->edma->regs;
u32 ch = fsl_chan->vchan.chan.chan_id;
- if (fsl_chan->edma->drvdata->version == v1) {
+ if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
+ return fsl_edma3_enable_request(fsl_chan);
+
+ if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
edma_writeb(fsl_chan->edma, ch, regs->serq);
} else {
@@ -59,12 +120,29 @@ static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
}
}
+static void fsl_edma3_disable_request(struct fsl_edma_chan *fsl_chan)
+{
+ u32 val = edma_readl_chreg(fsl_chan, ch_csr);
+ u32 flags;
+
+ flags = fsl_edma_drvflags(fsl_chan);
+
+ if (flags & FSL_EDMA_DRV_HAS_CHMUX)
+ edma_writel_chreg(fsl_chan, 0, ch_mux);
+
+ val &= ~EDMA_V3_CH_CSR_ERQ;
+ edma_writel_chreg(fsl_chan, val, ch_csr);
+}
+
void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
struct edma_regs *regs = &fsl_chan->edma->regs;
u32 ch = fsl_chan->vchan.chan.chan_id;
- if (fsl_chan->edma->drvdata->version == v1) {
+ if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
+ return fsl_edma3_disable_request(fsl_chan);
+
+ if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
edma_writeb(fsl_chan->edma, ch, regs->cerq);
edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
} else {
@@ -75,7 +153,6 @@ void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
}
}
-EXPORT_SYMBOL_GPL(fsl_edma_disable_request);
static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
u32 off, u32 slot, bool enable)
@@ -112,36 +189,33 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
int endian_diff[4] = {3, 1, -1, -3};
u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;
+ if (!dmamux_nr)
+ return;
+
chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
- if (fsl_chan->edma->drvdata->mux_swap)
+ if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_MUX_SWAP)
ch_off += endian_diff[ch_off % 4];
muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
slot = EDMAMUX_CHCFG_SOURCE(slot);
- if (fsl_chan->edma->drvdata->version == v3)
+ if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_CONFIG32)
mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
else
mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}
-EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
- switch (addr_width) {
- case 1:
- return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
- case 2:
- return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
- case 4:
- return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
- case 8:
- return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
- default:
- return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
- }
+ u32 val;
+
+ if (addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ val = ffs(addr_width) - 1;
+ return val | (val << 8);
}
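
[Note] The rewrite of fsl_edma_get_tcd_attr() relies on the bus widths being powers of two: ffs(width) - 1 maps 1/2/4/8... bytes to the eDMA size encoding 0/1/2/3..., and the same value lands in both the DSIZE field (low bits) and the SSIZE field (shifted by 8), which is exactly what the old switch spelled out case by case. It also covers the 16- and 32-byte widths the memcpy path now requests via DMA_SLAVE_BUSWIDTH_32_BYTES. A standalone check:

#include <stdio.h>
#include <strings.h>	/* ffs() */

static unsigned int tcd_attr(unsigned int addr_width)
{
	unsigned int val = ffs(addr_width) - 1;

	return val | (val << 8);	/* SSIZE (<<8) | DSIZE, as in the patch */
}

int main(void)
{
	unsigned int w;

	/* 1 -> 0x000, 2 -> 0x101, 4 -> 0x202, 8 -> 0x303, 32 -> 0x505 */
	for (w = 1; w <= 32; w <<= 1)
		printf("width %2u -> attr 0x%03x\n", w, tcd_attr(w));
	return 0;
}
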
void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
@@ -155,7 +229,6 @@ void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
fsl_desc->tcd[i].ptcd);
kfree(fsl_desc);
}
-EXPORT_SYMBOL_GPL(fsl_edma_free_desc);
int fsl_edma_terminate_all(struct dma_chan *chan)
{
@@ -170,9 +243,12 @@ int fsl_edma_terminate_all(struct dma_chan *chan)
vchan_get_all_descriptors(&fsl_chan->vchan, &head);
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+
+ if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_PD)
+ pm_runtime_allow(fsl_chan->pd_dev);
+
return 0;
}
-EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);
int fsl_edma_pause(struct dma_chan *chan)
{
@@ -188,7 +264,6 @@ int fsl_edma_pause(struct dma_chan *chan)
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
return 0;
}
-EXPORT_SYMBOL_GPL(fsl_edma_pause);
int fsl_edma_resume(struct dma_chan *chan)
{
@@ -204,7 +279,6 @@ int fsl_edma_resume(struct dma_chan *chan)
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
return 0;
}
-EXPORT_SYMBOL_GPL(fsl_edma_resume);
static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
{
@@ -265,36 +339,41 @@ int fsl_edma_slave_config(struct dma_chan *chan,
return 0;
}
-EXPORT_SYMBOL_GPL(fsl_edma_slave_config);
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
struct virt_dma_desc *vdesc, bool in_progress)
{
struct fsl_edma_desc *edesc = fsl_chan->edesc;
- struct edma_regs *regs = &fsl_chan->edma->regs;
- u32 ch = fsl_chan->vchan.chan.chan_id;
enum dma_transfer_direction dir = edesc->dirn;
dma_addr_t cur_addr, dma_addr;
size_t len, size;
+ u32 nbytes = 0;
int i;
/* calculate the total size in this desc */
- for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
- len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
- * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+ for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) {
+ nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
+ if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
+ nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
+ len += nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+ }
if (!in_progress)
return len;
if (dir == DMA_MEM_TO_DEV)
- cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
+ cur_addr = edma_read_tcdreg(fsl_chan, saddr);
else
- cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);
+ cur_addr = edma_read_tcdreg(fsl_chan, daddr);
/* figure out the finished and calculate the residue */
for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
- size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
- * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+ nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
+ if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
+ nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
+
+ size = nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+
if (dir == DMA_MEM_TO_DEV)
dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
else
@@ -340,14 +419,10 @@ enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
return fsl_chan->status;
}
-EXPORT_SYMBOL_GPL(fsl_edma_tx_status);
static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
struct fsl_edma_hw_tcd *tcd)
{
- struct fsl_edma_engine *edma = fsl_chan->edma;
- struct edma_regs *regs = &fsl_chan->edma->regs;
- u32 ch = fsl_chan->vchan.chan.chan_id;
u16 csr = 0;
/*
@@ -356,23 +431,22 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
* big- or little-endian obeying the eDMA engine model endian,
* and this is performed from specific edma_write functions
*/
- edma_writew(edma, 0, &regs->tcd[ch].csr);
+ edma_write_tcdreg(fsl_chan, 0, csr);
- edma_writel(edma, (s32)tcd->saddr, &regs->tcd[ch].saddr);
- edma_writel(edma, (s32)tcd->daddr, &regs->tcd[ch].daddr);
+ edma_write_tcdreg(fsl_chan, tcd->saddr, saddr);
+ edma_write_tcdreg(fsl_chan, tcd->daddr, daddr);
- edma_writew(edma, (s16)tcd->attr, &regs->tcd[ch].attr);
- edma_writew(edma, tcd->soff, &regs->tcd[ch].soff);
+ edma_write_tcdreg(fsl_chan, tcd->attr, attr);
+ edma_write_tcdreg(fsl_chan, tcd->soff, soff);
- edma_writel(edma, (s32)tcd->nbytes, &regs->tcd[ch].nbytes);
- edma_writel(edma, (s32)tcd->slast, &regs->tcd[ch].slast);
+ edma_write_tcdreg(fsl_chan, tcd->nbytes, nbytes);
+ edma_write_tcdreg(fsl_chan, tcd->slast, slast);
- edma_writew(edma, (s16)tcd->citer, &regs->tcd[ch].citer);
- edma_writew(edma, (s16)tcd->biter, &regs->tcd[ch].biter);
- edma_writew(edma, (s16)tcd->doff, &regs->tcd[ch].doff);
+ edma_write_tcdreg(fsl_chan, tcd->citer, citer);
+ edma_write_tcdreg(fsl_chan, tcd->biter, biter);
+ edma_write_tcdreg(fsl_chan, tcd->doff, doff);
- edma_writel(edma, (s32)tcd->dlast_sga,
- &regs->tcd[ch].dlast_sga);
+ edma_write_tcdreg(fsl_chan, tcd->dlast_sga, dlast_sga);
if (fsl_chan->is_sw) {
csr = le16_to_cpu(tcd->csr);
@@ -380,16 +454,19 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
tcd->csr = cpu_to_le16(csr);
}
- edma_writew(edma, (s16)tcd->csr, &regs->tcd[ch].csr);
+ edma_write_tcdreg(fsl_chan, tcd->csr, csr);
}
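
[Note] All the edma_writew()/edma_writel() calls above collapse into one edma_write_tcdreg() accessor because each channel now carries its own TCD MMIO base instead of indexing a global regs->tcd[ch] array. The real macro lives in fsl-edma-common.h; the standalone demo below only illustrates the sizeof-based width dispatch such an accessor can use (printf standing in for the MMIO writes):

#include <stdio.h>
#include <stdint.h>

struct hw_tcd { uint32_t saddr; uint16_t attr; };	/* trimmed-down TCD */

#define write_tcdreg(tcd, val, member)				\
	(sizeof((tcd)->member) == sizeof(uint32_t) ?		\
		printf("writel 0x%08x\n", (uint32_t)(val)) :	\
		printf("writew 0x%04x\n", (uint16_t)(val)))

int main(void)
{
	struct hw_tcd tcd;

	write_tcdreg(&tcd, 0xdeadbeefu, saddr);	/* 32-bit member -> writel */
	write_tcdreg(&tcd, 0x1234u, attr);	/* 16-bit member -> writew */
	return 0;
}
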
static inline
-void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
+void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
+ struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
u16 biter, u16 doff, u32 dlast_sga, bool major_int,
bool disable_req, bool enable_sg)
{
+ struct dma_slave_config *cfg = &fsl_chan->cfg;
u16 csr = 0;
+ u32 burst;
/*
* eDMA hardware SGs require the TCDs to be stored in little
@@ -404,6 +481,21 @@ void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
tcd->soff = cpu_to_le16(soff);
+ if (fsl_chan->is_multi_fifo) {
+ /* set mloff to support multiple fifo */
+ burst = cfg->direction == DMA_DEV_TO_MEM ?
+ cfg->src_addr_width : cfg->dst_addr_width;
+ nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
+ /* enable DMLOE/SMLOE */
+ if (cfg->direction == DMA_MEM_TO_DEV) {
+ nbytes |= EDMA_V3_TCD_NBYTES_DMLOE;
+ nbytes &= ~EDMA_V3_TCD_NBYTES_SMLOE;
+ } else {
+ nbytes |= EDMA_V3_TCD_NBYTES_SMLOE;
+ nbytes &= ~EDMA_V3_TCD_NBYTES_DMLOE;
+ }
+ }
+
tcd->nbytes = cpu_to_le32(nbytes);
tcd->slast = cpu_to_le32(slast);
@@ -422,6 +514,12 @@ void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
if (enable_sg)
csr |= EDMA_TCD_CSR_E_SG;
+ if (fsl_chan->is_rxchan)
+ csr |= EDMA_TCD_CSR_ACTIVE;
+
+ if (fsl_chan->is_sw)
+ csr |= EDMA_TCD_CSR_START;
+
tcd->csr = cpu_to_le16(csr);
}
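
[Note] The is_multi_fifo math above is easier to see with numbers: the minor loop appears to burst across a bank of four FIFO registers (doff/soff are forced to 4 in the cyclic code further down), and MLOFF = -(addr_width * 4) rewinds the pointer to the first register when the minor loop ends, while SMLOE/DMLOE select which side of the transfer the offset applies to. A standalone walk-through, assuming a 4-byte bus width:

#include <stdio.h>

int main(void)
{
	int width = 4;			/* cfg->dst_addr_width, assumed */
	int mloff = -(width * 4);	/* as fsl_edma_fill_tcd() computes */
	int addr = 0x100, beat;

	for (beat = 0; beat < 4; beat++) {
		printf("beat %d writes fifo reg at 0x%x\n", beat, addr);
		addr += 4;		/* doff = 4 in multi-FIFO mode */
	}
	addr += mloff;			/* engine applies MLOFF at loop end */
	printf("next minor loop starts back at 0x%x\n", addr);
	return 0;
}
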
@@ -461,6 +559,7 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
struct fsl_edma_desc *fsl_desc;
dma_addr_t dma_buf_next;
+ bool major_int = true;
int sg_len, i;
u32 src_addr, dst_addr, last_sg, nbytes;
u16 soff, doff, iter;
@@ -504,23 +603,28 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
src_addr = dma_buf_next;
dst_addr = fsl_chan->dma_dev_addr;
soff = fsl_chan->cfg.dst_addr_width;
- doff = 0;
- } else {
+ doff = fsl_chan->is_multi_fifo ? 4 : 0;
+ } else if (direction == DMA_DEV_TO_MEM) {
src_addr = fsl_chan->dma_dev_addr;
dst_addr = dma_buf_next;
- soff = 0;
+ soff = fsl_chan->is_multi_fifo ? 4 : 0;
doff = fsl_chan->cfg.src_addr_width;
+ } else {
+ /* DMA_DEV_TO_DEV */
+ src_addr = fsl_chan->cfg.src_addr;
+ dst_addr = fsl_chan->cfg.dst_addr;
+ soff = doff = 0;
+ major_int = false;
}
- fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
+ fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
fsl_chan->attr, soff, nbytes, 0, iter,
- iter, doff, last_sg, true, false, true);
+ iter, doff, last_sg, major_int, false, true);
dma_buf_next += period_len;
}
return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
-EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
@@ -564,23 +668,51 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
dst_addr = fsl_chan->dma_dev_addr;
soff = fsl_chan->cfg.dst_addr_width;
doff = 0;
- } else {
+ } else if (direction == DMA_DEV_TO_MEM) {
src_addr = fsl_chan->dma_dev_addr;
dst_addr = sg_dma_address(sg);
soff = 0;
doff = fsl_chan->cfg.src_addr_width;
+ } else {
+ /* DMA_DEV_TO_DEV */
+ src_addr = fsl_chan->cfg.src_addr;
+ dst_addr = fsl_chan->cfg.dst_addr;
+ soff = 0;
+ doff = 0;
}
+ /*
+ * Choose a suitable burst length if sg_dma_len is not a
+ * multiple of the burst length, so that the whole transfer
+ * length is a multiple of the minor loop (burst length).
+ */
+ if (sg_dma_len(sg) % nbytes) {
+ u32 width = (direction == DMA_DEV_TO_MEM) ? doff : soff;
+ u32 burst = (direction == DMA_DEV_TO_MEM) ?
+ fsl_chan->cfg.src_maxburst :
+ fsl_chan->cfg.dst_maxburst;
+ int j;
+
+ for (j = burst; j > 1; j--) {
+ if (!(sg_dma_len(sg) % (j * width))) {
+ nbytes = j * width;
+ break;
+ }
+ }
+ /* Set burst size as 1 if there's no suitable one */
+ if (j == 1)
+ nbytes = width;
+ }
iter = sg_dma_len(sg) / nbytes;
if (i < sg_len - 1) {
last_sg = fsl_desc->tcd[(i + 1)].ptcd;
- fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
+ fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
dst_addr, fsl_chan->attr, soff,
nbytes, 0, iter, iter, doff, last_sg,
false, false, true);
} else {
last_sg = 0;
- fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
+ fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
dst_addr, fsl_chan->attr, soff,
nbytes, 0, iter, iter, doff, last_sg,
true, true, false);
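
[Note] The fallback loop added above (the "Choose a suitable burst length" block) searches downward from the configured maxburst for the largest burst whose minor loop divides sg_dma_len evenly, dropping to single-word bursts when nothing fits, so the transfer stays an exact multiple of the minor loop. A standalone version of the same search:

#include <stdio.h>

static unsigned int pick_nbytes(unsigned int len, unsigned int width,
				unsigned int maxburst)
{
	unsigned int j;

	for (j = maxburst; j > 1; j--)
		if (!(len % (j * width)))
			return j * width;
	return width;	/* burst of 1 when nothing divides evenly */
}

int main(void)
{
	/* 100-byte sg entry on a 4-byte bus with maxburst 8:
	 * 32, 28 and 24 don't divide 100, 20 does -> nbytes = 20 */
	printf("nbytes = %u\n", pick_nbytes(100, 4, 8));
	return 0;
}
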
@@ -589,7 +721,6 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
-EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);
struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
dma_addr_t dma_dst, dma_addr_t dma_src,
@@ -606,13 +737,12 @@ struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
fsl_chan->is_sw = true;
/* To match with copy_align and max_seg_size so 1 tcd is enough */
- fsl_edma_fill_tcd(fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
- EDMA_TCD_ATTR_SSIZE_32BYTE | EDMA_TCD_ATTR_DSIZE_32BYTE,
+ fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
+ fsl_edma_get_tcd_attr(DMA_SLAVE_BUSWIDTH_32_BYTES),
32, len, 0, 1, 1, 32, 0, true, true, false);
return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
-EXPORT_SYMBOL_GPL(fsl_edma_prep_memcpy);
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
@@ -629,7 +759,6 @@ void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
fsl_chan->status = DMA_IN_PROGRESS;
fsl_chan->idle = false;
}
-EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);
void fsl_edma_issue_pending(struct dma_chan *chan)
{
@@ -649,7 +778,6 @@ void fsl_edma_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
-EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
@@ -660,7 +788,6 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
32, 0);
return 0;
}
-EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);
void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
@@ -683,7 +810,6 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
fsl_chan->tcd_pool = NULL;
fsl_chan->is_sw = false;
}
-EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);
void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
@@ -695,12 +821,10 @@ void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
tasklet_kill(&chan->vchan.task);
}
}
-EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);
/*
- * On the 32 channels Vybrid/mpc577x edma version (here called "v1"),
- * register offsets are different compared to ColdFire mcf5441x 64 channels
- * edma (here called "v2").
+ * On the 32 channels Vybrid/mpc577x edma version, register offsets are
+ * different compared to ColdFire mcf5441x 64 channels edma.
*
* This function sets up register offsets as per proper declared version
* so must be called in xxx_edma_probe() just after setting the
@@ -708,41 +832,30 @@ EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);
*/
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
+ bool is64 = !!(edma->drvdata->flags & FSL_EDMA_DRV_EDMA64);
+
edma->regs.cr = edma->membase + EDMA_CR;
edma->regs.es = edma->membase + EDMA_ES;
edma->regs.erql = edma->membase + EDMA_ERQ;
edma->regs.eeil = edma->membase + EDMA_EEI;
- edma->regs.serq = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_SERQ : EDMA_SERQ);
- edma->regs.cerq = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_CERQ : EDMA_CERQ);
- edma->regs.seei = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_SEEI : EDMA_SEEI);
- edma->regs.ceei = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_CEEI : EDMA_CEEI);
- edma->regs.cint = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_CINT : EDMA_CINT);
- edma->regs.cerr = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_CERR : EDMA_CERR);
- edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_SSRT : EDMA_SSRT);
- edma->regs.cdne = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_CDNE : EDMA_CDNE);
- edma->regs.intl = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_INTL : EDMA_INTR);
- edma->regs.errl = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_ERRL : EDMA_ERR);
-
- if (edma->drvdata->version == v2) {
+ edma->regs.serq = edma->membase + (is64 ? EDMA64_SERQ : EDMA_SERQ);
+ edma->regs.cerq = edma->membase + (is64 ? EDMA64_CERQ : EDMA_CERQ);
+ edma->regs.seei = edma->membase + (is64 ? EDMA64_SEEI : EDMA_SEEI);
+ edma->regs.ceei = edma->membase + (is64 ? EDMA64_CEEI : EDMA_CEEI);
+ edma->regs.cint = edma->membase + (is64 ? EDMA64_CINT : EDMA_CINT);
+ edma->regs.cerr = edma->membase + (is64 ? EDMA64_CERR : EDMA_CERR);
+ edma->regs.ssrt = edma->membase + (is64 ? EDMA64_SSRT : EDMA_SSRT);
+ edma->regs.cdne = edma->membase + (is64 ? EDMA64_CDNE : EDMA_CDNE);
+ edma->regs.intl = edma->membase + (is64 ? EDMA64_INTL : EDMA_INTR);
+ edma->regs.errl = edma->membase + (is64 ? EDMA64_ERRL : EDMA_ERR);
+
+ if (is64) {
edma->regs.erqh = edma->membase + EDMA64_ERQH;
edma->regs.eeih = edma->membase + EDMA64_EEIH;
edma->regs.errh = edma->membase + EDMA64_ERRH;
edma->regs.inth = edma->membase + EDMA64_INTH;
}
-
- edma->regs.tcd = edma->membase + EDMA_TCD;
}
-EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);
MODULE_LICENSE("GPL v2");