Diffstat (limited to 'drivers/dma/fsl-edma-main.c')
-rw-r--r--  drivers/dma/fsl-edma-main.c | 245
1 file changed, 238 insertions(+), 7 deletions(-)
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 72b7587226df..63d48d046f04 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -18,9 +18,15 @@
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
#include "fsl-edma-common.h"
+#define ARGS_RX BIT(0)
+#define ARGS_REMOTE BIT(1)
+#define ARGS_MULTI_FIFO BIT(2)
+
static void fsl_edma_synchronize(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
@@ -47,6 +53,22 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)
+{
+ struct fsl_edma_chan *fsl_chan = dev_id;
+ unsigned int intr;
+
+ intr = edma_readl_chreg(fsl_chan, ch_int);
+ if (!intr)
+ return IRQ_HANDLED;
+
+ edma_writel_chreg(fsl_chan, 1, ch_int);
+
+ fsl_edma_tx_chan_handler(fsl_chan);
+
+ return IRQ_HANDLED;
+}
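
Unlike the controller-level fsl_edma_tx_handler() above, this handler is registered once per channel (see fsl_edma3_irq_init() below), so it only has to check and acknowledge its own CH_INT register, which is write-1-to-clear. A minimal standalone sketch of that handshake, using a made-up register offset and a bare ioremapped base instead of the driver's edma_readl_chreg()/edma_writel_chreg() accessors:

#define EX_CH_INT	0x04	/* hypothetical per-channel CH_INT offset */

static irqreturn_t ex_chan_tx_handler(int irq, void *dev_id)
{
	void __iomem *ch_base = dev_id;		/* one register block per channel */

	if (!ioread32(ch_base + EX_CH_INT))	/* nothing pending on this channel */
		return IRQ_HANDLED;

	iowrite32(1, ch_base + EX_CH_INT);	/* write-1-to-clear acknowledges it */
	/* ...then complete the finished descriptor... */
	return IRQ_HANDLED;
}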
+
static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
{
struct fsl_edma_engine *fsl_edma = dev_id;
@@ -108,6 +130,51 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
return NULL;
}
+static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
+ struct dma_chan *chan, *_chan;
+ struct fsl_edma_chan *fsl_chan;
+ bool b_chmux;
+ int i;
+
+ if (dma_spec->args_count != 3)
+ return NULL;
+
+ b_chmux = !!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHMUX);
+
+ mutex_lock(&fsl_edma->fsl_edma_mutex);
+ list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels,
+ device_node) {
+
+ if (chan->client_count)
+ continue;
+
+ fsl_chan = to_fsl_edma_chan(chan);
+ i = fsl_chan - fsl_edma->chans;
+
+ chan = dma_get_slave_channel(chan);
+ chan->device->privatecnt++;
+ fsl_chan->priority = dma_spec->args[1];
+ fsl_chan->is_rxchan = dma_spec->args[2] & ARGS_RX;
+ fsl_chan->is_remote = dma_spec->args[2] & ARGS_REMOTE;
+ fsl_chan->is_multi_fifo = dma_spec->args[2] & ARGS_MULTI_FIFO;
+
+ if (!b_chmux && i == dma_spec->args[0]) {
+ mutex_unlock(&fsl_edma->fsl_edma_mutex);
+ return chan;
+ } else if (b_chmux && !fsl_chan->srcid) {
+ /* if the controller supports channel mux, choose a free channel */
+ fsl_chan->srcid = dma_spec->args[0];
+ mutex_unlock(&fsl_edma->fsl_edma_mutex);
+ return chan;
+ }
+ }
+ mutex_unlock(&fsl_edma->fsl_edma_mutex);
+ return NULL;
+}
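
fsl_edma3_xlate() expects a three-cell specifier: cell 0 selects the channel (or, when FSL_EDMA_DRV_HAS_CHMUX is set, carries the source/request ID programmed into whichever free channel gets picked), cell 1 is the priority, and cell 2 is a flag bitmask built from the ARGS_RX, ARGS_REMOTE and ARGS_MULTI_FIFO bits defined at the top of the file. A hedged illustration of how such a specifier lands in the of_phandle_args this callback receives (the values are invented):

/* Hypothetical request "dmas = <&edma3 6 4 1>" as seen by this callback: */
struct of_phandle_args spec = {
	.args_count = 3,
	.args = { 6, 4, ARGS_RX },	/* channel/srcid 6, priority 4, RX direction */
};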
+
static int
fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
@@ -149,6 +216,37 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
return 0;
}
+static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+
+ struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
+
+ if (fsl_edma->chan_masked & BIT(i))
+ continue;
+
+ /* request channel irq */
+ fsl_chan->txirq = platform_get_irq(pdev, i);
+ if (fsl_chan->txirq < 0) {
+ dev_err(&pdev->dev, "Can't get chan %d's irq.\n", i);
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(&pdev->dev, fsl_chan->txirq,
+ fsl_edma3_tx_handler, IRQF_SHARED,
+ fsl_chan->chan_name, fsl_chan);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register chan%d's IRQ.\n", i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int
fsl_edma2_irq_init(struct platform_device *pdev,
struct fsl_edma_engine *fsl_edma)
@@ -214,29 +312,108 @@ static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
static struct fsl_edma_drvdata vf610_data = {
.dmamuxs = DMAMUX_NR,
.flags = FSL_EDMA_DRV_WRAP_IO,
+ .chreg_off = EDMA_TCD,
+ .chreg_space_sz = sizeof(struct fsl_edma_hw_tcd),
.setup_irq = fsl_edma_irq_init,
};
static struct fsl_edma_drvdata ls1028a_data = {
.dmamuxs = DMAMUX_NR,
.flags = FSL_EDMA_DRV_MUX_SWAP | FSL_EDMA_DRV_WRAP_IO,
+ .chreg_off = EDMA_TCD,
+ .chreg_space_sz = sizeof(struct fsl_edma_hw_tcd),
.setup_irq = fsl_edma_irq_init,
};
static struct fsl_edma_drvdata imx7ulp_data = {
.dmamuxs = 1,
+ .chreg_off = EDMA_TCD,
+ .chreg_space_sz = sizeof(struct fsl_edma_hw_tcd),
.flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_CONFIG32,
.setup_irq = fsl_edma2_irq_init,
};
+static struct fsl_edma_drvdata imx8qm_data = {
+ .flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3,
+ .chreg_space_sz = 0x10000,
+ .chreg_off = 0x10000,
+ .setup_irq = fsl_edma3_irq_init,
+};
+
+static struct fsl_edma_drvdata imx8qm_audio_data = {
+ .flags = FSL_EDMA_DRV_QUIRK_SWAPPED | FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3,
+ .chreg_space_sz = 0x10000,
+ .chreg_off = 0x10000,
+ .setup_irq = fsl_edma3_irq_init,
+};
+
+static struct fsl_edma_drvdata imx93_data3 = {
+ .flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3,
+ .chreg_space_sz = 0x10000,
+ .chreg_off = 0x10000,
+ .setup_irq = fsl_edma3_irq_init,
+};
+
+static struct fsl_edma_drvdata imx93_data4 = {
+ .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3,
+ .chreg_space_sz = 0x8000,
+ .chreg_off = 0x10000,
+ .setup_irq = fsl_edma3_irq_init,
+};
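
The new chreg_off/chreg_space_sz fields let one probe path cover both register layouts: the classic controllers keep their TCDs packed together behind a single EDMA_TCD offset, while the eDMA3 variants give every channel its own register block (a 64 KiB stride here, 32 KiB on i.MX93 eDMA4). The helper below merely restates, for illustration, the address math probe performs when it fills in fsl_chan->tcd; the function name is invented:

/* Illustration only: per-channel TCD address as computed in probe below. */
static void __iomem *ex_chan_tcd(void __iomem *membase,
				 const struct fsl_edma_drvdata *dd, int i)
{
	unsigned int len = (dd->flags & FSL_EDMA_DRV_SPLIT_REG) ?
			   offsetof(struct fsl_edma3_ch_reg, tcd) : 0;

	return membase + i * dd->chreg_space_sz + dd->chreg_off + len;
}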
+
static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
{ .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
{ .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
+ { .compatible = "fsl,imx8qm-edma", .data = &imx8qm_data},
+ { .compatible = "fsl,imx8qm-adma", .data = &imx8qm_audio_data},
+ { .compatible = "fsl,imx93-edma3", .data = &imx93_data3},
+ { .compatible = "fsl,imx93-edma4", .data = &imx93_data4},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
+static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
+{
+ struct fsl_edma_chan *fsl_chan;
+ struct device_link *link;
+ struct device *pd_chan;
+ struct device *dev;
+ int i;
+
+ dev = &pdev->dev;
+
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+ if (fsl_edma->chan_masked & BIT(i))
+ continue;
+
+ fsl_chan = &fsl_edma->chans[i];
+
+ pd_chan = dev_pm_domain_attach_by_id(dev, i);
+ if (IS_ERR_OR_NULL(pd_chan)) {
+ dev_err(dev, "Failed attach pd %d\n", i);
+ return -EINVAL;
+ }
+
+ link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (IS_ERR(link)) {
+ dev_err(dev, "Failed to add device_link to %d: %ld\n", i,
+ PTR_ERR(link));
+ return -EINVAL;
+ }
+
+ fsl_chan->pd_dev = pd_chan;
+
+ pm_runtime_use_autosuspend(fsl_chan->pd_dev);
+ pm_runtime_set_autosuspend_delay(fsl_chan->pd_dev, 200);
+ pm_runtime_set_active(fsl_chan->pd_dev);
+ }
+
+ return 0;
+}
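
On the i.MX8 parts each eDMA channel lives in its own power domain, so probe attaches one domain per unmasked channel with dev_pm_domain_attach_by_id(), ties it to the controller with a stateless, runtime-PM device link, and enables autosuspend with a 200 ms delay. This file does not show the consumer side; the sketch below is only a hedged guess at how channel code elsewhere in the driver could bracket activity against the attached pd_dev (helper names invented):

/* Hypothetical consumer side of the per-channel domains attached above. */
static int ex_chan_power_up(struct fsl_edma_chan *fsl_chan)
{
	return pm_runtime_get_sync(fsl_chan->pd_dev);
}

static void ex_chan_power_down(struct fsl_edma_chan *fsl_chan)
{
	pm_runtime_mark_last_busy(fsl_chan->pd_dev);
	pm_runtime_put_autosuspend(fsl_chan->pd_dev);	/* honours the 200 ms delay */
}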
+
static int fsl_edma_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
@@ -244,6 +421,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct fsl_edma_engine *fsl_edma;
const struct fsl_edma_drvdata *drvdata = NULL;
+ u32 chan_mask[2] = {0, 0};
struct edma_regs *regs;
int chans;
int ret, i;
@@ -274,8 +452,10 @@ static int fsl_edma_probe(struct platform_device *pdev)
if (IS_ERR(fsl_edma->membase))
return PTR_ERR(fsl_edma->membase);
- fsl_edma_setup_regs(fsl_edma);
- regs = &fsl_edma->regs;
+ if (!(drvdata->flags & FSL_EDMA_DRV_SPLIT_REG)) {
+ fsl_edma_setup_regs(fsl_edma);
+ regs = &fsl_edma->regs;
+ }
if (drvdata->flags & FSL_EDMA_DRV_HAS_DMACLK) {
fsl_edma->dmaclk = devm_clk_get_enabled(&pdev->dev, "dma");
@@ -285,9 +465,29 @@ static int fsl_edma_probe(struct platform_device *pdev)
}
}
+ if (drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) {
+ fsl_edma->chclk = devm_clk_get_enabled(&pdev->dev, "mp");
+ if (IS_ERR(fsl_edma->chclk)) {
+ dev_err(&pdev->dev, "Missing MP block clock.\n");
+ return PTR_ERR(fsl_edma->chclk);
+ }
+ }
+
+ ret = of_property_read_variable_u32_array(np, "dma-channel-mask", chan_mask, 1, 2);
+
+ if (ret > 0) {
+ fsl_edma->chan_masked = chan_mask[1];
+ fsl_edma->chan_masked <<= 32;
+ fsl_edma->chan_masked |= chan_mask[0];
+ }
+
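dma-channel-mask is read as up to two u32 cells: the first covers channels 0-31 and the second, if present, channels 32-63, so the 64-bit chan_masked value is assembled as (cell1 << 32) | cell0 and every set bit marks a channel this driver must leave alone. A worked example with invented values:

/* "dma-channel-mask = <0xc0 0x0>" would reserve channels 6 and 7: */
u32 cells[2] = { 0xc0, 0x0 };
u64 masked  = ((u64)cells[1] << 32) | cells[0];	/* 0x00000000000000c0 */
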
for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
char clkname[32];
+ /* eDMAv3 mux registers move to the TCD area if ch_mux exists */
+ if (drvdata->flags & FSL_EDMA_DRV_SPLIT_REG)
+ break;
+
fsl_edma->muxbase[i] = devm_platform_ioremap_resource(pdev,
1 + i);
if (IS_ERR(fsl_edma->muxbase[i])) {
@@ -307,9 +507,19 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_edma->big_endian = of_property_read_bool(np, "big-endian");
+ if (drvdata->flags & FSL_EDMA_DRV_HAS_PD) {
+ ret = fsl_edma3_attach_pd(pdev, fsl_edma);
+ if (ret)
+ return ret;
+ }
+
INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
for (i = 0; i < fsl_edma->n_chans; i++) {
struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
+ int len;
+
+ if (fsl_edma->chan_masked & BIT(i))
+ continue;
snprintf(fsl_chan->chan_name, sizeof(fsl_chan->chan_name), "%s-CH%02d",
dev_name(&pdev->dev), i);
@@ -320,8 +530,13 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_chan->idle = true;
fsl_chan->dma_dir = DMA_NONE;
fsl_chan->vchan.desc_free = fsl_edma_free_desc;
- fsl_chan->tcd = fsl_edma->membase + EDMA_TCD
- + i * sizeof(struct fsl_edma_hw_tcd);
+
+ len = (drvdata->flags & FSL_EDMA_DRV_SPLIT_REG) ?
+ offsetof(struct fsl_edma3_ch_reg, tcd) : 0;
+ fsl_chan->tcd = fsl_edma->membase
+ + i * drvdata->chreg_space_sz + drvdata->chreg_off + len;
+
+ fsl_chan->pdev = pdev;
vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
edma_write_tcdreg(fsl_chan, 0, csr);
@@ -355,12 +570,25 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
+
+ if (drvdata->flags & FSL_EDMA_DRV_BUS_8BYTE) {
+ fsl_edma->dma_dev.src_addr_widths |= BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+ fsl_edma->dma_dev.dst_addr_widths |= BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+ }
+
fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ if (drvdata->flags & FSL_EDMA_DRV_DEV_TO_DEV)
+ fsl_edma->dma_dev.directions |= BIT(DMA_DEV_TO_DEV);
+
+ fsl_edma->dma_dev.copy_align = drvdata->flags & FSL_EDMA_DRV_ALIGN_64BYTE ?
+ DMAENGINE_ALIGN_64_BYTES :
+ DMAENGINE_ALIGN_32_BYTES;
- fsl_edma->dma_dev.copy_align = DMAENGINE_ALIGN_32_BYTES;
/* Per worst case 'nbytes = 1' take CITER as the max_seg_size */
dma_set_max_seg_size(fsl_edma->dma_dev.dev, 0x3fff);
+ fsl_edma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
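
The capability block above is where the remaining fsl_edma_drvdata flag bits become visible to dmaengine clients: FSL_EDMA_DRV_BUS_8BYTE adds the 8-byte slave bus width, FSL_EDMA_DRV_DEV_TO_DEV adds device-to-device transfers, and FSL_EDMA_DRV_ALIGN_64BYTE raises copy_align to 64 bytes. A hedged client-side sketch that would only be valid against a variant advertising the 8-byte width (the device, channel name and FIFO address are placeholders):

/* Hypothetical consumer relying on the advertised 8-byte slave bus width. */
struct dma_chan *chan = dma_request_chan(dev, "rx");	/* "dev" and "rx" are placeholders */
struct dma_slave_config cfg = {
	.direction      = DMA_DEV_TO_MEM,
	.src_addr       = fifo_phys_addr,		/* placeholder FIFO address */
	.src_addr_width = DMA_SLAVE_BUSWIDTH_8_BYTES,
	.src_maxburst   = 8,
};

if (!IS_ERR(chan))
	dmaengine_slave_config(chan, &cfg);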
+
platform_set_drvdata(pdev, fsl_edma);
ret = dma_async_device_register(&fsl_edma->dma_dev);
@@ -370,7 +598,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
return ret;
}
- ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma);
+ ret = of_dma_controller_register(np,
+ drvdata->flags & FSL_EDMA_DRV_SPLIT_REG ? fsl_edma3_xlate : fsl_edma_xlate,
+ fsl_edma);
if (ret) {
dev_err(&pdev->dev,
"Can't register Freescale eDMA of_dma. (%d)\n", ret);
@@ -379,7 +609,8 @@ static int fsl_edma_probe(struct platform_device *pdev)
}
/* enable round robin arbitration */
- edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
+ if (!(drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
+ edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
return 0;
}