From 4c7f3ca1745e18e2da9a83373c26eaf6f51c0505 Mon Sep 17 00:00:00 2001 From: Vignesh Raghavendra Date: Tue, 13 Dec 2022 22:13:01 +0530 Subject: dmaengine: ti: k3-udma: Fix BCDMA for case w/o BCHAN Reusing the loop iterator fails if BCHAN is not present, because the iterator is then left uninitialized. Signed-off-by: Vignesh Raghavendra Acked-by: Peter Ujfalusi Link: https://lore.kernel.org/r/20221213164304.1126945-3-vigneshr@ti.com Signed-off-by: Vinod Koul --- drivers/dma/ti/k3-udma.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index ce8b80bb34d7..aa50d46fa856 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -4774,7 +4774,10 @@ static int bcdma_setup_resources(struct udma_dev *ud) irq_res.desc[i].num = rm_res->desc[i].num; } } + } else { + i = 0; } + if (ud->tchan_cnt) { rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; if (IS_ERR(rm_res)) { -- cgit v1.2.3 From aac6db7e243a24f7b1a84a6293b68a72e6764893 Mon Sep 17 00:00:00 2001 From: Jai Luthra Date: Tue, 13 Dec 2022 22:13:02 +0530 Subject: dmaengine: ti: k3-psil-am62a: Add AM62Ax PSIL and PDMA data Add PSIL and PDMA data for the AM62Ax SoC. Signed-off-by: Jai Luthra Signed-off-by: Vignesh Raghavendra Acked-by: Peter Ujfalusi Link: https://lore.kernel.org/r/20221213164304.1126945-4-vigneshr@ti.com Signed-off-by: Vinod Koul --- drivers/dma/ti/Makefile | 3 +- drivers/dma/ti/k3-psil-am62a.c | 196 +++++++++++++++++++++++++++++++++++++++++ drivers/dma/ti/k3-psil-priv.h | 1 + drivers/dma/ti/k3-psil.c | 1 + 4 files changed, 200 insertions(+), 1 deletion(-) create mode 100644 drivers/dma/ti/k3-psil-am62a.c (limited to 'drivers/dma') diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile index b53d05b11ca5..bd1e07fda559 100644 --- a/drivers/dma/ti/Makefile +++ b/drivers/dma/ti/Makefile @@ -10,6 +10,7 @@ k3-psil-lib-objs := k3-psil.o \ k3-psil-j7200.o \ k3-psil-am64.o \ k3-psil-j721s2.o \ - k3-psil-am62.o + k3-psil-am62.o \ + k3-psil-am62a.o obj-$(CONFIG_TI_K3_PSIL) += k3-psil-lib.o obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o diff --git a/drivers/dma/ti/k3-psil-am62a.c b/drivers/dma/ti/k3-psil-am62a.c new file mode 100644 index 000000000000..ca9d71f91422 --- /dev/null +++ b/drivers/dma/ti/k3-psil-am62a.c @@ -0,0 +1,196 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com + */ + +#include <linux/kernel.h> + +#include "k3-psil-priv.h" + +#define PSIL_PDMA_XY_TR(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_PDMA_XY, \ .mapped_channel_id = -1, \ .default_flow_id = -1, \ }, \ } + +#define PSIL_PDMA_XY_PKT(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_PDMA_XY, \ .mapped_channel_id = -1, \ .default_flow_id = -1, \ .pkt_mode = 1, \ }, \ } + +#define PSIL_ETHERNET(x, ch, flow_base, flow_cnt) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_NATIVE, \ .pkt_mode = 1, \ .needs_epib = 1, \ .psd_size = 16, \ .mapped_channel_id = ch, \ .flow_start = flow_base, \ .flow_num = flow_cnt, \ .default_flow_id = flow_base, \ }, \ } + +#define PSIL_SAUL(x, ch, flow_base, flow_cnt, default_flow, tx) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_NATIVE, \ .pkt_mode = 1, \ .needs_epib = 1, \ .psd_size = 64, \ .mapped_channel_id = ch, \ .flow_start = flow_base, \ .flow_num = flow_cnt, \ .default_flow_id = default_flow, \ .notdpkt = tx, \ }, \ } + +#define PSIL_PDMA_MCASP(x) \ { \ .thread_id = 
x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .pdma_acc32 = 1, \ + .pdma_burst = 1, \ + }, \ + } + +#define PSIL_CSI2RX(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + }, \ + } + +/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ +static struct psil_ep am62a_src_ep_map[] = { + /* SAUL */ + PSIL_SAUL(0x7504, 20, 35, 8, 35, 0), + PSIL_SAUL(0x7505, 21, 35, 8, 36, 0), + PSIL_SAUL(0x7506, 22, 43, 8, 43, 0), + PSIL_SAUL(0x7507, 23, 43, 8, 44, 0), + /* PDMA_MAIN0 - SPI0-3 */ + PSIL_PDMA_XY_PKT(0x4302), + PSIL_PDMA_XY_PKT(0x4303), + PSIL_PDMA_XY_PKT(0x4304), + PSIL_PDMA_XY_PKT(0x4305), + PSIL_PDMA_XY_PKT(0x4306), + PSIL_PDMA_XY_PKT(0x4307), + PSIL_PDMA_XY_PKT(0x4308), + PSIL_PDMA_XY_PKT(0x4309), + PSIL_PDMA_XY_PKT(0x430a), + PSIL_PDMA_XY_PKT(0x430b), + PSIL_PDMA_XY_PKT(0x430c), + PSIL_PDMA_XY_PKT(0x430d), + /* PDMA_MAIN1 - UART0-6 */ + PSIL_PDMA_XY_PKT(0x4400), + PSIL_PDMA_XY_PKT(0x4401), + PSIL_PDMA_XY_PKT(0x4402), + PSIL_PDMA_XY_PKT(0x4403), + PSIL_PDMA_XY_PKT(0x4404), + PSIL_PDMA_XY_PKT(0x4405), + PSIL_PDMA_XY_PKT(0x4406), + /* PDMA_MAIN2 - MCASP0-2 */ + PSIL_PDMA_MCASP(0x4500), + PSIL_PDMA_MCASP(0x4501), + PSIL_PDMA_MCASP(0x4502), + /* CPSW3G */ + PSIL_ETHERNET(0x4600, 19, 19, 16), + /* CSI2RX */ + PSIL_CSI2RX(0x5000), + PSIL_CSI2RX(0x5001), + PSIL_CSI2RX(0x5002), + PSIL_CSI2RX(0x5003), + PSIL_CSI2RX(0x5004), + PSIL_CSI2RX(0x5005), + PSIL_CSI2RX(0x5006), + PSIL_CSI2RX(0x5007), + PSIL_CSI2RX(0x5008), + PSIL_CSI2RX(0x5009), + PSIL_CSI2RX(0x500a), + PSIL_CSI2RX(0x500b), + PSIL_CSI2RX(0x500c), + PSIL_CSI2RX(0x500d), + PSIL_CSI2RX(0x500e), + PSIL_CSI2RX(0x500f), + PSIL_CSI2RX(0x5010), + PSIL_CSI2RX(0x5011), + PSIL_CSI2RX(0x5012), + PSIL_CSI2RX(0x5013), + PSIL_CSI2RX(0x5014), + PSIL_CSI2RX(0x5015), + PSIL_CSI2RX(0x5016), + PSIL_CSI2RX(0x5017), + PSIL_CSI2RX(0x5018), + PSIL_CSI2RX(0x5019), + PSIL_CSI2RX(0x501a), + PSIL_CSI2RX(0x501b), + PSIL_CSI2RX(0x501c), + PSIL_CSI2RX(0x501d), + PSIL_CSI2RX(0x501e), + PSIL_CSI2RX(0x501f), +}; + +/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ +static struct psil_ep am62a_dst_ep_map[] = { + /* SAUL */ + PSIL_SAUL(0xf500, 27, 83, 8, 83, 1), + PSIL_SAUL(0xf501, 28, 91, 8, 91, 1), + /* PDMA_MAIN0 - SPI0-3 */ + PSIL_PDMA_XY_PKT(0xc302), + PSIL_PDMA_XY_PKT(0xc303), + PSIL_PDMA_XY_PKT(0xc304), + PSIL_PDMA_XY_PKT(0xc305), + PSIL_PDMA_XY_PKT(0xc306), + PSIL_PDMA_XY_PKT(0xc307), + PSIL_PDMA_XY_PKT(0xc308), + PSIL_PDMA_XY_PKT(0xc309), + PSIL_PDMA_XY_PKT(0xc30a), + PSIL_PDMA_XY_PKT(0xc30b), + PSIL_PDMA_XY_PKT(0xc30c), + PSIL_PDMA_XY_PKT(0xc30d), + /* PDMA_MAIN1 - UART0-6 */ + PSIL_PDMA_XY_PKT(0xc400), + PSIL_PDMA_XY_PKT(0xc401), + PSIL_PDMA_XY_PKT(0xc402), + PSIL_PDMA_XY_PKT(0xc403), + PSIL_PDMA_XY_PKT(0xc404), + PSIL_PDMA_XY_PKT(0xc405), + PSIL_PDMA_XY_PKT(0xc406), + /* PDMA_MAIN2 - MCASP0-2 */ + PSIL_PDMA_MCASP(0xc500), + PSIL_PDMA_MCASP(0xc501), + PSIL_PDMA_MCASP(0xc502), + /* CPSW3G */ + PSIL_ETHERNET(0xc600, 19, 19, 8), + PSIL_ETHERNET(0xc601, 20, 27, 8), + PSIL_ETHERNET(0xc602, 21, 35, 8), + PSIL_ETHERNET(0xc603, 22, 43, 8), + PSIL_ETHERNET(0xc604, 23, 51, 8), + PSIL_ETHERNET(0xc605, 24, 59, 8), + PSIL_ETHERNET(0xc606, 25, 67, 8), + PSIL_ETHERNET(0xc607, 26, 75, 8), +}; + +struct psil_ep_map am62a_ep_map = { + .name = "am62a", + .src = am62a_src_ep_map, + .src_count = ARRAY_SIZE(am62a_src_ep_map), + .dst = am62a_dst_ep_map, + .dst_count = ARRAY_SIZE(am62a_dst_ep_map), +}; diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h index 74fa9ec02968..abd650bb7600 100644 --- 
a/drivers/dma/ti/k3-psil-priv.h +++ b/drivers/dma/ti/k3-psil-priv.h @@ -43,5 +43,6 @@ extern struct psil_ep_map j7200_ep_map; extern struct psil_ep_map am64_ep_map; extern struct psil_ep_map j721s2_ep_map; extern struct psil_ep_map am62_ep_map; +extern struct psil_ep_map am62a_ep_map; #endif /* K3_PSIL_PRIV_H_ */ diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c index 8b6533a1eeeb..2da6988a0e7b 100644 --- a/drivers/dma/ti/k3-psil.c +++ b/drivers/dma/ti/k3-psil.c @@ -24,6 +24,7 @@ static const struct soc_device_attribute k3_soc_devices[] = { { .family = "AM64X", .data = &am64_ep_map }, { .family = "J721S2", .data = &j721s2_ep_map }, { .family = "AM62X", .data = &am62_ep_map }, + { .family = "AM62AX", .data = &am62a_ep_map }, { /* sentinel */ } }; -- cgit v1.2.3 From c1475ad338bdc6f2635450e35dd86b9051fa7c68 Mon Sep 17 00:00:00 2001 From: Vignesh Raghavendra Date: Tue, 13 Dec 2022 22:13:03 +0530 Subject: dmaengine: ti: k3-udma: Add support for DMAs on AM62A SoC The AM62A SoC has a BCDMA and a PKTDMA as system DMAs for servicing various peripherals, similar to the AM64 SoC. Add support for them. Signed-off-by: Vignesh Raghavendra Acked-by: Peter Ujfalusi Link: https://lore.kernel.org/r/20221213164304.1126945-5-vigneshr@ti.com Signed-off-by: Vinod Koul --- drivers/dma/ti/k3-udma.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/dma') diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index aa50d46fa856..c1005d17b42e 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -4386,6 +4386,7 @@ static const struct soc_device_attribute k3_soc_devices[] = { { .family = "AM64X", .data = &am64_soc_data }, { .family = "J721S2", .data = &j721e_soc_data}, { .family = "AM62X", .data = &am64_soc_data }, + { .family = "AM62AX", .data = &am64_soc_data }, { /* sentinel */ } }; -- cgit v1.2.3 From 3f58e10615f3bd7da8d0ef2f9c815d8e1a968122 Mon Sep 17 00:00:00 2001 From: Vignesh Raghavendra Date: Tue, 13 Dec 2022 22:13:04 +0530 Subject: dmaengine: ti: k3-udma: Add support for BCDMA CSI RX The BCDMA CSI RX instance present on the AM62Ax SoC is a dedicated DMA for servicing the Camera Serial Interface (CSI) IP. Add support for it.
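In short, the lookup order this introduces in udma_probe() is: prefer SoC data attached to the matched compatible, and fall back to the SoC-family table only when none is provided. A condensed sketch of the logic in the udma_probe() hunk below (paraphrased for readability, not a verbatim excerpt):

	/* Prefer per-compatible SoC data, e.g. the BCDMA CSI RX
	 * register offsets carried by am62a_dmss_csi_soc_data. */
	ud->soc_data = ud->match_data->soc_data;
	if (!ud->soc_data) {
		/* No override: keep the old SoC-family lookup. */
		soc = soc_device_match(k3_soc_devices);
		if (!soc) {
			dev_err(dev, "No compatible SoC found\n");
			return -ENODEV;
		}
		ud->soc_data = soc->data;
	}

This way a dedicated instance such as the CSI RX BCDMA does not need its own entry in k3_soc_devices.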
Signed-off-by: Vignesh Raghavendra Acked-by: Peter Ujfalusi Link: https://lore.kernel.org/r/20221213164304.1126945-6-vigneshr@ti.com Signed-off-by: Vinod Koul --- drivers/dma/ti/k3-udma.c | 37 ++++++++++++++++++++++++++++++++----- 1 file changed, 32 insertions(+), 5 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index c1005d17b42e..1d3d1b387b96 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -135,6 +135,7 @@ struct udma_match_data { u32 flags; u32 statictr_z_mask; u8 burst_size[3]; + struct udma_soc_data *soc_data; }; struct udma_soc_data { @@ -4295,6 +4296,25 @@ static struct udma_match_data j721e_mcu_data = { }, }; +static struct udma_soc_data am62a_dmss_csi_soc_data = { + .oes = { + .bcdma_rchan_data = 0xe00, + .bcdma_rchan_ring = 0x1000, + }, +}; + +static struct udma_match_data am62a_bcdma_csirx_data = { + .type = DMA_TYPE_BCDMA, + .psil_base = 0x3100, + .enable_memcpy_support = false, + .burst_size = { + TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ + 0, /* No H Channels */ + 0, /* No UH Channels */ + }, + .soc_data = &am62a_dmss_csi_soc_data, +}; + static struct udma_match_data am64_bcdma_data = { .type = DMA_TYPE_BCDMA, .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */ @@ -4344,6 +4364,10 @@ static const struct of_device_id udma_of_match[] = { .compatible = "ti,am64-dmss-pktdma", .data = &am64_pktdma_data, }, + { + .compatible = "ti,am62a-dmss-bcdma-csirx", + .data = &am62a_bcdma_csirx_data, + }, { /* Sentinel */ }, }; @@ -5274,12 +5298,15 @@ static int udma_probe(struct platform_device *pdev) } ud->match_data = match->data; - soc = soc_device_match(k3_soc_devices); - if (!soc) { - dev_err(dev, "No compatible SoC found\n"); - return -ENODEV; + ud->soc_data = ud->match_data->soc_data; + if (!ud->soc_data) { + soc = soc_device_match(k3_soc_devices); + if (!soc) { + dev_err(dev, "No compatible SoC found\n"); + return -ENODEV; + } + ud->soc_data = soc->data; } - ud->soc_data = soc->data; ret = udma_get_mmrs(pdev, ud); if (ret) -- cgit v1.2.3 From 5840c8915a2f2becd760ba3a700ee7832d13c53f Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Mon, 12 Dec 2022 11:35:14 +0800 Subject: dmaengine: idxd: Remove the unused function set_completion_address() The function set_completion_address is defined in the dma.c file, but not called elsewhere, so remove this unused function. drivers/dma/idxd/dma.c:66:20: warning: unused function 'set_completion_address'. 
Link: https://bugzilla.openanolis.cn/show_bug.cgi?id=3416 Reported-by: Abaci Robot Signed-off-by: Jiapeng Chong Acked-by: Dave Jiang Link: https://lore.kernel.org/r/20221212033514.5831-1-jiapeng.chong@linux.alibaba.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/dma.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c index e0874cb4721c..eb35ca313684 100644 --- a/drivers/dma/idxd/dma.c +++ b/drivers/dma/idxd/dma.c @@ -63,12 +63,6 @@ static void op_flag_setup(unsigned long flags, u32 *desc_flags) *desc_flags |= IDXD_OP_FLAG_RCI; } -static inline void set_completion_address(struct idxd_desc *desc, - u64 *compl_addr) -{ - *compl_addr = desc->compl_dma; -} - static inline void idxd_prep_desc_common(struct idxd_wq *wq, struct dsa_hw_desc *hw, char opcode, u64 addr_f1, u64 addr_f2, u64 len, -- cgit v1.2.3 From 9735bde36487da43d3c3fc910df49639f72decbf Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Fri, 9 Dec 2022 09:21:41 -0800 Subject: dmaengine: idxd: Set traffic class values in GRPCFG on DSA 2.0 On DSA/IAX 1.0, TC-A and TC-B in GRPCFG are set to 1 for best performance and cannot be changed through sysfs knobs unless the override option is given. The same values should be set on DSA 2.0 as well. Fixes: ea7c8f598c32 ("dmaengine: idxd: restore traffic class defaults after wq reset") Fixes: ade8a86b512c ("dmaengine: idxd: Set defaults for GRPCFG traffic class") Signed-off-by: Fenghua Yu Reviewed-by: Dave Jiang Link: https://lore.kernel.org/r/20221209172141.562648-1-fenghua.yu@intel.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/device.c | 2 +- drivers/dma/idxd/init.c | 2 +- drivers/dma/idxd/sysfs.c | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index 06f5d3783d77..19ab3e55033e 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -701,7 +701,7 @@ static void idxd_groups_clear_state(struct idxd_device *idxd) group->use_rdbuf_limit = false; group->rdbufs_allowed = 0; group->rdbufs_reserved = 0; - if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) { + if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) { group->tc_a = 1; group->tc_b = 1; } else { diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index 529ea09c9094..e63b0c674d88 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -295,7 +295,7 @@ static int idxd_setup_groups(struct idxd_device *idxd) } idxd->groups[i] = group; - if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) { + if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) { group->tc_a = 1; group->tc_b = 1; } else { diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index 3229dfc78650..18cd8151dee0 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -387,7 +387,7 @@ static ssize_t group_traffic_class_a_store(struct device *dev, if (idxd->state == IDXD_DEV_ENABLED) return -EPERM; - if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) + if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) return -EPERM; if (val < 0 || val > 7) @@ -429,7 +429,7 @@ static ssize_t group_traffic_class_b_store(struct device *dev, if (idxd->state == IDXD_DEV_ENABLED) return -EPERM; - if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) + if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) return -EPERM; if (val < 0 || val > 7) -- cgit v1.2.3 From dcca9d045c0852584ad092123c7f6e6526a633b1 Mon Sep 17 
00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 21 Nov 2022 16:23:53 +0100 Subject: dmaengine: HISI_DMA should depend on ARCH_HISI The HiSilicon DMA Engine is only present on HiSilicon SoCs. Hence add a dependency on ARCH_HISI, to prevent asking the user about this driver when configuring a kernel without HiSilicon SoC support. Fixes: e9f08b65250d73ab ("dmaengine: hisilicon: Add Kunpeng DMA engine support") Signed-off-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/363a1816d36cd3cf604d88ec90f97c75f604de64.1669044190.git.geert+renesas@glider.be Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index b6d48d54f42f..7b95f07c6f1a 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -245,7 +245,7 @@ config FSL_RAID config HISI_DMA tristate "HiSilicon DMA Engine support" - depends on ARM64 || COMPILE_TEST + depends on ARCH_HISI || COMPILE_TEST depends on PCI_MSI select DMA_ENGINE select DMA_VIRTUAL_CHANNELS -- cgit v1.2.3 From e873d4329ccb891bf3b17f1e0d44396de943e92d Mon Sep 17 00:00:00 2001 From: Joy Zou Date: Tue, 15 Nov 2022 17:38:23 +0800 Subject: dmaengine: imx-sdma: support hdmi in sdma The HDMI script is already supported in the SDMA firmware, so add HDMI support to the SDMA driver. The design of the HDMI script differs from that of common scripts such as SAI: there is no need to configure buffer descriptors for HDMI, as the cyclic capability is implemented by the HDMI script itself. The slave config is simple: only src_addr, dst_addr and the direction DMA_TRANS_NONE need to be set. Signed-off-by: Joy Zou Reviewed-by: Sascha Hauer Link: https://lore.kernel.org/r/20221115093823.2879128-3-joy.zou@nxp.com Signed-off-by: Vinod Koul --- drivers/dma/imx-sdma.c | 38 ++++++++++++++++++++++++++++++-------- include/linux/dma/imx-dma.h | 1 + 2 files changed, 31 insertions(+), 8 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index fbea5f62dd98..ab877ceeac3f 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -954,7 +954,10 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id) desc = sdmac->desc; if (desc) { if (sdmac->flags & IMX_DMA_SG_LOOP) { - sdma_update_channel_loop(sdmac); + if (sdmac->peripheral_type != IMX_DMATYPE_HDMI) + sdma_update_channel_loop(sdmac); + else + vchan_cyclic_callback(&desc->vd); } else { mxc_sdma_handle_channel_normal(sdmac); vchan_cookie_complete(&desc->vd); @@ -1074,6 +1077,10 @@ static int sdma_get_pc(struct sdma_channel *sdmac, per_2_emi = sdma->script_addrs->sai_2_mcu_addr; emi_2_per = sdma->script_addrs->mcu_2_sai_addr; break; + case IMX_DMATYPE_HDMI: + emi_2_per = sdma->script_addrs->hdmi_dma_addr; + sdmac->is_ram_script = true; + break; default: dev_err(sdma->dev, "Unsupported transfer type %d\n", peripheral_type); @@ -1125,11 +1132,16 @@ static int sdma_load_context(struct sdma_channel *sdmac) /* Send by context the event mask,base address for peripheral * and watermark level */ - context->gReg[0] = sdmac->event_mask[1]; - context->gReg[1] = sdmac->event_mask[0]; - context->gReg[2] = sdmac->per_addr; - context->gReg[6] = sdmac->shp_addr; - context->gReg[7] = sdmac->watermark_level; + if (sdmac->peripheral_type == IMX_DMATYPE_HDMI) { + context->gReg[4] = sdmac->per_addr; + context->gReg[6] = sdmac->shp_addr; + } else { + context->gReg[0] = sdmac->event_mask[1]; + context->gReg[1] = sdmac->event_mask[0]; + context->gReg[2] = sdmac->per_addr; + context->gReg[6] = sdmac->shp_addr; + 
context->gReg[7] = sdmac->watermark_level; + } bd0->mode.command = C0_SETDM; bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD; @@ -1513,7 +1525,7 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac, desc->sdmac = sdmac; desc->num_bd = bds; - if (sdma_alloc_bd(desc)) + if (bds && sdma_alloc_bd(desc)) goto err_desc_out; /* No slave_config called in MEMCPY case, so do here */ @@ -1678,13 +1690,16 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( { struct sdma_channel *sdmac = to_sdma_chan(chan); struct sdma_engine *sdma = sdmac->sdma; - int num_periods = buf_len / period_len; + int num_periods = 0; int channel = sdmac->channel; int i = 0, buf = 0; struct sdma_desc *desc; dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel); + if (sdmac->peripheral_type != IMX_DMATYPE_HDMI) + num_periods = buf_len / period_len; + sdma_config_write(chan, &sdmac->slave_config, direction); desc = sdma_transfer_init(sdmac, direction, num_periods); @@ -1701,6 +1716,9 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( goto err_bd_out; } + if (sdmac->peripheral_type == IMX_DMATYPE_HDMI) + return vchan_tx_prep(&sdmac->vc, &desc->vd, flags); + while (buf < buf_len) { struct sdma_buffer_descriptor *bd = &desc->bd[i]; int param; @@ -1761,6 +1779,10 @@ static int sdma_config_write(struct dma_chan *chan, sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) & SDMA_WATERMARK_LEVEL_HWML; sdmac->word_size = dmaengine_cfg->dst_addr_width; + } else if (sdmac->peripheral_type == IMX_DMATYPE_HDMI) { + sdmac->per_address = dmaengine_cfg->dst_addr; + sdmac->per_address2 = dmaengine_cfg->src_addr; + sdmac->watermark_level = 0; } else { sdmac->per_address = dmaengine_cfg->dst_addr; sdmac->watermark_level = dmaengine_cfg->dst_maxburst * diff --git a/include/linux/dma/imx-dma.h b/include/linux/dma/imx-dma.h index f487a4fa103a..cfec5f946e23 100644 --- a/include/linux/dma/imx-dma.h +++ b/include/linux/dma/imx-dma.h @@ -40,6 +40,7 @@ enum sdma_peripheral_type { IMX_DMATYPE_ASRC_SP, /* Shared ASRC */ IMX_DMATYPE_SAI, /* SAI */ IMX_DMATYPE_MULTI_SAI, /* MULTI FIFOs For Audio */ + IMX_DMATYPE_HDMI, /* HDMI Audio */ }; enum imx_dma_prio { -- cgit v1.2.3 From ad4ce0789033529673aa69ef1568ac37f38f9f27 Mon Sep 17 00:00:00 2001 From: Jayesh Choudhary Date: Tue, 17 Jan 2023 10:48:55 +0530 Subject: dmaengine: ti: k3-udma: remove non-fatal probe deferral log Drop the non-fatal probe deferral log for getting MSI domain. This makes the kernel log clean and we do not get recurring logs stating: "Failed to get MSI domain". Signed-off-by: Jayesh Choudhary Acked-by: Peter Ujfalusi Link: https://lore.kernel.org/r/20230117051855.29644-1-j-choudhary@ti.com Signed-off-by: Vinod Koul --- drivers/dma/ti/k3-udma.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index 1d3d1b387b96..5ea1656b8919 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -5375,7 +5375,6 @@ static int udma_probe(struct platform_device *pdev) dev->msi.domain = of_msi_get_domain(dev, dev->of_node, DOMAIN_BUS_TI_SCI_INTA_MSI); if (!dev->msi.domain) { - dev_err(dev, "Failed to get MSI domain\n"); return -EPROBE_DEFER; } -- cgit v1.2.3 From 3dfaa68fa89afd7a926cc3c5f0388b52f489609b Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Sun, 1 Jan 2023 13:36:04 -0600 Subject: dmaengine: sun6i: Set the maximum segment size The sun6i DMA engine supports segment sizes up to 2^25-1 bytes. 
This is explicitly stated in newer SoC documentation (H6, D1), and it is implied in older documentation by the 25-bit width of the "bytes left in the current segment" register field. Exposing the real segment size limit (instead of the 64k default) reduces the number of SG list segments needed for a transaction. Reviewed-by: Jernej Skrabec Signed-off-by: Samuel Holland Link: https://lore.kernel.org/r/20230101193605.50285-1-samuel@sholland.org Signed-off-by: Vinod Koul --- drivers/dma/sun6i-dma.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index b7557f437936..30667d251e97 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -9,6 +9,7 @@ #include <linux/clk.h> #include <linux/delay.h> +#include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/dmapool.h> #include <linux/interrupt.h> @@ -1334,6 +1335,8 @@ static int sun6i_dma_probe(struct platform_device *pdev) INIT_LIST_HEAD(&sdc->pending); spin_lock_init(&sdc->lock); + dma_set_max_seg_size(&pdev->dev, SZ_32M - 1); + dma_cap_set(DMA_PRIVATE, sdc->slave.cap_mask); dma_cap_set(DMA_MEMCPY, sdc->slave.cap_mask); dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask); -- cgit v1.2.3 From faab12342f5a4579f066a9e9dcfa97d3e60cf36f Mon Sep 17 00:00:00 2001 From: ye xingchen Date: Tue, 6 Dec 2022 17:14:50 +0800 Subject: dmaengine: ppc4xx: Convert to use sysfs_emit()/sysfs_emit_at() APIs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Follow the advice of Documentation/filesystems/sysfs.rst: show() should only use sysfs_emit() or sysfs_emit_at() when formatting the value to be returned to user space. Signed-off-by: ye xingchen Link: https://lore.kernel.org/r/202212061714501297954@zte.com.cn Signed-off-by: Vinod Koul --- drivers/dma/ppc4xx/adma.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 6b5e91f26afc..686c270ef710 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -4299,9 +4299,8 @@ static ssize_t devices_show(struct device_driver *dev, char *buf) for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++) { if (ppc440spe_adma_devices[i] == -1) continue; - size += scnprintf(buf + size, PAGE_SIZE - size, - "PPC440SP(E)-ADMA.%d: %s\n", i, - ppc_adma_errors[ppc440spe_adma_devices[i]]); + size += sysfs_emit_at(buf, size, "PPC440SP(E)-ADMA.%d: %s\n", + i, ppc_adma_errors[ppc440spe_adma_devices[i]]); } return size; } @@ -4309,9 +4308,8 @@ static DRIVER_ATTR_RO(devices); static ssize_t enable_show(struct device_driver *dev, char *buf) { - return snprintf(buf, PAGE_SIZE, - "PPC440SP(e) RAID-6 capabilities are %sABLED.\n", - ppc440spe_r6_enabled ? "EN" : "DIS"); + return sysfs_emit(buf, "PPC440SP(e) RAID-6 capabilities are %sABLED.\n", + ppc440spe_r6_enabled ? "EN" : "DIS"); } static ssize_t enable_store(struct device_driver *dev, const char *buf, @@ -4362,7 +4360,7 @@ static ssize_t poly_show(struct device_driver *dev, char *buf) reg &= 0xFF; #endif - size = snprintf(buf, PAGE_SIZE, "PPC440SP(e) RAID-6 driver " + size = sysfs_emit(buf, "PPC440SP(e) RAID-6 driver " "uses 0x1%02x polynomial.\n", reg); return size; } -- cgit v1.2.3 From 610b573e5169336e99092dbf7071fdf08222c3f1 Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Thu, 17 Nov 2022 15:15:46 +0200 Subject: dmaengine: at_xdmac: align properly function members Align function members properly. 
Signed-off-by: Claudiu Beznea Link: https://lore.kernel.org/r/20221117131547.293044-3-claudiu.beznea@microchip.com Signed-off-by: Vinod Koul --- drivers/dma/at_xdmac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index d6c9781cd46a..5e8647b0ce87 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1456,7 +1456,7 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl, static enum dma_status at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, - struct dma_tx_state *txstate) + struct dma_tx_state *txstate) { struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); -- cgit v1.2.3 From 650b0e990cbd7e214251a173460f79f3681e8233 Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Thu, 17 Nov 2022 15:15:45 +0200 Subject: dmaengine: at_xdmac: add runtime pm support Add runtime PM support, which involves disabling/enabling the controller's clocks in the runtime PM suspend/resume ops. Runtime suspend/resume is driven by the work submitted to the controller: runtime resume happens in at_xdmac_start_xfer() and runtime suspend in at_xdmac_tasklet(). Signed-off-by: Claudiu Beznea Link: https://lore.kernel.org/r/20221117131547.293044-2-claudiu.beznea@microchip.com Signed-off-by: Vinod Koul --- drivers/dma/at_xdmac.c | 217 +++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 202 insertions(+), 15 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 5e8647b0ce87..a9278bf4c17b 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -21,6 +21,7 @@ #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm.h> +#include <linux/pm_runtime.h> #include "dmaengine.h" @@ -240,6 +241,7 @@ struct at_xdmac_chan { struct at_xdmac { struct dma_device dma; void __iomem *regs; + struct device *dev; int irq; struct clk *clk; u32 save_gim; @@ -361,13 +363,65 @@ MODULE_PARM_DESC(init_nr_desc_per_channel, "initial descriptors per channel (default: 64)"); +static void at_xdmac_runtime_suspend_descriptors(struct at_xdmac_chan *atchan) +{ + struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); + struct at_xdmac_desc *desc, *_desc; + + list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) { + if (!desc->active_xfer) + continue; + + pm_runtime_mark_last_busy(atxdmac->dev); + pm_runtime_put_autosuspend(atxdmac->dev); + } +} + +static int at_xdmac_runtime_resume_descriptors(struct at_xdmac_chan *atchan) +{ + struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); + struct at_xdmac_desc *desc, *_desc; + int ret; + + list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) { + if (!desc->active_xfer) + continue; + + ret = pm_runtime_resume_and_get(atxdmac->dev); + if (ret < 0) + return ret; + } + + return 0; +} + static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan) { - return at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask; + struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); + int ret; + + ret = pm_runtime_resume_and_get(atxdmac->dev); + if (ret < 0) + return false; + + ret = !!(at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask); + + pm_runtime_mark_last_busy(atxdmac->dev); + pm_runtime_put_autosuspend(atxdmac->dev); + + return ret; } static void at_xdmac_off(struct at_xdmac *atxdmac) { + struct dma_chan *chan, *_chan; + struct at_xdmac_chan *atchan; + int ret; + + ret = pm_runtime_resume_and_get(atxdmac->dev); + if (ret < 0) + 
return; + at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L); /* Wait that all chans are disabled. */ @@ -375,6 +429,18 @@ static void at_xdmac_off(struct at_xdmac *atxdmac) cpu_relax(); at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L); + + /* Decrement runtime PM ref counter for each active descriptor. */ + if (!list_empty(&atxdmac->dma.channels)) { + list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, + device_node) { + atchan = to_at_xdmac_chan(chan); + at_xdmac_runtime_suspend_descriptors(atchan); + } + } + + pm_runtime_mark_last_busy(atxdmac->dev); + pm_runtime_put_autosuspend(atxdmac->dev); } /* Call with lock hold. */ @@ -383,6 +449,11 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan, { struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); u32 reg; + int ret; + + ret = pm_runtime_resume_and_get(atxdmac->dev); + if (ret < 0) + return; dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first); @@ -1463,7 +1534,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct at_xdmac_desc *desc, *_desc, *iter; struct list_head *descs_list; enum dma_status ret; - int residue, retry; + int residue, retry, pm_status; u32 cur_nda, check_nda, cur_ubc, mask, value; u8 dwidth = 0; unsigned long flags; @@ -1473,6 +1544,10 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, if (ret == DMA_COMPLETE || !txstate) return ret; + pm_status = pm_runtime_resume_and_get(atxdmac->dev); + if (pm_status < 0) + return DMA_ERROR; + spin_lock_irqsave(&atchan->lock, flags); desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node); @@ -1590,6 +1665,8 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, spin_unlock: spin_unlock_irqrestore(&atchan->lock, flags); + pm_runtime_mark_last_busy(atxdmac->dev); + pm_runtime_put_autosuspend(atxdmac->dev); return ret; } @@ -1636,6 +1713,11 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan) { struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); struct at_xdmac_desc *bad_desc; + int ret; + + ret = pm_runtime_resume_and_get(atxdmac->dev); + if (ret < 0) + return; /* * The descriptor currently at the head of the active list is @@ -1665,12 +1747,16 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan) __func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da, bad_desc->lld.mbr_ubc); + pm_runtime_mark_last_busy(atxdmac->dev); + pm_runtime_put_autosuspend(atxdmac->dev); + /* Then continue with usual descriptor management */ } static void at_xdmac_tasklet(struct tasklet_struct *t) { struct at_xdmac_chan *atchan = from_tasklet(atchan, t, tasklet); + struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); struct at_xdmac_desc *desc; struct dma_async_tx_descriptor *txd; u32 error_mask; @@ -1720,6 +1806,13 @@ static void at_xdmac_tasklet(struct tasklet_struct *t) list_splice_tail_init(&desc->descs_list, &atchan->free_descs_list); at_xdmac_advance_work(atchan); spin_unlock_irq(&atchan->lock); + + /* + * Decrement runtime PM ref counter incremented in + * at_xdmac_start_xfer(). 
+ */ + pm_runtime_mark_last_busy(atxdmac->dev); + pm_runtime_put_autosuspend(atxdmac->dev); } static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id) @@ -1811,19 +1904,31 @@ static int at_xdmac_device_pause(struct dma_chan *chan) struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); unsigned long flags; + int ret; dev_dbg(chan2dev(chan), "%s\n", __func__); if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status)) return 0; + ret = pm_runtime_resume_and_get(atxdmac->dev); + if (ret < 0) + return ret; + spin_lock_irqsave(&atchan->lock, flags); at_xdmac_write(atxdmac, atxdmac->layout->grws, atchan->mask); while (at_xdmac_chan_read(atchan, AT_XDMAC_CC) & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP)) cpu_relax(); + + /* Decrement runtime PM ref counter for each active descriptor. */ + at_xdmac_runtime_suspend_descriptors(atchan); + spin_unlock_irqrestore(&atchan->lock, flags); + pm_runtime_mark_last_busy(atxdmac->dev); + pm_runtime_put_autosuspend(atxdmac->dev); + return 0; } @@ -1832,20 +1937,32 @@ static int at_xdmac_device_resume(struct dma_chan *chan) struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); unsigned long flags; + int ret; dev_dbg(chan2dev(chan), "%s\n", __func__); + ret = pm_runtime_resume_and_get(atxdmac->dev); + if (ret < 0) + return ret; + spin_lock_irqsave(&atchan->lock, flags); - if (!at_xdmac_chan_is_paused(atchan)) { - spin_unlock_irqrestore(&atchan->lock, flags); - return 0; - } + if (!at_xdmac_chan_is_paused(atchan)) + goto unlock; + + /* Increment runtime PM ref counter for each active descriptor. */ + ret = at_xdmac_runtime_resume_descriptors(atchan); + if (ret < 0) + goto unlock; at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask); clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); + +unlock: spin_unlock_irqrestore(&atchan->lock, flags); + pm_runtime_mark_last_busy(atxdmac->dev); + pm_runtime_put_autosuspend(atxdmac->dev); - return 0; + return ret; } static int at_xdmac_device_terminate_all(struct dma_chan *chan) @@ -1854,9 +1971,14 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan) struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); unsigned long flags; + int ret; dev_dbg(chan2dev(chan), "%s\n", __func__); + ret = pm_runtime_resume_and_get(atxdmac->dev); + if (ret < 0) + return ret; + spin_lock_irqsave(&atchan->lock, flags); at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask) @@ -1867,12 +1989,24 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan) list_del(&desc->xfer_node); list_splice_tail_init(&desc->descs_list, &atchan->free_descs_list); + /* + * We incremented the runtime PM reference count on + * at_xdmac_start_xfer() for this descriptor. Now it's time + * to release it. 
+ */ + if (desc->active_xfer) { + pm_runtime_put_autosuspend(atxdmac->dev); + pm_runtime_mark_last_busy(atxdmac->dev); + } } clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); spin_unlock_irqrestore(&atchan->lock, flags); + pm_runtime_mark_last_busy(atxdmac->dev); + pm_runtime_put_autosuspend(atxdmac->dev); + return 0; } @@ -1974,6 +2108,11 @@ static int __maybe_unused atmel_xdmac_suspend(struct device *dev) { struct at_xdmac *atxdmac = dev_get_drvdata(dev); struct dma_chan *chan, *_chan; + int ret; + + ret = pm_runtime_resume_and_get(atxdmac->dev); + if (ret < 0) + return ret; list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); @@ -1986,12 +2125,13 @@ static int __maybe_unused atmel_xdmac_suspend(struct device *dev) atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA); atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC); } + + at_xdmac_runtime_suspend_descriptors(atchan); } atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM); at_xdmac_off(atxdmac); - clk_disable_unprepare(atxdmac->clk); - return 0; + return pm_runtime_force_suspend(atxdmac->dev); } static int __maybe_unused atmel_xdmac_resume(struct device *dev) @@ -2003,8 +2143,8 @@ static int __maybe_unused atmel_xdmac_resume(struct device *dev) int i; int ret; - ret = clk_prepare_enable(atxdmac->clk); - if (ret) + ret = pm_runtime_force_resume(atxdmac->dev); + if (ret < 0) return ret; at_xdmac_axi_config(pdev); @@ -2019,6 +2159,11 @@ static int __maybe_unused atmel_xdmac_resume(struct device *dev) at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim); list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { atchan = to_at_xdmac_chan(chan); + + ret = at_xdmac_runtime_resume_descriptors(atchan); + if (ret < 0) + return ret; + at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc); if (at_xdmac_chan_is_cyclic(atchan)) { if (at_xdmac_chan_is_paused(atchan)) @@ -2030,9 +2175,29 @@ static int __maybe_unused atmel_xdmac_resume(struct device *dev) at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask); } } + + pm_runtime_mark_last_busy(atxdmac->dev); + pm_runtime_put_autosuspend(atxdmac->dev); + + return 0; +} + +static int __maybe_unused atmel_xdmac_runtime_suspend(struct device *dev) +{ + struct at_xdmac *atxdmac = dev_get_drvdata(dev); + + clk_disable(atxdmac->clk); + return 0; } +static int __maybe_unused atmel_xdmac_runtime_resume(struct device *dev) +{ + struct at_xdmac *atxdmac = dev_get_drvdata(dev); + + return clk_enable(atxdmac->clk); +} + static int at_xdmac_probe(struct platform_device *pdev) { struct at_xdmac *atxdmac; @@ -2071,6 +2236,7 @@ static int at_xdmac_probe(struct platform_device *pdev) atxdmac->regs = base; atxdmac->irq = irq; + atxdmac->dev = &pdev->dev; atxdmac->layout = of_device_get_match_data(&pdev->dev); if (!atxdmac->layout) @@ -2135,11 +2301,20 @@ static int at_xdmac_probe(struct platform_device *pdev) atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; - /* Disable all chans and interrupts. */ - at_xdmac_off(atxdmac); + platform_set_drvdata(pdev, atxdmac); + + pm_runtime_set_autosuspend_delay(&pdev->dev, 500); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); /* Init channels. 
*/ INIT_LIST_HEAD(&atxdmac->dma.channels); + + /* Disable all chans and interrupts. */ + at_xdmac_off(atxdmac); + for (i = 0; i < nr_channels; i++) { struct at_xdmac_chan *atchan = &atxdmac->chan[i]; @@ -2159,12 +2334,11 @@ static int at_xdmac_probe(struct platform_device *pdev) while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS)) cpu_relax(); } - platform_set_drvdata(pdev, atxdmac); ret = dma_async_device_register(&atxdmac->dma); if (ret) { dev_err(&pdev->dev, "fail to register DMA engine device\n"); - goto err_clk_disable; + goto err_pm_disable; } ret = of_dma_controller_register(pdev->dev.of_node, @@ -2179,10 +2353,18 @@ static int at_xdmac_probe(struct platform_device *pdev) at_xdmac_axi_config(pdev); + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + return 0; err_dma_unregister: dma_async_device_unregister(&atxdmac->dma); +err_pm_disable: + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); err_clk_disable: clk_disable_unprepare(atxdmac->clk); err_free_irq: @@ -2198,6 +2380,9 @@ static int at_xdmac_remove(struct platform_device *pdev) at_xdmac_off(atxdmac); of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&atxdmac->dma); + pm_runtime_disable(atxdmac->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); clk_disable_unprepare(atxdmac->clk); free_irq(atxdmac->irq, atxdmac); @@ -2215,6 +2400,8 @@ static int at_xdmac_remove(struct platform_device *pdev) static const struct dev_pm_ops __maybe_unused atmel_xdmac_dev_pm_ops = { .prepare = atmel_xdmac_prepare, SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume) + SET_RUNTIME_PM_OPS(atmel_xdmac_runtime_suspend, + atmel_xdmac_runtime_resume, NULL) }; static const struct of_device_id atmel_xdmac_dt_ids[] = { -- cgit v1.2.3 From 531d4dfcfd164f575d7b12c784b63c762da1fbf4 Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Thu, 17 Nov 2022 15:15:47 +0200 Subject: dmaengine: at_xdmac: remove empty line Remove empty line. Signed-off-by: Claudiu Beznea Link: https://lore.kernel.org/r/20221117131547.293044-4-claudiu.beznea@microchip.com Signed-off-by: Vinod Koul --- drivers/dma/at_xdmac.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index a9278bf4c17b..1f0fab180f8f 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -533,7 +533,6 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan, at_xdmac_chan_read(atchan, AT_XDMAC_CSA), at_xdmac_chan_read(atchan, AT_XDMAC_CDA), at_xdmac_chan_read(atchan, AT_XDMAC_CUBC)); - } static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx) -- cgit v1.2.3 From 4b23603a251d24022f2fa48ee67610eb245a4115 Mon Sep 17 00:00:00 2001 From: Tudor Ambarus Date: Thu, 10 Nov 2022 17:25:28 +0200 Subject: dmaengine: drivers: Use devm_platform_ioremap_resource() platform_get_resource() and devm_ioremap_resource() are wrapped up in the devm_platform_ioremap_resource() helper. Use the helper and get rid of the local variable for struct resource *. We now have a function call less. 
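The conversion is mechanical and identical in every file below; a minimal before/after sketch of the pattern (foo_probe() is a hypothetical driver probe used for illustration, not code from this series):

	static int foo_probe(struct platform_device *pdev)
	{
		void __iomem *base;

		/* Before: look up the first MEM resource, then map it:
		 *
		 *   struct resource *res;
		 *   res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		 *   base = devm_ioremap_resource(&pdev->dev, res);
		 */

		/* After: one helper performs both steps. */
		base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(base))
			return PTR_ERR(base);

		return 0;
	}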
Signed-off-by: Tudor Ambarus Acked-by: Jernej Skrabec Acked-by: Peter Ujfalusi Link: https://lore.kernel.org/r/20221110152528.7821-1-tudor.ambarus@microchip.com Signed-off-by: Vinod Koul --- drivers/dma/bcm2835-dma.c | 4 +--- drivers/dma/dma-axi-dmac.c | 4 +--- drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 4 +--- drivers/dma/fsl-edma.c | 8 +++----- drivers/dma/fsl-qdma.c | 10 +++------- drivers/dma/idma64.c | 4 +--- drivers/dma/img-mdc-dma.c | 4 +--- drivers/dma/imx-dma.c | 4 +--- drivers/dma/imx-sdma.c | 4 +--- drivers/dma/mcf-edma.c | 5 +---- drivers/dma/mediatek/mtk-hsdma.c | 4 +--- drivers/dma/mmp_pdma.c | 4 +--- drivers/dma/mmp_tdma.c | 4 +--- drivers/dma/moxart-dma.c | 4 +--- drivers/dma/mv_xor_v2.c | 7 ++----- drivers/dma/mxs-dma.c | 4 +--- drivers/dma/nbpfaxi.c | 4 +--- drivers/dma/pxa_dma.c | 4 +--- drivers/dma/qcom/bam_dma.c | 4 +--- drivers/dma/sf-pdma/sf-pdma.c | 4 +--- drivers/dma/sh/usb-dmac.c | 4 +--- drivers/dma/stm32-dmamux.c | 4 +--- drivers/dma/stm32-mdma.c | 4 +--- drivers/dma/sun4i-dma.c | 4 +--- drivers/dma/sun6i-dma.c | 4 +--- drivers/dma/tegra210-adma.c | 4 +--- drivers/dma/ti/cppi41.c | 10 +++------- drivers/dma/ti/omap-dma.c | 4 +--- drivers/dma/xilinx/zynqmp_dma.c | 4 +--- 29 files changed, 36 insertions(+), 100 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index 630dfbb01a40..0807fb9eb262 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -878,7 +878,6 @@ static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec, static int bcm2835_dma_probe(struct platform_device *pdev) { struct bcm2835_dmadev *od; - struct resource *res; void __iomem *base; int rc; int i, j; @@ -902,8 +901,7 @@ static int bcm2835_dma_probe(struct platform_device *pdev) dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(&pdev->dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index f30dabc99795..a812b9b00e6b 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -910,7 +910,6 @@ static int axi_dmac_probe(struct platform_device *pdev) { struct dma_device *dma_dev; struct axi_dmac *dmac; - struct resource *res; struct regmap *regmap; unsigned int version; int ret; @@ -925,8 +924,7 @@ static int axi_dmac_probe(struct platform_device *pdev) if (dmac->irq == 0) return -EINVAL; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - dmac->base = devm_ioremap_resource(&pdev->dev, res); + dmac->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dmac->base)) return PTR_ERR(dmac->base); diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index a183d93bd7e2..3dec8adfc4ea 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -1365,7 +1365,6 @@ static int dw_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct axi_dma_chip *chip; - struct resource *mem; struct dw_axi_dma *dw; struct dw_axi_dma_hcfg *hdata; u32 i; @@ -1391,8 +1390,7 @@ static int dw_probe(struct platform_device *pdev) if (chip->irq < 0) return chip->irq; - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - chip->regs = devm_ioremap_resource(chip->dev, mem); + chip->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(chip->regs)) return 
PTR_ERR(chip->regs); diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index 76cbf54aec58..e40769666e39 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -272,7 +272,6 @@ static int fsl_edma_probe(struct platform_device *pdev) const struct fsl_edma_drvdata *drvdata = NULL; struct fsl_edma_chan *fsl_chan; struct edma_regs *regs; - struct resource *res; int len, chans; int ret, i; @@ -298,8 +297,7 @@ static int fsl_edma_probe(struct platform_device *pdev) fsl_edma->n_chans = chans; mutex_init(&fsl_edma->fsl_edma_mutex); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - fsl_edma->membase = devm_ioremap_resource(&pdev->dev, res); + fsl_edma->membase = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(fsl_edma->membase)) return PTR_ERR(fsl_edma->membase); @@ -323,8 +321,8 @@ static int fsl_edma_probe(struct platform_device *pdev) for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) { char clkname[32]; - res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i); - fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res); + fsl_edma->muxbase[i] = devm_platform_ioremap_resource(pdev, + 1 + i); if (IS_ERR(fsl_edma->muxbase[i])) { /* on error: disable all previously enabled clks */ fsl_disable_clocks(fsl_edma, i); diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c index 045ead46ec8f..eddb2688f234 100644 --- a/drivers/dma/fsl-qdma.c +++ b/drivers/dma/fsl-qdma.c @@ -1119,7 +1119,6 @@ static int fsl_qdma_probe(struct platform_device *pdev) int ret, i; int blk_num, blk_off; u32 len, chans, queues; - struct resource *res; struct fsl_qdma_chan *fsl_chan; struct fsl_qdma_engine *fsl_qdma; struct device_node *np = pdev->dev.of_node; @@ -1183,18 +1182,15 @@ static int fsl_qdma_probe(struct platform_device *pdev) if (!fsl_qdma->status[i]) return -ENOMEM; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res); + fsl_qdma->ctrl_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(fsl_qdma->ctrl_base)) return PTR_ERR(fsl_qdma->ctrl_base); - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res); + fsl_qdma->status_base = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(fsl_qdma->status_base)) return PTR_ERR(fsl_qdma->status_base); - res = platform_get_resource(pdev, IORESOURCE_MEM, 2); - fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res); + fsl_qdma->block_base = devm_platform_ioremap_resource(pdev, 2); if (IS_ERR(fsl_qdma->block_base)) return PTR_ERR(fsl_qdma->block_base); fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma); diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index c33087c5cd02..cd9622f6e59c 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c @@ -627,7 +627,6 @@ static int idma64_platform_probe(struct platform_device *pdev) struct idma64_chip *chip; struct device *dev = &pdev->dev; struct device *sysdev = dev->parent; - struct resource *mem; int ret; chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); @@ -638,8 +637,7 @@ static int idma64_platform_probe(struct platform_device *pdev) if (chip->irq < 0) return chip->irq; - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - chip->regs = devm_ioremap_resource(dev, mem); + chip->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(chip->regs)) return PTR_ERR(chip->regs); diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c index e4ea107ce78c..ad084552640f 100644 --- a/drivers/dma/img-mdc-dma.c +++ 
b/drivers/dma/img-mdc-dma.c @@ -886,7 +886,6 @@ static int img_mdc_runtime_resume(struct device *dev) static int mdc_dma_probe(struct platform_device *pdev) { struct mdc_dma *mdma; - struct resource *res; unsigned int i; u32 val; int ret; @@ -898,8 +897,7 @@ static int mdc_dma_probe(struct platform_device *pdev) mdma->soc = of_device_get_match_data(&pdev->dev); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mdma->regs = devm_ioremap_resource(&pdev->dev, res); + mdma->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mdma->regs)) return PTR_ERR(mdma->regs); diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 65c6094ce063..80086977973f 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -1038,7 +1038,6 @@ static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec, static int __init imxdma_probe(struct platform_device *pdev) { struct imxdma_engine *imxdma; - struct resource *res; int ret, i; int irq, irq_err; @@ -1049,8 +1048,7 @@ static int __init imxdma_probe(struct platform_device *pdev) imxdma->dev = &pdev->dev; imxdma->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - imxdma->base = devm_ioremap_resource(&pdev->dev, res); + imxdma->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(imxdma->base)) return PTR_ERR(imxdma->base); diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index ab877ceeac3f..97396af129f8 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -2189,7 +2189,6 @@ static int sdma_probe(struct platform_device *pdev) const char *fw_name; int ret; int irq; - struct resource *iores; struct resource spba_res; int i; struct sdma_engine *sdma; @@ -2212,8 +2211,7 @@ static int sdma_probe(struct platform_device *pdev) if (irq < 0) return irq; - iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); - sdma->regs = devm_ioremap_resource(&pdev->dev, iores); + sdma->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(sdma->regs)) return PTR_ERR(sdma->regs); diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c index e12b754e6398..ebd8733f72ad 100644 --- a/drivers/dma/mcf-edma.c +++ b/drivers/dma/mcf-edma.c @@ -182,7 +182,6 @@ static int mcf_edma_probe(struct platform_device *pdev) struct fsl_edma_engine *mcf_edma; struct fsl_edma_chan *mcf_chan; struct edma_regs *regs; - struct resource *res; int ret, i, len, chans; pdata = dev_get_platdata(&pdev->dev); @@ -210,9 +209,7 @@ static int mcf_edma_probe(struct platform_device *pdev) mutex_init(&mcf_edma->fsl_edma_mutex); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - mcf_edma->membase = devm_ioremap_resource(&pdev->dev, res); + mcf_edma->membase = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mcf_edma->membase)) return PTR_ERR(mcf_edma->membase); diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c index f7717c44b887..69cc61c0b262 100644 --- a/drivers/dma/mediatek/mtk-hsdma.c +++ b/drivers/dma/mediatek/mtk-hsdma.c @@ -896,7 +896,6 @@ static int mtk_hsdma_probe(struct platform_device *pdev) struct mtk_hsdma_device *hsdma; struct mtk_hsdma_vchan *vc; struct dma_device *dd; - struct resource *res; int i, err; hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL); @@ -905,8 +904,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev) dd = &hsdma->ddev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hsdma->base = devm_ioremap_resource(&pdev->dev, res); + hsdma->base = devm_platform_ioremap_resource(pdev, 
0); if (IS_ERR(hsdma->base)) return PTR_ERR(hsdma->base); diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index e8d71b35593e..ebdfdcbb4f7a 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -1022,7 +1022,6 @@ static int mmp_pdma_probe(struct platform_device *op) struct mmp_pdma_device *pdev; const struct of_device_id *of_id; struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev); - struct resource *iores; int i, ret, irq = 0; int dma_channels = 0, irq_num = 0; const enum dma_slave_buswidth widths = @@ -1037,8 +1036,7 @@ static int mmp_pdma_probe(struct platform_device *op) spin_lock_init(&pdev->phy_lock); - iores = platform_get_resource(op, IORESOURCE_MEM, 0); - pdev->base = devm_ioremap_resource(pdev->dev, iores); + pdev->base = devm_platform_ioremap_resource(op, 0); if (IS_ERR(pdev->base)) return PTR_ERR(pdev->base); diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index a262e0eb4cc9..e956702932aa 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -639,7 +639,6 @@ static int mmp_tdma_probe(struct platform_device *pdev) enum mmp_tdma_type type; const struct of_device_id *of_id; struct mmp_tdma_device *tdev; - struct resource *iores; int i, ret; int irq = 0, irq_num = 0; int chan_num = TDMA_CHANNEL_NUM; @@ -663,8 +662,7 @@ static int mmp_tdma_probe(struct platform_device *pdev) irq_num++; } - iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); - tdev->base = devm_ioremap_resource(&pdev->dev, iores); + tdev->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(tdev->base)) return PTR_ERR(tdev->base); diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c index 7459382a8353..7565ad98ba66 100644 --- a/drivers/dma/moxart-dma.c +++ b/drivers/dma/moxart-dma.c @@ -563,7 +563,6 @@ static int moxart_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *node = dev->of_node; - struct resource *res; void __iomem *dma_base_addr; int ret, i; unsigned int irq; @@ -580,8 +579,7 @@ static int moxart_probe(struct platform_device *pdev) return -EINVAL; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - dma_base_addr = devm_ioremap_resource(dev, res); + dma_base_addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dma_base_addr)) return PTR_ERR(dma_base_addr); diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c index 113834e1167b..89790beba305 100644 --- a/drivers/dma/mv_xor_v2.c +++ b/drivers/dma/mv_xor_v2.c @@ -714,7 +714,6 @@ static int mv_xor_v2_resume(struct platform_device *dev) static int mv_xor_v2_probe(struct platform_device *pdev) { struct mv_xor_v2_device *xor_dev; - struct resource *res; int i, ret = 0; struct dma_device *dma_dev; struct mv_xor_v2_sw_desc *sw_desc; @@ -726,13 +725,11 @@ static int mv_xor_v2_probe(struct platform_device *pdev) if (!xor_dev) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - xor_dev->dma_base = devm_ioremap_resource(&pdev->dev, res); + xor_dev->dma_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(xor_dev->dma_base)) return PTR_ERR(xor_dev->dma_base); - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - xor_dev->glob_base = devm_ioremap_resource(&pdev->dev, res); + xor_dev->glob_base = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(xor_dev->glob_base)) return PTR_ERR(xor_dev->glob_base); diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index dc147cc2436e..acc4d53e4630 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -746,7 +746,6 @@ static int 
mxs_dma_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; const struct mxs_dma_type *dma_type; struct mxs_dma_engine *mxs_dma; - struct resource *iores; int ret, i; mxs_dma = devm_kzalloc(&pdev->dev, sizeof(*mxs_dma), GFP_KERNEL); @@ -763,8 +762,7 @@ static int mxs_dma_probe(struct platform_device *pdev) mxs_dma->type = dma_type->type; mxs_dma->dev_id = dma_type->id; - iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mxs_dma->base = devm_ioremap_resource(&pdev->dev, iores); + mxs_dma->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mxs_dma->base)) return PTR_ERR(mxs_dma->base); diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index a7063e9cd551..e72e8c10355e 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c @@ -1294,7 +1294,6 @@ static int nbpf_probe(struct platform_device *pdev) struct device_node *np = dev->of_node; struct nbpf_device *nbpf; struct dma_device *dma_dev; - struct resource *iomem; const struct nbpf_config *cfg; int num_channels; int ret, irq, eirq, i; @@ -1318,8 +1317,7 @@ static int nbpf_probe(struct platform_device *pdev) dma_dev = &nbpf->dma_dev; dma_dev->dev = dev; - iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - nbpf->base = devm_ioremap_resource(dev, iomem); + nbpf->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(nbpf->base)) return PTR_ERR(nbpf->base); diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 22a392fe6d32..1b046d9a3a26 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -1346,7 +1346,6 @@ static int pxad_probe(struct platform_device *op) const struct of_device_id *of_id; const struct dma_slave_map *slave_map = NULL; struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev); - struct resource *iores; int ret, dma_channels = 0, nb_requestors = 0, slave_map_cnt = 0; const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES | @@ -1358,8 +1357,7 @@ static int pxad_probe(struct platform_device *op) spin_lock_init(&pdev->phy_lock); - iores = platform_get_resource(op, IORESOURCE_MEM, 0); - pdev->base = devm_ioremap_resource(&op->dev, iores); + pdev->base = devm_platform_ioremap_resource(op, 0); if (IS_ERR(pdev->base)) return PTR_ERR(pdev->base); diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index 2ff787df513e..1e47d27e1f81 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c @@ -1237,7 +1237,6 @@ static int bam_dma_probe(struct platform_device *pdev) { struct bam_device *bdev; const struct of_device_id *match; - struct resource *iores; int ret, i; bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL); @@ -1254,8 +1253,7 @@ static int bam_dma_probe(struct platform_device *pdev) bdev->layout = match->data; - iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); - bdev->regs = devm_ioremap_resource(&pdev->dev, iores); + bdev->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(bdev->regs)) return PTR_ERR(bdev->regs); diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c index 6b524eb6bcf3..d7d2dacac830 100644 --- a/drivers/dma/sf-pdma/sf-pdma.c +++ b/drivers/dma/sf-pdma/sf-pdma.c @@ -494,7 +494,6 @@ static void sf_pdma_setup_chans(struct sf_pdma *pdma) static int sf_pdma_probe(struct platform_device *pdev) { struct sf_pdma *pdma; - struct resource *res; int ret, n_chans; const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES | @@ -519,8 +518,7 @@ static int sf_pdma_probe(struct platform_device 
*pdev) pdma->n_chans = n_chans; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pdma->membase = devm_ioremap_resource(&pdev->dev, res); + pdma->membase = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pdma->membase)) return PTR_ERR(pdma->membase); diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index 5edaeb89d1e6..b14cf350b669 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c @@ -768,7 +768,6 @@ static int usb_dmac_probe(struct platform_device *pdev) const enum dma_slave_buswidth widths = USB_DMAC_SLAVE_BUSWIDTH; struct dma_device *engine; struct usb_dmac *dmac; - struct resource *mem; unsigned int i; int ret; @@ -789,8 +788,7 @@ static int usb_dmac_probe(struct platform_device *pdev) return -ENOMEM; /* Request resources. */ - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - dmac->iomem = devm_ioremap_resource(&pdev->dev, mem); + dmac->iomem = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dmac->iomem)) return PTR_ERR(dmac->iomem); diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c index ee3cbbf51006..46b884d46188 100644 --- a/drivers/dma/stm32-dmamux.c +++ b/drivers/dma/stm32-dmamux.c @@ -179,7 +179,6 @@ static int stm32_dmamux_probe(struct platform_device *pdev) const struct of_device_id *match; struct device_node *dma_node; struct stm32_dmamux_data *stm32_dmamux; - struct resource *res; void __iomem *iomem; struct reset_control *rst; int i, count, ret; @@ -238,8 +237,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev) } pm_runtime_get_noresume(&pdev->dev); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - iomem = devm_ioremap_resource(&pdev->dev, res); + iomem = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(iomem)) return PTR_ERR(iomem); diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index b9d4c843635f..84e7f4f4a800 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c @@ -1580,7 +1580,6 @@ static int stm32_mdma_probe(struct platform_device *pdev) struct stm32_mdma_device *dmadev; struct dma_device *dd; struct device_node *of_node; - struct resource *res; struct reset_control *rst; u32 nr_channels, nr_requests; int i, count, ret; @@ -1622,8 +1621,7 @@ static int stm32_mdma_probe(struct platform_device *pdev) count); dmadev->nr_ahb_addr_masks = count; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - dmadev->base = devm_ioremap_resource(&pdev->dev, res); + dmadev->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dmadev->base)) return PTR_ERR(dmadev->base); diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c index f291b1b4db32..e86c8829513a 100644 --- a/drivers/dma/sun4i-dma.c +++ b/drivers/dma/sun4i-dma.c @@ -1144,15 +1144,13 @@ handle_pending: static int sun4i_dma_probe(struct platform_device *pdev) { struct sun4i_dma_dev *priv; - struct resource *res; int i, j, ret; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(&pdev->dev, res); + priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 30667d251e97..ebfd29888b2f 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -1284,7 +1284,6 @@ static int sun6i_dma_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct sun6i_dma_dev *sdc; - struct resource *res; int ret, i; sdc 
= devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL); @@ -1295,8 +1294,7 @@ static int sun6i_dma_probe(struct platform_device *pdev) if (!sdc->cfg) return -ENODEV; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - sdc->base = devm_ioremap_resource(&pdev->dev, res); + sdc->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(sdc->base)) return PTR_ERR(sdc->base); diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index ae39b52012b2..d1a84483f627 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -837,7 +837,6 @@ static int tegra_adma_probe(struct platform_device *pdev) { const struct tegra_adma_chip_data *cdata; struct tegra_adma *tdma; - struct resource *res; int ret, i; cdata = of_device_get_match_data(&pdev->dev); @@ -857,8 +856,7 @@ static int tegra_adma_probe(struct platform_device *pdev) tdma->nr_channels = cdata->nr_channels; platform_set_drvdata(pdev, tdma); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - tdma->base_addr = devm_ioremap_resource(&pdev->dev, res); + tdma->base_addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(tdma->base_addr)) return PTR_ERR(tdma->base_addr); diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c index 695915dba707..c3555cfb0681 100644 --- a/drivers/dma/ti/cppi41.c +++ b/drivers/dma/ti/cppi41.c @@ -1039,7 +1039,6 @@ static int cppi41_dma_probe(struct platform_device *pdev) struct cppi41_dd *cdd; struct device *dev = &pdev->dev; const struct cppi_glue_infos *glue_info; - struct resource *mem; int index; int irq; int ret; @@ -1072,18 +1071,15 @@ static int cppi41_dma_probe(struct platform_device *pdev) if (index < 0) return index; - mem = platform_get_resource(pdev, IORESOURCE_MEM, index); - cdd->ctrl_mem = devm_ioremap_resource(dev, mem); + cdd->ctrl_mem = devm_platform_ioremap_resource(pdev, index); if (IS_ERR(cdd->ctrl_mem)) return PTR_ERR(cdd->ctrl_mem); - mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 1); - cdd->sched_mem = devm_ioremap_resource(dev, mem); + cdd->sched_mem = devm_platform_ioremap_resource(pdev, index + 1); if (IS_ERR(cdd->sched_mem)) return PTR_ERR(cdd->sched_mem); - mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 2); - cdd->qmgr_mem = devm_ioremap_resource(dev, mem); + cdd->qmgr_mem = devm_platform_ioremap_resource(pdev, index + 2); if (IS_ERR(cdd->qmgr_mem)) return PTR_ERR(cdd->qmgr_mem); diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c index 27f5019bdc1e..02e1c08c596d 100644 --- a/drivers/dma/ti/omap-dma.c +++ b/drivers/dma/ti/omap-dma.c @@ -1658,7 +1658,6 @@ static int omap_dma_probe(struct platform_device *pdev) { const struct omap_dma_config *conf; struct omap_dmadev *od; - struct resource *res; int rc, i, irq; u32 val; @@ -1666,8 +1665,7 @@ static int omap_dma_probe(struct platform_device *pdev) if (!od) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - od->base = devm_ioremap_resource(&pdev->dev, res); + od->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(od->base)) return PTR_ERR(od->base); diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 21472a5d7636..ce359058c638 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -890,7 +890,6 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev, struct platform_device *pdev) { struct zynqmp_dma_chan *chan; - struct resource *res; struct device_node *node = pdev->dev.of_node; int err; @@ -900,8 +899,7 @@ static int zynqmp_dma_chan_probe(struct 
zynqmp_dma_device *zdev, chan->dev = zdev->dev; chan->zdev = zdev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - chan->regs = devm_ioremap_resource(&pdev->dev, res); + chan->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(chan->regs)) return PTR_ERR(chan->regs); -- cgit v1.2.3 From 17ce252266c7f016ece026492c45838f852ddc79 Mon Sep 17 00:00:00 2001 From: Lizhi Hou Date: Thu, 19 Jan 2023 08:32:05 -0800 Subject: dmaengine: xilinx: xdma: Add xilinx xdma driver Add driver to enable PCIe board which uses XDMA (the DMA/Bridge Subsystem for PCI Express). For example, Xilinx Alveo PCIe devices. https://www.xilinx.com/products/boards-and-kits/alveo.html The XDMA engine support up to 4 Host to Card (H2C) and 4 Card to Host (C2H) channels. Memory transfers are specified on a per-channel basis in descriptor linked lists, which the DMA fetches from host memory and processes. Events such as descriptor completion and errors are signaled using interrupts. The hardware detail is provided by https://docs.xilinx.com/r/en-US/pg195-pcie-dma/Introduction This driver implements dmaengine APIs. - probe the available DMA channels - use dma_slave_map for channel lookup - use virtual channel to manage dmaengine tx descriptors - implement device_prep_slave_sg callback to handle host scatter gather list - implement device_config to config device address for DMA transfer Signed-off-by: Lizhi Hou Signed-off-by: Sonal Santan Signed-off-by: Max Zhen Signed-off-by: Brian Xu Tested-by: Martin Tuma Link: https://lore.kernel.org/r/1674145926-29449-2-git-send-email-lizhi.hou@amd.com Signed-off-by: Vinod Koul --- MAINTAINERS | 10 + drivers/dma/Kconfig | 14 + drivers/dma/xilinx/Makefile | 1 + drivers/dma/xilinx/xdma-regs.h | 166 ++++++ drivers/dma/xilinx/xdma.c | 893 +++++++++++++++++++++++++++++++++ include/linux/platform_data/amd_xdma.h | 34 ++ 6 files changed, 1118 insertions(+) create mode 100644 drivers/dma/xilinx/xdma-regs.h create mode 100644 drivers/dma/xilinx/xdma.c create mode 100644 include/linux/platform_data/amd_xdma.h (limited to 'drivers/dma') diff --git a/MAINTAINERS b/MAINTAINERS index f61eb221415b..b768ad4e901b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -22889,6 +22889,16 @@ F: Documentation/devicetree/bindings/media/xilinx/ F: drivers/media/platform/xilinx/ F: include/uapi/linux/xilinx-v4l2-controls.h +XILINX XDMA DRIVER +M: Lizhi Hou +M: Brian Xu +M: Raj Kumar Rampelli +L: dmaengine@vger.kernel.org +S: Supported +F: drivers/dma/xilinx/xdma-regs.h +F: drivers/dma/xilinx/xdma.c +F: include/linux/platform_data/amd_xdma.h + XILINX ZYNQMP DPDMA DRIVER M: Hyun Kwon M: Laurent Pinchart diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 7b95f07c6f1a..472dc315b889 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -728,6 +728,20 @@ config XILINX_DMA the scatter gather interface with multiple channels independent configuration support. +config XILINX_XDMA + tristate "Xilinx DMA/Bridge Subsystem DMA Engine" + depends on HAS_IOMEM + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + select REGMAP_MMIO + help + Enable support for Xilinx DMA/Bridge Subsystem DMA engine. The DMA + provides high performance block data movement between Host memory + and the DMA subsystem. These direct memory transfers can be both in + the Host to Card (H2C) and Card to Host (C2H) transfers. + The core also provides up to 16 user interrupt wires that generate + interrupts to the host. 
+ config XILINX_ZYNQMP_DMA tristate "Xilinx ZynqMP DMA Engine" depends on ARCH_ZYNQ || MICROBLAZE || ARM64 || COMPILE_TEST diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile index 767bb45f641f..ebaa93644c94 100644 --- a/drivers/dma/xilinx/Makefile +++ b/drivers/dma/xilinx/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_XILINX_DMA) += xilinx_dma.o +obj-$(CONFIG_XILINX_XDMA) += xdma.o obj-$(CONFIG_XILINX_ZYNQMP_DMA) += zynqmp_dma.o obj-$(CONFIG_XILINX_ZYNQMP_DPDMA) += xilinx_dpdma.o diff --git a/drivers/dma/xilinx/xdma-regs.h b/drivers/dma/xilinx/xdma-regs.h new file mode 100644 index 000000000000..dd98b4526b90 --- /dev/null +++ b/drivers/dma/xilinx/xdma-regs.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved. + * Copyright (C) 2022, Advanced Micro Devices, Inc. + */ + +#ifndef __DMA_XDMA_REGS_H +#define __DMA_XDMA_REGS_H + +/* The length of register space exposed to host */ +#define XDMA_REG_SPACE_LEN 65536 + +/* + * maximum number of DMA channels for each direction: + * Host to Card (H2C) or Card to Host (C2H) + */ +#define XDMA_MAX_CHANNELS 4 + +/* + * macros to define the number of descriptor blocks can be used in one + * DMA transfer request. + * the DMA engine uses a linked list of descriptor blocks that specify the + * source, destination, and length of the DMA transfers. + */ +#define XDMA_DESC_BLOCK_NUM BIT(7) +#define XDMA_DESC_BLOCK_MASK (XDMA_DESC_BLOCK_NUM - 1) + +/* descriptor definitions */ +#define XDMA_DESC_ADJACENT 32 +#define XDMA_DESC_ADJACENT_MASK (XDMA_DESC_ADJACENT - 1) +#define XDMA_DESC_ADJACENT_BITS GENMASK(13, 8) +#define XDMA_DESC_MAGIC 0xad4bUL +#define XDMA_DESC_MAGIC_BITS GENMASK(31, 16) +#define XDMA_DESC_FLAGS_BITS GENMASK(7, 0) +#define XDMA_DESC_STOPPED BIT(0) +#define XDMA_DESC_COMPLETED BIT(1) +#define XDMA_DESC_BLEN_BITS 28 +#define XDMA_DESC_BLEN_MAX (BIT(XDMA_DESC_BLEN_BITS) - PAGE_SIZE) + +/* macros to construct the descriptor control word */ +#define XDMA_DESC_CONTROL(adjacent, flag) \ + (FIELD_PREP(XDMA_DESC_MAGIC_BITS, XDMA_DESC_MAGIC) | \ + FIELD_PREP(XDMA_DESC_ADJACENT_BITS, (adjacent) - 1) | \ + FIELD_PREP(XDMA_DESC_FLAGS_BITS, (flag))) +#define XDMA_DESC_CONTROL_LAST \ + XDMA_DESC_CONTROL(1, XDMA_DESC_STOPPED | XDMA_DESC_COMPLETED) + +/* + * Descriptor for a single contiguous memory block transfer. + * + * Multiple descriptors are linked by means of the next pointer. An additional + * extra adjacent number gives the amount of extra contiguous descriptors. + * + * The descriptors are in root complex memory, and the bytes in the 32-bit + * words must be in little-endian byte ordering. 
+ */ +struct xdma_hw_desc { + __le32 control; + __le32 bytes; + __le64 src_addr; + __le64 dst_addr; + __le64 next_desc; +}; + +#define XDMA_DESC_SIZE sizeof(struct xdma_hw_desc) +#define XDMA_DESC_BLOCK_SIZE (XDMA_DESC_SIZE * XDMA_DESC_ADJACENT) +#define XDMA_DESC_BLOCK_ALIGN 4096 + +/* + * Channel registers + */ +#define XDMA_CHAN_IDENTIFIER 0x0 +#define XDMA_CHAN_CONTROL 0x4 +#define XDMA_CHAN_CONTROL_W1S 0x8 +#define XDMA_CHAN_CONTROL_W1C 0xc +#define XDMA_CHAN_STATUS 0x40 +#define XDMA_CHAN_COMPLETED_DESC 0x48 +#define XDMA_CHAN_ALIGNMENTS 0x4c +#define XDMA_CHAN_INTR_ENABLE 0x90 +#define XDMA_CHAN_INTR_ENABLE_W1S 0x94 +#define XDMA_CHAN_INTR_ENABLE_W1C 0x9c + +#define XDMA_CHAN_STRIDE 0x100 +#define XDMA_CHAN_H2C_OFFSET 0x0 +#define XDMA_CHAN_C2H_OFFSET 0x1000 +#define XDMA_CHAN_H2C_TARGET 0x0 +#define XDMA_CHAN_C2H_TARGET 0x1 + +/* macro to check if channel is available */ +#define XDMA_CHAN_MAGIC 0x1fc0 +#define XDMA_CHAN_CHECK_TARGET(id, target) \ + (((u32)(id) >> 16) == XDMA_CHAN_MAGIC + (target)) + +/* bits of the channel control register */ +#define CHAN_CTRL_RUN_STOP BIT(0) +#define CHAN_CTRL_IE_DESC_STOPPED BIT(1) +#define CHAN_CTRL_IE_DESC_COMPLETED BIT(2) +#define CHAN_CTRL_IE_DESC_ALIGN_MISMATCH BIT(3) +#define CHAN_CTRL_IE_MAGIC_STOPPED BIT(4) +#define CHAN_CTRL_IE_IDLE_STOPPED BIT(6) +#define CHAN_CTRL_IE_READ_ERROR GENMASK(13, 9) +#define CHAN_CTRL_IE_DESC_ERROR GENMASK(23, 19) +#define CHAN_CTRL_NON_INCR_ADDR BIT(25) +#define CHAN_CTRL_POLL_MODE_WB BIT(26) + +#define CHAN_CTRL_START (CHAN_CTRL_RUN_STOP | \ + CHAN_CTRL_IE_DESC_STOPPED | \ + CHAN_CTRL_IE_DESC_COMPLETED | \ + CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \ + CHAN_CTRL_IE_MAGIC_STOPPED | \ + CHAN_CTRL_IE_READ_ERROR | \ + CHAN_CTRL_IE_DESC_ERROR) + +/* bits of the channel interrupt enable mask */ +#define CHAN_IM_DESC_ERROR BIT(19) +#define CHAN_IM_READ_ERROR BIT(9) +#define CHAN_IM_IDLE_STOPPED BIT(6) +#define CHAN_IM_MAGIC_STOPPED BIT(4) +#define CHAN_IM_DESC_COMPLETED BIT(2) +#define CHAN_IM_DESC_STOPPED BIT(1) + +#define CHAN_IM_ALL (CHAN_IM_DESC_ERROR | CHAN_IM_READ_ERROR | \ + CHAN_IM_IDLE_STOPPED | CHAN_IM_MAGIC_STOPPED | \ + CHAN_IM_DESC_COMPLETED | CHAN_IM_DESC_STOPPED) + +/* + * Channel SGDMA registers + */ +#define XDMA_SGDMA_IDENTIFIER 0x4000 +#define XDMA_SGDMA_DESC_LO 0x4080 +#define XDMA_SGDMA_DESC_HI 0x4084 +#define XDMA_SGDMA_DESC_ADJ 0x4088 +#define XDMA_SGDMA_DESC_CREDIT 0x408c + +/* bits of the SG DMA control register */ +#define XDMA_CTRL_RUN_STOP BIT(0) +#define XDMA_CTRL_IE_DESC_STOPPED BIT(1) +#define XDMA_CTRL_IE_DESC_COMPLETED BIT(2) +#define XDMA_CTRL_IE_DESC_ALIGN_MISMATCH BIT(3) +#define XDMA_CTRL_IE_MAGIC_STOPPED BIT(4) +#define XDMA_CTRL_IE_IDLE_STOPPED BIT(6) +#define XDMA_CTRL_IE_READ_ERROR GENMASK(13, 9) +#define XDMA_CTRL_IE_DESC_ERROR GENMASK(23, 19) +#define XDMA_CTRL_NON_INCR_ADDR BIT(25) +#define XDMA_CTRL_POLL_MODE_WB BIT(26) + +/* + * interrupt registers + */ +#define XDMA_IRQ_IDENTIFIER 0x2000 +#define XDMA_IRQ_USER_INT_EN 0x2004 +#define XDMA_IRQ_USER_INT_EN_W1S 0x2008 +#define XDMA_IRQ_USER_INT_EN_W1C 0x200c +#define XDMA_IRQ_CHAN_INT_EN 0x2010 +#define XDMA_IRQ_CHAN_INT_EN_W1S 0x2014 +#define XDMA_IRQ_CHAN_INT_EN_W1C 0x2018 +#define XDMA_IRQ_USER_INT_REQ 0x2040 +#define XDMA_IRQ_CHAN_INT_REQ 0x2044 +#define XDMA_IRQ_USER_INT_PEND 0x2048 +#define XDMA_IRQ_CHAN_INT_PEND 0x204c +#define XDMA_IRQ_USER_VEC_NUM 0x2080 +#define XDMA_IRQ_CHAN_VEC_NUM 0x20a0 + +#define XDMA_IRQ_VEC_SHIFT 8 + +#endif /* __DMA_XDMA_REGS_H */ diff --git a/drivers/dma/xilinx/xdma.c 
b/drivers/dma/xilinx/xdma.c new file mode 100644 index 000000000000..48efb75ef9b4 --- /dev/null +++ b/drivers/dma/xilinx/xdma.c @@ -0,0 +1,893 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * DMA driver for Xilinx DMA/Bridge Subsystem + * + * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved. + * Copyright (C) 2022, Advanced Micro Devices, Inc. + */ + +/* + * The DMA/Bridge Subsystem for PCI Express allows for the movement of data + * between Host memory and the DMA subsystem. It does this by operating on + * 'descriptors' that contain information about the source, destination and + * amount of data to transfer. These direct memory transfers can be both in + * the Host to Card (H2C) and Card to Host (C2H) transfers. The DMA can be + * configured to have a single AXI4 Master interface shared by all channels + * or one AXI4-Stream interface for each channel enabled. Memory transfers are + * specified on a per-channel basis in descriptor linked lists, which the DMA + * fetches from host memory and processes. Events such as descriptor completion + * and errors are signaled using interrupts. The core also provides up to 16 + * user interrupt wires that generate interrupts to the host. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../virt-dma.h" +#include "xdma-regs.h" + +/* mmio regmap config for all XDMA registers */ +static const struct regmap_config xdma_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = XDMA_REG_SPACE_LEN, +}; + +/** + * struct xdma_desc_block - Descriptor block + * @virt_addr: Virtual address of block start + * @dma_addr: DMA address of block start + */ +struct xdma_desc_block { + void *virt_addr; + dma_addr_t dma_addr; +}; + +/** + * struct xdma_chan - Driver specific DMA channel structure + * @vchan: Virtual channel + * @xdev_hdl: Pointer to DMA device structure + * @base: Offset of channel registers + * @desc_pool: Descriptor pool + * @busy: Busy flag of the channel + * @dir: Transferring direction of the channel + * @cfg: Transferring config of the channel + * @irq: IRQ assigned to the channel + */ +struct xdma_chan { + struct virt_dma_chan vchan; + void *xdev_hdl; + u32 base; + struct dma_pool *desc_pool; + bool busy; + enum dma_transfer_direction dir; + struct dma_slave_config cfg; + u32 irq; +}; + +/** + * struct xdma_desc - DMA desc structure + * @vdesc: Virtual DMA descriptor + * @chan: DMA channel pointer + * @dir: Transferring direction of the request + * @dev_addr: Physical address on DMA device side + * @desc_blocks: Hardware descriptor blocks + * @dblk_num: Number of hardware descriptor blocks + * @desc_num: Number of hardware descriptors + * @completed_desc_num: Completed hardware descriptors + */ +struct xdma_desc { + struct virt_dma_desc vdesc; + struct xdma_chan *chan; + enum dma_transfer_direction dir; + u64 dev_addr; + struct xdma_desc_block *desc_blocks; + u32 dblk_num; + u32 desc_num; + u32 completed_desc_num; +}; + +#define XDMA_DEV_STATUS_REG_DMA BIT(0) +#define XDMA_DEV_STATUS_INIT_MSIX BIT(1) + +/** + * struct xdma_device - DMA device structure + * @pdev: Platform device pointer + * @dma_dev: DMA device structure + * @rmap: MMIO regmap for DMA registers + * @h2c_chans: Host to Card channels + * @c2h_chans: Card to Host channels + * @h2c_chan_num: Number of H2C channels + * @c2h_chan_num: Number of C2H channels + * @irq_start: Start IRQ assigned to device + * @irq_num: Number of IRQ assigned to device + * @status: Initialization 
status + */ +struct xdma_device { + struct platform_device *pdev; + struct dma_device dma_dev; + struct regmap *rmap; + struct xdma_chan *h2c_chans; + struct xdma_chan *c2h_chans; + u32 h2c_chan_num; + u32 c2h_chan_num; + u32 irq_start; + u32 irq_num; + u32 status; +}; + +#define xdma_err(xdev, fmt, args...) \ + dev_err(&(xdev)->pdev->dev, fmt, ##args) +#define XDMA_CHAN_NUM(_xd) ({ \ + typeof(_xd) (xd) = (_xd); \ + ((xd)->h2c_chan_num + (xd)->c2h_chan_num); }) + +/* Get the last desc in a desc block */ +static inline void *xdma_blk_last_desc(struct xdma_desc_block *block) +{ + return block->virt_addr + (XDMA_DESC_ADJACENT - 1) * XDMA_DESC_SIZE; +} + +/** + * xdma_link_desc_blocks - Link descriptor blocks for DMA transfer + * @sw_desc: Tx descriptor pointer + */ +static void xdma_link_desc_blocks(struct xdma_desc *sw_desc) +{ + struct xdma_desc_block *block; + u32 last_blk_desc, desc_control; + struct xdma_hw_desc *desc; + int i; + + desc_control = XDMA_DESC_CONTROL(XDMA_DESC_ADJACENT, 0); + for (i = 1; i < sw_desc->dblk_num; i++) { + block = &sw_desc->desc_blocks[i - 1]; + desc = xdma_blk_last_desc(block); + + if (!(i & XDMA_DESC_BLOCK_MASK)) { + desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST); + continue; + } + desc->control = cpu_to_le32(desc_control); + desc->next_desc = cpu_to_le64(block[1].dma_addr); + } + + /* update the last block */ + last_blk_desc = (sw_desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK; + if (((sw_desc->dblk_num - 1) & XDMA_DESC_BLOCK_MASK) > 0) { + block = &sw_desc->desc_blocks[sw_desc->dblk_num - 2]; + desc = xdma_blk_last_desc(block); + desc_control = XDMA_DESC_CONTROL(last_blk_desc + 1, 0); + desc->control = cpu_to_le32(desc_control); + } + + block = &sw_desc->desc_blocks[sw_desc->dblk_num - 1]; + desc = block->virt_addr + last_blk_desc * XDMA_DESC_SIZE; + desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST); +} + +static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan) +{ + return container_of(chan, struct xdma_chan, vchan.chan); +} + +static inline struct xdma_desc *to_xdma_desc(struct virt_dma_desc *vdesc) +{ + return container_of(vdesc, struct xdma_desc, vdesc); +} + +/** + * xdma_channel_init - Initialize DMA channel registers + * @chan: DMA channel pointer + */ +static int xdma_channel_init(struct xdma_chan *chan) +{ + struct xdma_device *xdev = chan->xdev_hdl; + int ret; + + ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_CONTROL_W1C, + CHAN_CTRL_NON_INCR_ADDR); + if (ret) + return ret; + + ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_INTR_ENABLE, + CHAN_IM_ALL); + if (ret) + return ret; + + return 0; +} + +/** + * xdma_free_desc - Free descriptor + * @vdesc: Virtual DMA descriptor + */ +static void xdma_free_desc(struct virt_dma_desc *vdesc) +{ + struct xdma_desc *sw_desc; + int i; + + sw_desc = to_xdma_desc(vdesc); + for (i = 0; i < sw_desc->dblk_num; i++) { + if (!sw_desc->desc_blocks[i].virt_addr) + break; + dma_pool_free(sw_desc->chan->desc_pool, + sw_desc->desc_blocks[i].virt_addr, + sw_desc->desc_blocks[i].dma_addr); + } + kfree(sw_desc->desc_blocks); + kfree(sw_desc); +} + +/** + * xdma_alloc_desc - Allocate descriptor + * @chan: DMA channel pointer + * @desc_num: Number of hardware descriptors + */ +static struct xdma_desc * +xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num) +{ + struct xdma_desc *sw_desc; + struct xdma_hw_desc *desc; + dma_addr_t dma_addr; + u32 dblk_num; + void *addr; + int i, j; + + sw_desc = kzalloc(sizeof(*sw_desc), GFP_NOWAIT); + if (!sw_desc) + return NULL; + + sw_desc->chan = chan; + 
sw_desc->desc_num = desc_num; + dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT); + sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks), + GFP_NOWAIT); + if (!sw_desc->desc_blocks) + goto failed; + + sw_desc->dblk_num = dblk_num; + for (i = 0; i < sw_desc->dblk_num; i++) { + addr = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &dma_addr); + if (!addr) + goto failed; + + sw_desc->desc_blocks[i].virt_addr = addr; + sw_desc->desc_blocks[i].dma_addr = dma_addr; + for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++) + desc[j].control = cpu_to_le32(XDMA_DESC_CONTROL(1, 0)); + } + + xdma_link_desc_blocks(sw_desc); + + return sw_desc; + +failed: + xdma_free_desc(&sw_desc->vdesc); + return NULL; +} + +/** + * xdma_xfer_start - Start DMA transfer + * @xdma_chan: DMA channel pointer + */ +static int xdma_xfer_start(struct xdma_chan *xchan) +{ + struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan); + struct xdma_device *xdev = xchan->xdev_hdl; + struct xdma_desc_block *block; + u32 val, completed_blocks; + struct xdma_desc *desc; + int ret; + + /* + * check if there is not any submitted descriptor or channel is busy. + * vchan lock should be held where this function is called. + */ + if (!vd || xchan->busy) + return -EINVAL; + + /* clear run stop bit to get ready for transfer */ + ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C, + CHAN_CTRL_RUN_STOP); + if (ret) + return ret; + + desc = to_xdma_desc(vd); + if (desc->dir != xchan->dir) { + xdma_err(xdev, "incorrect request direction"); + return -EINVAL; + } + + /* set DMA engine to the first descriptor block */ + completed_blocks = desc->completed_desc_num / XDMA_DESC_ADJACENT; + block = &desc->desc_blocks[completed_blocks]; + val = lower_32_bits(block->dma_addr); + ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_LO, val); + if (ret) + return ret; + + val = upper_32_bits(block->dma_addr); + ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_HI, val); + if (ret) + return ret; + + if (completed_blocks + 1 == desc->dblk_num) + val = (desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK; + else + val = XDMA_DESC_ADJACENT - 1; + ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_ADJ, val); + if (ret) + return ret; + + /* kick off DMA transfer */ + ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL, + CHAN_CTRL_START); + if (ret) + return ret; + + xchan->busy = true; + return 0; +} + +/** + * xdma_alloc_channels - Detect and allocate DMA channels + * @xdev: DMA device pointer + * @dir: Channel direction + */ +static int xdma_alloc_channels(struct xdma_device *xdev, + enum dma_transfer_direction dir) +{ + struct xdma_platdata *pdata = dev_get_platdata(&xdev->pdev->dev); + struct xdma_chan **chans, *xchan; + u32 base, identifier, target; + u32 *chan_num; + int i, j, ret; + + if (dir == DMA_MEM_TO_DEV) { + base = XDMA_CHAN_H2C_OFFSET; + target = XDMA_CHAN_H2C_TARGET; + chans = &xdev->h2c_chans; + chan_num = &xdev->h2c_chan_num; + } else if (dir == DMA_DEV_TO_MEM) { + base = XDMA_CHAN_C2H_OFFSET; + target = XDMA_CHAN_C2H_TARGET; + chans = &xdev->c2h_chans; + chan_num = &xdev->c2h_chan_num; + } else { + xdma_err(xdev, "invalid direction specified"); + return -EINVAL; + } + + /* detect number of available DMA channels */ + for (i = 0, *chan_num = 0; i < pdata->max_dma_channels; i++) { + ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE, + &identifier); + if (ret) + return ret; + + /* check if it is available DMA channel */ + if (XDMA_CHAN_CHECK_TARGET(identifier, target)) 
+ (*chan_num)++; + } + + if (!*chan_num) { + xdma_err(xdev, "does not probe any channel"); + return -EINVAL; + } + + *chans = devm_kcalloc(&xdev->pdev->dev, *chan_num, sizeof(**chans), + GFP_KERNEL); + if (!*chans) + return -ENOMEM; + + for (i = 0, j = 0; i < pdata->max_dma_channels; i++) { + ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE, + &identifier); + if (ret) + return ret; + + if (!XDMA_CHAN_CHECK_TARGET(identifier, target)) + continue; + + if (j == *chan_num) { + xdma_err(xdev, "invalid channel number"); + return -EIO; + } + + /* init channel structure and hardware */ + xchan = &(*chans)[j]; + xchan->xdev_hdl = xdev; + xchan->base = base + i * XDMA_CHAN_STRIDE; + xchan->dir = dir; + + ret = xdma_channel_init(xchan); + if (ret) + return ret; + xchan->vchan.desc_free = xdma_free_desc; + vchan_init(&xchan->vchan, &xdev->dma_dev); + + j++; + } + + dev_info(&xdev->pdev->dev, "configured %d %s channels", j, + (dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H"); + + return 0; +} + +/** + * xdma_issue_pending - Issue pending transactions + * @chan: DMA channel pointer + */ +static void xdma_issue_pending(struct dma_chan *chan) +{ + struct xdma_chan *xdma_chan = to_xdma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&xdma_chan->vchan.lock, flags); + if (vchan_issue_pending(&xdma_chan->vchan)) + xdma_xfer_start(xdma_chan); + spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags); +} + +/** + * xdma_prep_device_sg - prepare a descriptor for a DMA transaction + * @chan: DMA channel pointer + * @sgl: Transfer scatter gather list + * @sg_len: Length of scatter gather list + * @dir: Transfer direction + * @flags: transfer ack flags + * @context: APP words of the descriptor + */ +static struct dma_async_tx_descriptor * +xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction dir, + unsigned long flags, void *context) +{ + struct xdma_chan *xdma_chan = to_xdma_chan(chan); + struct dma_async_tx_descriptor *tx_desc; + u32 desc_num = 0, i, len, rest; + struct xdma_desc_block *dblk; + struct xdma_hw_desc *desc; + struct xdma_desc *sw_desc; + u64 dev_addr, *src, *dst; + struct scatterlist *sg; + u64 addr; + + for_each_sg(sgl, sg, sg_len, i) + desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX); + + sw_desc = xdma_alloc_desc(xdma_chan, desc_num); + if (!sw_desc) + return NULL; + sw_desc->dir = dir; + + if (dir == DMA_MEM_TO_DEV) { + dev_addr = xdma_chan->cfg.dst_addr; + src = &addr; + dst = &dev_addr; + } else { + dev_addr = xdma_chan->cfg.src_addr; + src = &dev_addr; + dst = &addr; + } + + dblk = sw_desc->desc_blocks; + desc = dblk->virt_addr; + desc_num = 1; + for_each_sg(sgl, sg, sg_len, i) { + addr = sg_dma_address(sg); + rest = sg_dma_len(sg); + + do { + len = min_t(u32, rest, XDMA_DESC_BLEN_MAX); + /* set hardware descriptor */ + desc->bytes = cpu_to_le32(len); + desc->src_addr = cpu_to_le64(*src); + desc->dst_addr = cpu_to_le64(*dst); + + if (!(desc_num & XDMA_DESC_ADJACENT_MASK)) { + dblk++; + desc = dblk->virt_addr; + } else { + desc++; + } + + desc_num++; + dev_addr += len; + addr += len; + rest -= len; + } while (rest); + } + + tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags); + if (!tx_desc) + goto failed; + + return tx_desc; + +failed: + xdma_free_desc(&sw_desc->vdesc); + + return NULL; +} + +/** + * xdma_device_config - Configure the DMA channel + * @chan: DMA channel + * @cfg: channel configuration + */ +static int xdma_device_config(struct dma_chan *chan, + struct dma_slave_config *cfg) +{ + 
struct xdma_chan *xdma_chan = to_xdma_chan(chan); + + memcpy(&xdma_chan->cfg, cfg, sizeof(*cfg)); + + return 0; +} + +/** + * xdma_free_chan_resources - Free channel resources + * @chan: DMA channel + */ +static void xdma_free_chan_resources(struct dma_chan *chan) +{ + struct xdma_chan *xdma_chan = to_xdma_chan(chan); + + vchan_free_chan_resources(&xdma_chan->vchan); + dma_pool_destroy(xdma_chan->desc_pool); + xdma_chan->desc_pool = NULL; +} + +/** + * xdma_alloc_chan_resources - Allocate channel resources + * @chan: DMA channel + */ +static int xdma_alloc_chan_resources(struct dma_chan *chan) +{ + struct xdma_chan *xdma_chan = to_xdma_chan(chan); + struct xdma_device *xdev = xdma_chan->xdev_hdl; + struct device *dev = xdev->dma_dev.dev; + + while (dev && !dev_is_pci(dev)) + dev = dev->parent; + if (!dev) { + xdma_err(xdev, "unable to find pci device"); + return -EINVAL; + } + + xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan), + dev, XDMA_DESC_BLOCK_SIZE, + XDMA_DESC_BLOCK_ALIGN, 0); + if (!xdma_chan->desc_pool) { + xdma_err(xdev, "unable to allocate descriptor pool"); + return -ENOMEM; + } + + return 0; +} + +/** + * xdma_channel_isr - XDMA channel interrupt handler + * @irq: IRQ number + * @dev_id: Pointer to the DMA channel structure + */ +static irqreturn_t xdma_channel_isr(int irq, void *dev_id) +{ + struct xdma_chan *xchan = dev_id; + u32 complete_desc_num = 0; + struct xdma_device *xdev; + struct virt_dma_desc *vd; + struct xdma_desc *desc; + int ret; + + spin_lock(&xchan->vchan.lock); + + /* get submitted request */ + vd = vchan_next_desc(&xchan->vchan); + if (!vd) + goto out; + + xchan->busy = false; + desc = to_xdma_desc(vd); + xdev = xchan->xdev_hdl; + + ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC, + &complete_desc_num); + if (ret) + goto out; + + desc->completed_desc_num += complete_desc_num; + /* + * if all data blocks are transferred, remove and complete the request + */ + if (desc->completed_desc_num == desc->desc_num) { + list_del(&vd->node); + vchan_cookie_complete(vd); + goto out; + } + + if (desc->completed_desc_num > desc->desc_num || + complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) + goto out; + + /* transfer the rest of data */ + xdma_xfer_start(xchan); + +out: + spin_unlock(&xchan->vchan.lock); + return IRQ_HANDLED; +} + +/** + * xdma_irq_fini - Uninitialize IRQ + * @xdev: DMA device pointer + */ +static void xdma_irq_fini(struct xdma_device *xdev) +{ + int i; + + /* disable interrupt */ + regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1C, ~0); + + /* free irq handler */ + for (i = 0; i < xdev->h2c_chan_num; i++) + free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]); + + for (i = 0; i < xdev->c2h_chan_num; i++) + free_irq(xdev->c2h_chans[i].irq, &xdev->c2h_chans[i]); +} + +/** + * xdma_set_vector_reg - configure hardware IRQ registers + * @xdev: DMA device pointer + * @vec_tbl_start: Start of IRQ registers + * @irq_start: Start of IRQ + * @irq_num: Number of IRQ + */ +static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start, + u32 irq_start, u32 irq_num) +{ + u32 shift, i, val = 0; + int ret; + + /* Each IRQ register is 32 bit and contains 4 IRQs */ + while (irq_num > 0) { + for (i = 0; i < 4; i++) { + shift = XDMA_IRQ_VEC_SHIFT * i; + val |= irq_start << shift; + irq_start++; + irq_num--; + } + + /* write IRQ register */ + ret = regmap_write(xdev->rmap, vec_tbl_start, val); + if (ret) + return ret; + vec_tbl_start += sizeof(u32); + val = 0; + } + + return 0; +} + +/** + * xdma_irq_init - 
initialize IRQs + * @xdev: DMA device pointer + */ +static int xdma_irq_init(struct xdma_device *xdev) +{ + u32 irq = xdev->irq_start; + int i, j, ret; + + /* return failure if there are not enough IRQs */ + if (xdev->irq_num < XDMA_CHAN_NUM(xdev)) { + xdma_err(xdev, "not enough irq"); + return -EINVAL; + } + + /* setup H2C interrupt handler */ + for (i = 0; i < xdev->h2c_chan_num; i++) { + ret = request_irq(irq, xdma_channel_isr, 0, + "xdma-h2c-channel", &xdev->h2c_chans[i]); + if (ret) { + xdma_err(xdev, "H2C channel%d request irq%d failed: %d", + i, irq, ret); + goto failed_init_h2c; + } + xdev->h2c_chans[i].irq = irq; + irq++; + } + + /* setup C2H interrupt handler */ + for (j = 0; j < xdev->c2h_chan_num; j++) { + ret = request_irq(irq, xdma_channel_isr, 0, + "xdma-c2h-channel", &xdev->c2h_chans[j]); + if (ret) { + xdma_err(xdev, "H2C channel%d request irq%d failed: %d", + j, irq, ret); + goto failed_init_c2h; + } + xdev->c2h_chans[j].irq = irq; + irq++; + } + + /* config hardware IRQ registers */ + ret = xdma_set_vector_reg(xdev, XDMA_IRQ_CHAN_VEC_NUM, 0, + XDMA_CHAN_NUM(xdev)); + if (ret) { + xdma_err(xdev, "failed to set channel vectors: %d", ret); + goto failed_init_c2h; + } + + /* enable interrupt */ + ret = regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1S, ~0); + if (ret) + goto failed_init_c2h; + + return 0; + +failed_init_c2h: + while (j--) + free_irq(xdev->c2h_chans[j].irq, &xdev->c2h_chans[j]); +failed_init_h2c: + while (i--) + free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]); + + return ret; +} + +static bool xdma_filter_fn(struct dma_chan *chan, void *param) +{ + struct xdma_chan *xdma_chan = to_xdma_chan(chan); + struct xdma_chan_info *chan_info = param; + + return chan_info->dir == xdma_chan->dir; +} + +/** + * xdma_remove - Driver remove function + * @pdev: Pointer to the platform_device structure + */ +static int xdma_remove(struct platform_device *pdev) +{ + struct xdma_device *xdev = platform_get_drvdata(pdev); + + if (xdev->status & XDMA_DEV_STATUS_INIT_MSIX) + xdma_irq_fini(xdev); + + if (xdev->status & XDMA_DEV_STATUS_REG_DMA) + dma_async_device_unregister(&xdev->dma_dev); + + return 0; +} + +/** + * xdma_probe - Driver probe function + * @pdev: Pointer to the platform_device structure + */ +static int xdma_probe(struct platform_device *pdev) +{ + struct xdma_platdata *pdata = dev_get_platdata(&pdev->dev); + struct xdma_device *xdev; + void __iomem *reg_base; + struct resource *res; + int ret = -ENODEV; + + if (pdata->max_dma_channels > XDMA_MAX_CHANNELS) { + dev_err(&pdev->dev, "invalid max dma channels %d", + pdata->max_dma_channels); + return -EINVAL; + } + + xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL); + if (!xdev) + return -ENOMEM; + + platform_set_drvdata(pdev, xdev); + xdev->pdev = pdev; + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res) { + xdma_err(xdev, "failed to get irq resource"); + goto failed; + } + xdev->irq_start = res->start; + xdev->irq_num = res->end - res->start + 1; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + xdma_err(xdev, "failed to get io resource"); + goto failed; + } + + reg_base = devm_ioremap_resource(&pdev->dev, res); + if (!reg_base) { + xdma_err(xdev, "ioremap failed"); + goto failed; + } + + xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base, + &xdma_regmap_config); + if (!xdev->rmap) { + xdma_err(xdev, "config regmap failed: %d", ret); + goto failed; + } + INIT_LIST_HEAD(&xdev->dma_dev.channels); + + ret = xdma_alloc_channels(xdev, DMA_MEM_TO_DEV); + if 
(ret) { + xdma_err(xdev, "config H2C channels failed: %d", ret); + goto failed; + } + + ret = xdma_alloc_channels(xdev, DMA_DEV_TO_MEM); + if (ret) { + xdma_err(xdev, "config C2H channels failed: %d", ret); + goto failed; + } + + dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask); + dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask); + + xdev->dma_dev.dev = &pdev->dev; + xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources; + xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources; + xdev->dma_dev.device_tx_status = dma_cookie_status; + xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg; + xdev->dma_dev.device_config = xdma_device_config; + xdev->dma_dev.device_issue_pending = xdma_issue_pending; + xdev->dma_dev.filter.map = pdata->device_map; + xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt; + xdev->dma_dev.filter.fn = xdma_filter_fn; + + ret = dma_async_device_register(&xdev->dma_dev); + if (ret) { + xdma_err(xdev, "failed to register Xilinx XDMA: %d", ret); + goto failed; + } + xdev->status |= XDMA_DEV_STATUS_REG_DMA; + + ret = xdma_irq_init(xdev); + if (ret) { + xdma_err(xdev, "failed to init msix: %d", ret); + goto failed; + } + xdev->status |= XDMA_DEV_STATUS_INIT_MSIX; + + return 0; + +failed: + xdma_remove(pdev); + + return ret; +} + +static const struct platform_device_id xdma_id_table[] = { + { "xdma", 0}, + { }, +}; + +static struct platform_driver xdma_driver = { + .driver = { + .name = "xdma", + }, + .id_table = xdma_id_table, + .probe = xdma_probe, + .remove = xdma_remove, +}; + +module_platform_driver(xdma_driver); + +MODULE_DESCRIPTION("AMD XDMA driver"); +MODULE_AUTHOR("XRT Team "); +MODULE_LICENSE("GPL"); diff --git a/include/linux/platform_data/amd_xdma.h b/include/linux/platform_data/amd_xdma.h new file mode 100644 index 000000000000..b5e23e14bac8 --- /dev/null +++ b/include/linux/platform_data/amd_xdma.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2022, Advanced Micro Devices, Inc. + */ + +#ifndef _PLATDATA_AMD_XDMA_H +#define _PLATDATA_AMD_XDMA_H + +#include + +/** + * struct xdma_chan_info - DMA channel information + * This information is used to match channel when request dma channel + * @dir: Channel transfer direction + */ +struct xdma_chan_info { + enum dma_transfer_direction dir; +}; + +#define XDMA_FILTER_PARAM(chan_info) ((void *)(chan_info)) + +struct dma_slave_map; + +/** + * struct xdma_platdata - platform specific data for XDMA engine + * @max_dma_channels: Maximum dma channels in each direction + */ +struct xdma_platdata { + u32 max_dma_channels; + u32 device_map_cnt; + struct dma_slave_map *device_map; +}; + +#endif /* _PLATDATA_AMD_XDMA_H */ -- cgit v1.2.3 From ecf294a6f63f882319485f807754dacaeae96e9d Mon Sep 17 00:00:00 2001 From: Lizhi Hou Date: Thu, 19 Jan 2023 08:32:06 -0800 Subject: dmaengine: xilinx: xdma: Add user logic interrupt support The Xilinx DMA/Bridge Subsystem for PCIe (XDMA) provides up to 16 user interrupt wires to user logic that generate interrupts to the host. This patch adds APIs to enable/disable user logic interrupt for a given interrupt wire index. 
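For illustration, a client driver that instantiated the XDMA platform device
could consume these APIs roughly as follows (a minimal sketch; the handler
body and the "mydrv" names are placeholders, not part of this patch):

	static irqreturn_t mydrv_user_irq_handler(int irq, void *data)
	{
		/* acknowledge and handle the user logic event */
		return IRQ_HANDLED;
	}

	static int mydrv_setup_user_irq(struct platform_device *xdma_pdev)
	{
		/* translate user wire 0 into its system IRQ number */
		int irq = xdma_get_user_irq(xdma_pdev, 0);
		int ret;

		if (irq < 0)
			return irq;

		ret = request_irq(irq, mydrv_user_irq_handler, 0,
				  "mydrv-user0", NULL);
		if (ret)
			return ret;

		/* unmask the wire in the XDMA interrupt block */
		return xdma_enable_user_irq(xdma_pdev, irq);
	}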
Signed-off-by: Lizhi Hou Signed-off-by: Sonal Santan Signed-off-by: Max Zhen Signed-off-by: Brian Xu Tested-by: Martin Tuma Link: https://lore.kernel.org/r/1674145926-29449-3-git-send-email-lizhi.hou@amd.com Signed-off-by: Vinod Koul --- MAINTAINERS | 1 + drivers/dma/xilinx/xdma.c | 81 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/dma/amd_xdma.h | 16 +++++++++ 3 files changed, 98 insertions(+) create mode 100644 include/linux/dma/amd_xdma.h (limited to 'drivers/dma') diff --git a/MAINTAINERS b/MAINTAINERS index b768ad4e901b..7fdeab4c4baa 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -22897,6 +22897,7 @@ L: dmaengine@vger.kernel.org S: Supported F: drivers/dma/xilinx/xdma-regs.h F: drivers/dma/xilinx/xdma.c +F: include/linux/dma/amd_xdma.h F: include/linux/platform_data/amd_xdma.h XILINX ZYNQMP DPDMA DRIVER diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c index 48efb75ef9b4..462109c61653 100644 --- a/drivers/dma/xilinx/xdma.c +++ b/drivers/dma/xilinx/xdma.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -687,6 +688,7 @@ static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start, static int xdma_irq_init(struct xdma_device *xdev) { u32 irq = xdev->irq_start; + u32 user_irq_start; int i, j, ret; /* return failure if there are not enough IRQs */ @@ -729,6 +731,18 @@ static int xdma_irq_init(struct xdma_device *xdev) goto failed_init_c2h; } + /* config user IRQ registers if needed */ + user_irq_start = XDMA_CHAN_NUM(xdev); + if (xdev->irq_num > user_irq_start) { + ret = xdma_set_vector_reg(xdev, XDMA_IRQ_USER_VEC_NUM, + user_irq_start, + xdev->irq_num - user_irq_start); + if (ret) { + xdma_err(xdev, "failed to set user vectors: %d", ret); + goto failed_init_c2h; + } + } + /* enable interrupt */ ret = regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1S, ~0); if (ret) @@ -754,6 +768,73 @@ static bool xdma_filter_fn(struct dma_chan *chan, void *param) return chan_info->dir == xdma_chan->dir; } +/** + * xdma_disable_user_irq - Disable user interrupt + * @pdev: Pointer to the platform_device structure + * @irq_num: System IRQ number + */ +void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num) +{ + struct xdma_device *xdev = platform_get_drvdata(pdev); + u32 index; + + index = irq_num - xdev->irq_start; + if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) { + xdma_err(xdev, "invalid user irq number"); + return; + } + index -= XDMA_CHAN_NUM(xdev); + + regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1C, 1 << index); +} +EXPORT_SYMBOL(xdma_disable_user_irq); + +/** + * xdma_enable_user_irq - Enable user logic interrupt + * @pdev: Pointer to the platform_device structure + * @irq_num: System IRQ number + */ +int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num) +{ + struct xdma_device *xdev = platform_get_drvdata(pdev); + u32 index; + int ret; + + index = irq_num - xdev->irq_start; + if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) { + xdma_err(xdev, "invalid user irq number"); + return -EINVAL; + } + index -= XDMA_CHAN_NUM(xdev); + + ret = regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1S, 1 << index); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL(xdma_enable_user_irq); + +/** + * xdma_get_user_irq - Get system IRQ number + * @pdev: Pointer to the platform_device structure + * @user_irq_index: User logic IRQ wire index + * + * Return: The system IRQ number allocated for the given wire index. 
+ */ +int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index) +{ + struct xdma_device *xdev = platform_get_drvdata(pdev); + + if (XDMA_CHAN_NUM(xdev) + user_irq_index >= xdev->irq_num) { + xdma_err(xdev, "invalid user irq index"); + return -EINVAL; + } + + return xdev->irq_start + XDMA_CHAN_NUM(xdev) + user_irq_index; +} +EXPORT_SYMBOL(xdma_get_user_irq); + /** * xdma_remove - Driver remove function * @pdev: Pointer to the platform_device structure diff --git a/include/linux/dma/amd_xdma.h b/include/linux/dma/amd_xdma.h new file mode 100644 index 000000000000..ceba69ed7cb4 --- /dev/null +++ b/include/linux/dma/amd_xdma.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2022, Advanced Micro Devices, Inc. + */ + +#ifndef _DMAENGINE_AMD_XDMA_H +#define _DMAENGINE_AMD_XDMA_H + +#include +#include + +int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num); +void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num); +int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index); + +#endif /* _DMAENGINE_AMD_XDMA_H */ -- cgit v1.2.3 From 40e171c2d306e42057433fb2c2f1fee2d385e1c4 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 30 Jan 2023 13:11:41 +0200 Subject: dmaengine: use sysfs_emit() to instead of scnprintf() Follow the advice of the Documentation/filesystems/sysfs.rst and show() should only use sysfs_emit() or sysfs_emit_at() when formatting the value to be returned to user space. Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20230130111141.59627-1-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index c741b6431958..1ebbf22756bb 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -172,7 +172,7 @@ static ssize_t memcpy_count_show(struct device *dev, if (chan) { for_each_possible_cpu(i) count += per_cpu_ptr(chan->local, i)->memcpy_count; - err = sprintf(buf, "%lu\n", count); + err = sysfs_emit(buf, "%lu\n", count); } else err = -ENODEV; mutex_unlock(&dma_list_mutex); @@ -194,7 +194,7 @@ static ssize_t bytes_transferred_show(struct device *dev, if (chan) { for_each_possible_cpu(i) count += per_cpu_ptr(chan->local, i)->bytes_transferred; - err = sprintf(buf, "%lu\n", count); + err = sysfs_emit(buf, "%lu\n", count); } else err = -ENODEV; mutex_unlock(&dma_list_mutex); @@ -212,7 +212,7 @@ static ssize_t in_use_show(struct device *dev, struct device_attribute *attr, mutex_lock(&dma_list_mutex); chan = dev_to_dma_chan(dev); if (chan) - err = sprintf(buf, "%d\n", chan->client_count); + err = sysfs_emit(buf, "%d\n", chan->client_count); else err = -ENODEV; mutex_unlock(&dma_list_mutex); -- cgit v1.2.3 From a1beaa50b583a4f137048ff6e55bd2328a4e4362 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 30 Jan 2023 13:28:30 +0200 Subject: dmaengine: Simplify dmaenginem_async_device_register() function Use devm_add_action_or_reset() instead of devres_alloc() and devres_add(), which works the same. This will simplify the code. There is no functional changes. 
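The resulting pattern generalizes to any register/unregister pair managed
through devres; schematically (a sketch with illustrative names, not code
from this patch):

	static void my_subsys_undo(void *obj)
	{
		my_subsys_unregister(obj);
	}

	int devm_my_subsys_register(struct device *parent, void *obj)
	{
		int ret = my_subsys_register(obj);

		if (ret)
			return ret;

		/* runs my_subsys_undo() automatically when 'parent' unbinds */
		return devm_add_action_or_reset(parent, my_subsys_undo, obj);
	}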
Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20230130112830.52353-1-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.c | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 1ebbf22756bb..ed28be3b1fc7 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -1322,11 +1322,8 @@ void dma_async_device_unregister(struct dma_device *device) } EXPORT_SYMBOL(dma_async_device_unregister); -static void dmam_device_release(struct device *dev, void *res) +static void dmaenginem_async_device_unregister(void *device) { - struct dma_device *device; - - device = *(struct dma_device **)res; dma_async_device_unregister(device); } @@ -1338,22 +1335,13 @@ static void dmam_device_release(struct device *dev, void *res) */ int dmaenginem_async_device_register(struct dma_device *device) { - void *p; int ret; - p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL); - if (!p) - return -ENOMEM; - ret = dma_async_device_register(device); - if (!ret) { - *(struct dma_device **)p = device; - devres_add(device->dev, p); - } else { - devres_free(p); - } + if (ret) + return ret; - return ret; + return devm_add_action(device->dev, dmaenginem_async_device_unregister, device); } EXPORT_SYMBOL(dmaenginem_async_device_register); -- cgit v1.2.3 From b02e07015a5ac7bbc029da931ae17914b8ae0339 Mon Sep 17 00:00:00 2001 From: Shravan Chippa Date: Fri, 20 Jan 2023 15:36:23 +0530 Subject: dmaengine: sf-pdma: pdma_desc memory leak fix Commit b2cc5c465c2c ("dmaengine: sf-pdma: Add multithread support for a DMA channel") changed sf_pdma_prep_dma_memcpy() to unconditionally allocate a new sf_pdma_desc each time it is called. The driver previously recycled descs, by checking the in_use flag, only allocating additional descs if the existing one was in use. This logic was removed in commit b2cc5c465c2c ("dmaengine: sf-pdma: Add multithread support for a DMA channel"), but sf_pdma_free_desc() was not changed to handle the new behaviour. As a result, each time sf_pdma_prep_dma_memcpy() is called, the previous descriptor is leaked, over time leading to memory starvation: unreferenced object 0xffffffe008447300 (size 192): comm "irq/39-mchp_dsc", pid 343, jiffies 4294906910 (age 981.200s) hex dump (first 32 bytes): 00 00 00 ff 00 00 00 00 b8 c1 00 00 00 00 00 00 ................ 00 00 70 08 10 00 00 00 00 00 00 c0 00 00 00 00 ..p............. backtrace: [<00000000064a04f4>] kmemleak_alloc+0x1e/0x28 [<00000000018927a7>] kmem_cache_alloc+0x11e/0x178 [<000000002aea8d16>] sf_pdma_prep_dma_memcpy+0x40/0x112 Add the missing kfree() to sf_pdma_free_desc(), and remove the redundant in_use flag. 
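Under virt-dma the contract is that every descriptor handed to
vchan_tx_prep() is eventually released through the channel's desc_free
callback, so that callback must undo whatever the prep routine allocated.
Schematically (a sketch; names abbreviated, not the driver's exact code):

	/* prep side: one allocation per request */
	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;
	tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

	/* free side: invoked by virt-dma when the descriptor is retired */
	static void my_free_desc(struct virt_dma_desc *vdesc)
	{
		kfree(to_my_desc(vdesc));	/* must mirror the allocation */
	}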
Fixes: b2cc5c465c2c ("dmaengine: sf-pdma: Add multithread support for a DMA channel") Signed-off-by: Shravan Chippa Reviewed-by: Conor Dooley Link: https://lore.kernel.org/r/20230120100623.3530634-1-shravan.chippa@microchip.com Signed-off-by: Vinod Koul --- drivers/dma/sf-pdma/sf-pdma.c | 3 +-- drivers/dma/sf-pdma/sf-pdma.h | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c index d7d2dacac830..d1c6956af452 100644 --- a/drivers/dma/sf-pdma/sf-pdma.c +++ b/drivers/dma/sf-pdma/sf-pdma.c @@ -96,7 +96,6 @@ sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src, if (!desc) return NULL; - desc->in_use = true; desc->dirn = DMA_MEM_TO_MEM; desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); @@ -290,7 +289,7 @@ static void sf_pdma_free_desc(struct virt_dma_desc *vdesc) struct sf_pdma_desc *desc; desc = to_sf_pdma_desc(vdesc); - desc->in_use = false; + kfree(desc); } static void sf_pdma_donebh_tasklet(struct tasklet_struct *t) diff --git a/drivers/dma/sf-pdma/sf-pdma.h b/drivers/dma/sf-pdma/sf-pdma.h index dcb3687bd5da..5c398a83b491 100644 --- a/drivers/dma/sf-pdma/sf-pdma.h +++ b/drivers/dma/sf-pdma/sf-pdma.h @@ -78,7 +78,6 @@ struct sf_pdma_desc { u64 src_addr; struct virt_dma_desc vdesc; struct sf_pdma_chan *chan; - bool in_use; enum dma_transfer_direction dirn; struct dma_async_tx_descriptor *async_tx; }; -- cgit v1.2.3 From 601bdadadb505a72d9a76cc20cdfa252cba7ddc0 Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Fri, 27 Jan 2023 11:28:55 -0800 Subject: dmaengine: idxd: Fix default allowed read buffers value in group Currently default read buffers that is allowed in a group is 0. grpcfg will be configured to max read buffers that IDXD can support if the group's allowed read buffers value is 0. But 0 is an invalid read buffers value and user may get confused when seeing the invalid initial value 0 through sysfs interface. To show only valid allowed read buffers value and eliminate confusion, directly initialize the allowed read buffers to IDXD's max read buffers. User still can change the value through sysfs interface. Suggested-by: Ramesh Thomas Signed-off-by: Fenghua Yu Reviewed-by: Nikhil Rao Reviewed-by: Dave Jiang Link: https://lore.kernel.org/r/20230127192855.966929-1-fenghua.yu@intel.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/device.c | 12 ++++++------ drivers/dma/idxd/init.c | 5 +++++ 2 files changed, 11 insertions(+), 6 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index 19ab3e55033e..751675391e8c 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -699,7 +699,11 @@ static void idxd_groups_clear_state(struct idxd_device *idxd) group->num_engines = 0; group->num_wqs = 0; group->use_rdbuf_limit = false; - group->rdbufs_allowed = 0; + /* + * The default value is the same as the value of + * total read buffers in GRPCAP. 
+ */ + group->rdbufs_allowed = idxd->max_rdbufs; group->rdbufs_reserved = 0; if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) { group->tc_a = 1; @@ -934,11 +938,7 @@ static void idxd_group_flags_setup(struct idxd_device *idxd) group->grpcfg.flags.tc_b = group->tc_b; group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit; group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved; - if (group->rdbufs_allowed) - group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed; - else - group->grpcfg.flags.rdbufs_allowed = idxd->max_rdbufs; - + group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed; group->grpcfg.flags.desc_progress_limit = group->desc_progress_limit; group->grpcfg.flags.batch_progress_limit = group->batch_progress_limit; } diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index e63b0c674d88..640d3048368e 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -302,6 +302,11 @@ static int idxd_setup_groups(struct idxd_device *idxd) group->tc_a = -1; group->tc_b = -1; } + /* + * The default value is the same as the value of + * total read buffers in GRPCAP. + */ + group->rdbufs_allowed = idxd->max_rdbufs; } return 0; -- cgit v1.2.3 From be4d46edeee4b2459d2f53f37ada88bbfb634b6c Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 27 Jan 2023 14:36:27 -0800 Subject: dmaengine: dw-axi-dmac: Do not dereference NULL structure If "vdesc" is NULL, it cannot be used with vd_to_axi_desc(). Leave "bytes" unchanged at 0. Seen under GCC 13 with -Warray-bounds: ../drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c: In function 'dma_chan_tx_status': ../drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c:329:46: warning: array subscript 0 is outside array bounds of 'struct virt_dma_desc[46116860184273879]' [-Warray-bounds=] 329 | bytes = vd_to_axi_desc(vdesc)->length; | ^~ Fixes: 8e55444da65c ("dmaengine: dw-axi-dmac: Support burst residue granularity") Cc: Eugeniy Paltsev Cc: Vinod Koul Cc: dmaengine@vger.kernel.org Signed-off-by: Kees Cook Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20230127223623.never.507-kees@kernel.org Signed-off-by: Vinod Koul --- drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index 3dec8adfc4ea..d0de8b032d49 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -325,8 +325,6 @@ dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, len = vd_to_axi_desc(vdesc)->hw_desc[0].len; completed_length = completed_blocks * len; bytes = length - completed_length; - } else { - bytes = vd_to_axi_desc(vdesc)->length; } spin_unlock_irqrestore(&chan->vc.lock, flags); -- cgit v1.2.3 From 928469986171a6f763b34b039427f5667ba3fd50 Mon Sep 17 00:00:00 2001 From: Eric Pilmore Date: Thu, 9 Feb 2023 23:51:43 -0800 Subject: dmaengine: ptdma: check for null desc before calling pt_cmd_callback Resolves a panic that can occur on AMD systems, typically during host shutdown, after the PTDMA driver had been exercised. The issue was the pt_issue_pending() function is mistakenly assuming that there will be at least one descriptor in the Submitted queue when the function is called. However, it is possible that both the Submitted and Issued queues could be empty, which could result in pt_cmd_callback() being mistakenly called with a NULL pointer. Ref: Bugzilla Bug 216856. 
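In other words, the queue head fetched under chan->vc.lock may legitimately
be NULL, and the kick must be skipped in that case. The guarded flow looks
roughly like this (a sketch; pt_next_dma_desc() is assumed to be the
driver's queue-peek helper whose result 'desc' holds):

	desc = pt_next_dma_desc(chan);	/* may be NULL: both queues empty */
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	/* If there was nothing active, start processing */
	if (engine_is_idle && desc)
		pt_cmd_callback(desc, 0);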
From 255ccd8b16a516209deb4257aa4e89e42a26413a Mon Sep 17 00:00:00 2001
From: Andy Shevchenko
Date: Mon, 30 Jan 2023 17:17:47 +0200
Subject: dmaengine: dw: Move check for paused channel to dwc_get_residue()

Move the check for a paused channel into dwc_get_residue() and rename the latter to dwc_get_residue_and_status(). This improves data integrity, as the residue and the DMA channel status are now set in the same function under the same conditions.

Signed-off-by: Andy Shevchenko
Link: https://lore.kernel.org/r/20230130151747.20704-1-andriy.shevchenko@linux.intel.com
Signed-off-by: Vinod Koul
---
 drivers/dma/dw/core.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

(limited to 'drivers/dma')

diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 97ba3bfc10b1..5f7d690e3dba 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -889,7 +889,8 @@ static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
 	return NULL;
 }
 
-static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
+static u32 dwc_get_residue_and_status(struct dw_dma_chan *dwc, dma_cookie_t cookie,
+				      enum dma_status *status)
 {
 	struct dw_desc *desc;
 	unsigned long flags;
@@ -903,6 +904,8 @@ static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
 		residue = desc->residue;
 		if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
 			residue -= dwc_get_sent(dwc);
+		if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
+			*status = DMA_PAUSED;
 	} else {
 		residue = desc->total_len;
 	}
@@ -932,11 +935,7 @@ dwc_tx_status(struct dma_chan *chan,
 	if (ret == DMA_COMPLETE)
 		return ret;
 
-	dma_set_residue(txstate, dwc_get_residue(dwc, cookie));
-
-	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS)
-		return DMA_PAUSED;
-
+	dma_set_residue(txstate, dwc_get_residue_and_status(dwc, cookie, &ret));
 	return ret;
 }
-- 
cgit v1.2.3

From 8d1b7bd543833169a5f5f74c4753509ab406f381 Mon Sep 17 00:00:00 2001
From: Kai-Heng Feng
Date: Tue, 7 Feb 2023 12:57:45 +0800
Subject: dmaengine: imx-sdma: Set DMA channel to be private

If async-tx is loaded before the device drivers that require imx-sdma, the dmaengine_get() routine from async-tx grabs all non-private channels, so devices that require DMA fail to work.

Mark imx-sdma with DMA_PRIVATE to avoid this situation.

Signed-off-by: Kai-Heng Feng
Link: https://lore.kernel.org/r/20230207045745.1029959-1-kai.heng.feng@canonical.com
Signed-off-by: Vinod Koul
---
 drivers/dma/imx-sdma.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'drivers/dma')

diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 97396af129f8..0005ab059a4f 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -2252,6 +2252,7 @@ static int sdma_probe(struct platform_device *pdev)
 	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
 	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
 	dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
+	dma_cap_set(DMA_PRIVATE, sdma->dma_device.cap_mask);
 	INIT_LIST_HEAD(&sdma->dma_device.channels);
 
 	/* Initialize channel parameters */
-- 
cgit v1.2.3
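DMA_PRIVATE is the capability bit that keeps a device's channels out of the general-purpose pool: dmaengine_get() from async-tx takes references only on devices without the flag, while explicit slave requests keep working. A hedged sketch of both sides, where "dd" and the "rx" channel name are assumptions for illustration, not taken from the imx-sdma driver:

#include <linux/dmaengine.h>

/* Provider side: advertise slave capabilities but keep the channels
 * private, so async-tx's dmaengine_get() cannot claim them when it
 * happens to load first. */
static void my_setup_caps(struct dma_device *dd)
{
	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
	dma_cap_set(DMA_PRIVATE, dd->cap_mask);
}

/* Consumer side is unaffected: a slave driver still obtains its channel
 * explicitly, e.g. by DT/ACPI binding name. */
static struct dma_chan *my_get_chan(struct device *dev)
{
	return dma_request_chan(dev, "rx");
}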
From e922bbf37564a4c67efca9dd6133eaadbffb65f5 Mon Sep 17 00:00:00 2001
From: Aman Kumar
Date: Fri, 3 Feb 2023 17:47:02 +0530
Subject: dmaengine: idma64: Update bytes_transferred field

Currently, when an 8250 data transfer is done, bytes_transferred always returns 0 at /sys/devices/pci0000\:\:**.*/dma/dma*chan*/bytes_transferred. In many cases this gives the false impression that data is not being transferred via DMA. So update the bytes_transferred field to count the bytes whenever data is transferred using idma64.

Co-developed-by: Srikanth Thokala
Signed-off-by: Srikanth Thokala
Signed-off-by: Aman Kumar
Reviewed-by: Andy Shevchenko
Link: https://lore.kernel.org/r/20230203121702.15725-1-aman.kumar@intel.com
Signed-off-by: Vinod Koul
---
 drivers/dma/idma64.c | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'drivers/dma')

diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index cd9622f6e59c..0ac634a51c5e 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -137,8 +137,11 @@ static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
 			    u32 status_err, u32 status_xfer)
 {
 	struct idma64_chan *idma64c = &idma64->chan[c];
+	struct dma_chan_percpu *stat;
 	struct idma64_desc *desc;
 
+	stat = this_cpu_ptr(idma64c->vchan.chan.local);
+
 	spin_lock(&idma64c->vchan.lock);
 	desc = idma64c->desc;
 	if (desc) {
@@ -149,6 +152,7 @@ static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
 		dma_writel(idma64, CLEAR(XFER), idma64c->mask);
 		desc->status = DMA_COMPLETE;
 		vchan_cookie_complete(&desc->vdesc);
+		stat->bytes_transferred += desc->length;
 
 		idma64_start_transfer(idma64c);
 	}
-- 
cgit v1.2.3
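The counter incremented here is the per-CPU statistic (struct dma_chan_percpu) behind the sysfs attribute: each CPU bumps its own shard locklessly on the completion path, and readers sum the shards on demand, roughly as in this simplified sketch (modeled on the dmaengine core's behavior, not copied from it):

#include <linux/dmaengine.h>
#include <linux/percpu.h>

static u64 my_channel_bytes(struct dma_chan *chan)
{
	u64 total = 0;
	int cpu;

	/* chan->local is a per-CPU struct dma_chan_percpu: writers touch
	 * only their local shard, so no lock is needed on the hot path */
	for_each_possible_cpu(cpu)
		total += per_cpu_ptr(chan->local, cpu)->bytes_transferred;

	return total;
}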