From 77a68e56aae141d3e9c740a0ac43362af75d4890 Mon Sep 17 00:00:00 2001
From: Maxime Ripard
Date: Mon, 20 Jul 2015 10:41:32 +0200
Subject: dmaengine: Add an enum for the dmaengine alignment constraints

Most drivers need to set constraints on the buffer alignment for async
tx operations. However, even though it is documented, some drivers
either use a defined constant that does not match what the alignment
variable expects (like the DMA_BUSWIDTH_* constants) or fill in the
alignment in bytes instead of as a power of two.

Add a new enum for these alignments that matches what the framework
expects, and convert the drivers to it.

Signed-off-by: Maxime Ripard
Signed-off-by: Vinod Koul
---
 drivers/dma/xgene-dma.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'drivers/dma/xgene-dma.c')

diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 620fd55ec766..fe87a634b145 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -150,7 +150,6 @@
 #define XGENE_DMA_PQ_CHANNEL            1
 #define XGENE_DMA_MAX_BYTE_CNT          0x4000  /* 16 KB */
 #define XGENE_DMA_MAX_64B_DESC_BYTE_CNT 0x14000 /* 80 KB */
-#define XGENE_DMA_XOR_ALIGNMENT         6       /* 64 Bytes */
 #define XGENE_DMA_MAX_XOR_SRC           5
 #define XGENE_DMA_16K_BUFFER_LEN_CODE   0x0
 #define XGENE_DMA_INVALID_LEN_CODE      0x7800000000000000ULL
@@ -1740,13 +1739,13 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 		dma_dev->device_prep_dma_xor = xgene_dma_prep_xor;
 		dma_dev->max_xor = XGENE_DMA_MAX_XOR_SRC;
-		dma_dev->xor_align = XGENE_DMA_XOR_ALIGNMENT;
+		dma_dev->xor_align = DMAENGINE_ALIGN_64_BYTES;
 	}
 
 	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
 		dma_dev->device_prep_dma_pq = xgene_dma_prep_pq;
 		dma_dev->max_pq = XGENE_DMA_MAX_XOR_SRC;
-		dma_dev->pq_align = XGENE_DMA_XOR_ALIGNMENT;
+		dma_dev->pq_align = DMAENGINE_ALIGN_64_BYTES;
 	}
 }
--
cgit v1.2.3
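Note: this view is limited to drivers/dma/xgene-dma.c, so the enum the
commit message introduces is not visible in the diff above. For context,
the values added to include/linux/dmaengine.h by this commit encode the
alignment as a power-of-two shift (alignment = 1 << value), which is why
DMAENGINE_ALIGN_64_BYTES stands in for the bare value 6; reproduced here
for reference:

enum dmaengine_alignment {
	DMAENGINE_ALIGN_1_BYTE = 0,
	DMAENGINE_ALIGN_2_BYTES = 1,
	DMAENGINE_ALIGN_4_BYTES = 2,
	DMAENGINE_ALIGN_8_BYTES = 3,
	DMAENGINE_ALIGN_16_BYTES = 4,
	DMAENGINE_ALIGN_32_BYTES = 5,
	DMAENGINE_ALIGN_64_BYTES = 6,
};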
From 89079493437701551938652003eb75b328425c66 Mon Sep 17 00:00:00 2001
From: Rameshwar Prasad Sahu
Date: Tue, 21 Jul 2015 18:44:39 +0530
Subject: dmaengine: xgene-dma: Add ACPI support for X-Gene DMA engine driver

This patch adds ACPI support for the APM X-Gene DMA engine driver.

Signed-off-by: Rameshwar Prasad Sahu
Signed-off-by: Vinod Koul
---
 drivers/dma/xgene-dma.c | 28 +++++++++++++++++++++-------
 1 file changed, 21 insertions(+), 7 deletions(-)

(limited to 'drivers/dma/xgene-dma.c')

diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index fe87a634b145..d1c8809a0810 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -21,6 +21,7 @@
  * NOTE: PM support is currently not available.
  */
 
+#include <linux/acpi.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
@@ -1940,16 +1941,18 @@ static int xgene_dma_probe(struct platform_device *pdev)
 		return ret;
 
 	pdma->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(pdma->clk)) {
+	if (IS_ERR(pdma->clk) && !ACPI_COMPANION(&pdev->dev)) {
 		dev_err(&pdev->dev, "Failed to get clk\n");
 		return PTR_ERR(pdma->clk);
 	}
 
 	/* Enable clk before accessing registers */
-	ret = clk_prepare_enable(pdma->clk);
-	if (ret) {
-		dev_err(&pdev->dev, "Failed to enable clk %d\n", ret);
-		return ret;
+	if (!IS_ERR(pdma->clk)) {
+		ret = clk_prepare_enable(pdma->clk);
+		if (ret) {
+			dev_err(&pdev->dev, "Failed to enable clk %d\n", ret);
+			return ret;
+		}
 	}
 
 	/* Remove DMA RAM out of shutdown */
@@ -1994,7 +1997,8 @@ err_request_irq:
 
 err_dma_mask:
 err_clk_enable:
-	clk_disable_unprepare(pdma->clk);
+	if (!IS_ERR(pdma->clk))
+		clk_disable_unprepare(pdma->clk);
 
 	return ret;
 }
@@ -2018,11 +2022,20 @@ static int xgene_dma_remove(struct platform_device *pdev)
 		xgene_dma_delete_chan_rings(chan);
 	}
 
-	clk_disable_unprepare(pdma->clk);
+	if (!IS_ERR(pdma->clk))
+		clk_disable_unprepare(pdma->clk);
 
 	return 0;
 }
 
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_dma_acpi_match_ptr[] = {
+	{"APMC0D43", 0},
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, xgene_dma_acpi_match_ptr);
+#endif
+
 static const struct of_device_id xgene_dma_of_match_ptr[] = {
 	{.compatible = "apm,xgene-storm-dma",},
 	{},
@@ -2035,6 +2048,7 @@ static struct platform_driver xgene_dma_driver = {
 	.driver = {
 		.name = "X-Gene-DMA",
 		.of_match_table = xgene_dma_of_match_ptr,
+		.acpi_match_table = ACPI_PTR(xgene_dma_acpi_match_ptr),
 	},
 };
--
cgit v1.2.3
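Note: the heart of this patch is treating the clock as optional when the
device is enumerated through ACPI rather than the device tree, where no
clock provider exists. A minimal sketch of that pattern follows; demo_probe
and everything around it are illustrative names, not the driver's own code,
and this is a simplification rather than a drop-in probe:

#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, NULL);
	/*
	 * ACPI-enumerated devices have no DT clock provider, so a
	 * missing clock is fatal only when there is no ACPI companion.
	 */
	if (IS_ERR(clk) && !ACPI_COMPANION(&pdev->dev))
		return PTR_ERR(clk);

	/* Only touch the clock if we actually got one */
	if (!IS_ERR(clk)) {
		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;
	}

	return 0;
}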
From 005ce70b9448ed86c9a12e6504f1f9896a826e3d Mon Sep 17 00:00:00 2001
From: Rameshwar Prasad Sahu
Date: Fri, 21 Aug 2015 14:33:34 +0530
Subject: dmaengine: xgene-dma: Fix the lock to allow client for further
 submission of requests

This patch fixes the cleanup routine so that a client can make further
submissions: the channel lock is now released before the client's
callback function is called.

Signed-off-by: Rameshwar Prasad Sahu
Signed-off-by: Vinod Koul
---
 drivers/dma/xgene-dma.c | 33 ++++++++++++++++++++++-----------
 1 file changed, 22 insertions(+), 11 deletions(-)

(limited to 'drivers/dma/xgene-dma.c')

diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index d1c8809a0810..0b82bc00b83a 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -763,12 +763,17 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
 	struct xgene_dma_ring *ring = &chan->rx_ring;
 	struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
 	struct xgene_dma_desc_hw *desc_hw;
+	struct list_head ld_completed;
 	u8 status;
 
+	INIT_LIST_HEAD(&ld_completed);
+
+	spin_lock_bh(&chan->lock);
+
 	/* Clean already completed and acked descriptors */
 	xgene_dma_clean_completed_descriptor(chan);
 
-	/* Run the callback for each descriptor, in order */
+	/* Move all completed descriptors to ld completed queue, in order */
 	list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) {
 		/* Get subsequent hw descriptor from DMA rx ring */
 		desc_hw = &ring->desc_hw[ring->head];
@@ -811,15 +816,17 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
 		/* Mark this hw descriptor as processed */
 		desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
 
-		xgene_dma_run_tx_complete_actions(chan, desc_sw);
-
-		xgene_dma_clean_running_descriptor(chan, desc_sw);
-
 		/*
 		 * Decrement the pending transaction count
 		 * as we have processed one
 		 */
 		chan->pending--;
+
+		/*
+		 * Delete this node from ld running queue and append it to
+		 * ld completed queue for further processing
+		 */
+		list_move_tail(&desc_sw->node, &ld_completed);
 	}
 
 	/*
@@ -828,6 +835,14 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
 	 * ahead and free the descriptors below.
 	 */
 	xgene_chan_xfer_ld_pending(chan);
+
+	spin_unlock_bh(&chan->lock);
+
+	/* Run the callback for each descriptor, in order */
+	list_for_each_entry_safe(desc_sw, _desc_sw, &ld_completed, node) {
+		xgene_dma_run_tx_complete_actions(chan, desc_sw);
+		xgene_dma_clean_running_descriptor(chan, desc_sw);
+	}
 }
 
 static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
@@ -876,11 +891,11 @@ static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
 	if (!chan->desc_pool)
 		return;
 
-	spin_lock_bh(&chan->lock);
-
 	/* Process all running descriptor */
 	xgene_dma_cleanup_descriptors(chan);
 
+	spin_lock_bh(&chan->lock);
+
 	/* Clean all link descriptor queues */
 	xgene_dma_free_desc_list(chan, &chan->ld_pending);
 	xgene_dma_free_desc_list(chan, &chan->ld_running);
@@ -1200,15 +1215,11 @@ static void xgene_dma_tasklet_cb(unsigned long data)
 {
 	struct xgene_dma_chan *chan = (struct xgene_dma_chan *)data;
 
-	spin_lock_bh(&chan->lock);
-
 	/* Run all cleanup for descriptors which have been completed */
 	xgene_dma_cleanup_descriptors(chan);
 
 	/* Re-enable DMA channel IRQ */
 	enable_irq(chan->rx_irq);
-
-	spin_unlock_bh(&chan->lock);
 }
 
 static irqreturn_t xgene_dma_chan_ring_isr(int irq, void *id)
--
cgit v1.2.3
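Note: the shape of this fix follows a common dmaengine pattern: reap
completed descriptors onto a private list while holding the channel lock,
then run the client callbacks with the lock dropped so that a callback is
free to submit new requests without deadlocking on chan->lock. A minimal
sketch of the pattern follows; all demo_* names are illustrative, and the
driver's real cleanup also checks hardware completion status, omitted here:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_desc {
	struct list_head node;
	void (*callback)(void *param);
	void *param;
};

struct demo_chan {
	spinlock_t lock;
	struct list_head ld_done;	/* completed, not yet reaped */
};

static void demo_cleanup_descriptors(struct demo_chan *chan)
{
	struct demo_desc *desc, *tmp;
	LIST_HEAD(ld_completed);

	/* Harvest finished descriptors while holding the channel lock */
	spin_lock_bh(&chan->lock);
	list_splice_tail_init(&chan->ld_done, &ld_completed);
	spin_unlock_bh(&chan->lock);

	/*
	 * Callbacks run unlocked: a client may submit another request
	 * from here, which takes chan->lock again without deadlocking.
	 */
	list_for_each_entry_safe(desc, tmp, &ld_completed, node) {
		list_del(&desc->node);
		if (desc->callback)
			desc->callback(desc->param);
	}
}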