author		Vignesh Raghavendra <vigneshr@ti.com>	2019-12-09 07:55:38 +0300
committer	Lokesh Vutla <lokeshvutla@ti.com>	2020-01-20 07:40:28 +0300
commit		b0ab00839f3ffff9bd3573c204938004d84d05a5 (patch)
tree		443e6d3d3fc0f5f3d95f3243efd90a3ea461e2b9 /drivers/dma
parent		ce1a307358c0b5b1c45cb287f74ae3dd8f86415d (diff)
download	u-boot-b0ab00839f3ffff9bd3573c204938004d84d05a5.tar.xz
dma: ti: k3-udma: Fix ring push operation for 32 bit cores
UDMA always expects a 64-bit address pointer to the transfer descriptor in the ring, but on 32-bit cores such as the R5 a pointer is only 32 bits wide. Therefore copy the 32-bit pointer value into a 64-bit variable before pushing it to the ring, so that the upper 32 bits are zero.

Signed-off-by: Vignesh Raghavendra <vigneshr@ti.com>
Reviewed-by: Grygorii Strashko <grygorii.strashko@ti.com>
Signed-off-by: Lokesh Vutla <lokeshvutla@ti.com>
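For illustration only, the standalone sketch below shows the widening pattern this patch introduces. It is not U-Boot code: fake_ring_push(), push_desc() and the descriptor buffer are invented stand-ins, with fake_ring_push() playing the role of k3_nav_ringacc_ring_push(), which always consumes a full 64-bit ring element.

/*
 * Hypothetical, self-contained demo of the 32-bit -> 64-bit widening.
 * Only the memcpy() pattern mirrors the patch; all names are invented.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint64_t u64;

/* Stand-in for k3_nav_ringacc_ring_push(): copies a fixed 64-bit element. */
static void fake_ring_push(u64 *ring_slot, const void *elem)
{
	memcpy(ring_slot, elem, sizeof(u64));
}

/* Mirrors udma_push_to_ring(): widen the pointer into a zeroed u64 first. */
static void push_desc(u64 *ring_slot, void *desc)
{
	u64 addr = 0;

	/*
	 * Copies sizeof(void *) bytes: 4 on a 32-bit core, 8 on a 64-bit
	 * core. The bytes of 'addr' that are not written stay zero, so the
	 * ring element never contains stray stack data.
	 */
	memcpy(&addr, &desc, sizeof(desc));
	fake_ring_push(ring_slot, &addr);
}

int main(void)
{
	static char descriptor[64];	/* pretend transfer descriptor */
	u64 ring_slot = 0;

	push_desc(&ring_slot, descriptor);
	printf("ring element: 0x%016llx\n", (unsigned long long)ring_slot);
	return 0;
}

The old code passed &tr_desc, a pointer-sized stack variable, straight to the ring accessor, so on a 32-bit R5 the accessor would read 8 bytes from a 4-byte location and the upper half of the element would be whatever followed it on the stack. Note that the zero upper half also relies on the core running little-endian, as the R5 cores on these TI K3 devices do.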
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/ti/k3-udma.c	14
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 68affe0b7c..a375854dcd 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -1381,6 +1381,14 @@ static int udma_probe(struct udevice *dev)
return ret;
}
+static int udma_push_to_ring(struct k3_nav_ring *ring, void *elem)
+{
+	u64 addr = 0;
+
+	memcpy(&addr, &elem, sizeof(elem));
+	return k3_nav_ringacc_ring_push(ring, &addr);
+}
+
static int *udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
dma_addr_t src, size_t len)
{
@@ -1472,7 +1480,7 @@ static int *udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
ALIGN((u64)tr_desc + desc_size,
ARCH_DMA_MINALIGN));
- k3_nav_ringacc_ring_push(uc->tchan->t_ring, &tr_desc);
+ udma_push_to_ring(uc->tchan->t_ring, tr_desc);
return 0;
}
@@ -1647,7 +1655,7 @@ static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
ALIGN((u64)desc_tx + uc->hdesc_size,
ARCH_DMA_MINALIGN));
- ret = k3_nav_ringacc_ring_push(uc->tchan->t_ring, &uc->desc_tx);
+ ret = udma_push_to_ring(uc->tchan->t_ring, uc->desc_tx);
if (ret) {
dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
dma->id, ret);
@@ -1806,7 +1814,7 @@ int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
ALIGN((u64)desc_rx + uc->hdesc_size,
ARCH_DMA_MINALIGN));
- k3_nav_ringacc_ring_push(uc->rchan->fd_ring, &desc_rx);
+ udma_push_to_ring(uc->rchan->fd_ring, desc_rx);
uc->num_rx_bufs++;
uc->desc_rx_cur++;