summaryrefslogtreecommitdiff
path: root/drivers/net/ethernet/socionext
diff options
context:
space:
mode:
authorIlias Apalodimas <ilias.apalodimas@linaro.org>2019-07-04 17:11:09 +0300
committerDavid S. Miller <davem@davemloft.net>2019-07-06 01:41:24 +0300
commitcd1973a9215ade41d99c3a7c34c127cf0d21774b (patch)
treefa010990f15424038d68e60b34b065364fc7065a /drivers/net/ethernet/socionext
parent2bf8001e5387537f2d9acfecf3d724e0d6044b23 (diff)
downloadlinux-cd1973a9215ade41d99c3a7c34c127cf0d21774b.tar.xz
net: netsec: Sync dma for device on buffer allocation
Quoting Arnd: "We have to do a sync_single_for_device /somewhere/ before the buffer is given to the device. On a non-cache-coherent machine with a write-back cache, there may be dirty cache lines that get written back after the device DMA's data into it (e.g. from a previous memset from before the buffer got freed), so you absolutely need to flush any dirty cache lines on it first." Since coherency is configurable on this device, make sure we cover all configurations by explicitly syncing the allocated buffer for the device before refilling its descriptors. Signed-off-by: Ilias Apalodimas <ilias.apalodimas@linaro.org> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/socionext')
-rw-r--r--drivers/net/ethernet/socionext/netsec.c5
1 file changed, 5 insertions, 0 deletions
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index d8d640b01119..f6e261c6a059 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -726,6 +726,7 @@ static void *netsec_alloc_rx_data(struct netsec_priv *priv,
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ enum dma_data_direction dma_dir;
struct page *page;
page = page_pool_dev_alloc_pages(dring->page_pool);
@@ -741,6 +742,10 @@ static void *netsec_alloc_rx_data(struct netsec_priv *priv,
* cases and reserve enough space for headroom + skb_shared_info
*/
*desc_len = PAGE_SIZE - NETSEC_RX_BUF_NON_DATA;
+ dma_dir = page_pool_get_dma_dir(dring->page_pool);
+ dma_sync_single_for_device(priv->dev,
+ *dma_handle - NETSEC_RXBUF_HEADROOM,
+ PAGE_SIZE, dma_dir);
return page_address(page);
}