Diffstat (limited to 'drivers/net/ethernet/sfc/rx.c')
 drivers/net/ethernet/sfc/rx.c | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 243e91f3dff9..719319b89d7a 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -155,11 +155,11 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
 		rx_buf->len = skb_len - NET_IP_ALIGN;
 		rx_buf->flags = 0;
 
-		rx_buf->dma_addr = pci_map_single(efx->pci_dev,
+		rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev,
 						  skb->data, rx_buf->len,
-						  PCI_DMA_FROMDEVICE);
-		if (unlikely(pci_dma_mapping_error(efx->pci_dev,
-						   rx_buf->dma_addr))) {
+						  DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
+					       rx_buf->dma_addr))) {
 			dev_kfree_skb_any(skb);
 			rx_buf->u.skb = NULL;
 			return -EIO;
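The hunk above swaps the legacy PCI-DMA wrappers for the generic DMA API on the skb path: the mapping call now takes the PCI device's embedded struct device, and failure is detected with dma_mapping_error() before the skb is released. A minimal, self-contained sketch of that same pattern follows; the helper name and parameters are illustrative, not taken from the driver.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/*
 * Illustrative sketch only: example_map_rx_skb() is not part of the
 * sfc driver.  It mirrors the converted hunk above, using the generic
 * DMA API on &pdev->dev instead of the old pci_map_single() wrapper.
 */
static int example_map_rx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			      unsigned int len, dma_addr_t *dma_addr)
{
	*dma_addr = dma_map_single(&pdev->dev, skb->data, len,
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&pdev->dev, *dma_addr)))
		return -EIO;	/* mapping failed; caller frees the skb */
	return 0;
}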
@@ -200,10 +200,10 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 				   efx->rx_buffer_order);
 		if (unlikely(page == NULL))
 			return -ENOMEM;
-		dma_addr = pci_map_page(efx->pci_dev, page, 0,
+		dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
 					efx_rx_buf_size(efx),
-					PCI_DMA_FROMDEVICE);
-		if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
+					DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
 			__free_pages(page, efx->rx_buffer_order);
 			return -EIO;
 		}
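The page-based path follows the same recipe with dma_map_page(). Below is a self-contained sketch of that pattern; the helper name and the allocation flags are assumptions for illustration, not the driver's own code.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/pci.h>

/*
 * Illustrative only: allocates a compound page and maps it for
 * device-to-memory DMA, mirroring the converted hunk above.
 */
static struct page *example_map_rx_page(struct pci_dev *pdev,
					unsigned int order, size_t len,
					dma_addr_t *dma_addr)
{
	struct page *page = alloc_pages(GFP_ATOMIC | __GFP_COMP, order);

	if (unlikely(!page))
		return NULL;

	*dma_addr = dma_map_page(&pdev->dev, page, 0, len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&pdev->dev, *dma_addr))) {
		__free_pages(page, order);	/* undo allocation on failure */
		return NULL;
	}
	return page;
}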
@@ -247,14 +247,14 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
 
 		state = page_address(rx_buf->u.page);
 		if (--state->refcnt == 0) {
-			pci_unmap_page(efx->pci_dev,
+			dma_unmap_page(&efx->pci_dev->dev,
 				       state->dma_addr,
 				       efx_rx_buf_size(efx),
-				       PCI_DMA_FROMDEVICE);
+				       DMA_FROM_DEVICE);
 		}
 	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
-		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
-				 rx_buf->len, PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
+				 rx_buf->len, DMA_FROM_DEVICE);
 	}
 }
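Unmapping mirrors the mapping call exactly: the same struct device, length and direction that were used when the buffer was mapped. A sketch of that symmetry, with illustrative names only:

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/types.h>

/*
 * Illustrative only: releases a mapping created with dma_map_page()
 * or dma_map_single(), matching the teardown pattern in the hunk
 * above.
 */
static void example_unmap_rx_buf(struct pci_dev *pdev, dma_addr_t dma_addr,
				 size_t len, bool is_page)
{
	if (is_page)
		dma_unmap_page(&pdev->dev, dma_addr, len, DMA_FROM_DEVICE);
	else
		dma_unmap_single(&pdev->dev, dma_addr, len, DMA_FROM_DEVICE);
}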
@@ -336,6 +336,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
 /**
  * efx_fast_push_rx_descriptors - push new RX descriptors quickly
  * @rx_queue: RX descriptor queue
+ *
  * This will aim to fill the RX descriptor queue up to
  * @rx_queue->@max_fill. If there is insufficient atomic
  * memory to do so, a slow fill will be scheduled.