Diffstat (limited to 'drivers/net/ethernet/amazon/ena/ena_eth_com.c')
-rw-r--r-- | drivers/net/ethernet/amazon/ena/ena_eth_com.c | 285
1 file changed, 186 insertions, 99 deletions
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 1c682b76190f..f6c2d3855be8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -59,16 +59,7 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
 	return cdesc;
 }
 
-static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
-{
-	io_cq->head++;
-
-	/* Switch phase bit in case of wrap around */
-	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
-		io_cq->phase ^= 1;
-}
-
-static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
 {
 	u16 tail_masked;
 	u32 offset;
@@ -80,45 +71,159 @@ static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
 	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
 }
 
-static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
+static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
+						     u8 *bounce_buffer)
 {
-	u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
-	u32 offset = tail_masked * io_sq->desc_entry_size;
+	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
 
-	/* In case this queue isn't a LLQ */
-	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
-		return;
+	u16 dst_tail_mask;
+	u32 dst_offset;
 
-	memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset,
-		    io_sq->desc_addr.virt_addr + offset,
-		    io_sq->desc_entry_size);
-}
+	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
+	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;
+
+	/* Make sure everything was written into the bounce buffer before
+	 * writing the bounce buffer to the device
+	 */
+	wmb();
+
+	/* The line is completed. Copy it to dev */
+	__iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
+			 bounce_buffer, (llq_info->desc_list_entry_size) / 8);
 
-static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
-{
 	io_sq->tail++;
 
 	/* Switch phase bit in case of wrap around */
 	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
 		io_sq->phase ^= 1;
+
+	return 0;
 }
 
-static inline int ena_com_write_header(struct ena_com_io_sq *io_sq,
-				       u8 *head_src, u16 header_len)
+static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
+						 u8 *header_src,
+						 u16 header_len)
 {
-	u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
-	u8 __iomem *dev_head_addr =
-		io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size);
+	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
+	u16 header_offset;
 
-	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
 		return 0;
 
-	if (unlikely(!io_sq->header_addr)) {
-		pr_err("Push buffer header ptr is NULL\n");
-		return -EINVAL;
+	header_offset =
+		llq_info->descs_num_before_header * io_sq->desc_entry_size;
+
+	if (unlikely((header_offset + header_len) >
+		     llq_info->desc_list_entry_size)) {
+		pr_err("trying to write header larger than llq entry can accommodate\n");
+		return -EFAULT;
+	}
+
+	if (unlikely(!bounce_buffer)) {
+		pr_err("bounce buffer is NULL\n");
+		return -EFAULT;
+	}
+
+	memcpy(bounce_buffer + header_offset, header_src, header_len);
+
+	return 0;
+}
+
+static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
+{
+	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+	u8 *bounce_buffer;
+	void *sq_desc;
+
+	bounce_buffer = pkt_ctrl->curr_bounce_buf;
+
+	if (unlikely(!bounce_buffer)) {
+		pr_err("bounce buffer is NULL\n");
+		return NULL;
+	}
+
+	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
+	pkt_ctrl->idx++;
+	pkt_ctrl->descs_left_in_line--;
+
+	return sq_desc;
+}
+
+static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
+{
+	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+	int rc;
+
+	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
+		return 0;
+
+	/* bounce buffer was used, so write it and get a new one */
+	if (pkt_ctrl->idx) {
+		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
+							pkt_ctrl->curr_bounce_buf);
+		if (unlikely(rc))
+			return rc;
+
+		pkt_ctrl->curr_bounce_buf =
+			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+		       0x0, llq_info->desc_list_entry_size);
+	}
+
+	pkt_ctrl->idx = 0;
+	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
+	return 0;
+}
+
+static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+{
+	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+		return get_sq_desc_llq(io_sq);
+
+	return get_sq_desc_regular_queue(io_sq);
+}
+
+static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
+{
+	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+	int rc;
+
+	if (!pkt_ctrl->descs_left_in_line) {
+		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
+							pkt_ctrl->curr_bounce_buf);
+		if (unlikely(rc))
+			return rc;
+
+		pkt_ctrl->curr_bounce_buf =
+			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+		       0x0, llq_info->desc_list_entry_size);
+
+		pkt_ctrl->idx = 0;
+		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
+			pkt_ctrl->descs_left_in_line = 1;
+		else
+			pkt_ctrl->descs_left_in_line =
+				llq_info->desc_list_entry_size / io_sq->desc_entry_size;
 	}
 
-	memcpy_toio(dev_head_addr, head_src, header_len);
+	return 0;
+}
+
+static inline int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
+{
+	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+		return ena_com_sq_update_llq_tail(io_sq);
+
+	io_sq->tail++;
+
+	/* Switch phase bit in case of wrap around */
+	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
+		io_sq->phase ^= 1;
 
 	return 0;
 }
@@ -186,8 +291,8 @@ static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
 	return false;
 }
 
-static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
-							 struct ena_com_tx_ctx *ena_tx_ctx)
+static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+							 struct ena_com_tx_ctx *ena_tx_ctx)
 {
 	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
 	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
@@ -232,8 +337,7 @@ static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *i
 	memcpy(&io_sq->cached_tx_meta, ena_meta,
 	       sizeof(struct ena_com_tx_meta));
 
-	ena_com_copy_curr_sq_desc_to_dev(io_sq);
-	ena_com_sq_update_tail(io_sq);
+	return ena_com_sq_update_tail(io_sq);
 }
 
 static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
@@ -245,11 +349,14 @@ static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
 		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
 		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
 	ena_rx_ctx->l3_csum_err =
-		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
-		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
+		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
+		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
 	ena_rx_ctx->l4_csum_err =
-		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
-		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
+		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
+		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
+	ena_rx_ctx->l4_csum_checked =
+		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
+		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
 	ena_rx_ctx->hash = cdesc->hash;
 	ena_rx_ctx->frag =
 		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
@@ -271,18 +378,19 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
 {
 	struct ena_eth_io_tx_desc *desc = NULL;
 	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
-	void *push_header = ena_tx_ctx->push_header;
+	void *buffer_to_push = ena_tx_ctx->push_header;
 	u16 header_len = ena_tx_ctx->header_len;
 	u16 num_bufs = ena_tx_ctx->num_bufs;
-	int total_desc, i, rc;
+	u16 start_tail = io_sq->tail;
+	int i, rc;
 	bool have_meta;
 	u64 addr_hi;
 
 	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");
 
 	/* num_bufs +1 for potential meta desc */
-	if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
-		pr_err("Not enough space in the tx queue\n");
+	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
+		pr_debug("Not enough space in the tx queue\n");
 		return -ENOMEM;
 	}
 
@@ -292,23 +400,32 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
 		return -EINVAL;
 	}
 
-	/* start with pushing the header (if needed) */
-	rc = ena_com_write_header(io_sq, push_header, header_len);
+	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
+		     !buffer_to_push))
+		return -EINVAL;
+
+	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
 	if (unlikely(rc))
 		return rc;
 
 	have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
 			ena_tx_ctx);
-	if (have_meta)
-		ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
+	if (have_meta) {
+		rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
+		if (unlikely(rc))
+			return rc;
+	}
 
-	/* If the caller doesn't want send packets */
+	/* If the caller doesn't want to send packets */
 	if (unlikely(!num_bufs && !header_len)) {
-		*nb_hw_desc = have_meta ? 0 : 1;
-		return 0;
+		rc = ena_com_close_bounce_buffer(io_sq);
+		*nb_hw_desc = io_sq->tail - start_tail;
+		return rc;
 	}
 
 	desc = get_sq_desc(io_sq);
+	if (unlikely(!desc))
+		return -EFAULT;
 	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
 
 	/* Set first desc when we don't have meta descriptor */
@@ -360,10 +477,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
 	for (i = 0; i < num_bufs; i++) {
 		/* The first desc share the same desc as the header */
 		if (likely(i != 0)) {
-			ena_com_copy_curr_sq_desc_to_dev(io_sq);
-			ena_com_sq_update_tail(io_sq);
+			rc = ena_com_sq_update_tail(io_sq);
+			if (unlikely(rc))
+				return rc;
 
 			desc = get_sq_desc(io_sq);
+			if (unlikely(!desc))
+				return -EFAULT;
+
 			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
 
 			desc->len_ctrl |= (io_sq->phase <<
@@ -386,15 +507,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
 	/* set the last desc indicator */
 	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;
 
-	ena_com_copy_curr_sq_desc_to_dev(io_sq);
-
-	ena_com_sq_update_tail(io_sq);
+	rc = ena_com_sq_update_tail(io_sq);
+	if (unlikely(rc))
+		return rc;
 
-	total_desc = max_t(u16, num_bufs, 1);
-	total_desc += have_meta ? 1 : 0;
+	rc = ena_com_close_bounce_buffer(io_sq);
 
-	*nb_hw_desc = total_desc;
-	return 0;
+	*nb_hw_desc = io_sq->tail - start_tail;
+	return rc;
 }
 
 int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
@@ -453,15 +573,18 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
 
 	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
 
-	if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
+	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
 		return -ENOSPC;
 
 	desc = get_sq_desc(io_sq);
+	if (unlikely(!desc))
+		return -EFAULT;
+
 	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));
 
 	desc->length = ena_buf->len;
 
-	desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;
+	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK;
 	desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
 	desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
 	desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
@@ -472,43 +595,7 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
 	desc->buff_addr_hi =
 		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
 
-	ena_com_sq_update_tail(io_sq);
-
-	return 0;
-}
-
-int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
-{
-	u8 expected_phase, cdesc_phase;
-	struct ena_eth_io_tx_cdesc *cdesc;
-	u16 masked_head;
-
-	masked_head = io_cq->head & (io_cq->q_depth - 1);
-	expected_phase = io_cq->phase;
-
-	cdesc = (struct ena_eth_io_tx_cdesc *)
-		((uintptr_t)io_cq->cdesc_addr.virt_addr +
-		(masked_head * io_cq->cdesc_entry_size_in_bytes));
-
-	/* When the current completion descriptor phase isn't the same as the
-	 * expected, it mean that the device still didn't update
-	 * this completion.
-	 */
-	cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
-	if (cdesc_phase != expected_phase)
-		return -EAGAIN;
-
-	dma_rmb();
-	if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
-		pr_err("Invalid req id %d\n", cdesc->req_id);
-		return -EINVAL;
-	}
-
-	ena_com_cq_inc_head(io_cq);
-
-	*req_id = READ_ONCE(cdesc->req_id);
-
-	return 0;
+	return ena_com_sq_update_tail(io_sq);
 }
 
 bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
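Note on the pattern this patch introduces: on the LLQ (Low Latency Queue) path, descriptors and the packet header are no longer written one by one to device memory; they are staged in a host-side bounce buffer and the whole entry ("line") is pushed to the device in a single burst once it is full or the packet is closed. The sketch below is a simplified, self-contained illustration of that staging idea only. All names in it (toy_llq, toy_get_desc, toy_close_line, ENTRY_SIZE, DESC_SIZE) are hypothetical, and a plain memcpy stands in for the driver's wmb() + __iowrite64_copy() write to PCIe memory; it is not the ENA driver's API.

/* Simplified illustration of LLQ-style bounce-buffer staging (hypothetical code,
 * not the ENA driver). Compile with: cc -o toy_llq toy_llq.c
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ENTRY_SIZE 128  /* bytes pushed to the device per burst (one "line") */
#define DESC_SIZE  16   /* size of one staged descriptor */

struct toy_llq {
	uint8_t bounce[ENTRY_SIZE];   /* host-side staging area */
	unsigned int idx;             /* descriptors already staged in the line */
	unsigned int descs_left;      /* free descriptor slots left in the line */
	uint8_t dev_mem[ENTRY_SIZE];  /* stands in for device (PCIe) memory */
};

static void toy_llq_reset(struct toy_llq *q)
{
	memset(q->bounce, 0, sizeof(q->bounce));
	q->idx = 0;
	q->descs_left = ENTRY_SIZE / DESC_SIZE;
}

/* Hand out the next descriptor slot from the bounce buffer
 * (loosely mirrors get_sq_desc_llq()). */
static void *toy_get_desc(struct toy_llq *q)
{
	void *desc;

	if (!q->descs_left)
		return NULL;  /* line is full; caller must flush first */

	desc = q->bounce + q->idx * DESC_SIZE;
	q->idx++;
	q->descs_left--;
	return desc;
}

/* Push the staged line to "device" memory in one burst and start a new line
 * (loosely mirrors ena_com_write_bounce_buffer_to_dev() followed by
 * ena_com_close_bounce_buffer(); memcpy stands in for wmb() + __iowrite64_copy()). */
static void toy_close_line(struct toy_llq *q)
{
	if (!q->idx)
		return;  /* nothing staged, nothing to write */

	memcpy(q->dev_mem, q->bounce, ENTRY_SIZE);
	toy_llq_reset(q);
}

int main(void)
{
	struct toy_llq q;
	uint8_t *d;
	int i;

	toy_llq_reset(&q);

	/* Stage three descriptors, then push the whole line at once. */
	for (i = 0; i < 3; i++) {
		d = toy_get_desc(&q);
		memset(d, 0xa0 + i, DESC_SIZE);
	}
	toy_close_line(&q);

	printf("first byte of each staged descriptor in dev_mem: %02x %02x %02x\n",
	       q.dev_mem[0], q.dev_mem[DESC_SIZE], q.dev_mem[2 * DESC_SIZE]);
	return 0;
}

The same batching is why the patch makes ena_com_sq_update_tail() and ena_com_close_bounce_buffer() return error codes, and why ena_com_prepare_tx() now reports *nb_hw_desc as the tail delta (io_sq->tail - start_tail) instead of counting descriptors by hand.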