Diffstat (limited to 'drivers/net/ethernet/brocade/bna/bna_tx_rx.c')
-rw-r--r-- | drivers/net/ethernet/brocade/bna/bna_tx_rx.c | 251
1 file changed, 211 insertions(+), 40 deletions(-)
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 3c07064b2bc4..85e63546abe3 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -529,13 +529,13 @@ bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
     struct list_head *qe;
     int ret;
 
-    /* Delete multicast entries previousely added */
+    /* First delete multicast entries to maintain the count */
     while (!list_empty(&rxf->mcast_pending_del_q)) {
         bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
         bfa_q_qe_init(qe);
         mac = (struct bna_mac *)qe;
         ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
-        bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+        bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
         if (ret)
             return ret;
     }
@@ -586,7 +586,7 @@ bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
         bfa_q_qe_init(qe);
         mac = (struct bna_mac *)qe;
         ret = bna_rxf_mcast_del(rxf, mac, cleanup);
-        bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+        bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
         if (ret)
             return ret;
     }
@@ -796,20 +796,20 @@ bna_rxf_uninit(struct bna_rxf *rxf)
     while (!list_empty(&rxf->ucast_pending_add_q)) {
         bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
         bfa_q_qe_init(&mac->qe);
-        bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+        bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
     }
 
     if (rxf->ucast_pending_mac) {
         bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
-        bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
-            rxf->ucast_pending_mac);
+        bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
+            rxf->ucast_pending_mac);
         rxf->ucast_pending_mac = NULL;
     }
 
     while (!list_empty(&rxf->mcast_pending_add_q)) {
         bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
         bfa_q_qe_init(&mac->qe);
-        bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+        bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
     }
 
     rxf->rxmode_pending = 0;
@@ -869,7 +869,7 @@ bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
 
     if (rxf->ucast_pending_mac == NULL) {
         rxf->ucast_pending_mac =
-                bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
+            bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
         if (rxf->ucast_pending_mac == NULL)
             return BNA_CB_UCAST_CAM_FULL;
         bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
@@ -900,7 +900,7 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
         return BNA_CB_SUCCESS;
     }
 
-    mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
+    mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
     if (mac == NULL)
         return BNA_CB_MCAST_LIST_FULL;
     bfa_q_qe_init(&mac->qe);
@@ -916,35 +916,92 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
 }
 
 enum bna_cb_status
-bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
+bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
         void (*cbfn)(struct bnad *, struct bna_rx *))
 {
+    struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
     struct bna_rxf *rxf = &rx->rxf;
     struct list_head list_head;
     struct list_head *qe;
     u8 *mcaddr;
-    struct bna_mac *mac;
+    struct bna_mac *mac, *del_mac;
     int i;
 
+    /* Purge the pending_add_q */
+    while (!list_empty(&rxf->ucast_pending_add_q)) {
+        bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
+        bfa_q_qe_init(qe);
+        mac = (struct bna_mac *)qe;
+        bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+    }
+
+    /* Schedule active_q entries for deletion */
+    while (!list_empty(&rxf->ucast_active_q)) {
+        bfa_q_deq(&rxf->ucast_active_q, &qe);
+        mac = (struct bna_mac *)qe;
+        bfa_q_qe_init(&mac->qe);
+
+        del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
+        memcpy(del_mac, mac, sizeof(*del_mac));
+        list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
+        bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+    }
+
     /* Allocate nodes */
     INIT_LIST_HEAD(&list_head);
-    for (i = 0, mcaddr = mclist; i < count; i++) {
-        mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
+    for (i = 0, mcaddr = uclist; i < count; i++) {
+        mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
         if (mac == NULL)
             goto err_return;
         bfa_q_qe_init(&mac->qe);
         memcpy(mac->addr, mcaddr, ETH_ALEN);
         list_add_tail(&mac->qe, &list_head);
-
         mcaddr += ETH_ALEN;
     }
 
+    /* Add the new entries */
+    while (!list_empty(&list_head)) {
+        bfa_q_deq(&list_head, &qe);
+        mac = (struct bna_mac *)qe;
+        bfa_q_qe_init(&mac->qe);
+        list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
+    }
+
+    rxf->cam_fltr_cbfn = cbfn;
+    rxf->cam_fltr_cbarg = rx->bna->bnad;
+    bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+
+    return BNA_CB_SUCCESS;
+
+err_return:
+    while (!list_empty(&list_head)) {
+        bfa_q_deq(&list_head, &qe);
+        mac = (struct bna_mac *)qe;
+        bfa_q_qe_init(&mac->qe);
+        bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+    }
+
+    return BNA_CB_UCAST_CAM_FULL;
+}
+
+enum bna_cb_status
+bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
+        void (*cbfn)(struct bnad *, struct bna_rx *))
+{
+    struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
+    struct bna_rxf *rxf = &rx->rxf;
+    struct list_head list_head;
+    struct list_head *qe;
+    u8 *mcaddr;
+    struct bna_mac *mac, *del_mac;
+    int i;
+
     /* Purge the pending_add_q */
     while (!list_empty(&rxf->mcast_pending_add_q)) {
         bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
         bfa_q_qe_init(qe);
         mac = (struct bna_mac *)qe;
-        bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+        bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
     }
 
     /* Schedule active_q entries for deletion */
@@ -952,7 +1009,26 @@ bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
         bfa_q_deq(&rxf->mcast_active_q, &qe);
         mac = (struct bna_mac *)qe;
         bfa_q_qe_init(&mac->qe);
-        list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
+
+        del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
+
+        memcpy(del_mac, mac, sizeof(*del_mac));
+        list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
+        mac->handle = NULL;
+        bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
+    }
+
+    /* Allocate nodes */
+    INIT_LIST_HEAD(&list_head);
+    for (i = 0, mcaddr = mclist; i < count; i++) {
+        mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
+        if (mac == NULL)
+            goto err_return;
+        bfa_q_qe_init(&mac->qe);
+        memcpy(mac->addr, mcaddr, ETH_ALEN);
+        list_add_tail(&mac->qe, &list_head);
+
+        mcaddr += ETH_ALEN;
     }
 
     /* Add the new entries */
@@ -974,13 +1050,56 @@ err_return:
         bfa_q_deq(&list_head, &qe);
         mac = (struct bna_mac *)qe;
         bfa_q_qe_init(&mac->qe);
-        bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+        bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
     }
 
     return BNA_CB_MCAST_LIST_FULL;
 }
 
 void
+bna_rx_mcast_delall(struct bna_rx *rx,
+        void (*cbfn)(struct bnad *, struct bna_rx *))
+{
+    struct bna_rxf *rxf = &rx->rxf;
+    struct list_head *qe;
+    struct bna_mac *mac, *del_mac;
+    int need_hw_config = 0;
+
+    /* Purge all entries from pending_add_q */
+    while (!list_empty(&rxf->mcast_pending_add_q)) {
+        bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
+        mac = (struct bna_mac *)qe;
+        bfa_q_qe_init(&mac->qe);
+        bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+    }
+
+    /* Schedule all entries in active_q for deletion */
+    while (!list_empty(&rxf->mcast_active_q)) {
+        bfa_q_deq(&rxf->mcast_active_q, &qe);
+        mac = (struct bna_mac *)qe;
+        bfa_q_qe_init(&mac->qe);
+
+        del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
+
+        memcpy(del_mac, mac, sizeof(*del_mac));
+        list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
+        mac->handle = NULL;
+        bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+        need_hw_config = 1;
+    }
+
+    if (need_hw_config) {
+        rxf->cam_fltr_cbfn = cbfn;
+        rxf->cam_fltr_cbarg = rx->bna->bnad;
+        bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+        return;
+    }
+
+    if (cbfn)
+        (*cbfn)(rx->bna->bnad, rx);
+}
+
+void
 bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
 {
     struct bna_rxf *rxf = &rx->rxf;
@@ -1022,7 +1141,7 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
         bfa_q_qe_init(qe);
         mac = (struct bna_mac *)qe;
         bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
-        bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+        bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
         return 1;
     }
 
@@ -1062,11 +1181,13 @@ bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
         bfa_q_qe_init(qe);
         mac = (struct bna_mac *)qe;
         if (cleanup == BNA_SOFT_CLEANUP)
-            bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+            bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
+                mac);
         else {
             bna_bfi_ucast_req(rxf, mac,
                 BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
-            bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+            bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
+                mac);
             return 1;
         }
     }
@@ -1690,6 +1811,7 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
     cfg_req->mh.num_entries = htons(
         bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
 
+    cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
     cfg_req->num_queue_sets = rx->num_paths;
     for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
         i < rx->num_paths;
@@ -1711,8 +1833,17 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
             /* Large/Single RxQ */
             bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
                         &q0->qpt);
-            q0->buffer_size =
-                bna_enet_mtu_get(&rx->bna->enet);
+            if (q0->multi_buffer)
+                /* multi-buffer is enabled by allocating
+                 * a new rx with new set of resources.
+                 * q0->buffer_size should be initialized to
+                 * fragment size.
+                 */
+                cfg_req->rx_cfg.multi_buffer =
+                    BNA_STATUS_T_ENABLED;
+            else
+                q0->buffer_size =
+                    bna_enet_mtu_get(&rx->bna->enet);
             cfg_req->q_cfg[i].ql.rx_buffer_size =
                 htons((u16)q0->buffer_size);
             break;
@@ -2262,8 +2393,8 @@ bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
     u32 hq_depth;
     u32 dq_depth;
 
-    dq_depth = q_cfg->q_depth;
-    hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
+    dq_depth = q_cfg->q0_depth;
+    hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
     cq_depth = dq_depth + hq_depth;
 
     BNA_TO_POWER_OF_2_HIGH(cq_depth);
@@ -2380,10 +2511,10 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
     struct bna_rxq *q0;
     struct bna_rxq *q1;
     struct bna_intr_info *intr_info;
-    u32 page_count;
+    struct bna_mem_descr *hqunmap_mem;
+    struct bna_mem_descr *dqunmap_mem;
     struct bna_mem_descr *ccb_mem;
     struct bna_mem_descr *rcb_mem;
-    struct bna_mem_descr *unmapq_mem;
     struct bna_mem_descr *cqpt_mem;
     struct bna_mem_descr *cswqpt_mem;
     struct bna_mem_descr *cpage_mem;
@@ -2393,8 +2524,10 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
     struct bna_mem_descr *dsqpt_mem;
     struct bna_mem_descr *hpage_mem;
     struct bna_mem_descr *dpage_mem;
-    int i;
-    int dpage_count, hpage_count, rcb_idx;
+    u32 dpage_count, hpage_count;
+    u32 hq_idx, dq_idx, rcb_idx;
+    u32 cq_depth, i;
+    u32 page_count;
 
     if (!bna_rx_res_check(rx_mod, rx_cfg))
         return NULL;
@@ -2402,7 +2535,8 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
     intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
     ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
     rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
-    unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
+    dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
+    hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
    cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
     cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
     cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
@@ -2454,7 +2588,8 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
     }
 
     rx->num_paths = rx_cfg->num_paths;
-    for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) {
+    for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
+        i < rx->num_paths; i++) {
         rxp = bna_rxp_get(rx_mod);
         list_add_tail(&rxp->qe, &rx->rxp_q);
         rxp->type = rx_cfg->rxp_type;
@@ -2497,9 +2632,13 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
         q0->rxp = rxp;
 
         q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
-        q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
-        rcb_idx++;
-        q0->rcb->q_depth = rx_cfg->q_depth;
+        q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
+        rcb_idx++; dq_idx++;
+        q0->rcb->q_depth = rx_cfg->q0_depth;
+        q0->q_depth = rx_cfg->q0_depth;
+        q0->multi_buffer = rx_cfg->q0_multi_buf;
+        q0->buffer_size = rx_cfg->q0_buf_size;
+        q0->num_vecs = rx_cfg->q0_num_vecs;
         q0->rcb->rxq = q0;
         q0->rcb->bnad = bna->bnad;
         q0->rcb->id = 0;
@@ -2519,15 +2658,18 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
             q1->rxp = rxp;
 
             q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
-            q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
-            rcb_idx++;
-            q1->rcb->q_depth = rx_cfg->q_depth;
+            q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
+            rcb_idx++; hq_idx++;
+            q1->rcb->q_depth = rx_cfg->q1_depth;
+            q1->q_depth = rx_cfg->q1_depth;
+            q1->multi_buffer = BNA_STATUS_T_DISABLED;
+            q1->num_vecs = 1;
             q1->rcb->rxq = q1;
             q1->rcb->bnad = bna->bnad;
             q1->rcb->id = 1;
             q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
                     rx_cfg->hds_config.forced_offset
-                    : rx_cfg->small_buff_size;
+                    : rx_cfg->q1_buf_size;
             q1->rx_packets = q1->rx_bytes = 0;
             q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
 
@@ -2542,9 +2684,14 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
 
         /* Setup CQ */
 
         rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
-        rxp->cq.ccb->q_depth = rx_cfg->q_depth +
-            ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
-             0 : rx_cfg->q_depth);
+        cq_depth = rx_cfg->q0_depth +
+            ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
+             0 : rx_cfg->q1_depth);
+        /* if multi-buffer is enabled sum of q0_depth
+         * and q1_depth need not be a power of 2
+         */
+        BNA_TO_POWER_OF_2_HIGH(cq_depth);
+        rxp->cq.ccb->q_depth = cq_depth;
         rxp->cq.ccb->cq = &rxp->cq;
         rxp->cq.ccb->rcb[0] = q0->rcb;
         q0->rcb->ccb = rxp->cq.ccb;
@@ -2670,6 +2817,30 @@ bna_rx_cleanup_complete(struct bna_rx *rx)
     bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
 }
 
+void
+bna_rx_vlan_strip_enable(struct bna_rx *rx)
+{
+    struct bna_rxf *rxf = &rx->rxf;
+
+    if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
+        rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
+        rxf->vlan_strip_pending = true;
+        bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+    }
+}
+
+void
+bna_rx_vlan_strip_disable(struct bna_rx *rx)
+{
+    struct bna_rxf *rxf = &rx->rxf;
+
+    if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
+        rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
+        rxf->vlan_strip_pending = true;
+        bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+    }
+}
+
 enum bna_cb_status
 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
         enum bna_rxmode bitmask,
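
The theme running through the filter hunks is the retirement of the ucam/mcam-specific wrappers (bna_ucam_mod_mac_get/put, bna_mcam_mod_mac_put) in favor of generic bna_cam_mod_mac_get/put calls that take an explicit queue, either the module's free_q or the new del_q. Active entries are no longer moved onto pending_del_q directly; each one is copied into a node drawn from the del_q pool, the copy is queued for deletion, and the original node returns to free_q immediately, so the free pool does not shrink while deletions are in flight. The userspace sketch below illustrates that copy-then-recycle pattern; the list helpers and struct are simplified stand-ins for the kernel's list_head and the driver's bfa_q_*/bna_cam_mod_* APIs, not the real implementations.

/*
 * Minimal sketch of the del_q pattern this diff introduces. All names
 * here are simplified analogues of the driver's, not the actual code.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct list_head { struct list_head *next, *prev; };

static void init_list_head(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
    n->prev = h->prev;
    n->next = h;
    h->prev->next = n;
    h->prev = n;
}

static void list_del(struct list_head *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
}

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct bna_mac {
    struct list_head qe;
    unsigned char addr[6];
};

/* Analogue of bna_cam_mod_mac_get(): pop the first node off a queue. */
static struct bna_mac *cam_mac_get(struct list_head *q)
{
    struct bna_mac *mac;

    if (list_empty(q))
        return NULL;
    mac = container_of(q->next, struct bna_mac, qe);
    list_del(&mac->qe);
    return mac;
}

/* Analogue of bna_cam_mod_mac_put(): push a node onto a queue. */
static void cam_mac_put(struct list_head *q, struct bna_mac *mac)
{
    list_add_tail(&mac->qe, q);
}

int main(void)
{
    struct bna_mac pool[2], del_pool[2];
    struct list_head free_q, del_q, active_q, pending_del_q;
    struct bna_mac *mac, *del_mac;
    size_t i;

    init_list_head(&free_q);
    init_list_head(&del_q);
    init_list_head(&active_q);
    init_list_head(&pending_del_q);
    for (i = 0; i < 2; i++) {
        cam_mac_put(&free_q, &pool[i]);
        cam_mac_put(&del_q, &del_pool[i]);
    }

    /* Pretend one address is currently programmed in the CAM. */
    mac = cam_mac_get(&free_q);
    memcpy(mac->addr, "\x00\x11\x22\x33\x44\x55", 6);
    cam_mac_put(&active_q, mac);

    /* Schedule active entries for deletion, as the listset paths do:
     * copy into a del_q node, queue the copy, recycle the original.
     */
    while ((mac = cam_mac_get(&active_q)) != NULL) {
        del_mac = cam_mac_get(&del_q);  /* pool sized so this can't fail */
        memcpy(del_mac, mac, sizeof(*del_mac));
        cam_mac_put(&pending_del_q, del_mac);
        cam_mac_put(&free_q, mac);      /* node reusable immediately */
    }

    printf("pending_del_q empty: %d, free_q empty: %d\n",
           list_empty(&pending_del_q), list_empty(&free_q));
    return 0;
}

The design point is that del_q decouples the lifetime of a pending hardware deletion from the node that a concurrent list update may want to reuse, which is why bna_rx_ucast_listset and bna_rx_mcast_listset can purge and repopulate in one pass.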
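The bna_rx_res_req and bna_rx_create hunks split the single q_depth into independent q0_depth and q1_depth. Since their sum is no longer guaranteed to be a power of two when multi-buffer RX is enabled, the completion-queue depth is now rounded up explicitly with BNA_TO_POWER_OF_2_HIGH before being programmed. The function below is a hypothetical stand-in for that macro, shown only to illustrate the rounding:

#include <stdio.h>

/* Stand-in for the driver's BNA_TO_POWER_OF_2_HIGH macro: round a
 * queue depth up to the next power of two.
 */
static unsigned int to_power_of_2_high(unsigned int x)
{
    unsigned int p = 1;

    while (p < x)
        p <<= 1;
    return p;
}

int main(void)
{
    /* Example depths; real values come from rx_cfg->q0_depth/q1_depth. */
    unsigned int q0_depth = 1024, q1_depth = 512;
    unsigned int cq_depth = q0_depth + q1_depth; /* 1536, not a power of 2 */

    printf("cq_depth %u -> %u\n", cq_depth, to_power_of_2_high(cq_depth));
    /* prints: cq_depth 1536 -> 2048 */
    return 0;
}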