-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c         48
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h           3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c          6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.c              33
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c            6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.h           11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c               10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c             185
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c             66
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c             8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c               4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h               2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c       30
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h        2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c               10
15 files changed, 346 insertions, 78 deletions
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 50939ad0e760..ad5941ec7b11 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -611,8 +611,6 @@ static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
-
status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
if (!status) {
u16 i;
@@ -3907,7 +3905,18 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
* Without setting the generic section as valid in valid_sections, the
* Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
*/
- buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
+ buf->txqs[0].info.valid_sections =
+ ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
+ ICE_AQC_ELEM_VALID_EIR;
+ buf->txqs[0].info.generic = 0;
+ buf->txqs[0].info.cir_bw.bw_profile_idx =
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+ buf->txqs[0].info.cir_bw.bw_alloc =
+ cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
+ buf->txqs[0].info.eir_bw.bw_profile_idx =
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+ buf->txqs[0].info.eir_bw.bw_alloc =
+ cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
/* add the LAN queue */
status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
@@ -4357,3 +4366,36 @@ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
return false;
}
+
+/**
+ * ice_aq_set_lldp_mib - Set the LLDP MIB
+ * @hw: pointer to the HW struct
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buf: pointer to the caller-supplied buffer to store the MIB block
+ * @buf_size: size of the buffer (in bytes)
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set the LLDP MIB. (0x0A08)
+ */
+enum ice_status
+ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_set_local_mib *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_set_mib;
+
+ if (buf_size == 0 || !buf)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
+
+ desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
+ desc.datalen = cpu_to_le16(buf_size);
+
+ cmd->type = mib_type;
+ cmd->length = cpu_to_le16(buf_size);
+
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 906113db6be6..3ebb973878c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -172,4 +172,7 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
+enum ice_status
+ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 1e18021aa073..1f46a7828be8 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -312,9 +312,10 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
do { \
- int i; \
/* free descriptors */ \
- if ((qi)->ring.r.ring##_bi) \
+ if ((qi)->ring.r.ring##_bi) { \
+ int i; \
+ \
for (i = 0; i < (qi)->num_##ring##_entries; i++) \
if ((qi)->ring.r.ring##_bi[i].pa) { \
dmam_free_coherent(ice_hw_to_dev(hw), \
@@ -325,6 +326,7 @@ do { \
(qi)->ring.r.ring##_bi[i].pa = 0;\
(qi)->ring.r.ring##_bi[i].size = 0;\
} \
+ } \
/* free the buffer info list */ \
if ((qi)->ring.cmd_buf) \
devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 2cecc9d08005..2a3147ee0bbb 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -135,39 +135,6 @@ ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
}
/**
- * ice_aq_set_lldp_mib - Set the LLDP MIB
- * @hw: pointer to the HW struct
- * @mib_type: Local, Remote or both Local and Remote MIBs
- * @buf: pointer to the caller-supplied buffer to store the MIB block
- * @buf_size: size of the buffer (in bytes)
- * @cd: pointer to command details structure or NULL
- *
- * Set the LLDP MIB. (0x0A08)
- */
-static enum ice_status
-ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
- struct ice_sq_cd *cd)
-{
- struct ice_aqc_lldp_set_local_mib *cmd;
- struct ice_aq_desc desc;
-
- cmd = &desc.params.lldp_set_mib;
-
- if (buf_size == 0 || !buf)
- return ICE_ERR_PARAM;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
-
- desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
- desc.datalen = cpu_to_le16(buf_size);
-
- cmd->type = mib_type;
- cmd->length = cpu_to_le16(buf_size);
-
- return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
-}
-
-/**
* ice_get_dcbx_status
* @hw: pointer to the HW struct
*
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 979af197f8a3..36abd6b7280c 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -444,10 +444,6 @@ void ice_dcb_rebuild(struct ice_pf *pf)
goto dcb_error;
}
- /* If DCB was not enabled previously, we are done */
- if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
- return;
-
mutex_lock(&pf->tc_mutex);
if (!pf->hw.port_info->is_sw_lldp)
@@ -467,7 +463,7 @@ void ice_dcb_rebuild(struct ice_pf *pf)
}
}
- dev_info(dev, "DCB restored after reset\n");
+ dev_info(dev, "DCB info restored\n");
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
dev_err(dev, "Query Port ETS failed\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 323238669572..35c21d9ae009 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -53,6 +53,12 @@ ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
{
tlan_ctx->cgd_num = ring->dcb_tc;
}
+
+static inline bool ice_is_dcb_active(struct ice_pf *pf)
+{
+ return (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) ||
+ test_bit(ICE_FLAG_DCB_ENA, pf->flags));
+}
#else
#define ice_dcb_rebuild(pf) do {} while (0)
@@ -95,6 +101,11 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
return 0;
}
+static inline bool ice_is_dcb_active(struct ice_pf __always_unused *pf)
+{
+ return false;
+}
+
static inline bool
ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf,
unsigned int __always_unused txqueue)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 8f6a191839f1..84202c814c3b 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -127,8 +127,14 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
case ICE_VSI_PF:
case ICE_VSI_CTRL:
case ICE_VSI_LB:
- vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
- vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
+ /* a user could change the values of num_[tr]x_desc using
+ * ethtool -G so we should keep those values instead of
+ * overwriting them with the defaults.
+ */
+ if (!vsi->num_rx_desc)
+ vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
+ if (!vsi->num_tx_desc)
+ vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
break;
default:
dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 646c2cbc6904..c0bde24ab344 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -767,6 +767,100 @@ static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
}
/**
+ * ice_set_dflt_mib - send a default config MIB to the FW
+ * @pf: private PF struct
+ *
+ * This function sends a default configuration MIB to the FW.
+ *
+ * If this function errors out at any point, the driver is still able to
+ * function. The main impact is that LFC may not operate as expected.
+ * Therefore an error here is only reported with a debug message, and the
+ * driver rebuild/re-enable continues.
+ */
+static void ice_set_dflt_mib(struct ice_pf *pf)
+{
+ struct device *dev;
+ u8 mib_type, *buf, *lldpmib = NULL;
+ u16 len, typelen, offset = 0;
+ struct ice_lldp_org_tlv *tlv;
+ struct ice_hw *hw;
+ u32 ouisubtype;
+
+ if (!pf)
+ return;
+
+ dev = ice_pf_to_dev(pf);
+
+ hw = &pf->hw;
+ mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
+ lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
+ if (!lldpmib) {
+ dev_dbg(dev, "%s Failed to allocate MIB memory\n",
+ __func__);
+ return;
+ }
+
+ /* Add ETS CFG TLV */
+ tlv = (struct ice_lldp_org_tlv *)lldpmib;
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_ETS_TLV_LEN);
+ tlv->typelen = htons(typelen);
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_ETS_CFG);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ buf = tlv->tlvinfo;
+ buf[0] = 0;
+
+ /* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
+ * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
+ * Octets 13 - 20 are TSA values - leave as zeros
+ */
+ buf[5] = 0x64;
+ len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
+ offset += len + 2;
+ tlv = (struct ice_lldp_org_tlv *)
+ ((char *)tlv + sizeof(tlv->typelen) + len);
+
+ /* Add ETS REC TLV */
+ buf = tlv->tlvinfo;
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_ETS_REC);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* First octet of buf is reserved
+ * Octets 1 - 4 map UP to TC - all UPs map to zero
+ * Octets 5 - 12 are BW values - set TC 0 to 100%.
+ * Octets 13 - 20 are TSA value - leave as zeros
+ */
+ buf[5] = 0x64;
+ offset += len + 2;
+ tlv = (struct ice_lldp_org_tlv *)
+ ((char *)tlv + sizeof(tlv->typelen) + len);
+
+ /* Add PFC CFG TLV */
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_PFC_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_PFC_CFG);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* Octet 1 = PFC cap of 8 TCs; enable bitmap (octet 2) left zero - PFC disabled */
+ buf[0] = 0x08;
+ len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
+ offset += len + 2;
+
+ if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
+ dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
+
+ kfree(lldpmib);
+}
+
+/**
* ice_link_event - process the link event
* @pf: PF that the link event is associated with
* @pi: port_info for the port that the link event is associated with
@@ -800,6 +894,12 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n",
pi->lport);
+ /* Check if the link state is up after updating link info, and treat
+ * this event as an UP event since the link is actually UP now.
+ */
+ if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
+ link_up = true;
+
vsi = ice_get_main_vsi(pf);
if (!vsi || !vsi->port_info)
return -EINVAL;
@@ -821,7 +921,13 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (link_up == old_link && link_speed == old_link_speed)
return result;
- ice_dcb_rebuild(pf);
+ if (ice_is_dcb_active(pf)) {
+ if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
+ ice_dcb_rebuild(pf);
+ } else {
+ if (link_up)
+ ice_set_dflt_mib(pf);
+ }
ice_vsi_link_event(vsi, link_up);
ice_print_link_msg(vsi, link_up);
@@ -3478,6 +3584,60 @@ done:
}
/**
+ * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
+ * @pf: PF to configure
+ *
+ * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
+ * VSI can still Tx/Rx VLAN tagged packets.
+ */
+static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ struct ice_vsi_ctx *ctxt;
+ enum ice_status status;
+ struct ice_hw *hw;
+
+ if (!vsi)
+ return;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return;
+
+ hw = &pf->hw;
+ ctxt->info = vsi->info;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SECURITY_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+
+ /* disable VLAN anti-spoof */
+ ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+
+ /* disable VLAN pruning and keep all other settings */
+ ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+
+ /* allow all VLANs on Tx and don't strip on Rx */
+ ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
+ ICE_AQ_VSI_VLAN_EMOD_NOTHING;
+
+ status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (status) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ } else {
+ vsi->info.sec_flags = ctxt->info.sec_flags;
+ vsi->info.sw_flags2 = ctxt->info.sw_flags2;
+ vsi->info.vlan_flags = ctxt->info.vlan_flags;
+ }
+
+ kfree(ctxt);
+}
+
+/**
* ice_log_pkg_init - log result of DDP package load
* @hw: pointer to hardware info
* @status: status of package load
@@ -3974,7 +4134,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (err) {
dev_err(dev, "probe failed sending driver version %s. error: %d\n",
UTS_RELEASE, err);
- goto err_alloc_sw_unroll;
+ goto err_send_version_unroll;
}
/* since everything is good, start the service timer */
@@ -3983,19 +4143,19 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
err = ice_init_link_events(pf->hw.port_info);
if (err) {
dev_err(dev, "ice_init_link_events failed: %d\n", err);
- goto err_alloc_sw_unroll;
+ goto err_send_version_unroll;
}
err = ice_init_nvm_phy_type(pf->hw.port_info);
if (err) {
dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
- goto err_alloc_sw_unroll;
+ goto err_send_version_unroll;
}
err = ice_update_link_info(pf->hw.port_info);
if (err) {
dev_err(dev, "ice_update_link_info failed: %d\n", err);
- goto err_alloc_sw_unroll;
+ goto err_send_version_unroll;
}
ice_init_link_dflt_override(pf->hw.port_info);
@@ -4006,7 +4166,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
err = ice_init_phy_user_cfg(pf->hw.port_info);
if (err) {
dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
- goto err_alloc_sw_unroll;
+ goto err_send_version_unroll;
}
if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
@@ -4033,9 +4193,10 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
/* Disable WoL at init, wait for user to enable */
device_set_wakeup_enable(dev, false);
- /* If no DDP driven features have to be setup, we are done with probe */
- if (ice_is_safe_mode(pf))
+ if (ice_is_safe_mode(pf)) {
+ ice_set_safe_mode_vlan_cfg(pf);
goto probe_done;
+ }
/* initialize DDP driven features */
@@ -4059,6 +4220,8 @@ probe_done:
clear_bit(__ICE_DOWN, pf->state);
return 0;
+err_send_version_unroll:
+ ice_vsi_release_all(pf);
err_alloc_sw_unroll:
ice_devlink_destroy_port(pf);
set_bit(__ICE_SERVICE_DIS, pf->state);
@@ -4517,6 +4680,8 @@ static void ice_pci_err_resume(struct pci_dev *pdev)
return;
}
+ ice_restore_all_vfs_msi_state(pdev);
+
ice_do_reset(pf, ICE_RESET_PFR);
ice_service_task_restart(pf);
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
@@ -5813,10 +5978,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
if (err)
goto err_sched_init_port;
- err = ice_update_link_info(hw->port_info);
- if (err)
- dev_err(dev, "Get link status error %d\n", err);
-
/* start misc vector */
err = ice_req_irq_msix_misc(pf);
if (err) {
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 1c29cfa1cf33..355f727563e4 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1276,6 +1276,53 @@ ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
}
/**
+ * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node
+ * @pi: port information structure
+ * @vsi_node: VSI node of the scheduler tree
+ * @qgrp_node: first queue group node identified for scanning
+ * @owner: LAN or RDMA
+ *
+ * This function retrieves a free LAN or RDMA queue group node by scanning
+ * qgrp_node and its siblings for the queue group with the fewest number
+ * of queues currently assigned.
+ */
+static struct ice_sched_node *
+ice_sched_get_free_qgrp(struct ice_port_info *pi,
+ struct ice_sched_node *vsi_node,
+ struct ice_sched_node *qgrp_node, u8 owner)
+{
+ struct ice_sched_node *min_qgrp;
+ u8 min_children;
+
+ if (!qgrp_node)
+ return qgrp_node;
+ min_children = qgrp_node->num_children;
+ if (!min_children)
+ return qgrp_node;
+ min_qgrp = qgrp_node;
+ /* scan all queue group siblings until we find the node with the
+ * fewest children. This way every queue group node carries an equal
+ * share of active queues and bandwidth is distributed evenly across
+ * them.
+ */
+ while (qgrp_node) {
+ /* make sure the qgroup node is part of the VSI subtree */
+ if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
+ if (qgrp_node->num_children < min_children &&
+ qgrp_node->owner == owner) {
+ /* replace the new min queue group node */
+ min_qgrp = qgrp_node;
+ min_children = min_qgrp->num_children;
+ /* break if it has no children, */
+ if (!min_children)
+ break;
+ }
+ qgrp_node = qgrp_node->sibling;
+ }
+ return min_qgrp;
+}
+
+/**
* ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
* @pi: port information structure
* @vsi_handle: software VSI handle
@@ -1288,7 +1335,7 @@ struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u8 owner)
{
- struct ice_sched_node *vsi_node, *qgrp_node = NULL;
+ struct ice_sched_node *vsi_node, *qgrp_node;
struct ice_vsi_ctx *vsi_ctx;
u16 max_children;
u8 qgrp_layer;
@@ -1302,7 +1349,7 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
vsi_node = vsi_ctx->sched.vsi_node[tc];
/* validate invalid VSI ID */
if (!vsi_node)
- goto lan_q_exit;
+ return NULL;
/* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
@@ -1315,8 +1362,8 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
qgrp_node = qgrp_node->sibling;
}
-lan_q_exit:
- return qgrp_node;
+ /* Select the best queue group */
+ return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner);
}
/**
@@ -2153,8 +2200,8 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
hw = pi->hw;
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
list_entry)
- if (rl_prof_elem->profile.flags == profile_type &&
- rl_prof_elem->bw == bw)
+ if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
+ profile_type && rl_prof_elem->bw == bw)
/* Return existing profile ID info */
return rl_prof_elem;
@@ -2384,7 +2431,8 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
/* Check the existing list for RL profile */
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
list_entry)
- if (rl_prof_elem->profile.flags == profile_type &&
+ if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
+ profile_type &&
le16_to_cpu(rl_prof_elem->profile.profile_id) ==
profile_id) {
if (rl_prof_elem->prof_id_ref)
@@ -2546,8 +2594,8 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
return 0;
return ice_sched_rm_rl_profile(pi, layer_num,
- rl_prof_info->profile.flags,
- old_id);
+ rl_prof_info->profile.flags &
+ ICE_AQC_RL_PROFILE_TYPE_M, old_id);
}
/**
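Aside, for readers of the scheduler hunks above: the change is to compare only the profile-type bits of profile.flags instead of the whole byte. A minimal standalone sketch of the difference follows; the mask and bit values are placeholders rather than the real ICE_AQC_RL_PROFILE_* definitions from ice_adminq_cmd.h.

#include <stdio.h>

/* Placeholder values standing in for ICE_AQC_RL_PROFILE_TYPE_M and the
 * CIR profile type; the real definitions live in ice_adminq_cmd.h.
 */
#define RL_PROFILE_TYPE_M   0x3
#define RL_PROFILE_TYPE_CIR 0x0
#define RL_PROFILE_ATTR_BIT 0x8 /* an unrelated attribute bit in the same byte */

int main(void)
{
	unsigned char flags = RL_PROFILE_TYPE_CIR | RL_PROFILE_ATTR_BIT;

	/* old check: fails as soon as any non-type bit is set in flags */
	printf("unmasked match: %d\n", flags == RL_PROFILE_TYPE_CIR);

	/* new check: isolates the type field before comparing */
	printf("masked match:   %d\n",
	       (flags & RL_PROFILE_TYPE_M) == RL_PROFILE_TYPE_CIR);
	return 0;
}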
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index ccbe1cc64295..c3a6c41385ee 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -495,6 +495,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
+ enum ice_status status;
if (opc != ice_aqc_opc_add_sw_rules &&
opc != ice_aqc_opc_update_sw_rules &&
@@ -506,7 +507,12 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
desc.params.sw_rules.num_rules_fltr_entry_index =
cpu_to_le16(num_rules);
- return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
+ status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
+ if (opc != ice_aqc_opc_add_sw_rules &&
+ hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
+ status = ICE_ERR_DOES_NOT_EXIST;
+
+ return status;
}
/* ice_init_port_info - Initialize port_info with switch configuration data
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index abdb137c8bb7..a508d4f463e9 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -509,8 +509,8 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
return 0;
}
-static unsigned int ice_rx_frame_truesize(struct ice_ring *rx_ring,
- unsigned int size)
+static unsigned int
+ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
{
unsigned int truesize;
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index e4349475790f..1eb83d9b0546 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -409,7 +409,7 @@ enum ice_rl_type {
#define ICE_SCHED_DFLT_BW 0xFFFFFFFF /* unlimited */
#define ICE_SCHED_DFLT_RL_PROF_ID 0
#define ICE_SCHED_NO_SHARED_RL_PROF_ID 0xFFFF
-#define ICE_SCHED_DFLT_BW_WT 1
+#define ICE_SCHED_DFLT_BW_WT 4
#define ICE_SCHED_INVAL_PROF_ID 0xFFFF
#define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024) /* in bytes (15k) */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 9df5ceb26ab9..7061730f0f37 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -4071,3 +4071,33 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)
}
}
}
+
+/**
+ * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
+ * @pdev: pointer to a pci_dev structure
+ *
+ * Called when recovering from a PF FLR to restore interrupt capability to
+ * the VFs.
+ */
+void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
+{
+ struct pci_dev *vfdev;
+ u16 vf_id;
+ int pos;
+
+ if (!pci_num_vf(pdev))
+ return;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
+ &vf_id);
+ vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
+ while (vfdev) {
+ if (vfdev->is_virtfn && vfdev->physfn == pdev)
+ pci_restore_msi_state(vfdev);
+ vfdev = pci_get_device(pdev->vendor, vf_id,
+ vfdev);
+ }
+ }
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 67aa9110fdd1..049e0b583383 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -114,6 +114,7 @@ void ice_vc_notify_link_state(struct ice_pf *pf);
void ice_vc_notify_reset(struct ice_pf *pf);
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
bool ice_reset_vf(struct ice_vf *vf, bool is_vflr);
+void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
@@ -146,6 +147,7 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
#define ice_vf_lan_overflow_event(pf, event) do {} while (0)
#define ice_print_vfs_mdd_events(pf) do {} while (0)
#define ice_print_vf_rx_mdd_event(vf) do {} while (0)
+#define ice_restore_all_vfs_msi_state(pdev) do {} while (0)
static inline bool
ice_reset_all_vfs(struct ice_pf __always_unused *pf,
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 6badfd62dc63..87862918bc7a 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -706,8 +706,6 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
if (tx_desc) {
ice_xdp_ring_update_tail(xdp_ring);
xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
- if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
- xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
}
return budget > 0 && work_done;
@@ -783,12 +781,8 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
if (xsk_frames)
xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
- if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) {
- if (xdp_ring->next_to_clean == xdp_ring->next_to_use)
- xsk_set_tx_need_wakeup(xdp_ring->xsk_umem);
- else
- xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
- }
+ if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
+ xsk_set_tx_need_wakeup(xdp_ring->xsk_umem);
ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
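For context on the ice_xsk.c change above: the driver now always sets the Tx need_wakeup flag after cleaning, leaving it to the AF_XDP application to kick the kernel when it has more to send. Below is a rough userspace sketch of that kick using libbpf's xsk.h helpers (newer stacks expose the same helpers via libxdp); the function name and the trimmed error handling are illustrative only.

#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>
#include <bpf/xsk.h>

/* Kick the kernel to process the Tx ring when the driver has flagged it
 * as needing a wakeup (the flag this patch now sets after every clean).
 */
void kick_tx_if_needed(struct xsk_socket *xsk, struct xsk_ring_prod *tx)
{
	int ret;

	if (!xsk_ring_prod__needs_wakeup(tx))
		return; /* driver is still actively processing the Tx ring */

	/* conventional zero-length sendto() wakeup for AF_XDP Tx */
	ret = sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
	if (ret < 0 && errno != EAGAIN && errno != EBUSY && errno != ENOBUFS)
		perror("AF_XDP Tx wakeup"); /* unexpected failure */
}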