Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_lib.c')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 443
1 file changed, 287 insertions(+), 156 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index d13c7fc8fb0a..82e2ce23df3d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -158,6 +158,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
if (vsi->type == ICE_VSI_VF)
vsi->vf_id = vf_id;
+ else
+ vsi->vf_id = ICE_INVAL_VFID;
switch (vsi->type) {
case ICE_VSI_PF:
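[Note: with this hunk, vf_id is always initialized, so a control VSI's vf_id doubles as the discriminator between the PF control VSI and a per-VF control VSI. A hedged sketch of that test (hypothetical helper, not part of this patch):

static bool ice_vsi_is_vf_ctrl(struct ice_vsi *vsi)
{
	/* hypothetical helper: a control VSI with a valid vf_id is a
	 * per-VF control VSI; the PF control VSI keeps ICE_INVAL_VFID
	 */
	return vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID;
}
]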
@@ -343,6 +345,9 @@ static int ice_vsi_clear(struct ice_vsi *vsi)
pf->vsi[vsi->idx] = NULL;
if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL)
pf->next_vsi = vsi->idx;
+ if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL &&
+ vsi->vf_id != ICE_INVAL_VFID)
+ pf->next_vsi = vsi->idx;
ice_vsi_free_arrays(vsi);
mutex_unlock(&pf->sw_mutex);
@@ -382,6 +387,8 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
if (!q_vector->tx.ring && !q_vector->rx.ring)
return IRQ_HANDLED;
+ q_vector->total_events++;
+
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
@@ -419,7 +426,7 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
vsi->type = vsi_type;
vsi->back = pf;
- set_bit(__ICE_DOWN, vsi->state);
+ set_bit(ICE_VSI_DOWN, vsi->state);
if (vsi_type == ICE_VSI_VF)
ice_vsi_set_num_qs(vsi, vf_id);
@@ -454,8 +461,8 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
goto unlock_pf;
}
- if (vsi->type == ICE_VSI_CTRL) {
- /* Use the last VSI slot as the index for the control VSI */
+ if (vsi->type == ICE_VSI_CTRL && vf_id == ICE_INVAL_VFID) {
+ /* Use the last VSI slot as the index for PF control VSI */
vsi->idx = pf->num_alloc_vsi - 1;
pf->ctrl_vsi_idx = vsi->idx;
pf->vsi[vsi->idx] = vsi;
@@ -468,6 +475,9 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
pf->next_vsi);
}
+
+ if (vsi->type == ICE_VSI_CTRL && vf_id != ICE_INVAL_VFID)
+ pf->vf[vf_id].ctrl_vsi_idx = vsi->idx;
goto unlock_pf;
err_rings:
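[Note: the PF control VSI keeps the fixed last slot (pf->ctrl_vsi_idx), while each VF control VSI takes an ordinary slot and records it in pf->vf[vf_id].ctrl_vsi_idx. A minimal lookup sketch under that layout, assuming ctrl_vsi_idx reads ICE_NO_VSI when unset, as the release path later in this patch implies:

static struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vf *vf)
{
	/* illustration only: resolve a VF's control VSI, or NULL */
	if (vf->ctrl_vsi_idx == ICE_NO_VSI)
		return NULL;

	return pf->vsi[vf->ctrl_vsi_idx];
}
]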
@@ -506,7 +516,7 @@ static int ice_alloc_fd_res(struct ice_vsi *vsi)
if (!b_val)
return -EPERM;
- if (vsi->type != ICE_VSI_PF)
+ if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF))
return -EPERM;
if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
@@ -517,6 +527,13 @@ static int ice_alloc_fd_res(struct ice_vsi *vsi)
/* each VSI gets same "best_effort" quota */
vsi->num_bfltr = b_val;
+ if (vsi->type == ICE_VSI_VF) {
+ vsi->num_gfltr = 0;
+
+ /* each VSI gets same "best_effort" quota */
+ vsi->num_bfltr = b_val;
+ }
+
return 0;
}
@@ -729,11 +746,10 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
*/
static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
- u16 offset = 0, qmap = 0, tx_count = 0;
+ u16 offset = 0, qmap = 0, tx_count = 0, pow = 0;
+ u16 num_txq_per_tc, num_rxq_per_tc;
u16 qcount_tx = vsi->alloc_txq;
u16 qcount_rx = vsi->alloc_rxq;
- u16 tx_numq_tc, rx_numq_tc;
- u16 pow = 0, max_rss = 0;
bool ena_tc0 = false;
u8 netdev_tc = 0;
int i;
@@ -751,12 +767,15 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
vsi->tc_cfg.ena_tc |= 1;
}
- rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc;
- if (!rx_numq_tc)
- rx_numq_tc = 1;
- tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc;
- if (!tx_numq_tc)
- tx_numq_tc = 1;
+ num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC);
+ if (!num_rxq_per_tc)
+ num_rxq_per_tc = 1;
+ num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc;
+ if (!num_txq_per_tc)
+ num_txq_per_tc = 1;
+
+ /* find the (rounded up) power-of-2 of qcount */
+ pow = (u16)order_base_2(num_rxq_per_tc);
/* TC mapping is a function of the number of Rx queues assigned to the
* VSI for each traffic class and the offset of these queues.
@@ -769,26 +788,6 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
*
* Setup number and offset of Rx queues for all TCs for the VSI
*/
-
- qcount_rx = rx_numq_tc;
-
- /* qcount will change if RSS is enabled */
- if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
- if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
- if (vsi->type == ICE_VSI_PF)
- max_rss = ICE_MAX_LG_RSS_QS;
- else
- max_rss = ICE_MAX_RSS_QS_PER_VF;
- qcount_rx = min_t(u16, rx_numq_tc, max_rss);
- if (!vsi->req_rxq)
- qcount_rx = min_t(u16, qcount_rx,
- vsi->rss_size);
- }
- }
-
- /* find the (rounded up) power-of-2 of qcount */
- pow = (u16)order_base_2(qcount_rx);
-
ice_for_each_traffic_class(i) {
if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
/* TC is not enabled */
@@ -802,16 +801,16 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
/* TC is enabled */
vsi->tc_cfg.tc_info[i].qoffset = offset;
- vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
- vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc;
+ vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc;
+ vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc;
vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
ICE_AQ_VSI_TC_Q_OFFSET_M) |
((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
ICE_AQ_VSI_TC_Q_NUM_M);
- offset += qcount_rx;
- tx_count += tx_numq_tc;
+ offset += num_rxq_per_tc;
+ tx_count += num_txq_per_tc;
ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
}
@@ -824,7 +823,7 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
if (offset)
vsi->num_rxq = offset;
else
- vsi->num_rxq = qcount_rx;
+ vsi->num_rxq = num_rxq_per_tc;
vsi->num_txq = tx_count;
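[Note: each 16-bit tc_mapping word packs the TC's first queue and a power-of-2 queue count; order_base_2() supplies the rounded-up exponent, e.g. 6 Rx queues per TC give pow = 3 (eight qmap slots). A minimal encoding sketch using the macros from this hunk:

static __le16 ice_encode_tc_qmap(u16 offset, u16 pow)
{
	/* pack queue offset and power-of-2 queue count exactly as above */
	u16 qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
		    ICE_AQ_VSI_TC_Q_OFFSET_M) |
		   ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
		    ICE_AQ_VSI_TC_Q_NUM_M);

	return cpu_to_le16(qmap);
}
]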
@@ -856,7 +855,8 @@ static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
u8 dflt_q_group, dflt_q_prio;
u16 dflt_q, report_q, val;
- if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL)
+ if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL &&
+ vsi->type != ICE_VSI_VF)
return;
val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
@@ -1179,7 +1179,24 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
num_q_vectors = vsi->num_q_vectors;
/* reserve slots from OS requested IRQs */
- base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, vsi->idx);
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) {
+ struct ice_vf *vf;
+ int i;
+
+ ice_for_each_vf(pf, i) {
+ vf = &pf->vf[i];
+ if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI) {
+ base = pf->vsi[vf->ctrl_vsi_idx]->base_vector;
+ break;
+ }
+ }
+ if (i == pf->num_alloc_vfs)
+ base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
+ ICE_RES_VF_CTRL_VEC_ID);
+ } else {
+ base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
+ vsi->idx);
+ }
if (base < 0) {
dev_err(dev, "%d MSI-X interrupts available. %s %d failed to get %d MSI-X vectors\n",
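[Note: all VF control VSIs share one MSI-X reservation: the first allocation takes vectors at ICE_RES_VF_CTRL_VEC_ID, later ones borrow that base_vector. The same "is any other VF still using it" test reappears in ice_vsi_release() below; a hypothetical helper expressing it:

static bool ice_vf_ctrl_vsi_in_use(struct ice_pf *pf, u16 excl_vf_id)
{
	int i;

	/* hypothetical helper: true if any VF other than excl_vf_id
	 * still owns a control VSI (and thus the shared vectors)
	 */
	ice_for_each_vf(pf, i)
		if (i != excl_vf_id && pf->vf[i].ctrl_vsi_idx != ICE_NO_VSI)
			return true;

	return false;
}
]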
@@ -1296,14 +1313,13 @@ err_out:
* LUT, while in the event of enable request for RSS, it will reconfigure RSS
* LUT.
*/
-int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
+void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
{
- int err = 0;
u8 *lut;
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
- return -ENOMEM;
+ return;
if (ena) {
if (vsi->rss_lut_user)
@@ -1313,9 +1329,8 @@ int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
vsi->rss_size);
}
- err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size);
+ ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
kfree(lut);
- return err;
}
/**
@@ -1324,12 +1339,10 @@ int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
*/
static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
{
- struct ice_aqc_get_set_rss_keys *key;
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
- int err = 0;
- u8 *lut;
+ u8 *lut, *key;
+ int err;
dev = ice_pf_to_dev(pf);
vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
@@ -1343,37 +1356,26 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
else
ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
- status = ice_aq_set_rss_lut(&pf->hw, vsi->idx, vsi->rss_lut_type, lut,
- vsi->rss_table_size);
-
- if (status) {
- dev_err(dev, "set_rss_lut failed, error %s\n",
- ice_stat_str(status));
- err = -EIO;
+ err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
+ if (err) {
+ dev_err(dev, "set_rss_lut failed, error %d\n", err);
goto ice_vsi_cfg_rss_exit;
}
- key = kzalloc(sizeof(*key), GFP_KERNEL);
+ key = kzalloc(ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE, GFP_KERNEL);
if (!key) {
err = -ENOMEM;
goto ice_vsi_cfg_rss_exit;
}
if (vsi->rss_hkey_user)
- memcpy(key,
- (struct ice_aqc_get_set_rss_keys *)vsi->rss_hkey_user,
- ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
+ memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
else
- netdev_rss_key_fill((void *)key,
- ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
+ netdev_rss_key_fill((void *)key, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
- status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);
-
- if (status) {
- dev_err(dev, "set_rss_key failed, error %s\n",
- ice_stat_str(status));
- err = -EIO;
- }
+ err = ice_set_rss_key(vsi, key);
+ if (err)
+ dev_err(dev, "set_rss_key failed, error %d\n", err);
kfree(key);
ice_vsi_cfg_rss_exit:
@@ -1502,13 +1504,13 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
*/
bool ice_pf_state_is_nominal(struct ice_pf *pf)
{
- DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };
+ DECLARE_BITMAP(check_bits, ICE_STATE_NBITS) = { 0 };
if (!pf)
return false;
- bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
- if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
+ bitmap_set(check_bits, 0, ICE_STATE_NOMINAL_CHECK_BITS);
+ if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS))
return false;
return true;
@@ -1773,7 +1775,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
* This function converts a decimal interrupt rate limit in usecs to the format
* expected by firmware.
*/
-u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
+static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
{
u32 val = intrl / gran;
@@ -1783,6 +1785,51 @@ u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
}
/**
+ * ice_write_intrl - write throttle rate limit to interrupt specific register
+ * @q_vector: pointer to interrupt specific structure
+ * @intrl: throttle rate limit in microseconds to write
+ */
+void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl)
+{
+ struct ice_hw *hw = &q_vector->vsi->back->hw;
+
+ wr32(hw, GLINT_RATE(q_vector->reg_idx),
+ ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25));
+}
+
+/**
+ * __ice_write_itr - write throttle rate to register
+ * @q_vector: pointer to interrupt data structure
+ * @rc: pointer to ring container
+ * @itr: throttle rate in microseconds to write
+ */
+static void __ice_write_itr(struct ice_q_vector *q_vector,
+ struct ice_ring_container *rc, u16 itr)
+{
+ struct ice_hw *hw = &q_vector->vsi->back->hw;
+
+ wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
+ ITR_REG_ALIGN(itr) >> ICE_ITR_GRAN_S);
+}
+
+/**
+ * ice_write_itr - write throttle rate to queue specific register
+ * @rc: pointer to ring container
+ * @itr: throttle rate in microseconds to write
+ */
+void ice_write_itr(struct ice_ring_container *rc, u16 itr)
+{
+ struct ice_q_vector *q_vector;
+
+ if (!rc->ring)
+ return;
+
+ q_vector = rc->ring->q_vector;
+
+ __ice_write_itr(q_vector, rc, itr);
+}
+
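[Note: ice_write_intrl() and ice_write_itr() become the single path for programming GLINT_RATE and GLINT_ITR. A hedged usage sketch (values illustrative; with the 4 us granularity of ICE_INTRL_GRAN_ABOVE_25, a 20 us limit encodes as register value 5):

static void ice_example_tune_vector(struct ice_q_vector *q_vector)
{
	/* usage sketch: 50 us Tx ITR (silent no-op if tx.ring is NULL)
	 * and a 20 us interrupt rate limit on this vector
	 */
	ice_write_itr(&q_vector->tx, 50);
	ice_write_intrl(q_vector, 20);
}
]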
+/**
* ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
* @vsi: the VSI being configured
*
@@ -1802,9 +1849,6 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
ice_cfg_itr(hw, q_vector);
- wr32(hw, GLINT_RATE(reg_idx),
- ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
-
/* Both Transmit Queue Interrupt Cause Control register
* and Receive Queue Interrupt Cause control register
* expects MSIX_INDX field to be the vector index
@@ -2308,7 +2352,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_vsi *vsi;
int ret, i;
- if (vsi_type == ICE_VSI_VF)
+ if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL)
vsi = ice_vsi_alloc(pf, vsi_type, vf_id);
else
vsi = ice_vsi_alloc(pf, vsi_type, ICE_INVAL_VFID);
@@ -2323,7 +2367,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (vsi->type == ICE_VSI_PF)
vsi->ethtype = ETH_P_PAUSE;
- if (vsi->type == ICE_VSI_VF)
+ if (vsi->type == ICE_VSI_VF || vsi->type == ICE_VSI_CTRL)
vsi->vf_id = vf_id;
ice_alloc_fd_res(vsi);
@@ -2492,11 +2536,10 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
for (i = 0; i < vsi->num_q_vectors; i++) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
- u16 reg_idx = q_vector->reg_idx;
- wr32(hw, GLINT_ITR(ICE_IDX_ITR0, reg_idx), 0);
- wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0);
+ ice_write_intrl(q_vector, 0);
for (q = 0; q < q_vector->num_ring_tx; q++) {
+ ice_write_itr(&q_vector->tx, 0);
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
if (ice_is_xdp_ena_vsi(vsi)) {
u32 xdp_txq = txq + vsi->num_xdp_txq;
@@ -2507,6 +2550,7 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
}
for (q = 0; q < q_vector->num_ring_rx; q++) {
+ ice_write_itr(&q_vector->rx, 0);
wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
rxq++;
}
@@ -2593,7 +2637,7 @@ void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
*/
void ice_vsi_close(struct ice_vsi *vsi)
{
- if (!test_and_set_bit(__ICE_DOWN, vsi->state))
+ if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state))
ice_down(vsi);
ice_vsi_free_irq(vsi);
@@ -2610,10 +2654,10 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
{
int err = 0;
- if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
+ if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state))
return 0;
- clear_bit(__ICE_NEEDS_RESTART, vsi->state);
+ clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
if (vsi->netdev && vsi->type == ICE_VSI_PF) {
if (netif_running(vsi->netdev)) {
@@ -2639,10 +2683,10 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
*/
void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
{
- if (test_bit(__ICE_DOWN, vsi->state))
+ if (test_bit(ICE_VSI_DOWN, vsi->state))
return;
- set_bit(__ICE_NEEDS_RESTART, vsi->state);
+ set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
if (vsi->type == ICE_VSI_PF && vsi->netdev) {
if (netif_running(vsi->netdev)) {
@@ -2752,11 +2796,14 @@ int ice_vsi_release(struct ice_vsi *vsi)
* PF that is running the work queue items currently. This is done to
* avoid check_flush_dependency() warning on this wq
*/
- if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
+ if (vsi->netdev && !ice_is_reset_in_progress(pf->state) &&
+ (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state))) {
unregister_netdev(vsi->netdev);
- ice_devlink_destroy_port(vsi);
+ clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
}
+ ice_devlink_destroy_port(vsi);
+
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_rss_clean(vsi);
@@ -2770,7 +2817,24 @@ int ice_vsi_release(struct ice_vsi *vsi)
* many interrupts each VF needs. SR-IOV MSIX resources are also
* cleared in the same manner.
*/
- if (vsi->type != ICE_VSI_VF) {
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) {
+ struct ice_vf *vf;
+ int i;
+
+ ice_for_each_vf(pf, i) {
+ vf = &pf->vf[i];
+ if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI)
+ break;
+ }
+ if (i == pf->num_alloc_vfs) {
+ /* No other VFs left that have control VSI, reclaim SW
+ * interrupts back to the common pool
+ */
+ ice_free_res(pf->irq_tracker, vsi->base_vector,
+ ICE_RES_VF_CTRL_VEC_ID);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+ }
+ } else if (vsi->type != ICE_VSI_VF) {
/* reclaim SW interrupts back to the common pool */
ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
pf->num_avail_sw_msix += vsi->num_q_vectors;
@@ -2794,10 +2858,16 @@ int ice_vsi_release(struct ice_vsi *vsi)
ice_vsi_delete(vsi);
ice_vsi_free_q_vectors(vsi);
- /* make sure unregister_netdev() was called by checking __ICE_DOWN */
- if (vsi->netdev && test_bit(__ICE_DOWN, vsi->state)) {
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
+ if (vsi->netdev) {
+ if (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state)) {
+ unregister_netdev(vsi->netdev);
+ clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
+ }
+ if (test_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state)) {
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
+ }
}
if (vsi->type == ICE_VSI_VF &&
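[Note: the two state bits make teardown idempotent: unregister only if ICE_VSI_NETDEV_REGISTERED is set, free only if ICE_VSI_NETDEV_ALLOCD is set, clearing each as it goes. The registration side presumably sets the bits in the same order; a hedged sketch, not taken from this patch:

static int ice_example_register_netdev(struct ice_vsi *vsi)
{
	int err;

	/* hypothetical mirror of the teardown above: mark each stage
	 * so release paths can tell how far setup got
	 */
	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);

	err = register_netdev(vsi->netdev);
	if (err)
		return err;

	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
	return 0;
}
]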
@@ -2818,39 +2888,6 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
/**
- * ice_vsi_rebuild_update_coalesce - set coalesce for a q_vector
- * @q_vector: pointer to q_vector which is being updated
- * @coalesce: pointer to array of struct with stored coalesce
- *
- * Set coalesce param in q_vector and update these parameters in HW.
- */
-static void
-ice_vsi_rebuild_update_coalesce(struct ice_q_vector *q_vector,
- struct ice_coalesce_stored *coalesce)
-{
- struct ice_ring_container *rx_rc = &q_vector->rx;
- struct ice_ring_container *tx_rc = &q_vector->tx;
- struct ice_hw *hw = &q_vector->vsi->back->hw;
-
- tx_rc->itr_setting = coalesce->itr_tx;
- rx_rc->itr_setting = coalesce->itr_rx;
-
- /* dynamic ITR values will be updated during Tx/Rx */
- if (!ITR_IS_DYNAMIC(tx_rc->itr_setting))
- wr32(hw, GLINT_ITR(tx_rc->itr_idx, q_vector->reg_idx),
- ITR_REG_ALIGN(tx_rc->itr_setting) >>
- ICE_ITR_GRAN_S);
- if (!ITR_IS_DYNAMIC(rx_rc->itr_setting))
- wr32(hw, GLINT_ITR(rx_rc->itr_idx, q_vector->reg_idx),
- ITR_REG_ALIGN(rx_rc->itr_setting) >>
- ICE_ITR_GRAN_S);
-
- q_vector->intrl = coalesce->intrl;
- wr32(hw, GLINT_RATE(q_vector->reg_idx),
- ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
-}
-
-/**
* ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
* @vsi: VSI connected with q_vectors
* @coalesce: array of struct with stored coalesce
@@ -2869,6 +2906,11 @@ ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
coalesce[i].itr_tx = q_vector->tx.itr_setting;
coalesce[i].itr_rx = q_vector->rx.itr_setting;
coalesce[i].intrl = q_vector->intrl;
+
+ if (i < vsi->num_txq)
+ coalesce[i].tx_valid = true;
+ if (i < vsi->num_rxq)
+ coalesce[i].rx_valid = true;
}
return vsi->num_q_vectors;
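[Note: the tx_valid/rx_valid flags record which stored entries were backed by a real ring before the rebuild. They imply a companion change to struct ice_coalesce_stored; a hedged sketch of that layout (field names taken from this diff, field sizes assumed):

struct ice_coalesce_stored {
	u16 itr_tx;
	u16 itr_rx;
	u8 intrl;
	u8 tx_valid;	/* entry i had a Tx ring (i < vsi->num_txq) */
	u8 rx_valid;	/* entry i had an Rx ring (i < vsi->num_rxq) */
};
]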
@@ -2888,22 +2930,75 @@ static void
ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
struct ice_coalesce_stored *coalesce, int size)
{
+ struct ice_ring_container *rc;
int i;
if ((size && !coalesce) || !vsi)
return;
- for (i = 0; i < size && i < vsi->num_q_vectors; i++)
- ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
- &coalesce[i]);
+ /* There are a couple of cases that have to be handled here:
+ * 1. The case where the number of queue vectors stays the same, but
+ * the number of Tx or Rx rings changes (the first for loop)
+ * 2. The case where the number of queue vectors increased (the
+ * second for loop)
+ */
+ for (i = 0; i < size && i < vsi->num_q_vectors; i++) {
+ /* There are 2 cases to handle here and they are the same for
+ * both Tx and Rx:
+ * if the entry was valid previously (coalesce[i].[tr]x_valid
+ * and the loop variable is less than the number of rings
+ * allocated, then write the previous values
+ *
+ * if the entry was not valid previously, but the number of
+ * rings is less than are allocated (this means the number of
+ * rings increased from previously), then write out the
+ * values in the first element
+ *
+ * Also, always write the ITR, even if in ITR_IS_DYNAMIC
+ * as there is no harm because the dynamic algorithm
+ * will just overwrite.
+ */
+ if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
+ rc = &vsi->q_vectors[i]->rx;
+ rc->itr_setting = coalesce[i].itr_rx;
+ ice_write_itr(rc, rc->itr_setting);
+ } else if (i < vsi->alloc_rxq) {
+ rc = &vsi->q_vectors[i]->rx;
+ rc->itr_setting = coalesce[0].itr_rx;
+ ice_write_itr(rc, rc->itr_setting);
+ }
+
+ if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
+ rc = &vsi->q_vectors[i]->tx;
+ rc->itr_setting = coalesce[i].itr_tx;
+ ice_write_itr(rc, rc->itr_setting);
+ } else if (i < vsi->alloc_txq) {
+ rc = &vsi->q_vectors[i]->tx;
+ rc->itr_setting = coalesce[0].itr_tx;
+ ice_write_itr(rc, rc->itr_setting);
+ }
+
+ vsi->q_vectors[i]->intrl = coalesce[i].intrl;
+ ice_write_intrl(vsi->q_vectors[i], coalesce[i].intrl);
+ }
- /* number of q_vectors increased, so assume coalesce settings were
- * changed globally (i.e. ethtool -C eth0 instead of per-queue) and use
- * the previous settings from q_vector 0 for all of the new q_vectors
+ /* the number of queue vectors increased so write whatever is in
+ * the first element
*/
- for (; i < vsi->num_q_vectors; i++)
- ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
- &coalesce[0]);
+ for (; i < vsi->num_q_vectors; i++) {
+ /* transmit */
+ rc = &vsi->q_vectors[i]->tx;
+ rc->itr_setting = coalesce[0].itr_tx;
+ ice_write_itr(rc, rc->itr_setting);
+
+ /* receive */
+ rc = &vsi->q_vectors[i]->rx;
+ rc->itr_setting = coalesce[0].itr_rx;
+ ice_write_itr(rc, rc->itr_setting);
+
+ vsi->q_vectors[i]->intrl = coalesce[0].intrl;
+ ice_write_intrl(vsi->q_vectors[i], coalesce[0].intrl);
+ }
}
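[Note: a worked case for the first loop above: suppose a rebuild shrinks Tx rings from eight to four while eight queue vectors remain. ice_vsi_rebuild_get_coalesce() marked only entries 0..3 tx_valid, so vectors 4..7 are seeded from coalesce[0].itr_tx instead of replaying whatever stale values those slots held.]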
/**
@@ -2919,6 +3014,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
struct ice_coalesce_stored *coalesce;
int prev_num_q_vectors = 0;
struct ice_vf *vf = NULL;
+ enum ice_vsi_type vtype;
enum ice_status status;
struct ice_pf *pf;
int ret, i;
@@ -2927,14 +3023,17 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
return -EINVAL;
pf = vsi->back;
- if (vsi->type == ICE_VSI_VF)
+ vtype = vsi->type;
+ if (vtype == ICE_VSI_VF)
vf = &pf->vf[vsi->vf_id];
coalesce = kcalloc(vsi->num_q_vectors,
sizeof(struct ice_coalesce_stored), GFP_KERNEL);
- if (coalesce)
- prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi,
- coalesce);
+ if (!coalesce)
+ return -ENOMEM;
+
+ prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
+
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_free_q_vectors(vsi);
@@ -2943,7 +3042,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
* many interrupts each VF needs. SR-IOV MSIX resources are also
* cleared in the same manner.
*/
- if (vsi->type != ICE_VSI_VF) {
+ if (vtype != ICE_VSI_VF) {
/* reclaim SW interrupts back to the common pool */
ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
pf->num_avail_sw_msix += vsi->num_q_vectors;
@@ -2958,7 +3057,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
ice_vsi_put_qs(vsi);
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi);
- if (vsi->type == ICE_VSI_VF)
+ if (vtype == ICE_VSI_VF)
ice_vsi_set_num_qs(vsi, vf->vf_id);
else
ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
@@ -2977,7 +3076,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
if (ret < 0)
goto err_vsi;
- switch (vsi->type) {
+ switch (vtype) {
case ICE_VSI_CTRL:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
@@ -3004,7 +3103,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
goto err_vectors;
}
/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
- if (vsi->type != ICE_VSI_CTRL)
+ if (vtype != ICE_VSI_CTRL)
/* Do not exit if configuring RSS had an issue, at
* least receive traffic on first queue. Hence no
* need to capture return value
@@ -3066,7 +3165,7 @@ err_rings:
}
err_vsi:
ice_vsi_clear(vsi);
- set_bit(__ICE_RESET_FAILED, pf->state);
+ set_bit(ICE_RESET_FAILED, pf->state);
kfree(coalesce);
return ret;
}
@@ -3077,10 +3176,10 @@ err_vsi:
*/
bool ice_is_reset_in_progress(unsigned long *state)
{
- return test_bit(__ICE_RESET_OICR_RECV, state) ||
- test_bit(__ICE_PFR_REQ, state) ||
- test_bit(__ICE_CORER_REQ, state) ||
- test_bit(__ICE_GLOBR_REQ, state);
+ return test_bit(ICE_RESET_OICR_RECV, state) ||
+ test_bit(ICE_PFR_REQ, state) ||
+ test_bit(ICE_CORER_REQ, state) ||
+ test_bit(ICE_GLOBR_REQ, state);
}
#ifdef CONFIG_DCB
@@ -3168,20 +3267,15 @@ out:
/**
* ice_update_ring_stats - Update ring statistics
* @ring: ring to update
- * @cont: used to increment per-vector counters
* @pkts: number of processed packets
* @bytes: number of processed bytes
*
* This function assumes that caller has acquired a u64_stats_sync lock.
*/
-static void
-ice_update_ring_stats(struct ice_ring *ring, struct ice_ring_container *cont,
- u64 pkts, u64 bytes)
+static void ice_update_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes)
{
ring->stats.bytes += bytes;
ring->stats.pkts += pkts;
- cont->total_bytes += bytes;
- cont->total_pkts += pkts;
}
/**
@@ -3193,7 +3287,7 @@ ice_update_ring_stats(struct ice_ring *ring, struct ice_ring_container *cont,
void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes)
{
u64_stats_update_begin(&tx_ring->syncp);
- ice_update_ring_stats(tx_ring, &tx_ring->q_vector->tx, pkts, bytes);
+ ice_update_ring_stats(tx_ring, pkts, bytes);
u64_stats_update_end(&tx_ring->syncp);
}
@@ -3206,7 +3300,7 @@ void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes)
void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes)
{
u64_stats_update_begin(&rx_ring->syncp);
- ice_update_ring_stats(rx_ring, &rx_ring->q_vector->rx, pkts, bytes);
+ ice_update_ring_stats(rx_ring, pkts, bytes);
u64_stats_update_end(&rx_ring->syncp);
}
@@ -3348,3 +3442,40 @@ int ice_clear_dflt_vsi(struct ice_sw *sw)
return 0;
}
+
+/**
+ * ice_set_link - turn on/off physical link
+ * @vsi: VSI to modify physical link on
+ * @ena: turn on/off physical link
+ */
+int ice_set_link(struct ice_vsi *vsi, bool ena)
+{
+ struct device *dev = ice_pf_to_dev(vsi->back);
+ struct ice_port_info *pi = vsi->port_info;
+ struct ice_hw *hw = pi->hw;
+ enum ice_status status;
+
+ if (vsi->type != ICE_VSI_PF)
+ return -EINVAL;
+
+ status = ice_aq_set_link_restart_an(pi, ena, NULL);
+
+ /* if link is owned by manageability, FW will return ICE_AQ_RC_EMODE.
+ * this is not a fatal error, so print a warning message and return
+ * a success code. Return an error if FW returns an error code other
+ * than ICE_AQ_RC_EMODE
+ */
+ if (status == ICE_ERR_AQ_ERROR) {
+ if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
+ dev_warn(dev, "can't set link to %s, err %s aq_err %s. not fatal, continuing\n",
+ (ena ? "ON" : "OFF"), ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ } else if (status) {
+ dev_err(dev, "can't set link to %s, err %s aq_err %s\n",
+ (ena ? "ON" : "OFF"), ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return -EIO;
+ }
+
+ return 0;
+}
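[Note: ice_set_link() absorbs ICE_AQ_RC_EMODE as success, so callers need no special case for manageability-owned links. A minimal caller sketch (hypothetical ifdown-style path, not part of this patch):

static int ice_example_link_down(struct ice_vsi *vsi)
{
	/* usage sketch: only ICE_VSI_PF VSIs are accepted; EMODE from
	 * firmware is logged but not treated as an error
	 */
	return ice_set_link(vsi, false);
}
]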