Diffstat (limited to 'drivers/net/ethernet/intel/igc/igc_main.c')
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c  110
1 file changed, 99 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index bdeb36790d77..293b45717683 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1271,10 +1271,21 @@ static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
(IGC_ADVTXD_DCMD_TSE));
- /* set timestamp bit if present */
+ /* set timestamp bit if present, will select the register set
+ * based on the _TSTAMP(_X) bit.
+ */
cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
(IGC_ADVTXD_MAC_TSTAMP));
+ cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_1,
+ (IGC_ADVTXD_TSTAMP_REG_1));
+
+ cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_2,
+ (IGC_ADVTXD_TSTAMP_REG_2));
+
+ cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_3,
+ (IGC_ADVTXD_TSTAMP_REG_3));
+
/* insert frame checksum */
cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS);
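
The hunk above relies on the driver's IGC_SET_FLAG() helper to translate each IGC_TX_FLAGS_TSTAMP* bit into the matching advanced-descriptor bit without branching. The definition below is a paraphrase of the macro in igc.h, included only to make the mapping readable; treat it as an approximation rather than the authoritative source.

/* Paraphrased from igc.h (approximate): scale the tested bit up or down
 * so that a set _flag yields exactly _result, and 0 otherwise.
 */
#define IGC_SET_FLAG(_input, _flag, _result)				\
	(((_flag) <= (_result)) ?					\
	 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) :		\
	 ((u32)((_input) & (_flag)) / ((_flag) / (_result))))
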
@@ -1533,6 +1544,26 @@ static int igc_tso(struct igc_ring *tx_ring,
return 1;
}
+static bool igc_request_tx_tstamp(struct igc_adapter *adapter, struct sk_buff *skb, u32 *flags)
+{
+ int i;
+
+ for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) {
+ struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i];
+
+ if (tstamp->skb)
+ continue;
+
+ tstamp->skb = skb_get(skb);
+ tstamp->start = jiffies;
+ *flags = tstamp->flags;
+
+ return true;
+ }
+
+ return false;
+}
+
static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
struct igc_ring *tx_ring)
{
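
igc_request_tx_tstamp() claims the first free per-adapter timestamp slot. The layout below is a minimal, hypothetical view of one slot, limited to the fields the helper actually touches; the real struct igc_tx_timestamp_request in igc.h may carry extra members (for example, the register offsets associated with each slot).

/* Hypothetical minimal slot layout; see igc.h for the real definition. */
struct igc_tx_timestamp_request {
	struct sk_buff *skb;	/* reference held until the timestamp is read back */
	unsigned long start;	/* jiffies when the request was queued */
	u32 flags;		/* IGC_TX_FLAGS_TSTAMP or _TSTAMP_1/_2/_3 for this slot */
};

/* adapter->tx_tstamp[] holds IGC_MAX_TX_TSTAMP_REGS of these, one entry
 * per hardware timestamp register set.
 */
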
@@ -1614,14 +1645,12 @@ done:
* timestamping request.
*/
unsigned long flags;
+ u32 tstamp_flags;
spin_lock_irqsave(&adapter->ptp_tx_lock, flags);
- if (!adapter->ptp_tx_skb) {
+ if (igc_request_tx_tstamp(adapter, skb, &tstamp_flags)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- tx_flags |= IGC_TX_FLAGS_TSTAMP;
-
- adapter->ptp_tx_skb = skb_get(skb);
- adapter->ptp_tx_start = jiffies;
+ tx_flags |= IGC_TX_FLAGS_TSTAMP | tstamp_flags;
} else {
adapter->tx_hwtstamp_skipped++;
}
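
A slot is only requested when the submitting socket asked for a hardware TX timestamp in the first place. The userspace sketch below uses the standard SO_TIMESTAMPING socket option (generic Linux API, not igc-specific) to generate such requests; enabling timestamping on the NIC itself (e.g. via the SIOCSHWTSTAMP ioctl) is assumed and omitted.

#include <sys/socket.h>
#include <linux/net_tstamp.h>

/* Ask the stack for hardware TX timestamps on this socket; completed
 * timestamps are later read back from the socket's error queue.
 */
static int enable_hw_tx_timestamps(int sock)
{
	int flags = SOF_TIMESTAMPING_TX_HARDWARE |	/* stamp in the MAC */
		    SOF_TIMESTAMPING_RAW_HARDWARE;	/* report raw hw time */

	return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}

When every slot is already in use, the request is skipped and tx_hwtstamp_skipped is incremented, as the else branch above shows.
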
@@ -4801,6 +4830,7 @@ static int igc_sw_init(struct igc_adapter *adapter)
adapter->nfc_rule_count = 0;
spin_lock_init(&adapter->stats64_lock);
+ spin_lock_init(&adapter->qbv_tx_lock);
/* Assume MSI-X interrupts, will be checked during IRQ allocation */
adapter->flags |= IGC_FLAG_HAS_MSIX;
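
The new qbv_tx_lock initialized here protects the Qbv fields that the scheduling hrtimer callback and the TSN configuration paths both touch. The lock itself is presumably declared in struct igc_adapter in igc.h; that file is outside this diffstat (which is limited to igc_main.c), so the one-line sketch below is an assumption about what the companion change looks like.

/* Assumed companion change in igc.h (not part of this file's diff): */
struct igc_adapter {
	/* ... existing members ... */
	spinlock_t qbv_tx_lock;	/* serializes Qbv state vs. the scheduling hrtimer */
	/* ... */
};
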
@@ -6119,15 +6149,15 @@ static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
return igc_tsn_offload_apply(adapter);
}
-static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
+static int igc_qbv_clear_schedule(struct igc_adapter *adapter)
{
+ unsigned long flags;
int i;
adapter->base_time = 0;
adapter->cycle_time = NSEC_PER_SEC;
adapter->taprio_offload_enable = false;
adapter->qbv_config_change_errors = 0;
- adapter->qbv_transition = false;
adapter->qbv_count = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -6136,13 +6166,51 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
ring->start_time = 0;
ring->end_time = NSEC_PER_SEC;
ring->max_sdu = 0;
+ }
+
+ spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
+
+ adapter->qbv_transition = false;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+
ring->oper_gate_closed = false;
ring->admin_gate_closed = false;
}
+ spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags);
+
+ return 0;
+}
+
+static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
+{
+ igc_qbv_clear_schedule(adapter);
+
return 0;
}
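
Splitting the clear path this way lets the fields shared with the scheduling hrtimer (qbv_transition and the per-ring gate flags) be updated only while qbv_tx_lock is held with IRQs disabled. Any reader of that state should take the same lock; the helper below is purely illustrative (igc_qbv_gates_open() is not a function in the driver) and only demonstrates the locking discipline.

/* Illustrative reader: take qbv_tx_lock with IRQs off, because the
 * hrtimer callback that writes these fields runs in interrupt context.
 */
static bool igc_qbv_gates_open(struct igc_adapter *adapter,
			       struct igc_ring *ring)
{
	unsigned long flags;
	bool open;

	spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
	open = !adapter->qbv_transition &&
	       !ring->oper_gate_closed && !ring->admin_gate_closed;
	spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags);

	return open;
}
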
+static void igc_taprio_stats(struct net_device *dev,
+ struct tc_taprio_qopt_stats *stats)
+{
+ /* When Strict_End is enabled, the tx_overruns counter
+ * will always be zero.
+ */
+ stats->tx_overruns = 0;
+}
+
+static void igc_taprio_queue_stats(struct net_device *dev,
+ struct tc_taprio_qopt_queue_stats *queue_stats)
+{
+ struct tc_taprio_qopt_stats *stats = &queue_stats->stats;
+
+ /* When Strict_End is enabled, the tx_overruns counter
+ * will always be zero.
+ */
+ stats->tx_overruns = 0;
+}
+
static int igc_save_qbv_schedule(struct igc_adapter *adapter,
struct tc_taprio_qopt_offload *qopt)
{
@@ -6150,14 +6218,24 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
struct igc_hw *hw = &adapter->hw;
u32 start_time = 0, end_time = 0;
struct timespec64 now;
+ unsigned long flags;
size_t n;
int i;
- if (qopt->cmd == TAPRIO_CMD_DESTROY)
+ switch (qopt->cmd) {
+ case TAPRIO_CMD_REPLACE:
+ break;
+ case TAPRIO_CMD_DESTROY:
return igc_tsn_clear_schedule(adapter);
-
- if (qopt->cmd != TAPRIO_CMD_REPLACE)
+ case TAPRIO_CMD_STATS:
+ igc_taprio_stats(adapter->netdev, &qopt->stats);
+ return 0;
+ case TAPRIO_CMD_QUEUE_STATS:
+ igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats);
+ return 0;
+ default:
return -EOPNOTSUPP;
+ }
if (qopt->base_time < 0)
return -ERANGE;
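
Reworking the command handling as a switch lets the existing TAPRIO_CMD_REPLACE and TAPRIO_CMD_DESTROY paths coexist with the new stats queries. These requests arrive through the driver's ndo_setup_tc hook for TC_SETUP_QDISC_TAPRIO; the dispatcher below is a rough sketch of that plumbing (the helper name igc_setup_taprio() is made up for illustration, the real entry point in igc_main.c is the driver's tc setup callback).

/* Illustrative dispatch only; the real path goes through .ndo_setup_tc. */
static int igc_setup_taprio(struct net_device *dev, void *type_data)
{
	struct igc_adapter *adapter = netdev_priv(dev);
	struct tc_taprio_qopt_offload *qopt = type_data;

	/* REPLACE, DESTROY, STATS and QUEUE_STATS all funnel into the
	 * switch at the top of igc_save_qbv_schedule() shown above.
	 */
	return igc_save_qbv_schedule(adapter, qopt);
}
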
@@ -6217,6 +6295,8 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
start_time += e->interval;
}
+ spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
+
/* Check whether a queue gets configured.
* If not, set the start and end time to be end time.
*/
@@ -6241,6 +6321,8 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
}
}
+ spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags);
+
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
struct net_device *dev = adapter->netdev;
@@ -6619,8 +6701,11 @@ static enum hrtimer_restart igc_qbv_scheduling_timer(struct hrtimer *timer)
{
struct igc_adapter *adapter = container_of(timer, struct igc_adapter,
hrtimer);
+ unsigned long flags;
unsigned int i;
+ spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
+
adapter->qbv_transition = true;
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *tx_ring = adapter->tx_ring[i];
@@ -6633,6 +6718,9 @@ static enum hrtimer_restart igc_qbv_scheduling_timer(struct hrtimer *timer)
}
}
adapter->qbv_transition = false;
+
+ spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags);
+
return HRTIMER_NORESTART;
}
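
The reason the callback needs spin_lock_irqsave() at all is that hrtimer callbacks execute in interrupt context, concurrently with the process-context TSN configuration paths above. The arming sketch below is generic and illustrative (the clock base, expiry and helper name are assumptions, not copied from igc_main.c); it only shows where a callback like igc_qbv_scheduling_timer() would come from.

/* Illustrative setup/arming of the Qbv scheduling hrtimer; details such
 * as the clock base and delay are assumptions for the example.
 */
static void example_arm_qbv_timer(struct igc_adapter *adapter, u64 delay_ns)
{
	hrtimer_init(&adapter->hrtimer, CLOCK_TAI, HRTIMER_MODE_REL);
	adapter->hrtimer.function = igc_qbv_scheduling_timer;
	hrtimer_start(&adapter->hrtimer, ns_to_ktime(delay_ns),
		      HRTIMER_MODE_REL);
}
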