Diffstat (limited to 'drivers/net/wireless/ath')
-rw-r--r--  drivers/net/wireless/ath/ar5523/ar5523.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/Kconfig | 6
-rw-r--r--  drivers/net/wireless/ath/ath10k/Makefile | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c | 32
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.h | 8
-rw-r--r--  drivers/net/wireless/ath/ath10k/hw.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/leds.c | 90
-rw-r--r--  drivers/net/wireless/ath/ath10k/leds.h | 34
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath10k/qmi.c | 11
-rw-r--r--  drivers/net/wireless/ath/ath10k/qmi.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/snoc.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-ops.h | 32
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-tlv.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.c | 54
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.h | 35
-rw-r--r--  drivers/net/wireless/ath/ath11k/Kconfig | 1
-rw-r--r--  drivers/net/wireless/ath/ath11k/ahb.c | 63
-rw-r--r--  drivers/net/wireless/ath/ath11k/ce.h | 6
-rw-r--r--  drivers/net/wireless/ath/ath11k/core.c | 49
-rw-r--r--  drivers/net/wireless/ath/ath11k/core.h | 9
-rw-r--r--  drivers/net/wireless/ath/ath11k/debugfs.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp.c | 12
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp_rx.c | 107
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp_rx.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp_tx.c | 22
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp_tx.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal.c | 16
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal_tx.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath11k/hw.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath11k/mac.c | 198
-rw-r--r--  drivers/net/wireless/ath/ath11k/qmi.c | 22
-rw-r--r--  drivers/net/wireless/ath/ath11k/reg.c | 18
-rw-r--r--  drivers/net/wireless/ath/ath11k/reg.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath11k/wmi.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath12k/Kconfig | 1
-rw-r--r--  drivers/net/wireless/ath/ath12k/Makefile | 3
-rw-r--r--  drivers/net/wireless/ath/ath12k/acpi.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath12k/ce.h | 6
-rw-r--r--  drivers/net/wireless/ath/ath12k/core.c | 209
-rw-r--r--  drivers/net/wireless/ath/ath12k/core.h | 74
-rw-r--r--  drivers/net/wireless/ath/ath12k/debug.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath12k/debugfs.c | 19
-rw-r--r--  drivers/net/wireless/ath/ath12k/debugfs.h | 6
-rw-r--r--  drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c | 1540
-rw-r--r--  drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h | 567
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp.c | 83
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp.h | 5
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp_mon.c | 40
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp_rx.c | 169
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp_rx.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp_tx.c | 169
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp_tx.h | 5
-rw-r--r--  drivers/net/wireless/ath/ath12k/hal.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath12k/hal.h | 21
-rw-r--r--  drivers/net/wireless/ath/ath12k/hal_desc.h | 73
-rw-r--r--  drivers/net/wireless/ath/ath12k/hal_tx.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath12k/hif.h | 9
-rw-r--r--  drivers/net/wireless/ath/ath12k/htc.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath12k/hw.c | 23
-rw-r--r--  drivers/net/wireless/ath/ath12k/hw.h | 6
-rw-r--r--  drivers/net/wireless/ath/ath12k/mac.c | 852
-rw-r--r--  drivers/net/wireless/ath/ath12k/mac.h | 5
-rw-r--r--  drivers/net/wireless/ath/ath12k/mhi.c | 11
-rw-r--r--  drivers/net/wireless/ath/ath12k/pci.c | 42
-rw-r--r--  drivers/net/wireless/ath/ath12k/pci.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath12k/qmi.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath12k/reg.c | 19
-rw-r--r--  drivers/net/wireless/ath/ath12k/wmi.c | 783
-rw-r--r--  drivers/net/wireless/ath/ath12k/wmi.h | 632
-rw-r--r--  drivers/net/wireless/ath/ath12k/wow.c | 1026
-rw-r--r--  drivers/net/wireless/ath/ath12k/wow.h | 62
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 2
-rw-r--r--  drivers/net/wireless/ath/carl9170/main.c | 2
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/main.c | 2
-rw-r--r--  drivers/net/wireless/ath/wil6210/netdev.c | 21
-rw-r--r--  drivers/net/wireless/ath/wil6210/wil6210.h | 2
81 files changed, 6566 insertions(+), 832 deletions(-)
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 5a55db349cb5..156f3650c006 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1061,7 +1061,7 @@ err:
return error;
}
-static void ar5523_stop(struct ieee80211_hw *hw)
+static void ar5523_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ar5523 *ar = hw->priv;
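[Editor's note: the ar5523 hunk above, and most likely the matching two-line changes to ath5k, ath9k, carl9170 and wcn36xx listed in the diffstat, adapt the drivers to mac80211's updated .stop() callback, which now carries a 'suspend' flag. A minimal sketch of the new prototype is shown below; the function and ops names are illustrative, not taken from any of these drivers.]

#include <net/mac80211.h>

/* sketch: a driver that does not care about suspend simply ignores the flag */
static void example_stop(struct ieee80211_hw *hw, bool suspend)
{
}

static const struct ieee80211_ops example_ops = {
        .stop = example_stop,
        /* remaining callbacks omitted in this sketch */
};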
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index 4f385f4a8cef..876aed765833 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -68,6 +68,12 @@ config ATH10K_DEBUGFS
If unsure, say Y to make it easier to debug problems.
+config ATH10K_LEDS
+ bool
+ depends on ATH10K
+ depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
+ default y
+
config ATH10K_SPECTRAL
bool "Atheros ath10k spectral scan support"
depends on ATH10K_DEBUGFS
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index 142c777b287f..02bf9b629038 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -19,6 +19,7 @@ ath10k_core-$(CONFIG_ATH10K_SPECTRAL) += spectral.o
ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
ath10k_core-$(CONFIG_THERMAL) += thermal.o
+ath10k_core-$(CONFIG_ATH10K_LEDS) += leds.o
ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
ath10k_core-$(CONFIG_PM) += wow.o
ath10k_core-$(CONFIG_DEV_COREDUMP) += coredump.o
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index bdf0552cd1c3..b3294287bce1 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -27,6 +27,7 @@
#include "testmode.h"
#include "wmi-ops.h"
#include "coredump.h"
+#include "leds.h"
unsigned int ath10k_debug_mask;
EXPORT_SYMBOL(ath10k_debug_mask);
@@ -68,6 +69,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca988x hw2.0",
.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .led_pin = 1,
.cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
@@ -108,6 +110,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca988x hw2.0 ubiquiti",
.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .led_pin = 0,
.cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
@@ -149,6 +152,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca9887 hw1.0",
.patch_load_addr = QCA9887_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .led_pin = 1,
.cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
@@ -190,6 +194,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca6174 hw3.2 sdio",
.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
.uart_pin = 19,
+ .led_pin = 0,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
@@ -226,6 +231,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca6164 hw2.1",
.patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
.uart_pin = 6,
+ .led_pin = 0,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
@@ -266,6 +272,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca6174 hw2.1",
.patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
.uart_pin = 6,
+ .led_pin = 0,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
@@ -306,6 +313,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca6174 hw3.0",
.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
+ .led_pin = 0,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
@@ -346,6 +354,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca6174 hw3.2",
.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
+ .led_pin = 0,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
@@ -390,6 +399,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca99x0 hw2.0",
.patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .led_pin = 17,
.otp_exe_param = 0x00000700,
.continuous_frag_desc = true,
.cck_rate_map_rev2 = true,
@@ -436,6 +446,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca9984/qca9994 hw1.0",
.patch_load_addr = QCA9984_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .led_pin = 17,
.cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH,
.otp_exe_param = 0x00000700,
.continuous_frag_desc = true,
@@ -488,6 +499,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca9888 hw2.0",
.patch_load_addr = QCA9888_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .led_pin = 17,
.cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH,
.otp_exe_param = 0x00000700,
.continuous_frag_desc = true,
@@ -538,6 +550,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca9377 hw1.0",
.patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
+ .led_pin = 0,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
@@ -578,6 +591,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca9377 hw1.1",
.patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
+ .led_pin = 0,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
@@ -620,6 +634,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca9377 hw1.1 sdio",
.patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 19,
+ .led_pin = 0,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
@@ -653,6 +668,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca4019 hw1.0",
.patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .led_pin = 0,
.cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH,
.otp_exe_param = 0x0010000,
.continuous_frag_desc = true,
@@ -698,6 +714,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.dev_id = 0,
.bus = ATH10K_BUS_SNOC,
.name = "wcn3990 hw1.0",
+ .led_pin = 0,
.continuous_frag_desc = true,
.tx_chain_mask = 0x7,
.rx_chain_mask = 0x7,
@@ -3224,6 +3241,10 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err_hif_stop;
}
+ status = ath10k_leds_start(ar);
+ if (status)
+ goto err_hif_stop;
+
return 0;
err_hif_stop:
@@ -3482,9 +3503,18 @@ static void ath10k_core_register_work(struct work_struct *work)
goto err_spectral_destroy;
}
+ status = ath10k_leds_register(ar);
+ if (status) {
+ ath10k_err(ar, "could not register leds: %d\n",
+ status);
+ goto err_thermal_unregister;
+ }
+
set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags);
return;
+err_thermal_unregister:
+ ath10k_thermal_unregister(ar);
err_spectral_destroy:
ath10k_spectral_destroy(ar);
err_debug_destroy:
@@ -3520,6 +3550,8 @@ void ath10k_core_unregister(struct ath10k *ar)
if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
return;
+ ath10k_leds_unregister(ar);
+
ath10k_thermal_unregister(ar);
/* Stop spectral before unregistering from mac80211 to remove the
* relayfs debugfs file cleanly. Otherwise the parent debugfs tree
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index b00099f0b24e..446dca74f06a 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -15,6 +15,7 @@
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/time.h>
+#include <linux/leds.h>
#include "htt.h"
#include "htc.h"
@@ -1259,6 +1260,13 @@ struct ath10k {
} testmode;
struct {
+ struct gpio_led wifi_led;
+ struct led_classdev cdev;
+ char label[48];
+ u32 gpio_state_pin;
+ } leds;
+
+ struct {
/* protected by data_lock */
u32 rx_crc_err_drop;
u32 fw_crash_counter;
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 48897e5eca06..442091c6dfd2 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -512,6 +512,7 @@ struct ath10k_hw_params {
const char *name;
u32 patch_load_addr;
int uart_pin;
+ int led_pin;
u32 otp_exe_param;
/* Type of hw cycle counter wraparound logic, for more info
diff --git a/drivers/net/wireless/ath/ath10k/leds.c b/drivers/net/wireless/ath/ath10k/leds.c
new file mode 100644
index 000000000000..9b1d04eb4265
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/leds.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018 Sebastian Gottschall <s.gottschall@dd-wrt.com>
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/leds.h>
+
+#include "core.h"
+#include "wmi.h"
+#include "wmi-ops.h"
+
+#include "leds.h"
+
+static int ath10k_leds_set_brightness_blocking(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct ath10k *ar = container_of(led_cdev, struct ath10k,
+ leds.cdev);
+ struct gpio_led *led = &ar->leds.wifi_led;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON)
+ goto out;
+
+ ar->leds.gpio_state_pin = (brightness != LED_OFF) ^ led->active_low;
+ ath10k_wmi_gpio_output(ar, led->gpio, ar->leds.gpio_state_pin);
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+int ath10k_leds_start(struct ath10k *ar)
+{
+ if (ar->hw_params.led_pin == 0)
+ /* leds not supported */
+ return 0;
+
+ /* Under some circumstances the firmware reconfigures the GPIO pin
+ * back to its default state, so we need to reconfigure it here.
+ * This behaviour has only been seen on QCA9984 and QCA99XX devices
+ * so far.
+ */
+ ath10k_wmi_gpio_config(ar, ar->hw_params.led_pin, 0,
+ WMI_GPIO_PULL_NONE, WMI_GPIO_INTTYPE_DISABLE);
+ ath10k_wmi_gpio_output(ar, ar->hw_params.led_pin, 1);
+
+ return 0;
+}
+
+int ath10k_leds_register(struct ath10k *ar)
+{
+ int ret;
+
+ if (ar->hw_params.led_pin == 0)
+ /* leds not supported */
+ return 0;
+
+ snprintf(ar->leds.label, sizeof(ar->leds.label), "ath10k-%s",
+ wiphy_name(ar->hw->wiphy));
+ ar->leds.wifi_led.active_low = 1;
+ ar->leds.wifi_led.gpio = ar->hw_params.led_pin;
+ ar->leds.wifi_led.name = ar->leds.label;
+ ar->leds.wifi_led.default_state = LEDS_GPIO_DEFSTATE_KEEP;
+
+ ar->leds.cdev.name = ar->leds.label;
+ ar->leds.cdev.brightness_set_blocking = ath10k_leds_set_brightness_blocking;
+ ar->leds.cdev.default_trigger = ar->leds.wifi_led.default_trigger;
+
+ ret = led_classdev_register(wiphy_dev(ar->hw->wiphy), &ar->leds.cdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void ath10k_leds_unregister(struct ath10k *ar)
+{
+ if (ar->hw_params.led_pin == 0)
+ /* leds not supported */
+ return;
+
+ led_classdev_unregister(&ar->leds.cdev);
+}
+
diff --git a/drivers/net/wireless/ath/ath10k/leds.h b/drivers/net/wireless/ath/ath10k/leds.h
new file mode 100644
index 000000000000..56325b0875e5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/leds.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018 Sebastian Gottschall <s.gottschall@dd-wrt.com>
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _LEDS_H_
+#define _LEDS_H_
+
+#include "core.h"
+
+#ifdef CONFIG_ATH10K_LEDS
+void ath10k_leds_unregister(struct ath10k *ar);
+int ath10k_leds_start(struct ath10k *ar);
+int ath10k_leds_register(struct ath10k *ar);
+#else
+static inline void ath10k_leds_unregister(struct ath10k *ar)
+{
+}
+
+static inline int ath10k_leds_start(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline int ath10k_leds_register(struct ath10k *ar)
+{
+ return 0;
+}
+
+#endif
+#endif /* _LEDS_H_ */
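[Editor's note: because leds.h above provides inline no-op stubs when CONFIG_ATH10K_LEDS is unset, core code can call the LED hooks unconditionally, with no #ifdefs at the call sites. A hypothetical caller mirroring what core.c does earlier in this diff might look like this; the names here are illustrative only.]

#include "leds.h"

static int example_start(struct ath10k *ar)
{
        int ret;

        /* compiles to nothing when CONFIG_ATH10K_LEDS is not set */
        ret = ath10k_leds_start(ar);
        if (ret)
                return ret;

        return 0;
}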
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index e322b528baaf..a5da32e87106 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -25,6 +25,7 @@
#include "wmi-tlv.h"
#include "wmi-ops.h"
#include "wow.h"
+#include "leds.h"
/*********/
/* Rates */
@@ -5362,7 +5363,7 @@ err:
return ret;
}
-static void ath10k_stop(struct ieee80211_hw *hw)
+static void ath10k_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath10k *ar = hw->priv;
u32 opt;
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index 38e939f572a9..f1f33af0170a 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -1040,6 +1040,10 @@ static void ath10k_qmi_driver_event_work(struct work_struct *work)
switch (event->type) {
case ATH10K_QMI_EVENT_SERVER_ARRIVE:
ath10k_qmi_event_server_arrive(qmi);
+ if (qmi->no_msa_ready_indicator) {
+ ath10k_info(ar, "qmi not waiting for msa_ready indicator");
+ ath10k_qmi_event_msa_ready(qmi);
+ }
break;
case ATH10K_QMI_EVENT_SERVER_EXIT:
ath10k_qmi_event_server_exit(qmi);
@@ -1048,6 +1052,10 @@ static void ath10k_qmi_driver_event_work(struct work_struct *work)
ath10k_qmi_event_fw_ready_ind(qmi);
break;
case ATH10K_QMI_EVENT_MSA_READY_IND:
+ if (qmi->no_msa_ready_indicator) {
+ ath10k_warn(ar, "qmi unexpected msa_ready indicator");
+ break;
+ }
ath10k_qmi_event_msa_ready(qmi);
break;
default:
@@ -1077,6 +1085,9 @@ int ath10k_qmi_init(struct ath10k *ar, u32 msa_size)
if (of_property_read_bool(dev->of_node, "qcom,msa-fixed-perm"))
qmi->msa_fixed_perm = true;
+ if (of_property_read_bool(dev->of_node, "qcom,no-msa-ready-indicator"))
+ qmi->no_msa_ready_indicator = true;
+
ret = qmi_handle_init(&qmi->qmi_hdl,
WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
&ath10k_qmi_ops, qmi_msg_handler);
diff --git a/drivers/net/wireless/ath/ath10k/qmi.h b/drivers/net/wireless/ath/ath10k/qmi.h
index 89464239fe96..0816eb4e4a18 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.h
+++ b/drivers/net/wireless/ath/ath10k/qmi.h
@@ -107,6 +107,7 @@ struct ath10k_qmi {
char fw_build_timestamp[MAX_TIMESTAMP_LEN + 1];
struct ath10k_qmi_cal_data cal_data[MAX_NUM_CAL_V01];
bool msa_fixed_perm;
+ bool no_msa_ready_indicator;
enum ath10k_qmi_state state;
};
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 8530550cf5df..0fe47d51013c 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -1635,10 +1635,10 @@ static int ath10k_fw_init(struct ath10k *ar)
ar_snoc->fw.dev = &pdev->dev;
- iommu_dom = iommu_domain_alloc(&platform_bus_type);
- if (!iommu_dom) {
+ iommu_dom = iommu_paging_domain_alloc(ar_snoc->fw.dev);
+ if (IS_ERR(iommu_dom)) {
ath10k_err(ar, "failed to allocate iommu domain\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(iommu_dom);
goto err_unregister;
}
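[Editor's note: the snoc.c hunk above switches from iommu_domain_alloc(), which returned NULL on failure, to iommu_paging_domain_alloc(), which returns an ERR_PTR()-encoded error. A minimal sketch of the caller pattern the new API requires, with an illustrative function name:]

#include <linux/err.h>
#include <linux/iommu.h>

static int example_alloc_domain(struct device *dev, struct iommu_domain **out)
{
        struct iommu_domain *dom;

        dom = iommu_paging_domain_alloc(dev);   /* never returns NULL */
        if (IS_ERR(dom))
                return PTR_ERR(dom);            /* propagate the errno */

        *out = dom;
        return 0;
}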
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index aa57d807491c..f3f6b5954b27 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -226,7 +226,10 @@ struct wmi_ops {
const struct wmi_bb_timing_cfg_arg *arg);
struct sk_buff *(*gen_per_peer_per_tid_cfg)(struct ath10k *ar,
const struct wmi_per_peer_per_tid_cfg_arg *arg);
+ struct sk_buff *(*gen_gpio_config)(struct ath10k *ar, u32 gpio_num,
+ u32 input, u32 pull_type, u32 intr_mode);
+ struct sk_buff *(*gen_gpio_output)(struct ath10k *ar, u32 gpio_num, u32 set);
};
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@@ -1122,6 +1125,35 @@ ath10k_wmi_force_fw_hang(struct ath10k *ar,
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}
+static inline int ath10k_wmi_gpio_config(struct ath10k *ar, u32 gpio_num,
+ u32 input, u32 pull_type, u32 intr_mode)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_gpio_config)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_gpio_config(ar, gpio_num, input, pull_type, intr_mode);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->gpio_config_cmdid);
+}
+
+static inline int ath10k_wmi_gpio_output(struct ath10k *ar, u32 gpio_num, u32 set)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_gpio_output)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_gpio_output(ar, gpio_num, set);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->gpio_output_cmdid);
+}
+
static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index aed97fd121ba..dbaf26d6a7a6 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -4606,6 +4606,8 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_echo = ath10k_wmi_tlv_op_gen_echo,
.gen_vdev_spectral_conf = ath10k_wmi_tlv_op_gen_vdev_spectral_conf,
.gen_vdev_spectral_enable = ath10k_wmi_tlv_op_gen_vdev_spectral_enable,
+ /* .gen_gpio_config not implemented */
+ /* .gen_gpio_output not implemented */
};
static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 80d255aaff1b..fe2344598364 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -7493,6 +7493,49 @@ ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
return skb;
}
+static struct sk_buff *ath10k_wmi_op_gen_gpio_config(struct ath10k *ar,
+ u32 gpio_num, u32 input,
+ u32 pull_type, u32 intr_mode)
+{
+ struct wmi_gpio_config_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_gpio_config_cmd *)skb->data;
+ cmd->pull_type = __cpu_to_le32(pull_type);
+ cmd->gpio_num = __cpu_to_le32(gpio_num);
+ cmd->input = __cpu_to_le32(input);
+ cmd->intr_mode = __cpu_to_le32(intr_mode);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi gpio_config gpio_num 0x%08x input 0x%08x pull_type 0x%08x intr_mode 0x%08x\n",
+ gpio_num, input, pull_type, intr_mode);
+
+ return skb;
+}
+
+static struct sk_buff *ath10k_wmi_op_gen_gpio_output(struct ath10k *ar,
+ u32 gpio_num, u32 set)
+{
+ struct wmi_gpio_output_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_gpio_output_cmd *)skb->data;
+ cmd->gpio_num = __cpu_to_le32(gpio_num);
+ cmd->set = __cpu_to_le32(set);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi gpio_output gpio_num 0x%08x set 0x%08x\n",
+ gpio_num, set);
+
+ return skb;
+}
+
static struct sk_buff *
ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
enum wmi_sta_ps_mode psmode)
@@ -9157,6 +9200,9 @@ static const struct wmi_ops wmi_ops = {
.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
.gen_echo = ath10k_wmi_op_gen_echo,
+ .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+ .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
+
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@@ -9227,6 +9273,8 @@ static const struct wmi_ops wmi_10_1_ops = {
.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
.gen_echo = ath10k_wmi_op_gen_echo,
+ .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+ .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@@ -9299,6 +9347,8 @@ static const struct wmi_ops wmi_10_2_ops = {
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+ .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+ .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
/* .gen_pdev_enable_adaptive_cca not implemented */
};
@@ -9370,6 +9420,8 @@ static const struct wmi_ops wmi_10_2_4_ops = {
ath10k_wmi_op_gen_pdev_enable_adaptive_cca,
.get_vdev_subtype = ath10k_wmi_10_2_4_op_get_vdev_subtype,
.gen_bb_timing = ath10k_wmi_10_2_4_op_gen_bb_timing,
+ .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+ .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@@ -9451,6 +9503,8 @@ static const struct wmi_ops wmi_10_4_ops = {
.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
.gen_echo = ath10k_wmi_op_gen_echo,
.gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
+ .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+ .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
};
int ath10k_wmi_attach(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 2379501225a4..0faefc0a9a40 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -3034,6 +3034,41 @@ enum wmi_10_4_feature_mask {
};
+/* WMI_GPIO_CONFIG_CMDID */
+enum {
+ WMI_GPIO_PULL_NONE,
+ WMI_GPIO_PULL_UP,
+ WMI_GPIO_PULL_DOWN,
+};
+
+enum {
+ WMI_GPIO_INTTYPE_DISABLE,
+ WMI_GPIO_INTTYPE_RISING_EDGE,
+ WMI_GPIO_INTTYPE_FALLING_EDGE,
+ WMI_GPIO_INTTYPE_BOTH_EDGE,
+ WMI_GPIO_INTTYPE_LEVEL_LOW,
+ WMI_GPIO_INTTYPE_LEVEL_HIGH
+};
+
+/* WMI_GPIO_CONFIG_CMDID */
+struct wmi_gpio_config_cmd {
+ __le32 gpio_num; /* GPIO number to be setup */
+ __le32 input; /* 0 - Output/ 1 - Input */
+ __le32 pull_type; /* Pull type defined above */
+ __le32 intr_mode; /* Interrupt mode defined above (Input) */
+} __packed;
+
+/* WMI_GPIO_OUTPUT_CMDID */
+struct wmi_gpio_output_cmd {
+ __le32 gpio_num; /* GPIO number to be setup */
+ __le32 set; /* Set the GPIO pin */
+} __packed;
+
+/* WMI_GPIO_INPUT_EVENTID */
+struct wmi_gpio_input_event {
+ __le32 gpio_num; /* GPIO number which changed state */
+} __packed;
+
struct wmi_ext_resource_config_10_4_cmd {
/* contains enum wmi_host_platform_type */
__le32 host_platform_config;
diff --git a/drivers/net/wireless/ath/ath11k/Kconfig b/drivers/net/wireless/ath/ath11k/Kconfig
index 27f0523bf967..2e935d381b6b 100644
--- a/drivers/net/wireless/ath/ath11k/Kconfig
+++ b/drivers/net/wireless/ath/ath11k/Kconfig
@@ -24,6 +24,7 @@ config ATH11K_PCI
select MHI_BUS
select QRTR
select QRTR_MHI
+ select PCI_PWRCTL_PWRSEQ if HAVE_PWRCTL
help
This module adds support for PCIE bus
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index ca0f17ddebba..634d385fd9ad 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -954,6 +954,36 @@ static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
return 0;
}
+static int ath11k_ahb_ce_remap(struct ath11k_base *ab)
+{
+ const struct ce_remap *ce_remap = ab->hw_params.ce_remap;
+ struct platform_device *pdev = ab->pdev;
+
+ if (!ce_remap) {
+ /* no separate CE register space */
+ ab->mem_ce = ab->mem;
+ return 0;
+ }
+
+ /* ce register space is moved out of wcss unlike ipq8074 or ipq6018
+ * and the space is not contiguous, hence remapping the CE registers
+ * to a new space for accessing them.
+ */
+ ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
+ if (!ab->mem_ce) {
+ dev_err(&pdev->dev, "ce ioremap error\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void ath11k_ahb_ce_unmap(struct ath11k_base *ab)
+{
+ if (ab->hw_params.ce_remap)
+ iounmap(ab->mem_ce);
+}
+
static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
@@ -1001,10 +1031,10 @@ static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
ab_ahb->fw.dev = &pdev->dev;
- iommu_dom = iommu_domain_alloc(&platform_bus_type);
- if (!iommu_dom) {
+ iommu_dom = iommu_paging_domain_alloc(ab_ahb->fw.dev);
+ if (IS_ERR(iommu_dom)) {
ath11k_err(ab, "failed to allocate iommu domain\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(iommu_dom);
goto err_unregister;
}
@@ -1146,25 +1176,13 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
if (ret)
goto err_core_free;
- ab->mem_ce = ab->mem;
-
- if (ab->hw_params.ce_remap) {
- const struct ce_remap *ce_remap = ab->hw_params.ce_remap;
- /* ce register space is moved out of wcss unlike ipq8074 or ipq6018
- * and the space is not contiguous, hence remapping the CE registers
- * to a new space for accessing them.
- */
- ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
- if (!ab->mem_ce) {
- dev_err(&pdev->dev, "ce ioremap error\n");
- ret = -ENOMEM;
- goto err_core_free;
- }
- }
+ ret = ath11k_ahb_ce_remap(ab);
+ if (ret)
+ goto err_core_free;
ret = ath11k_ahb_fw_resources_init(ab);
if (ret)
- goto err_core_free;
+ goto err_ce_unmap;
ret = ath11k_ahb_setup_smp2p_handle(ab);
if (ret)
@@ -1216,6 +1234,9 @@ err_release_smp2p_handle:
err_fw_deinit:
ath11k_ahb_fw_resource_deinit(ab);
+err_ce_unmap:
+ ath11k_ahb_ce_unmap(ab);
+
err_core_free:
ath11k_core_free(ab);
platform_set_drvdata(pdev, NULL);
@@ -1248,9 +1269,7 @@ static void ath11k_ahb_free_resources(struct ath11k_base *ab)
ath11k_ahb_release_smp2p_handle(ab);
ath11k_ahb_fw_resource_deinit(ab);
ath11k_ce_free_pipes(ab);
-
- if (ab->hw_params.ce_remap)
- iounmap(ab->mem_ce);
+ ath11k_ahb_ce_unmap(ab);
ath11k_core_free(ab);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h
index 69946fc70077..bcde2fcf02cf 100644
--- a/drivers/net/wireless/ath/ath11k/ce.h
+++ b/drivers/net/wireless/ath/ath11k/ce.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_CE_H
@@ -146,7 +146,7 @@ struct ath11k_ce_ring {
/* Host address space */
void *base_addr_owner_space_unaligned;
/* CE address space */
- u32 base_addr_ce_space_unaligned;
+ dma_addr_t base_addr_ce_space_unaligned;
/* Actual start of descriptors.
* Aligned to descriptor-size boundary.
@@ -156,7 +156,7 @@ struct ath11k_ce_ring {
void *base_addr_owner_space;
/* CE address space */
- u32 base_addr_ce_space;
+ dma_addr_t base_addr_ce_space;
/* HAL ring id */
u32 hal_ring_id;
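[Editor's note: the ce.h change widens the CE ring base-address fields from u32 to dma_addr_t. Those addresses come from the DMA API, whose handles may not fit in 32 bits on these targets. A sketch of an allocation filling such a field, assuming the ring is obtained with dma_alloc_coherent(); names are illustrative.]

#include <linux/dma-mapping.h>

static void *example_alloc_ce_ring(struct device *dev, size_t nbytes,
                                   dma_addr_t *base_addr_ce_space)
{
        /* the handle must be stored as dma_addr_t, not u32, to avoid truncation */
        return dma_alloc_coherent(dev, nbytes, base_addr_ce_space, GFP_KERNEL);
}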
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index b82e8fb28541..03187df26000 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -62,7 +62,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = false,
.rxdma1_enable = true,
- .num_rxmda_per_pdev = 1,
+ .num_rxdma_per_pdev = 1,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
@@ -148,7 +148,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = false,
.rxdma1_enable = true,
- .num_rxmda_per_pdev = 1,
+ .num_rxdma_per_pdev = 1,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
@@ -232,7 +232,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = true,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 2,
+ .num_rxdma_per_pdev = 2,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
@@ -320,7 +320,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.svc_to_ce_map_len = 18,
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.rxdma1_enable = true,
- .num_rxmda_per_pdev = 1,
+ .num_rxdma_per_pdev = 1,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
@@ -404,7 +404,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = true,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 2,
+ .num_rxdma_per_pdev = 2,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
@@ -492,7 +492,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.svc_to_ce_map_len = 14,
.single_pdev_only = true,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 2,
+ .num_rxdma_per_pdev = 2,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
@@ -580,7 +580,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = true,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 1,
+ .num_rxdma_per_pdev = 1,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
@@ -673,7 +673,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.ce_ie_addr = &ath11k_ce_ie_addr_ipq5018,
.ce_remap = &ath11k_ce_remap_ipq5018,
.rxdma1_enable = true,
- .num_rxmda_per_pdev = RXDMA_PER_PDEV_5018,
+ .num_rxdma_per_pdev = RXDMA_PER_PDEV_5018,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
@@ -744,7 +744,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = true,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 2,
+ .num_rxdma_per_pdev = 2,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
@@ -1009,6 +1009,16 @@ int ath11k_core_resume(struct ath11k_base *ab)
return -ETIMEDOUT;
}
+ if (ab->hw_params.current_cc_support &&
+ ar->alpha2[0] != 0 && ar->alpha2[1] != 0) {
+ ret = ath11k_reg_set_cc(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to set country code during resume: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
ret = ath11k_dp_rx_pktlog_start(ab);
if (ret)
ath11k_warn(ab, "failed to start rx pktlog during resume: %d\n",
@@ -1801,7 +1811,7 @@ static int ath11k_core_start(struct ath11k_base *ab)
}
/* put hardware to DBS mode */
- if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxmda_per_pdev > 1) {
+ if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxdma_per_pdev > 1) {
ret = ath11k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
if (ret) {
ath11k_err(ab, "failed to send dbs mode: %d\n", ret);
@@ -1978,23 +1988,20 @@ static void ath11k_update_11d(struct work_struct *work)
struct ath11k_base *ab = container_of(work, struct ath11k_base, update_11d_work);
struct ath11k *ar;
struct ath11k_pdev *pdev;
- struct wmi_set_current_country_params set_current_param = {};
int ret, i;
- spin_lock_bh(&ab->base_lock);
- memcpy(&set_current_param.alpha2, &ab->new_alpha2, 2);
- spin_unlock_bh(&ab->base_lock);
-
- ath11k_dbg(ab, ATH11K_DBG_WMI, "update 11d new cc %c%c\n",
- set_current_param.alpha2[0],
- set_current_param.alpha2[1]);
-
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
- memcpy(&ar->alpha2, &set_current_param.alpha2, 2);
- ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+ spin_lock_bh(&ab->base_lock);
+ memcpy(&ar->alpha2, &ab->new_alpha2, 2);
+ spin_unlock_bh(&ab->base_lock);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "update 11d new cc %c%c for pdev %d\n",
+ ar->alpha2[0], ar->alpha2[1], i);
+
+ ret = ath11k_reg_set_cc(ar);
if (ret)
ath11k_warn(ar->ab,
"pdev id %d failed set current country code: %d\n",
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 205f40ee6b66..df24f0e409af 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -330,6 +330,9 @@ struct ath11k_chan_power_info {
s8 tx_power;
};
+/* ath11k only deals with 160 MHz, so 8 subchannels */
+#define ATH11K_NUM_PWR_LEVELS 8
+
/**
* struct ath11k_reg_tpc_power_info - regulatory TPC power info
* @is_psd_power: is PSD power or not
@@ -346,10 +349,10 @@ struct ath11k_reg_tpc_power_info {
u8 eirp_power;
enum wmi_reg_6ghz_ap_type ap_power_type;
u8 num_pwr_levels;
- u8 reg_max[IEEE80211_MAX_NUM_PWR_LEVEL];
+ u8 reg_max[ATH11K_NUM_PWR_LEVELS];
u8 ap_constraint_power;
- s8 tpe[IEEE80211_MAX_NUM_PWR_LEVEL];
- struct ath11k_chan_power_info chan_power_info[IEEE80211_MAX_NUM_PWR_LEVEL];
+ s8 tpe[ATH11K_NUM_PWR_LEVELS];
+ struct ath11k_chan_power_info chan_power_info[ATH11K_NUM_PWR_LEVELS];
};
struct ath11k_vif {
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index 414a5ce279f7..57281a135dd7 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -668,7 +668,7 @@ static ssize_t ath11k_write_extd_rx_stats(struct file *file,
ar->debug.rx_filter = tlv_filter.rx_filter;
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
HAL_RXDMA_MONITOR_STATUS,
@@ -1112,7 +1112,7 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
}
/* Clear rx filter set for monitor mode and rx status */
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
HAL_RXDMA_MONITOR_STATUS,
@@ -1171,7 +1171,7 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
HTT_RX_FP_DATA_FILTER_FLASG3;
}
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
ar->dp.mac_id + i,
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index 1a62407e5a9f..fbf666d0ecf1 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <crypto/hash.h>
@@ -830,8 +830,8 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
for (i = 0; i < ab->num_radios; i++) {
- for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
- int id = i * ab->hw_params.num_rxmda_per_pdev + j;
+ for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
+ int id = i * ab->hw_params.num_rxdma_per_pdev + j;
if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
BIT(id)) {
@@ -853,8 +853,8 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
ath11k_dp_process_reo_status(ab);
for (i = 0; i < ab->num_radios; i++) {
- for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
- int id = i * ab->hw_params.num_rxmda_per_pdev + j;
+ for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
+ int id = i * ab->hw_params.num_rxdma_per_pdev + j;
if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
@@ -913,7 +913,7 @@ void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
atomic_set(&dp->num_tx_pending, 0);
init_waitqueue_head(&dp->tx_empty_waitq);
- for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
+ for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
}
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index afd481f5858f..86485580dd89 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/ieee80211.h>
@@ -311,7 +311,7 @@ static void ath11k_dp_service_mon_ring(struct timer_list *t)
struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
int i;
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++)
ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);
mod_timer(&ab->mon_reap_timer, jiffies +
@@ -324,7 +324,7 @@ static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);
do {
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++)
reaped += ath11k_dp_rx_process_mon_rings(ab, i,
NULL,
DP_MON_SERVICE_BUDGET);
@@ -468,7 +468,7 @@ static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
rx_ring = &dp->rxdma_mon_buf_ring;
ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
rx_ring = &dp->rx_mon_status_refill_ring[i];
ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
}
@@ -506,7 +506,7 @@ static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
}
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
rx_ring = &dp->rx_mon_status_refill_ring[i];
ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
}
@@ -522,7 +522,7 @@ static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
if (ab->hw_params.rx_mac_buf_ring)
ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
@@ -585,7 +585,7 @@ static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
}
if (ar->ab->hw_params.rx_mac_buf_ring) {
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ret = ath11k_dp_srng_setup(ar->ab,
&dp->rx_mac_buf_ring[i],
HAL_RXDMA_BUF, 1,
@@ -598,7 +598,7 @@ static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
}
}
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
HAL_RXDMA_DST, 0, dp->mac_id + i,
DP_RXDMA_ERR_DST_RING_SIZE);
@@ -608,7 +608,7 @@ static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
}
}
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
ret = ath11k_dp_srng_setup(ar->ab,
srng,
@@ -1877,8 +1877,7 @@ static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
}
-static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
- enum hal_encrypt_type enctype)
+int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype)
{
switch (enctype) {
case HAL_ENCRYPT_TYPE_OPEN:
@@ -2990,11 +2989,52 @@ ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon,
}
}
+static enum dp_mon_status_buf_state
+ath11k_dp_rx_mon_buf_done(struct ath11k_base *ab, struct hal_srng *srng,
+ struct dp_rxdma_ring *rx_ring)
+{
+ struct ath11k_skb_rxcb *rxcb;
+ struct hal_tlv_hdr *tlv;
+ struct sk_buff *skb;
+ void *status_desc;
+ dma_addr_t paddr;
+ u32 cookie;
+ int buf_id;
+ u8 rbm;
+
+ status_desc = ath11k_hal_srng_src_next_peek(ab, srng);
+ if (!status_desc)
+ return DP_MON_STATUS_NO_DMA;
+
+ ath11k_hal_rx_buf_addr_info_get(status_desc, &paddr, &cookie, &rbm);
+
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ skb = idr_find(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ if (!skb)
+ return DP_MON_STATUS_NO_DMA;
+
+ rxcb = ATH11K_SKB_RXCB(skb);
+ dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ tlv = (struct hal_tlv_hdr *)skb->data;
+ if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_STATUS_BUFFER_DONE)
+ return DP_MON_STATUS_NO_DMA;
+
+ return DP_MON_STATUS_REPLINISH;
+}
+
static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
int *budget, struct sk_buff_head *skb_list)
{
struct ath11k *ar;
const struct ath11k_hw_hal_params *hal_params;
+ enum dp_mon_status_buf_state reap_status;
struct ath11k_pdev_dp *dp;
struct dp_rxdma_ring *rx_ring;
struct ath11k_mon_data *pmon;
@@ -3057,15 +3097,38 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
ath11k_warn(ab, "mon status DONE not set %lx, buf_id %d\n",
FIELD_GET(HAL_TLV_HDR_TAG,
tlv->tl), buf_id);
- /* If done status is missing, hold onto status
- * ring until status is done for this status
- * ring buffer.
- * Keep HP in mon_status_ring unchanged,
- * and break from here.
- * Check status for same buffer for next time
+ /* RxDMA status done bit might not be set even
+ * though tp is moved by HW.
*/
- pmon->buf_state = DP_MON_STATUS_NO_DMA;
- break;
+
+ /* If done status is missing:
+ * 1. As per MAC team's suggestion, when the HP + 1 entry
+ *    is peeked and its DMA is not done but the HP + 2
+ *    entry's DMA done is set, skip the HP + 1 entry and
+ *    start processing it in the next interrupt.
+ * 2. If the HP + 2 entry's DMA done is not set either,
+ *    keep polling the HP + 1 entry until its DMA done is
+ *    set, i.e. check the status of the same buffer again
+ *    in the next dp_rx_mon_status_srng_process pass.
+ */
+
+ reap_status = ath11k_dp_rx_mon_buf_done(ab, srng,
+ rx_ring);
+ if (reap_status == DP_MON_STATUS_NO_DMA)
+ continue;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(skb);
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ goto move_next;
}
spin_lock_bh(&rx_ring->idr_lock);
@@ -4391,7 +4454,7 @@ int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
}
if (ab->hw_params.rx_mac_buf_ring) {
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = dp->rx_mac_buf_ring[i].ring_id;
ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
mac_id + i, HAL_RXDMA_BUF);
@@ -4403,7 +4466,7 @@ int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
}
}
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = dp->rxdma_err_dst_ring[i].ring_id;
ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
mac_id + i, HAL_RXDMA_DST);
@@ -4443,7 +4506,7 @@ int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
}
config_refill_ring:
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i,
HAL_RXDMA_MONITOR_STATUS);
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h
index 623da3bf9dc8..c322e30caa96 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_DP_RX_H
#define ATH11K_DP_RX_H
@@ -95,4 +96,6 @@ int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id
int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab);
int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer);
+int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype);
+
#endif /* ATH11K_DP_RX_H */
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index 272b1c35f98d..8522c67baabf 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -353,8 +353,12 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
if (ts->acked) {
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
info->flags |= IEEE80211_TX_STAT_ACK;
- info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
- ts->ack_rssi;
+ info->status.ack_signal = ts->ack_rssi;
+
+ if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ab->wmi_ab.svc_map))
+ info->status.ack_signal += ATH11K_DEFAULT_NOISE_FLOOR;
+
info->status.flags |=
IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
} else {
@@ -584,8 +588,12 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
info->flags |= IEEE80211_TX_STAT_ACK;
- info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
- ts->ack_rssi;
+ info->status.ack_signal = ts->ack_rssi;
+
+ if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ab->wmi_ab.svc_map))
+ info->status.ack_signal += ATH11K_DEFAULT_NOISE_FLOOR;
+
info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
}
@@ -1035,7 +1043,7 @@ int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask)
int ret;
int i;
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
skb = ath11k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
@@ -1218,7 +1226,7 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
&tlv_filter);
} else if (!reset) {
/* set in monitor mode only */
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = dp->rx_mac_buf_ring[i].ring_id;
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
dp->mac_id + i,
@@ -1231,7 +1239,7 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
if (ret)
return ret;
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
if (!reset) {
tlv_filter.rx_filter =
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.h b/drivers/net/wireless/ath/ath11k/dp_tx.h
index 61be2265e09f..795fe3b8fa0d 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.h
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021, 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021, 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_DP_TX_H
@@ -13,7 +13,7 @@
struct ath11k_dp_htt_wbm_tx_status {
u32 msdu_id;
bool acked;
- int ack_rssi;
+ s8 ack_rssi;
u16 peer_id;
};
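[Editor's note: the dp_tx.c, dp_tx.h and hal_tx.h hunks change how the ACK RSSI is reported: it becomes a signed value, and the noise-floor offset is only added when the firmware lacks the WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT service, i.e. when it reports dB above the noise floor rather than dBm. A sketch of the conversion, assuming ath11k's default noise floor of -95 dBm; the names are illustrative.]

#include <linux/types.h>

#define EXAMPLE_NOISE_FLOOR_DBM (-95)   /* assumed ATH11K_DEFAULT_NOISE_FLOOR */

static inline s32 example_ack_signal_dbm(s8 ack_rssi, bool fw_reports_dbm)
{
        /* firmware with DB2DBM support already reports dBm */
        return fw_reports_dbm ? ack_rssi
                              : ack_rssi + EXAMPLE_NOISE_FLOOR_DBM;
}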
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index f3d04568c221..f02599bd1c36 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/dma-mapping.h>
#include "hal_tx.h"
@@ -796,6 +796,20 @@ u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
return desc;
}
+u32 *ath11k_hal_srng_src_next_peek(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ u32 next_hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
+
+ if (next_hp != srng->u.src_ring.cached_tp)
+ return srng->ring_base_vaddr + next_hp;
+
+ return NULL;
+}
+
u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng)
{
lockdep_assert_held(&srng->lock);
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index e453c137385e..dc8bbe073017 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -947,6 +947,8 @@ u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng);
int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
bool sync_hw_ptr);
u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng);
+u32 *ath11k_hal_srng_src_next_peek(struct ath11k_base *ab,
+ struct hal_srng *srng);
u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
struct hal_srng *srng);
u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.h b/drivers/net/wireless/ath/ath11k/hal_tx.h
index c5e88364afe5..46d17abd808b 100644
--- a/drivers/net/wireless/ath/ath11k/hal_tx.h
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_HAL_TX_H
@@ -54,7 +54,7 @@ struct hal_tx_info {
struct hal_tx_status {
enum hal_wbm_rel_src_module buf_rel_source;
enum hal_wbm_tqm_rel_reason status;
- u8 ack_rssi;
+ s8 ack_rssi;
u32 flags; /* %HAL_TX_STATUS_FLAGS_ */
u32 ppdu_id;
u8 try_cnt;
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 14ef4eb48f80..300322535766 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_HW_H
@@ -167,7 +167,7 @@ struct ath11k_hw_params {
bool single_pdev_only;
bool rxdma1_enable;
- int num_rxmda_per_pdev;
+ int num_rxdma_per_pdev;
bool rx_mac_buf_ring;
bool vdev_start_delay;
bool htt_peer_map_v2;
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 9b96dbb21d83..ba910ae2c676 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -4229,6 +4229,7 @@ static int ath11k_install_key(struct ath11k_vif *arvif,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
arg.key_cipher = WMI_CIPHER_AES_CCM;
/* TODO: Re-check if flag is valid */
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
@@ -4238,12 +4239,10 @@ static int ath11k_install_key(struct ath11k_vif *arvif,
arg.key_txmic_len = 8;
arg.key_rxmic_len = 8;
break;
- case WLAN_CIPHER_SUITE_CCMP_256:
- arg.key_cipher = WMI_CIPHER_AES_CCM;
- break;
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
arg.key_cipher = WMI_CIPHER_AES_GCM;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
break;
default:
ath11k_warn(ar->ab, "cipher %d is not supported\n", key->cipher);
@@ -5903,7 +5902,10 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
{
struct ath11k_base *ab = ar->ab;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
struct ieee80211_tx_info *info;
+ enum hal_encrypt_type enctype;
+ unsigned int mic_len;
dma_addr_t paddr;
int buf_id;
int ret;
@@ -5927,7 +5929,12 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
ieee80211_has_protected(hdr->frame_control)) {
- skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+ if (!(skb_cb->flags & ATH11K_SKB_CIPHER_SET))
+ ath11k_warn(ab, "WMI management tx frame without ATH11K_SKB_CIPHER_SET");
+
+ enctype = ath11k_dp_tx_get_encrypt_type(skb_cb->cipher);
+ mic_len = ath11k_dp_rx_crypto_mic_len(ar, enctype);
+ skb_put(skb, mic_len);
}
}
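
The hunk above replaces the hard-coded 8-byte CCMP MIC with a cipher-dependent length. A hedged, self-contained sketch of that idea (illustrative names, not the driver's ath11k_dp_rx_crypto_mic_len(); MIC lengths per IEEE 802.11: CCMP-128 uses 8 bytes, CCMP-256/GCMP-128/GCMP-256 use 16 bytes):

#include <stdio.h>

/* illustrative cipher identifiers, not the WLAN_CIPHER_SUITE_* values */
enum example_cipher { EX_CCMP_128, EX_CCMP_256, EX_GCMP_128, EX_GCMP_256 };

static unsigned int example_mic_len(enum example_cipher cipher)
{
        switch (cipher) {
        case EX_CCMP_128:
                return 8;       /* CCMP-128 MIC */
        case EX_CCMP_256:
        case EX_GCMP_128:
        case EX_GCMP_256:
                return 16;      /* CCMP-256 and GCMP MICs */
        }
        return 8;
}

int main(void)
{
        printf("CCMP-256 MIC: %u bytes\n", example_mic_len(EX_CCMP_256));
        return 0;
}
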
@@ -6108,7 +6115,7 @@ static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable)
tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar);
}
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
ar->dp.mac_id + i,
@@ -6278,7 +6285,7 @@ err:
return ret;
}
-static void ath11k_mac_op_stop(struct ieee80211_hw *hw)
+static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath11k *ar = hw->priv;
struct htt_ppdu_stats_info *ppdu_stats, *tmp;
@@ -7507,32 +7514,6 @@ static int ath11k_mac_stop_vdev_early(struct ieee80211_hw *hw,
return 0;
}
-static u8 ath11k_mac_get_tpe_count(u8 txpwr_intrprt, u8 txpwr_cnt)
-{
- switch (txpwr_intrprt) {
- /* Refer "Table 9-276-Meaning of Maximum Transmit Power Count subfield
- * if the Maximum Transmit Power Interpretation subfield is 0 or 2" of
- * "IEEE Std 802.11ax 2021".
- */
- case IEEE80211_TPE_LOCAL_EIRP:
- case IEEE80211_TPE_REG_CLIENT_EIRP:
- txpwr_cnt = txpwr_cnt <= 3 ? txpwr_cnt : 3;
- txpwr_cnt = txpwr_cnt + 1;
- break;
- /* Refer "Table 9-277-Meaning of Maximum Transmit Power Count subfield
- * if Maximum Transmit Power Interpretation subfield is 1 or 3" of
- * "IEEE Std 802.11ax 2021".
- */
- case IEEE80211_TPE_LOCAL_EIRP_PSD:
- case IEEE80211_TPE_REG_CLIENT_EIRP_PSD:
- txpwr_cnt = txpwr_cnt <= 4 ? txpwr_cnt : 4;
- txpwr_cnt = txpwr_cnt ? (BIT(txpwr_cnt - 1)) : 1;
- break;
- }
-
- return txpwr_cnt;
-}
-
static u8 ath11k_mac_get_num_pwr_levels(struct cfg80211_chan_def *chan_def)
{
if (chan_def->chan->flags & IEEE80211_CHAN_PSD) {
@@ -7688,7 +7669,7 @@ void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar,
struct ieee80211_channel *chan, *temp_chan;
u8 pwr_lvl_idx, num_pwr_levels, pwr_reduction;
bool is_psd_power = false, is_tpe_present = false;
- s8 max_tx_power[IEEE80211_MAX_NUM_PWR_LEVEL],
+ s8 max_tx_power[ATH11K_NUM_PWR_LEVELS],
psd_power, tx_power;
s8 eirp_power = 0;
u16 start_freq, center_freq;
@@ -7701,7 +7682,8 @@ void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar,
is_tpe_present = true;
num_pwr_levels = arvif->reg_tpc_info.num_pwr_levels;
} else {
- num_pwr_levels = ath11k_mac_get_num_pwr_levels(&ctx->def);
+ num_pwr_levels =
+ ath11k_mac_get_num_pwr_levels(&bss_conf->chanreq.oper);
}
for (pwr_lvl_idx = 0; pwr_lvl_idx < num_pwr_levels; pwr_lvl_idx++) {
@@ -7858,33 +7840,23 @@ static void ath11k_mac_parse_tx_pwr_env(struct ath11k *ar,
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
- struct ieee80211_tx_pwr_env *single_tpe;
+ struct ieee80211_parsed_tpe_eirp *non_psd = NULL;
+ struct ieee80211_parsed_tpe_psd *psd = NULL;
enum wmi_reg_6ghz_client_type client_type;
struct cur_regulatory_info *reg_info;
+ u8 local_tpe_count, reg_tpe_count;
+ bool use_local_tpe;
int i;
- u8 pwr_count, pwr_interpret, pwr_category;
- u8 psd_index = 0, non_psd_index = 0, local_tpe_count = 0, reg_tpe_count = 0;
- bool use_local_tpe, non_psd_set = false, psd_set = false;
reg_info = &ab->reg_info_store[ar->pdev_idx];
client_type = reg_info->client_type;
- for (i = 0; i < bss_conf->tx_pwr_env_num; i++) {
- single_tpe = &bss_conf->tx_pwr_env[i];
- pwr_category = u8_get_bits(single_tpe->tx_power_info,
- IEEE80211_TX_PWR_ENV_INFO_CATEGORY);
- pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
- IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
-
- if (pwr_category == client_type) {
- if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP ||
- pwr_interpret == IEEE80211_TPE_LOCAL_EIRP_PSD)
- local_tpe_count++;
- else if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP ||
- pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP_PSD)
- reg_tpe_count++;
- }
- }
+ local_tpe_count =
+ bss_conf->tpe.max_local[client_type].valid +
+ bss_conf->tpe.psd_local[client_type].valid;
+ reg_tpe_count =
+ bss_conf->tpe.max_reg_client[client_type].valid +
+ bss_conf->tpe.psd_reg_client[client_type].valid;
if (!reg_tpe_count && !local_tpe_count) {
ath11k_warn(ab,
@@ -7897,83 +7869,44 @@ static void ath11k_mac_parse_tx_pwr_env(struct ath11k *ar,
use_local_tpe = false;
}
- for (i = 0; i < bss_conf->tx_pwr_env_num; i++) {
- single_tpe = &bss_conf->tx_pwr_env[i];
- pwr_category = u8_get_bits(single_tpe->tx_power_info,
- IEEE80211_TX_PWR_ENV_INFO_CATEGORY);
- pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
- IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
-
- if (pwr_category != client_type)
- continue;
-
- /* get local transmit power envelope */
- if (use_local_tpe) {
- if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP) {
- non_psd_index = i;
- non_psd_set = true;
- } else if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP_PSD) {
- psd_index = i;
- psd_set = true;
- }
- /* get regulatory transmit power envelope */
- } else {
- if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP) {
- non_psd_index = i;
- non_psd_set = true;
- } else if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP_PSD) {
- psd_index = i;
- psd_set = true;
- }
- }
+ if (use_local_tpe) {
+ psd = &bss_conf->tpe.psd_local[client_type];
+ if (!psd->valid)
+ psd = NULL;
+ non_psd = &bss_conf->tpe.max_local[client_type];
+ if (!non_psd->valid)
+ non_psd = NULL;
+ } else {
+ psd = &bss_conf->tpe.psd_reg_client[client_type];
+ if (!psd->valid)
+ psd = NULL;
+ non_psd = &bss_conf->tpe.max_reg_client[client_type];
+ if (!non_psd->valid)
+ non_psd = NULL;
}
- if (non_psd_set && !psd_set) {
- single_tpe = &bss_conf->tx_pwr_env[non_psd_index];
- pwr_count = u8_get_bits(single_tpe->tx_power_info,
- IEEE80211_TX_PWR_ENV_INFO_COUNT);
- pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
- IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+ if (non_psd && !psd) {
arvif->reg_tpc_info.is_psd_power = false;
arvif->reg_tpc_info.eirp_power = 0;
- arvif->reg_tpc_info.num_pwr_levels =
- ath11k_mac_get_tpe_count(pwr_interpret, pwr_count);
+ arvif->reg_tpc_info.num_pwr_levels = non_psd->count;
for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) {
ath11k_dbg(ab, ATH11K_DBG_MAC,
"non PSD power[%d] : %d\n",
- i, single_tpe->tx_power[i]);
- arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[i] / 2;
+ i, non_psd->power[i]);
+ arvif->reg_tpc_info.tpe[i] = non_psd->power[i] / 2;
}
}
- if (psd_set) {
- single_tpe = &bss_conf->tx_pwr_env[psd_index];
- pwr_count = u8_get_bits(single_tpe->tx_power_info,
- IEEE80211_TX_PWR_ENV_INFO_COUNT);
- pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
- IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
- arvif->reg_tpc_info.is_psd_power = true;
+ if (psd) {
+ arvif->reg_tpc_info.num_pwr_levels = psd->count;
- if (pwr_count == 0) {
+ for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) {
ath11k_dbg(ab, ATH11K_DBG_MAC,
- "TPE PSD power : %d\n", single_tpe->tx_power[0]);
- arvif->reg_tpc_info.num_pwr_levels =
- ath11k_mac_get_num_pwr_levels(&ctx->def);
-
- for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++)
- arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[0] / 2;
- } else {
- arvif->reg_tpc_info.num_pwr_levels =
- ath11k_mac_get_tpe_count(pwr_interpret, pwr_count);
-
- for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) {
- ath11k_dbg(ab, ATH11K_DBG_MAC,
- "TPE PSD power[%d] : %d\n",
- i, single_tpe->tx_power[i]);
- arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[i] / 2;
- }
+ "TPE PSD power[%d] : %d\n",
+ i, psd->power[i]);
+ arvif->reg_tpc_info.tpe[i] = psd->power[i] / 2;
}
}
}
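
The rewritten parser above consumes mac80211's pre-parsed TPE data (bss_conf->tpe) instead of decoding the raw elements by hand. A small, self-contained sketch of the remaining arithmetic, under the assumption (matching the hunk) that TPE power values arrive in 0.5 dB steps and are halved before use; names below are illustrative:

#include <stdio.h>

#define EX_NUM_PWR_LEVELS 8

/* mirrors the shape of the parsed PSD TPE data, illustrative only */
struct example_parsed_psd {
        int valid;
        unsigned char count;
        signed char power[EX_NUM_PWR_LEVELS];   /* in 0.5 dB units */
};

static void example_fill_levels(signed char *out,
                                const struct example_parsed_psd *psd)
{
        for (unsigned int i = 0; i < psd->count; i++)
                out[i] = psd->power[i] / 2;     /* 0.5 dB units -> dB */
}

int main(void)
{
        struct example_parsed_psd psd = { .valid = 1, .count = 2, .power = { 22, 30 } };
        signed char tpe[EX_NUM_PWR_LEVELS] = { 0 };

        example_fill_levels(tpe, &psd);
        printf("level0 = %d, level1 = %d\n", tpe[0], tpe[1]);
        return 0;
}
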
@@ -8851,12 +8784,8 @@ ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
ieee80211_wake_queues(ar->hw);
if (ar->ab->hw_params.current_cc_support &&
- ar->alpha2[0] != 0 && ar->alpha2[1] != 0) {
- struct wmi_set_current_country_params set_current_param = {};
-
- memcpy(&set_current_param.alpha2, ar->alpha2, 2);
- ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
- }
+ ar->alpha2[0] != 0 && ar->alpha2[1] != 0)
+ ath11k_reg_set_cc(ar);
if (ab->is_reset) {
recovery_count = atomic_inc_return(&ab->recovery_count);
@@ -9055,8 +8984,11 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
- sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi) +
- ATH11K_DEFAULT_NOISE_FLOOR;
+ sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi);
+
+ if (!db2dbm)
+ sinfo->signal_avg += ATH11K_DEFAULT_NOISE_FLOOR;
+
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
}
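
A self-contained sketch of the signal_avg fix above: when the firmware supports hardware dB-to-dBm conversion the averaged RSSI is already absolute, otherwise it is relative to the noise floor and the driver adds ATH11K_DEFAULT_NOISE_FLOOR. The -95 below is a stand-in value, not a quote of the driver's define:

#include <stdio.h>

#define EX_DEFAULT_NOISE_FLOOR (-95)    /* stand-in for ATH11K_DEFAULT_NOISE_FLOOR */

static int example_signal_avg(int avg_rssi, int db2dbm)
{
        /* firmware already reports dBm when db2dbm conversion is supported */
        if (db2dbm)
                return avg_rssi;

        /* otherwise the value is relative to the noise floor */
        return avg_rssi + EX_DEFAULT_NOISE_FLOOR;
}

int main(void)
{
        printf("legacy fw: %d dBm, db2dbm fw: %d dBm\n",
               example_signal_avg(40, 0), example_signal_avg(-55, 1));
        return 0;
}
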
@@ -9091,7 +9023,6 @@ static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct inet6_ifaddr *ifa6;
struct ifacaddr6 *ifaca6;
- struct list_head *p;
u32 count, scope;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "op ipv6 changed\n");
@@ -9099,7 +9030,12 @@ static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
offload = &arvif->arp_ns_offload;
count = 0;
- /* Note: read_lock_bh() calls rcu_read_lock() */
+ /* The _ipv6_changed() callback is invoked with the RCU read lock already
+ * held by atomic_notifier_call_chain(), so there is no need to call
+ * rcu_read_lock() again here. Note that with CONFIG_PREEMPT_RT enabled,
+ * read_lock_bh() also calls rcu_read_lock(). This is fine because RCU
+ * read-side critical sections are allowed to nest.
+ */
read_lock_bh(&idev->lock);
memset(offload->ipv6_addr, 0, sizeof(offload->ipv6_addr));
@@ -9107,11 +9043,10 @@ static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
memcpy(offload->mac_addr, vif->addr, ETH_ALEN);
/* get unicast address */
- list_for_each(p, &idev->addr_list) {
+ list_for_each_entry(ifa6, &idev->addr_list, if_list) {
if (count >= ATH11K_IPV6_MAX_COUNT)
goto generate;
- ifa6 = list_entry(p, struct inet6_ifaddr, if_list);
if (ifa6->flags & IFA_F_DADFAILED)
continue;
scope = ipv6_addr_src_scope(&ifa6->addr);
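
The hunk above is an idiom cleanup: list_for_each() plus list_entry() collapses into list_for_each_entry(), which yields the containing structure directly. A self-contained sketch of the pattern with stand-in macros (the real ones live in <linux/list.h>):

#include <stddef.h>
#include <stdio.h>

/* tiny stand-ins for the kernel's list helpers, enough for the idiom */
struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)                             \
        for ((pos) = container_of((head)->next, typeof(*(pos)), member);   \
             &(pos)->member != (head);                                     \
             (pos) = container_of((pos)->member.next, typeof(*(pos)), member))

struct example_addr {
        int value;
        struct list_head node;
};

int main(void)
{
        struct list_head head = { &head, &head };
        struct example_addr a = { .value = 1 }, b = { .value = 2 };
        struct example_addr *entry;

        /* hand-rolled two-element list: head -> a -> b -> head */
        head.next = &a.node; a.node.prev = &head;
        a.node.next = &b.node; b.node.prev = &a.node;
        b.node.next = &head; head.prev = &b.node;

        list_for_each_entry(entry, &head, node)
                printf("value = %d\n", entry->value);
        return 0;
}
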
@@ -10325,11 +10260,8 @@ static int __ath11k_mac_register(struct ath11k *ar)
}
if (ab->hw_params.current_cc_support && ab->new_alpha2[0]) {
- struct wmi_set_current_country_params set_current_param = {};
-
- memcpy(&set_current_param.alpha2, ab->new_alpha2, 2);
memcpy(&ar->alpha2, ab->new_alpha2, 2);
- ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+ ret = ath11k_reg_set_cc(ar);
if (ret)
ath11k_warn(ar->ab,
"failed set cc code for mac register: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index d4a243b64f6c..1bc648920ab6 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -2293,7 +2293,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab,
struct qmi_txn txn;
const u8 *temp = data;
void __iomem *bdf_addr = NULL;
- int ret;
+ int ret = 0;
u32 remaining = len;
req = kzalloc(sizeof(*req), GFP_KERNEL);
@@ -2859,7 +2859,7 @@ int ath11k_qmi_firmware_start(struct ath11k_base *ab,
int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab)
{
- int timeout;
+ long time_left;
if (!ath11k_core_coldboot_cal_support(ab) ||
ab->hw_params.cbcal_restart_fw == 0)
@@ -2867,11 +2867,11 @@ int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab)
ath11k_dbg(ab, ATH11K_DBG_QMI, "wait for cold boot done\n");
- timeout = wait_event_timeout(ab->qmi.cold_boot_waitq,
- (ab->qmi.cal_done == 1),
- ATH11K_COLD_BOOT_FW_RESET_DELAY);
+ time_left = wait_event_timeout(ab->qmi.cold_boot_waitq,
+ (ab->qmi.cal_done == 1),
+ ATH11K_COLD_BOOT_FW_RESET_DELAY);
- if (timeout <= 0) {
+ if (time_left <= 0) {
ath11k_warn(ab, "Coldboot Calibration timed out\n");
return -ETIMEDOUT;
}
@@ -2886,7 +2886,7 @@ EXPORT_SYMBOL(ath11k_qmi_fwreset_from_cold_boot);
static int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab)
{
- int timeout;
+ long time_left;
int ret;
ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_COLD_BOOT);
@@ -2897,10 +2897,10 @@ static int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab)
ath11k_dbg(ab, ATH11K_DBG_QMI, "Coldboot calibration wait started\n");
- timeout = wait_event_timeout(ab->qmi.cold_boot_waitq,
- (ab->qmi.cal_done == 1),
- ATH11K_COLD_BOOT_FW_RESET_DELAY);
- if (timeout <= 0) {
+ time_left = wait_event_timeout(ab->qmi.cold_boot_waitq,
+ (ab->qmi.cal_done == 1),
+ ATH11K_COLD_BOOT_FW_RESET_DELAY);
+ if (time_left <= 0) {
ath11k_warn(ab, "coldboot calibration timed out\n");
return 0;
}
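
Both QMI hunks above are the same type fix: wait_event_timeout() returns the remaining time in jiffies as a long (0 if the condition stayed false), so an int named "timeout" was misleading. A hedged sketch of the intended usage pattern, assuming kernel context; the wait queue and flag names are illustrative:

#include <linux/types.h>
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* illustrative kernel-context helper, not driver code */
static int example_wait_for_done(wait_queue_head_t *wq, bool *done)
{
        long time_left;

        /* returns remaining jiffies, or 0 when it times out */
        time_left = wait_event_timeout(*wq, *done, msecs_to_jiffies(5000));
        if (!time_left)
                return -ETIMEDOUT;

        return 0;
}
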
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index 737fcd450d4b..b0f289784dd3 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/rtnetlink.h>
@@ -49,7 +49,6 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct wmi_init_country_params init_country_param;
- struct wmi_set_current_country_params set_current_param = {};
struct ath11k *ar = hw->priv;
int ret;
@@ -83,9 +82,8 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
* reg info
*/
if (ar->ab->hw_params.current_cc_support) {
- memcpy(&set_current_param.alpha2, request->alpha2, 2);
- memcpy(&ar->alpha2, &set_current_param.alpha2, 2);
- ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+ memcpy(&ar->alpha2, request->alpha2, 2);
+ ret = ath11k_reg_set_cc(ar);
if (ret)
ath11k_warn(ar->ab,
"failed set current country code: %d\n", ret);
@@ -878,7 +876,7 @@ int ath11k_reg_handle_chan_list(struct ath11k_base *ab,
ath11k_reg_reset_info(reg_info);
if (ab->hw_params.single_pdev_only &&
- pdev_idx < ab->hw_params.num_rxmda_per_pdev)
+ pdev_idx < ab->hw_params.num_rxdma_per_pdev)
return 0;
goto fallback;
}
@@ -1017,3 +1015,11 @@ void ath11k_reg_free(struct ath11k_base *ab)
kfree(ab->new_regd[i]);
}
}
+
+int ath11k_reg_set_cc(struct ath11k *ar)
+{
+ struct wmi_set_current_country_params set_current_param = {};
+
+ memcpy(&set_current_param.alpha2, ar->alpha2, 2);
+ return ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+}
diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
index 64edb794260a..263ea9061948 100644
--- a/drivers/net/wireless/ath/ath11k/reg.h
+++ b/drivers/net/wireless/ath/ath11k/reg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_REG_H
@@ -45,5 +45,5 @@ ath11k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type);
int ath11k_reg_handle_chan_list(struct ath11k_base *ab,
struct cur_regulatory_info *reg_info,
enum ieee80211_ap_reg_power power_type);
-
+int ath11k_reg_set_cc(struct ath11k *ar);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 6ff01c45f165..38f175dd1557 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -9082,7 +9082,7 @@ int ath11k_wmi_attach(struct ath11k_base *ab)
ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
/* It's overwritten when service_ext_ready is handled */
- if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxmda_per_pdev > 1)
+ if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxdma_per_pdev > 1)
ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
/* TODO: Init remaining wmi soc resources required */
diff --git a/drivers/net/wireless/ath/ath12k/Kconfig b/drivers/net/wireless/ath/ath12k/Kconfig
index eceab9153e98..f64e7c322216 100644
--- a/drivers/net/wireless/ath/ath12k/Kconfig
+++ b/drivers/net/wireless/ath/ath12k/Kconfig
@@ -7,6 +7,7 @@ config ATH12K
select MHI_BUS
select QRTR
select QRTR_MHI
+ select PCI_PWRCTL_PWRSEQ if HAVE_PWRCTL
help
Enable support for Qualcomm Technologies Wi-Fi 7 (IEEE
802.11be) family of chipsets, for example WCN7850 and
diff --git a/drivers/net/wireless/ath/ath12k/Makefile b/drivers/net/wireless/ath/ath12k/Makefile
index d42480db7463..5a1ed20d730e 100644
--- a/drivers/net/wireless/ath/ath12k/Makefile
+++ b/drivers/net/wireless/ath/ath12k/Makefile
@@ -23,9 +23,10 @@ ath12k-y += core.o \
fw.o \
p2p.o
-ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o
+ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o
ath12k-$(CONFIG_ACPI) += acpi.o
ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
+ath12k-$(CONFIG_PM) += wow.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath12k/acpi.c b/drivers/net/wireless/ath/ath12k/acpi.c
index 443ba12e01f3..0555d35aab47 100644
--- a/drivers/net/wireless/ath/ath12k/acpi.c
+++ b/drivers/net/wireless/ath/ath12k/acpi.c
@@ -391,4 +391,6 @@ void ath12k_acpi_stop(struct ath12k_base *ab)
acpi_remove_notify_handler(ACPI_HANDLE(ab->dev),
ACPI_DEVICE_NOTIFY,
ath12k_acpi_dsm_notify);
+
+ memset(&ab->acpi, 0, sizeof(ab->acpi));
}
diff --git a/drivers/net/wireless/ath/ath12k/ce.h b/drivers/net/wireless/ath/ath12k/ce.h
index 79af3b6159f1..857bc5f9e946 100644
--- a/drivers/net/wireless/ath/ath12k/ce.h
+++ b/drivers/net/wireless/ath/ath12k/ce.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_CE_H
@@ -119,7 +119,7 @@ struct ath12k_ce_ring {
/* Host address space */
void *base_addr_owner_space_unaligned;
/* CE address space */
- u32 base_addr_ce_space_unaligned;
+ dma_addr_t base_addr_ce_space_unaligned;
/* Actual start of descriptors.
* Aligned to descriptor-size boundary.
@@ -129,7 +129,7 @@ struct ath12k_ce_ring {
void *base_addr_owner_space;
/* CE address space */
- u32 base_addr_ce_space;
+ dma_addr_t base_addr_ce_space;
/* HAL ring id */
u32 hal_ring_id;
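
The ce.h hunk above widens the CE base addresses from u32 to dma_addr_t, the kernel's type for DMA/bus addresses; on platforms where DMA addresses exceed 32 bits (large RAM, IOMMU remapping) a u32 would silently truncate them. A hedged sketch of the allocation pattern this protects, assuming kernel context; the function name is illustrative:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* illustrative kernel-context helper, not driver code */
static void *example_alloc_ce_ring(struct device *dev, size_t nbytes,
                                   dma_addr_t *paddr)
{
        /* *paddr may not fit in a u32 depending on the platform/IOMMU */
        return dma_alloc_coherent(dev, nbytes, paddr, GFP_KERNEL);
}
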
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index 6663f4e1792d..51252e8bc1ae 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -16,6 +16,7 @@
#include "hif.h"
#include "fw.h"
#include "debugfs.h"
+#include "wow.h"
unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
@@ -42,27 +43,48 @@ static int ath12k_core_rfkill_config(struct ath12k_base *ab)
return ret;
}
-int ath12k_core_suspend(struct ath12k_base *ab)
+/* Check whether to continue with the suspend/resume operation.
+ * Return:
+ * a negative value: an error occurred, do not continue.
+ * 0: no error, but do not continue.
+ * a positive value: no error, continue.
+ */
+static int ath12k_core_continue_suspend_resume(struct ath12k_base *ab)
{
struct ath12k *ar;
- int ret, i;
if (!ab->hw_params->supports_suspend)
return -EOPNOTSUPP;
- rcu_read_lock();
+ /* So far, all chips with supports_suspend set are single_pdev_only
+ * chips, so passing 0 as a dummy pdev_id is sufficient here.
+ */
+ ar = ab->pdevs[0].ar;
+ if (!ar || !ar->ah || ar->ah->state != ATH12K_HW_STATE_OFF)
+ return 0;
+
+ return 1;
+}
+
+int ath12k_core_suspend(struct ath12k_base *ab)
+{
+ struct ath12k *ar;
+ int ret, i;
+
+ ret = ath12k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
+ return ret;
+
for (i = 0; i < ab->num_radios; i++) {
- ar = ath12k_mac_get_ar_by_pdev_id(ab, i);
+ ar = ab->pdevs[i].ar;
if (!ar)
continue;
ret = ath12k_mac_wait_tx_complete(ar);
if (ret) {
ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
- rcu_read_unlock();
return ret;
}
}
- rcu_read_unlock();
/* PM framework skips suspend_late/resume_early callbacks
* if other devices report errors in their suspend callbacks.
@@ -83,8 +105,13 @@ EXPORT_SYMBOL(ath12k_core_suspend);
int ath12k_core_suspend_late(struct ath12k_base *ab)
{
- if (!ab->hw_params->supports_suspend)
- return -EOPNOTSUPP;
+ int ret;
+
+ ret = ath12k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
+ return ret;
+
+ ath12k_acpi_stop(ab);
ath12k_hif_irq_disable(ab);
ath12k_hif_ce_irq_disable(ab);
@@ -99,8 +126,9 @@ int ath12k_core_resume_early(struct ath12k_base *ab)
{
int ret;
- if (!ab->hw_params->supports_suspend)
- return -EOPNOTSUPP;
+ ret = ath12k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
+ return ret;
reinit_completion(&ab->restart_completed);
ret = ath12k_hif_power_up(ab);
@@ -114,9 +142,11 @@ EXPORT_SYMBOL(ath12k_core_resume_early);
int ath12k_core_resume(struct ath12k_base *ab)
{
long time_left;
+ int ret;
- if (!ab->hw_params->supports_suspend)
- return -EOPNOTSUPP;
+ ret = ath12k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
+ return ret;
time_left = wait_for_completion_timeout(&ab->restart_completed,
ATH12K_RESET_TIMEOUT_HZ);
@@ -994,9 +1024,8 @@ void ath12k_core_halt(struct ath12k *ar)
static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
{
struct ath12k *ar;
- struct ath12k_pdev *pdev;
struct ath12k_hw *ah;
- int i;
+ int i, j;
spin_lock_bh(&ab->base_lock);
ab->stats.fw_crash_counter++;
@@ -1006,35 +1035,32 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
for (i = 0; i < ab->num_hw; i++) {
- if (!ab->ah[i])
+ ah = ab->ah[i];
+ if (!ah || ah->state == ATH12K_HW_STATE_OFF)
continue;
- ah = ab->ah[i];
ieee80211_stop_queues(ah->hw);
- }
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- ar = pdev->ar;
- if (!ar || ar->state == ATH12K_STATE_OFF)
- continue;
+ for (j = 0; j < ah->num_radio; j++) {
+ ar = &ah->radio[j];
- ath12k_mac_drain_tx(ar);
- complete(&ar->scan.started);
- complete(&ar->scan.completed);
- complete(&ar->scan.on_channel);
- complete(&ar->peer_assoc_done);
- complete(&ar->peer_delete_done);
- complete(&ar->install_key_done);
- complete(&ar->vdev_setup_done);
- complete(&ar->vdev_delete_done);
- complete(&ar->bss_survey_done);
-
- wake_up(&ar->dp.tx_empty_waitq);
- idr_for_each(&ar->txmgmt_idr,
- ath12k_mac_tx_mgmt_pending_free, ar);
- idr_destroy(&ar->txmgmt_idr);
- wake_up(&ar->txmgmt_empty_waitq);
+ ath12k_mac_drain_tx(ar);
+ complete(&ar->scan.started);
+ complete(&ar->scan.completed);
+ complete(&ar->scan.on_channel);
+ complete(&ar->peer_assoc_done);
+ complete(&ar->peer_delete_done);
+ complete(&ar->install_key_done);
+ complete(&ar->vdev_setup_done);
+ complete(&ar->vdev_delete_done);
+ complete(&ar->bss_survey_done);
+
+ wake_up(&ar->dp.tx_empty_waitq);
+ idr_for_each(&ar->txmgmt_idr,
+ ath12k_mac_tx_mgmt_pending_free, ar);
+ idr_destroy(&ar->txmgmt_idr);
+ wake_up(&ar->txmgmt_empty_waitq);
+ }
}
wake_up(&ab->wmi_ab.tx_credits_wq);
@@ -1043,48 +1069,57 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
{
+ struct ath12k_hw *ah;
struct ath12k *ar;
- struct ath12k_pdev *pdev;
- int i;
+ int i, j;
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- ar = pdev->ar;
- if (!ar || ar->state == ATH12K_STATE_OFF)
+ for (i = 0; i < ab->num_hw; i++) {
+ ah = ab->ah[i];
+ if (!ah || ah->state == ATH12K_HW_STATE_OFF)
continue;
- mutex_lock(&ar->conf_mutex);
+ mutex_lock(&ah->hw_mutex);
+
+ switch (ah->state) {
+ case ATH12K_HW_STATE_ON:
+ ah->state = ATH12K_HW_STATE_RESTARTING;
+
+ for (j = 0; j < ah->num_radio; j++) {
+ ar = &ah->radio[j];
+
+ mutex_lock(&ar->conf_mutex);
+ ath12k_core_halt(ar);
+ mutex_unlock(&ar->conf_mutex);
+ }
- switch (ar->state) {
- case ATH12K_STATE_ON:
- ar->state = ATH12K_STATE_RESTARTING;
- ath12k_core_halt(ar);
- ieee80211_restart_hw(ath12k_ar_to_hw(ar));
break;
- case ATH12K_STATE_OFF:
+ case ATH12K_HW_STATE_OFF:
ath12k_warn(ab,
- "cannot restart radio %d that hasn't been started\n",
+ "cannot restart hw %d that hasn't been started\n",
i);
break;
- case ATH12K_STATE_RESTARTING:
+ case ATH12K_HW_STATE_RESTARTING:
break;
- case ATH12K_STATE_RESTARTED:
- ar->state = ATH12K_STATE_WEDGED;
+ case ATH12K_HW_STATE_RESTARTED:
+ ah->state = ATH12K_HW_STATE_WEDGED;
fallthrough;
- case ATH12K_STATE_WEDGED:
+ case ATH12K_HW_STATE_WEDGED:
ath12k_warn(ab,
- "device is wedged, will not restart radio %d\n", i);
+ "device is wedged, will not restart hw %d\n", i);
break;
}
- mutex_unlock(&ar->conf_mutex);
+
+ mutex_unlock(&ah->hw_mutex);
}
+
complete(&ab->driver_recovery);
}
static void ath12k_core_restart(struct work_struct *work)
{
struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
- int ret;
+ struct ath12k_hw *ah;
+ int ret, i;
ret = ath12k_core_reconfigure_on_crash(ab);
if (ret) {
@@ -1092,8 +1127,12 @@ static void ath12k_core_restart(struct work_struct *work)
return;
}
- if (ab->is_reset)
- complete_all(&ab->reconfigure_complete);
+ if (ab->is_reset) {
+ for (i = 0; i < ab->num_hw; i++) {
+ ah = ab->ah[i];
+ ieee80211_restart_hw(ah->hw);
+ }
+ }
complete(&ab->restart_completed);
}
@@ -1147,20 +1186,14 @@ static void ath12k_core_reset(struct work_struct *work)
ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n");
ab->is_reset = true;
- atomic_set(&ab->recovery_start_count, 0);
- reinit_completion(&ab->recovery_start);
atomic_set(&ab->recovery_count, 0);
ath12k_core_pre_reconfigure_recovery(ab);
- reinit_completion(&ab->reconfigure_complete);
ath12k_core_post_reconfigure_recovery(ab);
ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n");
- time_left = wait_for_completion_timeout(&ab->recovery_start,
- ATH12K_RECOVER_START_TIMEOUT_HZ);
-
ath12k_hif_irq_disable(ab);
ath12k_hif_ce_irq_disable(ab);
@@ -1185,6 +1218,29 @@ int ath12k_core_pre_init(struct ath12k_base *ab)
return 0;
}
+static int ath12k_core_panic_handler(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct ath12k_base *ab = container_of(nb, struct ath12k_base,
+ panic_nb);
+
+ return ath12k_hif_panic_handler(ab);
+}
+
+static int ath12k_core_panic_notifier_register(struct ath12k_base *ab)
+{
+ ab->panic_nb.notifier_call = ath12k_core_panic_handler;
+
+ return atomic_notifier_chain_register(&panic_notifier_list,
+ &ab->panic_nb);
+}
+
+static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
+{
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &ab->panic_nb);
+}
+
int ath12k_core_init(struct ath12k_base *ab)
{
int ret;
@@ -1195,11 +1251,17 @@ int ath12k_core_init(struct ath12k_base *ab)
return ret;
}
+ ret = ath12k_core_panic_notifier_register(ab);
+ if (ret)
+ ath12k_warn(ab, "failed to register panic handler: %d\n", ret);
+
return 0;
}
void ath12k_core_deinit(struct ath12k_base *ab)
{
+ ath12k_core_panic_notifier_unregister(ab);
+
mutex_lock(&ab->core_lock);
ath12k_core_pdev_destroy(ab);
@@ -1243,8 +1305,6 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
mutex_init(&ab->core_lock);
spin_lock_init(&ab->base_lock);
init_completion(&ab->reset_complete);
- init_completion(&ab->reconfigure_complete);
- init_completion(&ab->recovery_start);
INIT_LIST_HEAD(&ab->peers);
init_waitqueue_head(&ab->peer_mapping_wq);
@@ -1256,12 +1316,23 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
init_completion(&ab->restart_completed);
+ init_completion(&ab->wow.wakeup_completed);
ab->dev = dev;
ab->hif.bus = bus;
ab->qmi.num_radios = U8_MAX;
ab->mlo_capable_flags = ATH12K_INTRA_DEVICE_MLO_SUPPORT;
+ /* Device index used to identify the devices in a group.
+ *
+ * In intra-device MLO only one device is present in a group,
+ * so it is always zero.
+ *
+ * In inter-device MLO multiple devices are present in a group,
+ * so expect non-zero values.
+ */
+ ab->device_id = 0;
+
return ab;
err_free_wq:
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index 47dde4401210..cdfd43a7321a 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -14,6 +14,7 @@
#include <linux/dmi.h>
#include <linux/ctype.h>
#include <linux/firmware.h>
+#include <linux/panic_notifier.h>
#include "qmi.h"
#include "htc.h"
#include "wmi.h"
@@ -27,6 +28,8 @@
#include "dbring.h"
#include "fw.h"
#include "acpi.h"
+#include "wow.h"
+#include "debugfs_htt_stats.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -146,7 +149,7 @@ struct ath12k_ext_irq_grp {
u32 grp_id;
u64 timestamp;
struct napi_struct napi;
- struct net_device napi_ndev;
+ struct net_device *napi_ndev;
};
struct ath12k_smbios_bdf {
@@ -180,8 +183,6 @@ struct ath12k_he {
u32 heop_param;
};
-#define MAX_RADIOS 3
-
enum {
WMI_HOST_TP_SCALE_MAX = 0,
WMI_HOST_TP_SCALE_50 = 1,
@@ -212,10 +213,6 @@ enum ath12k_dev_flags {
ATH12K_FLAG_EXT_IRQ_ENABLED,
};
-enum ath12k_monitor_flags {
- ATH12K_FLAG_MONITOR_ENABLED,
-};
-
struct ath12k_tx_conf {
bool changed;
u16 ac;
@@ -234,6 +231,13 @@ struct ath12k_vif_cache {
u32 bss_conf_changed;
};
+struct ath12k_rekey_data {
+ u8 kck[NL80211_KCK_LEN];
+ u8 kek[NL80211_KCK_LEN];
+ u64 replay_ctr;
+ bool enable_offload;
+};
+
struct ath12k_vif {
u32 vdev_id;
enum wmi_vdev_type vdev_type;
@@ -290,6 +294,7 @@ struct ath12k_vif {
u32 punct_bitmap;
bool ps;
struct ath12k_vif_cache *cache;
+ struct ath12k_rekey_data rekey_data;
};
struct ath12k_vif_iter {
@@ -454,15 +459,15 @@ struct ath12k_sta {
#define ATH12K_MIN_5G_FREQ 4150
#define ATH12K_MIN_6G_FREQ 5925
#define ATH12K_MAX_6G_FREQ 7115
-#define ATH12K_NUM_CHANS 100
+#define ATH12K_NUM_CHANS 101
#define ATH12K_MAX_5G_CHAN 173
-enum ath12k_state {
- ATH12K_STATE_OFF,
- ATH12K_STATE_ON,
- ATH12K_STATE_RESTARTING,
- ATH12K_STATE_RESTARTED,
- ATH12K_STATE_WEDGED,
+enum ath12k_hw_state {
+ ATH12K_HW_STATE_OFF,
+ ATH12K_HW_STATE_ON,
+ ATH12K_HW_STATE_RESTARTING,
+ ATH12K_HW_STATE_RESTARTED,
+ ATH12K_HW_STATE_WEDGED,
/* Add other states as required */
};
@@ -477,8 +482,17 @@ struct ath12k_fw_stats {
struct list_head bcn;
};
+struct ath12k_dbg_htt_stats {
+ enum ath12k_dbg_htt_ext_stats_type type;
+ u32 cfg_param[4];
+ u8 reset;
+ struct debug_htt_stats_req *stats_req;
+};
+
struct ath12k_debug {
struct dentry *debugfs_pdev;
+ struct dentry *debugfs_pdev_symlink;
+ struct ath12k_dbg_htt_stats htt_stats;
};
struct ath12k_per_peer_tx_stats {
@@ -511,7 +525,6 @@ struct ath12k {
u32 ht_cap_info;
u32 vht_cap_info;
struct ath12k_he ar_he;
- enum ath12k_state state;
bool supports_6ghz;
struct {
struct completion started;
@@ -533,7 +546,6 @@ struct ath12k {
unsigned long dev_flags;
unsigned int filter_flags;
- unsigned long monitor_flags;
u32 min_tx_power;
u32 max_tx_power;
u32 txpower_limit_2g;
@@ -613,6 +625,9 @@ struct ath12k {
struct work_struct wmi_mgmt_tx_work;
struct sk_buff_head wmi_mgmt_tx_queue;
+ struct ath12k_wow wow;
+ struct completion target_suspend;
+ bool target_suspend_ack;
struct ath12k_per_peer_tx_stats peer_tx_stats;
struct list_head ppdu_stats_info;
u32 ppdu_stat_list_depth;
@@ -632,14 +647,22 @@ struct ath12k {
u32 freq_low;
u32 freq_high;
+
+ bool nlo_enabled;
};
struct ath12k_hw {
struct ieee80211_hw *hw;
+ /* Protect the write operation of the hardware state ath12k_hw::state
+ * between hardware start<=>reconfigure<=>stop transitions.
+ */
+ struct mutex hw_mutex;
+ enum ath12k_hw_state state;
bool regd_updated;
bool use_6ghz_regd;
-
u8 num_radio;
+
+ /* Keep last */
struct ath12k radio[] __aligned(sizeof(void *));
};
@@ -689,6 +712,7 @@ struct mlo_timestamp {
struct ath12k_pdev {
struct ath12k *ar;
u32 pdev_id;
+ u32 hw_link_id;
struct ath12k_pdev_cap cap;
u8 mac_addr[ETH_ALEN];
struct mlo_timestamp timestamp;
@@ -747,6 +771,7 @@ struct ath12k_base {
struct ath12k_qmi qmi;
struct ath12k_wmi_base wmi_ab;
struct completion fw_ready;
+ u8 device_id;
int num_radios;
/* HW channel counters frequency value in hertz common to all MACs */
u32 cc_freq_hz;
@@ -763,6 +788,11 @@ struct ath12k_base {
const struct ath12k_hif_ops *ops;
} hif;
+ struct {
+ struct completion wakeup_completed;
+ u32 wmi_conf_rx_decap_mode;
+ } wow;
+
struct ath12k_ce ce;
struct timer_list rx_replenish_retry;
struct ath12k_hal hal;
@@ -845,11 +875,8 @@ struct ath12k_base {
struct work_struct reset_work;
atomic_t reset_count;
atomic_t recovery_count;
- atomic_t recovery_start_count;
bool is_reset;
struct completion reset_complete;
- struct completion reconfigure_complete;
- struct completion recovery_start;
/* continuous recovery fail count */
atomic_t fail_cont_count;
unsigned long reset_fail_timeout;
@@ -923,6 +950,8 @@ struct ath12k_base {
#endif /* CONFIG_ACPI */
+ struct notifier_block panic_nb;
+
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
@@ -1037,6 +1066,11 @@ static inline struct ath12k *ath12k_ah_to_ar(struct ath12k_hw *ah, u8 hw_link_id
return &ah->radio[hw_link_id];
}
+static inline struct ath12k_hw *ath12k_ar_to_ah(struct ath12k *ar)
+{
+ return ar->ah;
+}
+
static inline struct ieee80211_hw *ath12k_ar_to_hw(struct ath12k *ar)
{
return ar->ah->hw;
diff --git a/drivers/net/wireless/ath/ath12k/debug.h b/drivers/net/wireless/ath/ath12k/debug.h
index aa685295f8a4..f7005917362c 100644
--- a/drivers/net/wireless/ath/ath12k/debug.h
+++ b/drivers/net/wireless/ath/ath12k/debug.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH12K_DEBUG_H_
@@ -25,6 +25,7 @@ enum ath12k_debug_mask {
ATH12K_DBG_PCI = 0x00001000,
ATH12K_DBG_DP_TX = 0x00002000,
ATH12K_DBG_DP_RX = 0x00004000,
+ ATH12K_DBG_WOW = 0x00008000,
ATH12K_DBG_ANY = 0xffffffff,
};
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.c b/drivers/net/wireless/ath/ath12k/debugfs.c
index 8d8ba951093b..2a977c36af00 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs.c
@@ -6,6 +6,7 @@
#include "core.h"
#include "debugfs.h"
+#include "debugfs_htt_stats.h"
static ssize_t ath12k_write_simulate_radar(struct file *file,
const char __user *user_buf,
@@ -80,11 +81,27 @@ void ath12k_debugfs_register(struct ath12k *ar)
/* Create a symlink under ieee80211/phy* */
scnprintf(buf, sizeof(buf), "../../ath12k/%pd2", ar->debug.debugfs_pdev);
- debugfs_create_symlink("ath12k", hw->wiphy->debugfsdir, buf);
+ ar->debug.debugfs_pdev_symlink = debugfs_create_symlink("ath12k",
+ hw->wiphy->debugfsdir,
+ buf);
if (ar->mac.sbands[NL80211_BAND_5GHZ].channels) {
debugfs_create_file("dfs_simulate_radar", 0200,
ar->debug.debugfs_pdev, ar,
&fops_simulate_radar);
}
+
+ ath12k_debugfs_htt_stats_register(ar);
+}
+
+void ath12k_debugfs_unregister(struct ath12k *ar)
+{
+ if (!ar->debug.debugfs_pdev)
+ return;
+
+ /* Remove symlink under ieee80211/phy* */
+ debugfs_remove(ar->debug.debugfs_pdev_symlink);
+ debugfs_remove_recursive(ar->debug.debugfs_pdev);
+ ar->debug.debugfs_pdev_symlink = NULL;
+ ar->debug.debugfs_pdev = NULL;
}
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.h b/drivers/net/wireless/ath/ath12k/debugfs.h
index a62f2a550b23..8d64ba03aa9a 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs.h
+++ b/drivers/net/wireless/ath/ath12k/debugfs.h
@@ -11,7 +11,7 @@
void ath12k_debugfs_soc_create(struct ath12k_base *ab);
void ath12k_debugfs_soc_destroy(struct ath12k_base *ab);
void ath12k_debugfs_register(struct ath12k *ar);
-
+void ath12k_debugfs_unregister(struct ath12k *ar);
#else
static inline void ath12k_debugfs_soc_create(struct ath12k_base *ab)
{
@@ -25,6 +25,10 @@ static inline void ath12k_debugfs_register(struct ath12k *ar)
{
}
+static inline void ath12k_debugfs_unregister(struct ath12k *ar)
+{
+}
+
#endif /* CONFIG_ATH12K_DEBUGFS */
#endif /* _ATH12K_DEBUGFS_H_ */
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
new file mode 100644
index 000000000000..ce80e7b5175b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
@@ -0,0 +1,1540 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+#include "core.h"
+#include "debug.h"
+#include "debugfs_htt_stats.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+
+static u32
+print_array_to_buf(u8 *buf, u32 offset, const char *header,
+ const __le32 *array, u32 array_len, const char *footer)
+{
+ int index = 0;
+ u8 i;
+
+ if (header) {
+ index += scnprintf(buf + offset,
+ ATH12K_HTT_STATS_BUF_SIZE - offset,
+ "%s = ", header);
+ }
+ for (i = 0; i < array_len; i++) {
+ index += scnprintf(buf + offset + index,
+ (ATH12K_HTT_STATS_BUF_SIZE - offset) - index,
+ " %u:%u,", i, le32_to_cpu(array[i]));
+ }
+ /* To overwrite the last trailing comma */
+ index--;
+ *(buf + offset + index) = '\0';
+
+ if (footer) {
+ index += scnprintf(buf + offset + index,
+ (ATH12K_HTT_STATS_BUF_SIZE - offset) - index,
+ "%s", footer);
+ }
+ return index;
+}
+
+static void
+htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "comp_delivered = %u\n",
+ le32_to_cpu(htt_stats_buf->comp_delivered));
+ len += scnprintf(buf + len, buf_len - len, "self_triggers = %u\n",
+ le32_to_cpu(htt_stats_buf->self_triggers));
+ len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n",
+ le32_to_cpu(htt_stats_buf->hw_queued));
+ len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n",
+ le32_to_cpu(htt_stats_buf->hw_reaped));
+ len += scnprintf(buf + len, buf_len - len, "underrun = %u\n",
+ le32_to_cpu(htt_stats_buf->underrun));
+ len += scnprintf(buf + len, buf_len - len, "hw_paused = %u\n",
+ le32_to_cpu(htt_stats_buf->hw_paused));
+ len += scnprintf(buf + len, buf_len - len, "hw_flush = %u\n",
+ le32_to_cpu(htt_stats_buf->hw_flush));
+ len += scnprintf(buf + len, buf_len - len, "hw_filt = %u\n",
+ le32_to_cpu(htt_stats_buf->hw_filt));
+ len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_abort));
+ len += scnprintf(buf + len, buf_len - len, "ppdu_ok = %u\n",
+ le32_to_cpu(htt_stats_buf->ppdu_ok));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_requeued = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_requed));
+ len += scnprintf(buf + len, buf_len - len, "tx_xretry = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_xretry));
+ len += scnprintf(buf + len, buf_len - len, "data_rc = %u\n",
+ le32_to_cpu(htt_stats_buf->data_rc));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_dropped_xretry = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_dropped_xretry));
+ len += scnprintf(buf + len, buf_len - len, "illegal_rate_phy_err = %u\n",
+ le32_to_cpu(htt_stats_buf->illgl_rate_phy_err));
+ len += scnprintf(buf + len, buf_len - len, "cont_xretry = %u\n",
+ le32_to_cpu(htt_stats_buf->cont_xretry));
+ len += scnprintf(buf + len, buf_len - len, "tx_timeout = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_timeout));
+ len += scnprintf(buf + len, buf_len - len, "tx_time_dur_data = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_time_dur_data));
+ len += scnprintf(buf + len, buf_len - len, "pdev_resets = %u\n",
+ le32_to_cpu(htt_stats_buf->pdev_resets));
+ len += scnprintf(buf + len, buf_len - len, "phy_underrun = %u\n",
+ le32_to_cpu(htt_stats_buf->phy_underrun));
+ len += scnprintf(buf + len, buf_len - len, "txop_ovf = %u\n",
+ le32_to_cpu(htt_stats_buf->txop_ovf));
+ len += scnprintf(buf + len, buf_len - len, "seq_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_posted));
+ len += scnprintf(buf + len, buf_len - len, "seq_failed_queueing = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_failed_queueing));
+ len += scnprintf(buf + len, buf_len - len, "seq_completed = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_completed));
+ len += scnprintf(buf + len, buf_len - len, "seq_restarted = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_restarted));
+ len += scnprintf(buf + len, buf_len - len, "seq_txop_repost_stop = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_txop_repost_stop));
+ len += scnprintf(buf + len, buf_len - len, "next_seq_cancel = %u\n",
+ le32_to_cpu(htt_stats_buf->next_seq_cancel));
+ len += scnprintf(buf + len, buf_len - len, "dl_mu_mimo_seq_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_seq_posted));
+ len += scnprintf(buf + len, buf_len - len, "dl_mu_ofdma_seq_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_ofdma_seq_posted));
+ len += scnprintf(buf + len, buf_len - len, "ul_mu_mimo_seq_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_mumimo_seq_posted));
+ len += scnprintf(buf + len, buf_len - len, "ul_mu_ofdma_seq_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_ofdma_seq_posted));
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_peer_blacklisted = %u\n",
+ le32_to_cpu(htt_stats_buf->num_mu_peer_blacklisted));
+ len += scnprintf(buf + len, buf_len - len, "seq_qdepth_repost_stop = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_qdepth_repost_stop));
+ len += scnprintf(buf + len, buf_len - len, "seq_min_msdu_repost_stop = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_min_msdu_repost_stop));
+ len += scnprintf(buf + len, buf_len - len, "mu_seq_min_msdu_repost_stop = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_seq_min_msdu_repost_stop));
+ len += scnprintf(buf + len, buf_len - len, "seq_switch_hw_paused = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_switch_hw_paused));
+ len += scnprintf(buf + len, buf_len - len, "next_seq_posted_dsr = %u\n",
+ le32_to_cpu(htt_stats_buf->next_seq_posted_dsr));
+ len += scnprintf(buf + len, buf_len - len, "seq_posted_isr = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_posted_isr));
+ len += scnprintf(buf + len, buf_len - len, "seq_ctrl_cached = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_ctrl_cached));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_count_tqm = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_count_tqm));
+ len += scnprintf(buf + len, buf_len - len, "msdu_count_tqm = %u\n",
+ le32_to_cpu(htt_stats_buf->msdu_count_tqm));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_removed_tqm = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_removed_tqm));
+ len += scnprintf(buf + len, buf_len - len, "msdu_removed_tqm = %u\n",
+ le32_to_cpu(htt_stats_buf->msdu_removed_tqm));
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdus_max_retries = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_mpdus_max_retries));
+ len += scnprintf(buf + len, buf_len - len, "mpdus_sw_flush = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdus_sw_flush));
+ len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdus_hw_filter));
+ len += scnprintf(buf + len, buf_len - len, "mpdus_truncated = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdus_truncated));
+ len += scnprintf(buf + len, buf_len - len, "mpdus_ack_failed = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdus_ack_failed));
+ len += scnprintf(buf + len, buf_len - len, "mpdus_expired = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdus_expired));
+ len += scnprintf(buf + len, buf_len - len, "mpdus_seq_hw_retry = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdus_seq_hw_retry));
+ len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+ le32_to_cpu(htt_stats_buf->ack_tlv_proc));
+ len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt_valid = %u\n",
+ le32_to_cpu(htt_stats_buf->coex_abort_mpdu_cnt_valid));
+ len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->coex_abort_mpdu_cnt));
+ len += scnprintf(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u\n",
+ le32_to_cpu(htt_stats_buf->num_total_ppdus_tried_ota));
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u\n",
+ le32_to_cpu(htt_stats_buf->num_data_ppdus_tried_ota));
+ len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_enqued = %u\n",
+ le32_to_cpu(htt_stats_buf->local_ctrl_mgmt_enqued));
+ len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_freed = %u\n",
+ le32_to_cpu(htt_stats_buf->local_ctrl_mgmt_freed));
+ len += scnprintf(buf + len, buf_len - len, "local_data_enqued = %u\n",
+ le32_to_cpu(htt_stats_buf->local_data_enqued));
+ len += scnprintf(buf + len, buf_len - len, "local_data_freed = %u\n",
+ le32_to_cpu(htt_stats_buf->local_data_freed));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_tried));
+ len += scnprintf(buf + len, buf_len - len, "isr_wait_seq_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->isr_wait_seq_posted));
+ len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_low = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_active_dur_us_low));
+ len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_high = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_active_dur_us_high));
+ len += scnprintf(buf + len, buf_len - len, "fes_offsets_err_cnt = %u\n\n",
+ le32_to_cpu(htt_stats_buf->fes_offsets_err_cnt));
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_tx_pdev_stats_urrn_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_urrn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ HTT_TX_PDEV_MAX_URRN_STATS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_URRN_TLV:\n");
+
+ len += print_array_to_buf(buf, len, "urrn_stats", htt_stats_buf->urrn_stats,
+ num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_tx_pdev_stats_flush_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_flush_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_TX_PDEV_MAX_FLUSH_REASON_STATS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_FLUSH_TLV:\n");
+
+ len += print_array_to_buf(buf, len, "flush_errs", htt_stats_buf->flush_errs,
+ num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_tx_pdev_stats_sifs_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_sifs_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_STATS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SIFS_TLV:\n");
+
+ len += print_array_to_buf(buf, len, "sifs_status", htt_stats_buf->sifs_status,
+ num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_tx_pdev_mu_ppdu_dist_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_mu_ppdu_dist_stats_tlv *htt_stats_buf = tag_buf;
+ char *mode;
+ u8 j, hw_mode, i, str_buf_len;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 stats_value;
+ u8 max_ppdu = ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST;
+ u8 max_sched = ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS;
+ char str_buf[ATH12K_HTT_MAX_STRING_LEN];
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ hw_mode = le32_to_cpu(htt_stats_buf->hw_mode);
+
+ switch (hw_mode) {
+ case ATH12K_HTT_STATS_HWMODE_AC:
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_AC_MU_PPDU_DISTRIBUTION_STATS:\n");
+ mode = "ac";
+ break;
+ case ATH12K_HTT_STATS_HWMODE_AX:
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_AX_MU_PPDU_DISTRIBUTION_STATS:\n");
+ mode = "ax";
+ break;
+ case ATH12K_HTT_STATS_HWMODE_BE:
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_BE_MU_PPDU_DISTRIBUTION_STATS:\n");
+ mode = "be";
+ break;
+ default:
+ return;
+ }
+
+ for (i = 0; i < ATH12K_HTT_STATS_NUM_NR_BINS ; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_mu_mimo_num_seq_posted_nr%u = %u\n", mode,
+ ((i + 1) * 4), htt_stats_buf->num_seq_posted[i]);
+ str_buf_len = 0;
+ memset(str_buf, 0x0, sizeof(str_buf));
+ for (j = 0; j < ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST ; j++) {
+ stats_value = le32_to_cpu(htt_stats_buf->num_ppdu_posted_per_burst
+ [i * max_ppdu + j]);
+ str_buf_len += scnprintf(&str_buf[str_buf_len],
+ ATH12K_HTT_MAX_STRING_LEN - str_buf_len,
+ " %u:%u,", j, stats_value);
+ }
+ /* To overwrite the last trailing comma */
+ str_buf[str_buf_len - 1] = '\0';
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_mu_mimo_num_ppdu_posted_per_burst_nr%u = %s\n",
+ mode, ((i + 1) * 4), str_buf);
+ str_buf_len = 0;
+ memset(str_buf, 0x0, sizeof(str_buf));
+ for (j = 0; j < ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST ; j++) {
+ stats_value = le32_to_cpu(htt_stats_buf->num_ppdu_cmpl_per_burst
+ [i * max_ppdu + j]);
+ str_buf_len += scnprintf(&str_buf[str_buf_len],
+ ATH12K_HTT_MAX_STRING_LEN - str_buf_len,
+ " %u:%u,", j, stats_value);
+ }
+ /* To overwrite the last trailing comma */
+ str_buf[str_buf_len - 1] = '\0';
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_mu_mimo_num_ppdu_completed_per_burst_nr%u = %s\n",
+ mode, ((i + 1) * 4), str_buf);
+ str_buf_len = 0;
+ memset(str_buf, 0x0, sizeof(str_buf));
+ for (j = 0; j < ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS ; j++) {
+ stats_value = le32_to_cpu(htt_stats_buf->num_seq_term_status
+ [i * max_sched + j]);
+ str_buf_len += scnprintf(&str_buf[str_buf_len],
+ ATH12K_HTT_MAX_STRING_LEN - str_buf_len,
+ " %u:%u,", j, stats_value);
+ }
+ /* To overwrite the last trailing comma */
+ str_buf[str_buf_len - 1] = '\0';
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_mu_mimo_num_seq_term_status_nr%u = %s\n\n",
+ mode, ((i + 1) * 4), str_buf);
+ }
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_tx_pdev_stats_sifs_hist_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_sifs_hist_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SIFS_HIST_TLV:\n");
+
+ len += print_array_to_buf(buf, len, "sifs_hist_status",
+ htt_stats_buf->sifs_hist_status, num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_pdev_ctrl_path_tx_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_ctrl_path_tx_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_CTRL_PATH_TX_STATS:\n");
+ len += print_array_to_buf(buf, len, "fw_tx_mgmt_subtype",
+ htt_stats_buf->fw_tx_mgmt_subtype,
+ ATH12K_HTT_STATS_SUBTYPE_MAX, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_stats_tx_sched_cmn_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_stats_tx_sched_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_TX_SCHED_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "current_timestamp = %u\n\n",
+ le32_to_cpu(htt_stats_buf->current_timestamp));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_pdev_stats_sched_per_txq_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_sched_per_txq_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word,
+ ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "txq_id = %u\n",
+ u32_get_bits(mac_id_word,
+ ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID));
+ len += scnprintf(buf + len, buf_len - len, "sched_policy = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_policy));
+ len += scnprintf(buf + len, buf_len - len,
+ "last_sched_cmd_posted_timestamp = %u\n",
+ le32_to_cpu(htt_stats_buf->last_sched_cmd_posted_timestamp));
+ len += scnprintf(buf + len, buf_len - len,
+ "last_sched_cmd_compl_timestamp = %u\n",
+ le32_to_cpu(htt_stats_buf->last_sched_cmd_compl_timestamp));
+ len += scnprintf(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_2_tac_lwm_count));
+ len += scnprintf(buf + len, buf_len - len, "sched_2_tac_ring_full = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_2_tac_ring_full));
+ len += scnprintf(buf + len, buf_len - len, "sched_cmd_post_failure = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_cmd_post_failure));
+ len += scnprintf(buf + len, buf_len - len, "num_active_tids = %u\n",
+ le32_to_cpu(htt_stats_buf->num_active_tids));
+ len += scnprintf(buf + len, buf_len - len, "num_ps_schedules = %u\n",
+ le32_to_cpu(htt_stats_buf->num_ps_schedules));
+ len += scnprintf(buf + len, buf_len - len, "sched_cmds_pending = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_cmds_pending));
+ len += scnprintf(buf + len, buf_len - len, "num_tid_register = %u\n",
+ le32_to_cpu(htt_stats_buf->num_tid_register));
+ len += scnprintf(buf + len, buf_len - len, "num_tid_unregister = %u\n",
+ le32_to_cpu(htt_stats_buf->num_tid_unregister));
+ len += scnprintf(buf + len, buf_len - len, "num_qstats_queried = %u\n",
+ le32_to_cpu(htt_stats_buf->num_qstats_queried));
+ len += scnprintf(buf + len, buf_len - len, "qstats_update_pending = %u\n",
+ le32_to_cpu(htt_stats_buf->qstats_update_pending));
+ len += scnprintf(buf + len, buf_len - len, "last_qstats_query_timestamp = %u\n",
+ le32_to_cpu(htt_stats_buf->last_qstats_query_timestamp));
+ len += scnprintf(buf + len, buf_len - len, "num_tqm_cmdq_full = %u\n",
+ le32_to_cpu(htt_stats_buf->num_tqm_cmdq_full));
+ len += scnprintf(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->num_de_sched_algo_trigger));
+ len += scnprintf(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->num_rt_sched_algo_trigger));
+ len += scnprintf(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->num_tqm_sched_algo_trigger));
+ len += scnprintf(buf + len, buf_len - len, "notify_sched = %u\n",
+ le32_to_cpu(htt_stats_buf->notify_sched));
+ len += scnprintf(buf + len, buf_len - len, "dur_based_sendn_term = %u\n",
+ le32_to_cpu(htt_stats_buf->dur_based_sendn_term));
+ len += scnprintf(buf + len, buf_len - len, "su_notify2_sched = %u\n",
+ le32_to_cpu(htt_stats_buf->su_notify2_sched));
+ len += scnprintf(buf + len, buf_len - len, "su_optimal_queued_msdus_sched = %u\n",
+ le32_to_cpu(htt_stats_buf->su_optimal_queued_msdus_sched));
+ len += scnprintf(buf + len, buf_len - len, "su_delay_timeout_sched = %u\n",
+ le32_to_cpu(htt_stats_buf->su_delay_timeout_sched));
+ len += scnprintf(buf + len, buf_len - len, "su_min_txtime_sched_delay = %u\n",
+ le32_to_cpu(htt_stats_buf->su_min_txtime_sched_delay));
+ len += scnprintf(buf + len, buf_len - len, "su_no_delay = %u\n",
+ le32_to_cpu(htt_stats_buf->su_no_delay));
+ len += scnprintf(buf + len, buf_len - len, "num_supercycles = %u\n",
+ le32_to_cpu(htt_stats_buf->num_supercycles));
+ len += scnprintf(buf + len, buf_len - len, "num_subcycles_with_sort = %u\n",
+ le32_to_cpu(htt_stats_buf->num_subcycles_with_sort));
+ len += scnprintf(buf + len, buf_len - len, "num_subcycles_no_sort = %u\n\n",
+ le32_to_cpu(htt_stats_buf->num_subcycles_no_sort));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sched_txq_cmd_posted_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sched_txq_cmd_posted_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elements = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_POSTED_TLV:\n");
+ len += print_array_to_buf(buf, len, "sched_cmd_posted",
+ htt_stats_buf->sched_cmd_posted, num_elements, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sched_txq_cmd_reaped_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sched_txq_cmd_reaped_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elements = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_REAPED_TLV:\n");
+ len += print_array_to_buf(buf, len, "sched_cmd_reaped",
+ htt_stats_buf->sched_cmd_reaped, num_elements, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sched_txq_sched_order_su_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sched_txq_sched_order_su_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 sched_order_su_num_entries = min_t(u32, (tag_len >> 2),
+ ATH12K_HTT_TX_PDEV_NUM_SCHED_ORDER_LOG);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV:\n");
+ len += print_array_to_buf(buf, len, "sched_order_su",
+ htt_stats_buf->sched_order_su,
+ sched_order_su_num_entries, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sched_txq_sched_ineligibility_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sched_txq_sched_ineligibility_tlv *htt_stats_buf =
+ tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 sched_ineligibility_num_entries = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SCHED_INELIGIBILITY:\n");
+ len += print_array_to_buf(buf, len, "sched_ineligibility",
+ htt_stats_buf->sched_ineligibility,
+ sched_ineligibility_num_entries, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sched_txq_supercycle_trigger_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sched_txq_supercycle_triggers_tlv *htt_stats_buf =
+ tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SUPERCYCLE_TRIGGER:\n");
+ len += print_array_to_buf(buf, len, "supercycle_triggers",
+ htt_stats_buf->supercycle_triggers, num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_hw_stats_pdev_errs_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_hw_stats_pdev_errs_tlv *htt_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_buf))
+ return;
+
+ mac_id_word = le32_to_cpu(htt_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_PDEV_ERRS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n",
+ le32_to_cpu(htt_buf->tx_abort));
+ len += scnprintf(buf + len, buf_len - len, "tx_abort_fail_count = %u\n",
+ le32_to_cpu(htt_buf->tx_abort_fail_count));
+ len += scnprintf(buf + len, buf_len - len, "rx_abort = %u\n",
+ le32_to_cpu(htt_buf->rx_abort));
+ len += scnprintf(buf + len, buf_len - len, "rx_abort_fail_count = %u\n",
+ le32_to_cpu(htt_buf->rx_abort_fail_count));
+ len += scnprintf(buf + len, buf_len - len, "rx_flush_cnt = %u\n",
+ le32_to_cpu(htt_buf->rx_flush_cnt));
+ len += scnprintf(buf + len, buf_len - len, "warm_reset = %u\n",
+ le32_to_cpu(htt_buf->warm_reset));
+ len += scnprintf(buf + len, buf_len - len, "cold_reset = %u\n",
+ le32_to_cpu(htt_buf->cold_reset));
+ len += scnprintf(buf + len, buf_len - len, "mac_cold_reset_restore_cal = %u\n",
+ le32_to_cpu(htt_buf->mac_cold_reset_restore_cal));
+ len += scnprintf(buf + len, buf_len - len, "mac_cold_reset = %u\n",
+ le32_to_cpu(htt_buf->mac_cold_reset));
+ len += scnprintf(buf + len, buf_len - len, "mac_warm_reset = %u\n",
+ le32_to_cpu(htt_buf->mac_warm_reset));
+ len += scnprintf(buf + len, buf_len - len, "mac_only_reset = %u\n",
+ le32_to_cpu(htt_buf->mac_only_reset));
+ len += scnprintf(buf + len, buf_len - len, "phy_warm_reset = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset));
+ len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_ucode_trig = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_ucode_trig));
+ len += scnprintf(buf + len, buf_len - len, "mac_warm_reset_restore_cal = %u\n",
+ le32_to_cpu(htt_buf->mac_warm_reset_restore_cal));
+ len += scnprintf(buf + len, buf_len - len, "mac_sfm_reset = %u\n",
+ le32_to_cpu(htt_buf->mac_sfm_reset));
+ len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_m3_ssr = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_m3_ssr));
+ len += scnprintf(buf + len, buf_len - len, "fw_rx_rings_reset = %u\n",
+ le32_to_cpu(htt_buf->fw_rx_rings_reset));
+ len += scnprintf(buf + len, buf_len - len, "tx_flush = %u\n",
+ le32_to_cpu(htt_buf->tx_flush));
+ len += scnprintf(buf + len, buf_len - len, "tx_glb_reset = %u\n",
+ le32_to_cpu(htt_buf->tx_glb_reset));
+ len += scnprintf(buf + len, buf_len - len, "tx_txq_reset = %u\n",
+ le32_to_cpu(htt_buf->tx_txq_reset));
+ len += scnprintf(buf + len, buf_len - len, "rx_timeout_reset = %u\n\n",
+ le32_to_cpu(htt_buf->rx_timeout_reset));
+
+ len += scnprintf(buf + len, buf_len - len, "PDEV_PHY_WARM_RESET_REASONS:\n");
+ len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_reason_phy_m3 = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_phy_m3));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_tx_hw_stuck = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_hw_stuck));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_num_cca_rx_frame_stuck = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_num_rx_frame_stuck));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_wal_rx_recovery_rst_rx_busy = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_wal_rx_rec_rx_busy));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_wal_rx_recovery_rst_mac_hang = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_wal_rx_rec_mac_hng));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_mac_reset_converted_phy_reset = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_mac_conv_phy_reset));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_tx_lifetime_expiry_cca_stuck = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_exp_cca_stuck));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_tx_consecutive_flush9_war = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_consec_flsh_war));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_tx_hwsch_reset_war = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_hwsch_reset_war));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_hwsch_wdog_or_cca_wdog_war = %u\n\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_hwsch_cca_wdog_war));
+
+ len += scnprintf(buf + len, buf_len - len, "WAL_RX_RECOVERY_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "wal_rx_recovery_rst_mac_hang_count = %u\n",
+ le32_to_cpu(htt_buf->wal_rx_recovery_rst_mac_hang_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "wal_rx_recovery_rst_known_sig_count = %u\n",
+ le32_to_cpu(htt_buf->wal_rx_recovery_rst_known_sig_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "wal_rx_recovery_rst_no_rx_count = %u\n",
+ le32_to_cpu(htt_buf->wal_rx_recovery_rst_no_rx_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "wal_rx_recovery_rst_no_rx_consecutive_count = %u\n",
+ le32_to_cpu(htt_buf->wal_rx_recovery_rst_no_rx_consec_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "wal_rx_recovery_rst_rx_busy_count = %u\n",
+ le32_to_cpu(htt_buf->wal_rx_recovery_rst_rx_busy_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "wal_rx_recovery_rst_phy_mac_hang_count = %u\n\n",
+ le32_to_cpu(htt_buf->wal_rx_recovery_rst_phy_mac_hang_cnt));
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_DEST_DRAIN_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rx_descs_leak_prevention_done = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rx_descs_leak_prevented));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rx_descs_saved_cnt = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rx_descs_saved_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rxdma2reo_leak_detected = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rxdma2reo_leak_detected));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rxdma2fw_leak_detected = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rxdma2fw_leak_detected));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rxdma2wbm_leak_detected = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rxdma2wbm_leak_detected));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rxdma1_2sw_leak_detected = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rxdma1_2sw_leak_detected));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rx_drain_ok_mac_idle = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rx_drain_ok_mac_idle));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_ok_mac_not_idle = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_ok_mac_not_idle));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_prerequisite_invld = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_prerequisite_invld));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_skip_for_non_lmac_reset = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_skip_non_lmac_reset));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_hw_fifo_not_empty_post_drain_wait = %u\n\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_hw_fifo_notempty_post_wait));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_hw_stats_intr_misc_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_hw_stats_intr_misc_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "hw_intr_name = %s\n",
+ htt_stats_buf->hw_intr_name);
+ len += scnprintf(buf + len, buf_len - len, "mask = %u\n",
+ le32_to_cpu(htt_stats_buf->mask));
+ len += scnprintf(buf + len, buf_len - len, "count = %u\n\n",
+ le32_to_cpu(htt_stats_buf->count));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_hw_stats_whal_tx_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_hw_stats_whal_tx_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WHAL_TX_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "last_unpause_ppdu_id = %u\n",
+ le32_to_cpu(htt_stats_buf->last_unpause_ppdu_id));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_unpause_wait_tqm_write = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_unpause_wait_tqm_write));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_dummy_tlv_skipped = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_dummy_tlv_skipped));
+ len += scnprintf(buf + len, buf_len - len,
+ "hwsch_misaligned_offset_received = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_misaligned_offset_received));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_reset_count = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_reset_count));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_dev_reset_war = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_dev_reset_war));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_delayed_pause = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_delayed_pause));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_long_delayed_pause = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_long_delayed_pause));
+ len += scnprintf(buf + len, buf_len - len, "sch_rx_ppdu_no_response = %u\n",
+ le32_to_cpu(htt_stats_buf->sch_rx_ppdu_no_response));
+ len += scnprintf(buf + len, buf_len - len, "sch_selfgen_response = %u\n",
+ le32_to_cpu(htt_stats_buf->sch_selfgen_response));
+ len += scnprintf(buf + len, buf_len - len, "sch_rx_sifs_resp_trigger= %u\n\n",
+ le32_to_cpu(htt_stats_buf->sch_rx_sifs_resp_trigger));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_hw_war_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_hw_war_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 fixed_len, array_len;
+ u8 i, array_words;
+ u32 mac_id;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id = __le32_to_cpu(htt_stats_buf->mac_id__word);
+ fixed_len = sizeof(*htt_stats_buf);
+ array_len = tag_len - fixed_len;
+ array_words = array_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_WAR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id, ATH12K_HTT_STATS_MAC_ID));
+
+ for (i = 0; i < array_words; i++) {
+ len += scnprintf(buf + len, buf_len - len, "hw_war %u = %u\n\n",
+ i, le32_to_cpu(htt_stats_buf->hw_wars[i]));
+ }
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_tqm_cmn_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_tqm_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "max_cmdq_id = %u\n",
+ le32_to_cpu(htt_stats_buf->max_cmdq_id));
+ len += scnprintf(buf + len, buf_len - len, "list_mpdu_cnt_hist_intvl = %u\n",
+ le32_to_cpu(htt_stats_buf->list_mpdu_cnt_hist_intvl));
+ len += scnprintf(buf + len, buf_len - len, "add_msdu = %u\n",
+ le32_to_cpu(htt_stats_buf->add_msdu));
+ len += scnprintf(buf + len, buf_len - len, "q_empty = %u\n",
+ le32_to_cpu(htt_stats_buf->q_empty));
+ len += scnprintf(buf + len, buf_len - len, "q_not_empty = %u\n",
+ le32_to_cpu(htt_stats_buf->q_not_empty));
+ len += scnprintf(buf + len, buf_len - len, "drop_notification = %u\n",
+ le32_to_cpu(htt_stats_buf->drop_notification));
+ len += scnprintf(buf + len, buf_len - len, "desc_threshold = %u\n",
+ le32_to_cpu(htt_stats_buf->desc_threshold));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_tqm_invalid_status = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_tqm_invalid_status));
+ len += scnprintf(buf + len, buf_len - len, "missed_tqm_gen_mpdus = %u\n",
+ le32_to_cpu(htt_stats_buf->missed_tqm_gen_mpdus));
+ len += scnprintf(buf + len, buf_len - len,
+ "total_msduq_timestamp_updates = %u\n",
+ le32_to_cpu(htt_stats_buf->msduq_timestamp_updates));
+ len += scnprintf(buf + len, buf_len - len,
+ "total_msduq_timestamp_updates_by_get_mpdu_head_info_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->msduq_updates_mpdu_head_info_cmd));
+ len += scnprintf(buf + len, buf_len - len,
+ "total_msduq_timestamp_updates_by_emp_to_nonemp_status = %u\n",
+ le32_to_cpu(htt_stats_buf->msduq_updates_emp_to_nonemp_status));
+ len += scnprintf(buf + len, buf_len - len,
+ "total_get_mpdu_head_info_cmds_by_sched_algo_la_query = %u\n",
+ le32_to_cpu(htt_stats_buf->get_mpdu_head_info_cmds_by_query));
+ len += scnprintf(buf + len, buf_len - len,
+ "total_get_mpdu_head_info_cmds_by_tac = %u\n",
+ le32_to_cpu(htt_stats_buf->get_mpdu_head_info_cmds_by_tac));
+ len += scnprintf(buf + len, buf_len - len,
+ "total_gen_mpdu_cmds_by_sched_algo_la_query = %u\n",
+ le32_to_cpu(htt_stats_buf->gen_mpdu_cmds_by_query));
+ len += scnprintf(buf + len, buf_len - len, "active_tqm_tids = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_active_tids));
+ len += scnprintf(buf + len, buf_len - len, "inactive_tqm_tids = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_inactive_tids));
+ len += scnprintf(buf + len, buf_len - len, "tqm_active_msduq_flows = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_active_msduq_flows));
+ len += scnprintf(buf + len, buf_len - len, "hi_prio_q_not_empty = %u\n\n",
+ le32_to_cpu(htt_stats_buf->high_prio_q_not_empty));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_tqm_error_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_tqm_error_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_ERROR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "q_empty_failure = %u\n",
+ le32_to_cpu(htt_stats_buf->q_empty_failure));
+ len += scnprintf(buf + len, buf_len - len, "q_not_empty_failure = %u\n",
+ le32_to_cpu(htt_stats_buf->q_not_empty_failure));
+ len += scnprintf(buf + len, buf_len - len, "add_msdu_failure = %u\n\n",
+ le32_to_cpu(htt_stats_buf->add_msdu_failure));
+
+ len += scnprintf(buf + len, buf_len - len, "TQM_ERROR_RESET_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "tqm_cache_ctl_err = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_cache_ctl_err));
+ len += scnprintf(buf + len, buf_len - len, "tqm_soft_reset = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_soft_reset));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_total_num_in_use_link_descs = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_num_in_use_link_descs));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_worst_case_num_lost_link_descs = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_num_lost_link_descs));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_worst_case_num_lost_host_tx_bufs_count = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_num_lost_host_tx_buf_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_num_in_use_link_descs_internal_tqm = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_num_in_use_internal_tqm));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_num_in_use_link_descs_wbm_idle_link_ring = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_num_in_use_idle_link_rng));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_time_to_tqm_hang_delta_ms = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_time_to_tqm_hang_delta_ms));
+ len += scnprintf(buf + len, buf_len - len, "tqm_reset_recovery_time_ms = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_recovery_time_ms));
+ len += scnprintf(buf + len, buf_len - len, "tqm_reset_num_peers_hdl = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_num_peers_hdl));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_cumm_dirty_hw_mpduq_proc_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_cumm_dirty_hw_mpduq_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_cumm_dirty_hw_msduq_proc = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_cumm_dirty_hw_msduq_proc));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_flush_cache_cmd_su_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_su_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_flush_cache_cmd_other_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_other_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_flush_cache_cmd_trig_type = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_trig_type));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_flush_cache_cmd_trig_cfg = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_trig_cfg));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_flush_cache_cmd_skip_cmd_status_null = %u\n\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_flush_cmd_skp_status_null));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_tqm_gen_mpdu_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_tqm_gen_mpdu_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elements = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_GEN_MPDU_STATS_TLV:\n");
+ len += print_array_to_buf(buf, len, "gen_mpdu_end_reason",
+ htt_stats_buf->gen_mpdu_end_reason, num_elements,
+ "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_tqm_list_mpdu_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_tqm_list_mpdu_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_END_REASON);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_STATS_TLV:\n");
+ len += print_array_to_buf(buf, len, "list_mpdu_end_reason",
+ htt_stats_buf->list_mpdu_end_reason, num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_tqm_list_mpdu_cnt_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_tqm_list_mpdu_cnt_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:\n");
+ len += print_array_to_buf(buf, len, "list_mpdu_cnt_hist",
+ htt_stats_buf->list_mpdu_cnt_hist, num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_tqm_pdev_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_tqm_pdev_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_PDEV_STATS_TLV_V:\n");
+ len += scnprintf(buf + len, buf_len - len, "msdu_count = %u\n",
+ le32_to_cpu(htt_stats_buf->msdu_count));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_count = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_count));
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_msdu));
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_mpdu));
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_msdu_ttl));
+ len += scnprintf(buf + len, buf_len - len, "send_bar = %u\n",
+ le32_to_cpu(htt_stats_buf->send_bar));
+ len += scnprintf(buf + len, buf_len - len, "bar_sync = %u\n",
+ le32_to_cpu(htt_stats_buf->bar_sync));
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu = %u\n",
+ le32_to_cpu(htt_stats_buf->notify_mpdu));
+ len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->sync_cmd));
+ len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->write_cmd));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_trigger));
+ len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+ le32_to_cpu(htt_stats_buf->ack_tlv_proc));
+ len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->gen_mpdu_cmd));
+ len += scnprintf(buf + len, buf_len - len, "gen_list_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->gen_list_cmd));
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_mpdu_cmd));
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu_tried_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_mpdu_tried_cmd));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_queue_stats_cmd));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_head_info_cmd));
+ len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->msdu_flow_stats_cmd));
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_msdu_cmd));
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_msdu_ttl_cmd));
+ len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->flush_cache_cmd));
+ len += scnprintf(buf + len, buf_len - len, "update_mpduq_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->update_mpduq_cmd));
+ len += scnprintf(buf + len, buf_len - len, "enqueue = %u\n",
+ le32_to_cpu(htt_stats_buf->enqueue));
+ len += scnprintf(buf + len, buf_len - len, "enqueue_notify = %u\n",
+ le32_to_cpu(htt_stats_buf->enqueue_notify));
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu_at_head = %u\n",
+ le32_to_cpu(htt_stats_buf->notify_mpdu_at_head));
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu_state_valid = %u\n",
+ le32_to_cpu(htt_stats_buf->notify_mpdu_state_valid));
+ len += scnprintf(buf + len, buf_len - len, "sched_udp_notify1 = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_udp_notify1));
+ len += scnprintf(buf + len, buf_len - len, "sched_udp_notify2 = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_udp_notify2));
+ len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify1 = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_nonudp_notify1));
+ len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify2 = %u\n\n",
+ le32_to_cpu(htt_stats_buf->sched_nonudp_notify2));
+
+ stats_req->buf_len = len;
+}
+
+static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
+ u16 tag, u16 len, const void *tag_buf,
+ void *user_data)
+{
+ struct debug_htt_stats_req *stats_req = user_data;
+
+ switch (tag) {
+ case HTT_STATS_TX_PDEV_CMN_TAG:
+ htt_print_tx_pdev_stats_cmn_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_UNDERRUN_TAG:
+ htt_print_tx_pdev_stats_urrn_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_SIFS_TAG:
+ htt_print_tx_pdev_stats_sifs_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_FLUSH_TAG:
+ htt_print_tx_pdev_stats_flush_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_SIFS_HIST_TAG:
+ htt_print_tx_pdev_stats_sifs_hist_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG:
+ htt_print_pdev_ctrl_path_tx_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_MU_PPDU_DIST_TAG:
+ htt_print_tx_pdev_mu_ppdu_dist_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_SCHED_CMN_TAG:
+ ath12k_htt_print_stats_tx_sched_cmn_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG:
+ ath12k_htt_print_tx_pdev_stats_sched_per_txq_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG:
+ ath12k_htt_print_sched_txq_cmd_posted_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG:
+ ath12k_htt_print_sched_txq_cmd_reaped_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG:
+ ath12k_htt_print_sched_txq_sched_order_su_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG:
+ ath12k_htt_print_sched_txq_sched_ineligibility_tlv(tag_buf, len,
+ stats_req);
+ break;
+ case HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG:
+ ath12k_htt_print_sched_txq_supercycle_trigger_tlv(tag_buf, len,
+ stats_req);
+ break;
+ case HTT_STATS_HW_PDEV_ERRS_TAG:
+ ath12k_htt_print_hw_stats_pdev_errs_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_HW_INTR_MISC_TAG:
+ ath12k_htt_print_hw_stats_intr_misc_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_WHAL_TX_TAG:
+ ath12k_htt_print_hw_stats_whal_tx_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_HW_WAR_TAG:
+ ath12k_htt_print_hw_war_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_CMN_TAG:
+ ath12k_htt_print_tx_tqm_cmn_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_ERROR_STATS_TAG:
+ ath12k_htt_print_tx_tqm_error_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_GEN_MPDU_TAG:
+ ath12k_htt_print_tx_tqm_gen_mpdu_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_LIST_MPDU_TAG:
+ ath12k_htt_print_tx_tqm_list_mpdu_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG:
+ ath12k_htt_print_tx_tqm_list_mpdu_cnt_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_PDEV_TAG:
+ ath12k_htt_print_tx_tqm_pdev_stats_tlv(tag_buf, len, stats_req);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void ath12k_debugfs_htt_ext_stats_handler(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k_htt_extd_stats_msg *msg;
+ struct debug_htt_stats_req *stats_req;
+ struct ath12k *ar;
+ u32 len, pdev_id, stats_info;
+ u64 cookie;
+ int ret;
+ bool send_completion = false;
+
+ msg = (struct ath12k_htt_extd_stats_msg *)skb->data;
+ cookie = le64_to_cpu(msg->cookie);
+
+ if (u64_get_bits(cookie, ATH12K_HTT_STATS_COOKIE_MSB) !=
+ ATH12K_HTT_STATS_MAGIC_VALUE) {
+ ath12k_warn(ab, "received invalid htt ext stats event\n");
+ return;
+ }
+
+ pdev_id = u64_get_bits(cookie, ATH12K_HTT_STATS_COOKIE_LSB);
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ if (!ar) {
+ ath12k_warn(ab, "failed to get ar for pdev_id %d\n", pdev_id);
+ goto exit;
+ }
+
+ stats_req = ar->debug.htt_stats.stats_req;
+ if (!stats_req)
+ goto exit;
+
+ spin_lock_bh(&ar->data_lock);
+
+ stats_info = le32_to_cpu(msg->info1);
+ stats_req->done = u32_get_bits(stats_info, ATH12K_HTT_T2H_EXT_STATS_INFO1_DONE);
+ if (stats_req->done)
+ send_completion = true;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ len = u32_get_bits(stats_info, ATH12K_HTT_T2H_EXT_STATS_INFO1_LENGTH);
+ if (len > skb->len) {
+ ath12k_warn(ab, "invalid length %d for HTT stats", len);
+ goto exit;
+ }
+
+ ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len,
+ ath12k_dbg_htt_ext_stats_parse,
+ stats_req);
+ if (ret)
+ ath12k_warn(ab, "Failed to parse tlv %d\n", ret);
+
+ if (send_completion)
+ complete(&stats_req->htt_stats_rcvd);
+exit:
+ rcu_read_unlock();
+}
+
+static ssize_t ath12k_read_htt_stats_type(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k *ar = file->private_data;
+ enum ath12k_dbg_htt_ext_stats_type type;
+ char buf[32];
+ size_t len;
+
+ mutex_lock(&ar->conf_mutex);
+ type = ar->debug.htt_stats.type;
+ mutex_unlock(&ar->conf_mutex);
+
+ len = scnprintf(buf, sizeof(buf), "%u\n", type);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath12k_write_htt_stats_type(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k *ar = file->private_data;
+ enum ath12k_dbg_htt_ext_stats_type type;
+ unsigned int cfg_param[4] = {0};
+ const int size = 32;
+ int num_args;
+
+ char *buf __free(kfree) = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, user_buf, min_t(size_t, size - 1, count)))
+ return -EFAULT;
+
+ num_args = sscanf(buf, "%u %u %u %u %u\n", &type, &cfg_param[0],
+ &cfg_param[1], &cfg_param[2], &cfg_param[3]);
+ if (!num_args || num_args > 5)
+ return -EINVAL;
+
+ if (type == ATH12K_DBG_HTT_EXT_STATS_RESET ||
+ type >= ATH12K_DBG_HTT_NUM_EXT_STATS)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ar->debug.htt_stats.type = type;
+ ar->debug.htt_stats.cfg_param[0] = cfg_param[0];
+ ar->debug.htt_stats.cfg_param[1] = cfg_param[1];
+ ar->debug.htt_stats.cfg_param[2] = cfg_param[2];
+ ar->debug.htt_stats.cfg_param[3] = cfg_param[3];
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static const struct file_operations fops_htt_stats_type = {
+ .read = ath12k_read_htt_stats_type,
+ .write = ath12k_write_htt_stats_type,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath12k_debugfs_htt_stats_req(struct ath12k *ar)
+{
+ struct debug_htt_stats_req *stats_req = ar->debug.htt_stats.stats_req;
+ enum ath12k_dbg_htt_ext_stats_type type = stats_req->type;
+ u64 cookie;
+ int ret, pdev_id;
+ struct htt_ext_stats_cfg_params cfg_params = { 0 };
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ init_completion(&stats_req->htt_stats_rcvd);
+
+ pdev_id = ath12k_mac_get_target_pdev_id(ar);
+ stats_req->done = false;
+ stats_req->pdev_id = pdev_id;
+
+ cookie = u64_encode_bits(ATH12K_HTT_STATS_MAGIC_VALUE,
+ ATH12K_HTT_STATS_COOKIE_MSB);
+ cookie |= u64_encode_bits(pdev_id, ATH12K_HTT_STATS_COOKIE_LSB);
+
+ if (stats_req->override_cfg_param) {
+ cfg_params.cfg0 = stats_req->cfg_param[0];
+ cfg_params.cfg1 = stats_req->cfg_param[1];
+ cfg_params.cfg2 = stats_req->cfg_param[2];
+ cfg_params.cfg3 = stats_req->cfg_param[3];
+ }
+
+ ret = ath12k_dp_tx_htt_h2t_ext_stats_req(ar, type, &cfg_params, cookie);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send htt stats request: %d\n", ret);
+ return ret;
+ }
+ if (!wait_for_completion_timeout(&stats_req->htt_stats_rcvd, 3 * HZ)) {
+ spin_lock_bh(&ar->data_lock);
+ if (!stats_req->done) {
+ stats_req->done = true;
+ spin_unlock_bh(&ar->data_lock);
+ ath12k_warn(ar->ab, "stats request timed out\n");
+ return -ETIMEDOUT;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ return 0;
+}
+
+static int ath12k_open_htt_stats(struct inode *inode,
+ struct file *file)
+{
+ struct ath12k *ar = inode->i_private;
+ struct debug_htt_stats_req *stats_req;
+ enum ath12k_dbg_htt_ext_stats_type type = ar->debug.htt_stats.type;
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+ int ret;
+
+ if (type == ATH12K_DBG_HTT_EXT_STATS_RESET)
+ return -EPERM;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ah->state != ATH12K_HW_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ if (ar->debug.htt_stats.stats_req) {
+ ret = -EAGAIN;
+ goto err_unlock;
+ }
+
+ stats_req = kzalloc(sizeof(*stats_req) + ATH12K_HTT_STATS_BUF_SIZE, GFP_KERNEL);
+ if (!stats_req) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ ar->debug.htt_stats.stats_req = stats_req;
+ stats_req->type = type;
+ stats_req->cfg_param[0] = ar->debug.htt_stats.cfg_param[0];
+ stats_req->cfg_param[1] = ar->debug.htt_stats.cfg_param[1];
+ stats_req->cfg_param[2] = ar->debug.htt_stats.cfg_param[2];
+ stats_req->cfg_param[3] = ar->debug.htt_stats.cfg_param[3];
+ stats_req->override_cfg_param = !!stats_req->cfg_param[0] ||
+ !!stats_req->cfg_param[1] ||
+ !!stats_req->cfg_param[2] ||
+ !!stats_req->cfg_param[3];
+
+ ret = ath12k_debugfs_htt_stats_req(ar);
+ if (ret < 0)
+ goto out;
+
+ file->private_data = stats_req;
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+out:
+ kfree(stats_req);
+ ar->debug.htt_stats.stats_req = NULL;
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath12k_release_htt_stats(struct inode *inode,
+ struct file *file)
+{
+ struct ath12k *ar = inode->i_private;
+
+ mutex_lock(&ar->conf_mutex);
+ kfree(file->private_data);
+ ar->debug.htt_stats.stats_req = NULL;
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static ssize_t ath12k_read_htt_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct debug_htt_stats_req *stats_req = file->private_data;
+ char *buf;
+ u32 length;
+
+ buf = stats_req->buf;
+ length = min_t(u32, stats_req->buf_len, ATH12K_HTT_STATS_BUF_SIZE);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, length);
+}
+
+static const struct file_operations fops_dump_htt_stats = {
+ .open = ath12k_open_htt_stats,
+ .release = ath12k_release_htt_stats,
+ .read = ath12k_read_htt_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath12k_write_htt_stats_reset(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k *ar = file->private_data;
+ enum ath12k_dbg_htt_ext_stats_type type;
+ struct htt_ext_stats_cfg_params cfg_params = { 0 };
+ u8 param_pos;
+ int ret;
+
+ ret = kstrtou32_from_user(user_buf, count, 0, &type);
+ if (ret)
+ return ret;
+
+ if (type >= ATH12K_DBG_HTT_NUM_EXT_STATS ||
+ type == ATH12K_DBG_HTT_EXT_STATS_RESET)
+ return -E2BIG;
+
+ mutex_lock(&ar->conf_mutex);
+ cfg_params.cfg0 = HTT_STAT_DEFAULT_RESET_START_OFFSET;
+ param_pos = (type >> 5) + 1;
+
+ switch (param_pos) {
+ case ATH12K_HTT_STATS_RESET_PARAM_CFG_32_BYTES:
+ cfg_params.cfg1 = 1 << (cfg_params.cfg0 + type);
+ break;
+ case ATH12K_HTT_STATS_RESET_PARAM_CFG_64_BYTES:
+ cfg_params.cfg2 = ATH12K_HTT_STATS_RESET_BITMAP32_BIT(cfg_params.cfg0 +
+ type);
+ break;
+ case ATH12K_HTT_STATS_RESET_PARAM_CFG_128_BYTES:
+ cfg_params.cfg3 = ATH12K_HTT_STATS_RESET_BITMAP64_BIT(cfg_params.cfg0 +
+ type);
+ break;
+ default:
+ break;
+ }
+
+ ret = ath12k_dp_tx_htt_h2t_ext_stats_req(ar,
+ ATH12K_DBG_HTT_EXT_STATS_RESET,
+ &cfg_params,
+ 0ULL);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send htt stats request: %d\n", ret);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+ }
+
+ ar->debug.htt_stats.reset = type;
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static const struct file_operations fops_htt_stats_reset = {
+ .write = ath12k_write_htt_stats_reset,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath12k_debugfs_htt_stats_register(struct ath12k *ar)
+{
+ debugfs_create_file("htt_stats_type", 0600, ar->debug.debugfs_pdev,
+ ar, &fops_htt_stats_type);
+ debugfs_create_file("htt_stats", 0400, ar->debug.debugfs_pdev,
+ ar, &fops_dump_htt_stats);
+ debugfs_create_file("htt_stats_reset", 0200, ar->debug.debugfs_pdev,
+ ar, &fops_htt_stats_reset);
+}
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
new file mode 100644
index 000000000000..6294a082cf8a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
@@ -0,0 +1,567 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef DEBUG_HTT_STATS_H
+#define DEBUG_HTT_STATS_H
+
+#define ATH12K_HTT_STATS_BUF_SIZE (1024 * 512)
+#define ATH12K_HTT_STATS_COOKIE_LSB GENMASK_ULL(31, 0)
+#define ATH12K_HTT_STATS_COOKIE_MSB GENMASK_ULL(63, 32)
+#define ATH12K_HTT_STATS_MAGIC_VALUE 0xF0F0F0F0
+#define ATH12K_HTT_STATS_SUBTYPE_MAX 16
+#define ATH12K_HTT_MAX_STRING_LEN 256
+
+#define ATH12K_HTT_STATS_RESET_BITMAP32_OFFSET(_idx) ((_idx) & 0x1f)
+#define ATH12K_HTT_STATS_RESET_BITMAP64_OFFSET(_idx) ((_idx) & 0x3f)
+#define ATH12K_HTT_STATS_RESET_BITMAP32_BIT(_idx) (1 << \
+ ATH12K_HTT_STATS_RESET_BITMAP32_OFFSET(_idx))
+#define ATH12K_HTT_STATS_RESET_BITMAP64_BIT(_idx) (1 << \
+ ATH12K_HTT_STATS_RESET_BITMAP64_OFFSET(_idx))
+
+void ath12k_debugfs_htt_stats_register(struct ath12k *ar);
+
+#ifdef CONFIG_ATH12K_DEBUGFS
+void ath12k_debugfs_htt_ext_stats_handler(struct ath12k_base *ab,
+ struct sk_buff *skb);
+#else /* CONFIG_ATH12K_DEBUGFS */
+static inline void ath12k_debugfs_htt_ext_stats_handler(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+}
+#endif
+
+/**
+ * DOC: target -> host extended statistics upload
+ *
+ * The following field definitions describe the format of the HTT
+ * target to host stats upload confirmation message.
+ * The message contains a cookie echoed from the HTT host->target stats
+ * upload request, which identifies which request the confirmation is
+ * for, and a single stats request can span multiple HTT stats indications
+ * due to the HTT message size limitation, so every HTT ext stats
+ * indication carries tag-length-value stats information elements.
+ * The tag-length header for each HTT stats IND message also includes a
+ * status field, to indicate whether the request for the stat type in
+ * question was fully met, partially met, unable to be met, or invalid
+ * (if the stat type in question is disabled in the target).
+ * A Done bit set to 1 indicates the end of the stats info elements.
+ *
+ *
+ * |31 16|15 12|11|10 8|7 5|4 0|
+ * |--------------------------------------------------------------|
+ * | reserved | msg type |
+ * |--------------------------------------------------------------|
+ * | cookie LSBs |
+ * |--------------------------------------------------------------|
+ * | cookie MSBs |
+ * |--------------------------------------------------------------|
+ * | stats entry length | rsvd | D| S | stat type |
+ * |--------------------------------------------------------------|
+ * | type-specific stats info |
+ * | (see debugfs_htt_stats.h) |
+ * |--------------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: Identifies this as an extended statistics upload confirmation
+ * message.
+ * Value: 0x1c
+ * - COOKIE_LSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: LSBs of the opaque cookie specified by the host-side requestor
+ * - COOKIE_MSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: MSBs of the opaque cookie specified by the host-side requestor
+ *
+ * Stats Information Element tag-length header fields:
+ * - STAT_TYPE
+ * Bits 7:0
+ * Purpose: identifies the type of statistics info held in the
+ * following information element
+ * Value: ath12k_dbg_htt_ext_stats_type
+ * - STATUS
+ * Bits 10:8
+ * Purpose: indicate whether the requested stats are present
+ * Value:
+ * 0 -> The requested stats have been delivered in full
+ * 1 -> The requested stats have been delivered in part
+ * 2 -> The requested stats could not be delivered (error case)
+ * 3 -> The requested stat type is not recognized (invalid)
+ * - DONE
+ * Bit 11
+ * Purpose:
+ * Indicates the completion of the stats entry; this will be the last
+ * stats conf HTT segment for the requested stats type.
+ * Value:
+ * 0 -> the stats retrieval is ongoing
+ * 1 -> the stats retrieval is complete
+ * - LENGTH
+ * Bits 31:16
+ * Purpose: indicate the stats information size
+ * Value: This field specifies the number of bytes of stats information
+ * that follows the element tag-length header.
+ * It is expected but not required that this length is a multiple of
+ * 4 bytes.
+ */
+
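+/*
+ * Illustrative sketch only, not part of the interface definition: given the
+ * layout above, a T2H handler is expected to recover the per-indication
+ * fields roughly as follows (mirroring ath12k_debugfs_htt_ext_stats_handler):
+ *
+ *	info = le32_to_cpu(msg->info1);
+ *	done = u32_get_bits(info, ATH12K_HTT_T2H_EXT_STATS_INFO1_DONE);
+ *	len = u32_get_bits(info, ATH12K_HTT_T2H_EXT_STATS_INFO1_LENGTH);
+ *	pdev_id = u64_get_bits(le64_to_cpu(msg->cookie),
+ *			       ATH12K_HTT_STATS_COOKIE_LSB);
+ *
+ * and stop waiting for further segments once the Done bit is set.
+ */
+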
+#define ATH12K_HTT_T2H_EXT_STATS_INFO1_DONE BIT(11)
+#define ATH12K_HTT_T2H_EXT_STATS_INFO1_LENGTH GENMASK(31, 16)
+
+struct ath12k_htt_extd_stats_msg {
+ __le32 info0;
+ __le64 cookie;
+ __le32 info1;
+ u8 data[];
+} __packed;
+
+/* htt_dbg_ext_stats_type */
+enum ath12k_dbg_htt_ext_stats_type {
+ ATH12K_DBG_HTT_EXT_STATS_RESET = 0,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TX = 1,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
+
+ /* keep this last */
+ ATH12K_DBG_HTT_NUM_EXT_STATS,
+};
+
+enum ath12k_dbg_htt_tlv_tag {
+ HTT_STATS_TX_PDEV_CMN_TAG = 0,
+ HTT_STATS_TX_PDEV_UNDERRUN_TAG = 1,
+ HTT_STATS_TX_PDEV_SIFS_TAG = 2,
+ HTT_STATS_TX_PDEV_FLUSH_TAG = 3,
+ HTT_STATS_TX_TQM_GEN_MPDU_TAG = 11,
+ HTT_STATS_TX_TQM_LIST_MPDU_TAG = 12,
+ HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG = 13,
+ HTT_STATS_TX_TQM_CMN_TAG = 14,
+ HTT_STATS_TX_TQM_PDEV_TAG = 15,
+ HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG = 36,
+ HTT_STATS_TX_SCHED_CMN_TAG = 37,
+ HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG = 39,
+ HTT_STATS_TX_TQM_ERROR_STATS_TAG = 43,
+ HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG = 44,
+ HTT_STATS_HW_INTR_MISC_TAG = 54,
+ HTT_STATS_HW_PDEV_ERRS_TAG = 56,
+ HTT_STATS_WHAL_TX_TAG = 66,
+ HTT_STATS_TX_PDEV_SIFS_HIST_TAG = 67,
+ HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG = 86,
+ HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG = 87,
+ HTT_STATS_HW_WAR_TAG = 89,
+ HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG = 100,
+ HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG = 102,
+ HTT_STATS_MU_PPDU_DIST_TAG = 129,
+
+ HTT_STATS_MAX_TAG,
+};
+
+#define ATH12K_HTT_STATS_MAC_ID GENMASK(7, 0)
+
+#define ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_STATS 9
+#define ATH12K_HTT_TX_PDEV_MAX_FLUSH_REASON_STATS 150
+
+/* MU MIMO distribution stats are stored as a 2-dimensional array,
+ * with the first dimension denoting nr4 (index 0) or nr8 (index 1);
+ * see the indexing sketch below.
+ */
+#define ATH12K_HTT_STATS_NUM_NR_BINS 2
+#define ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST 10
+#define ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS 10
+#define ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS 9
+#define ATH12K_HTT_STATS_NUM_SCHED_STATUS_WORDS \
+ (ATH12K_HTT_STATS_NUM_NR_BINS * ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS)
+#define ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS \
+ (ATH12K_HTT_STATS_NUM_NR_BINS * ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST)
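+
+/*
+ * Illustrative sketch only, assuming the flattened row-major layout implied
+ * above: a per-burst counter for a given NR bin in
+ * struct ath12k_htt_tx_pdev_mu_ppdu_dist_stats_tlv would be addressed as
+ *
+ *	idx = bin * ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST + burst;
+ *	val = le32_to_cpu(stats->num_ppdu_cmpl_per_burst[idx]);
+ *
+ * where bin is 0 for nr4 and 1 for nr8.
+ */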
+
+enum ath12k_htt_tx_pdev_underrun_enum {
+ HTT_STATS_TX_PDEV_NO_DATA_UNDERRUN = 0,
+ HTT_STATS_TX_PDEV_DATA_UNDERRUN_BETWEEN_MPDU = 1,
+ HTT_STATS_TX_PDEV_DATA_UNDERRUN_WITHIN_MPDU = 2,
+ HTT_TX_PDEV_MAX_URRN_STATS = 3,
+};
+
+enum ath12k_htt_stats_reset_cfg_param_alloc_pos {
+ ATH12K_HTT_STATS_RESET_PARAM_CFG_32_BYTES = 1,
+ ATH12K_HTT_STATS_RESET_PARAM_CFG_64_BYTES,
+ ATH12K_HTT_STATS_RESET_PARAM_CFG_128_BYTES,
+};
+
+struct debug_htt_stats_req {
+ bool done;
+ bool override_cfg_param;
+ u8 pdev_id;
+ enum ath12k_dbg_htt_ext_stats_type type;
+ u32 cfg_param[4];
+ u8 peer_addr[ETH_ALEN];
+ struct completion htt_stats_rcvd;
+ u32 buf_len;
+ u8 buf[];
+};
+
+struct ath12k_htt_tx_pdev_stats_cmn_tlv {
+ __le32 mac_id__word;
+ __le32 hw_queued;
+ __le32 hw_reaped;
+ __le32 underrun;
+ __le32 hw_paused;
+ __le32 hw_flush;
+ __le32 hw_filt;
+ __le32 tx_abort;
+ __le32 mpdu_requed;
+ __le32 tx_xretry;
+ __le32 data_rc;
+ __le32 mpdu_dropped_xretry;
+ __le32 illgl_rate_phy_err;
+ __le32 cont_xretry;
+ __le32 tx_timeout;
+ __le32 pdev_resets;
+ __le32 phy_underrun;
+ __le32 txop_ovf;
+ __le32 seq_posted;
+ __le32 seq_failed_queueing;
+ __le32 seq_completed;
+ __le32 seq_restarted;
+ __le32 mu_seq_posted;
+ __le32 seq_switch_hw_paused;
+ __le32 next_seq_posted_dsr;
+ __le32 seq_posted_isr;
+ __le32 seq_ctrl_cached;
+ __le32 mpdu_count_tqm;
+ __le32 msdu_count_tqm;
+ __le32 mpdu_removed_tqm;
+ __le32 msdu_removed_tqm;
+ __le32 mpdus_sw_flush;
+ __le32 mpdus_hw_filter;
+ __le32 mpdus_truncated;
+ __le32 mpdus_ack_failed;
+ __le32 mpdus_expired;
+ __le32 mpdus_seq_hw_retry;
+ __le32 ack_tlv_proc;
+ __le32 coex_abort_mpdu_cnt_valid;
+ __le32 coex_abort_mpdu_cnt;
+ __le32 num_total_ppdus_tried_ota;
+ __le32 num_data_ppdus_tried_ota;
+ __le32 local_ctrl_mgmt_enqued;
+ __le32 local_ctrl_mgmt_freed;
+ __le32 local_data_enqued;
+ __le32 local_data_freed;
+ __le32 mpdu_tried;
+ __le32 isr_wait_seq_posted;
+
+ __le32 tx_active_dur_us_low;
+ __le32 tx_active_dur_us_high;
+ __le32 remove_mpdus_max_retries;
+ __le32 comp_delivered;
+ __le32 ppdu_ok;
+ __le32 self_triggers;
+ __le32 tx_time_dur_data;
+ __le32 seq_qdepth_repost_stop;
+ __le32 mu_seq_min_msdu_repost_stop;
+ __le32 seq_min_msdu_repost_stop;
+ __le32 seq_txop_repost_stop;
+ __le32 next_seq_cancel;
+ __le32 fes_offsets_err_cnt;
+ __le32 num_mu_peer_blacklisted;
+ __le32 mu_ofdma_seq_posted;
+ __le32 ul_mumimo_seq_posted;
+ __le32 ul_ofdma_seq_posted;
+
+ __le32 thermal_suspend_cnt;
+ __le32 dfs_suspend_cnt;
+ __le32 tx_abort_suspend_cnt;
+ __le32 tgt_specific_opaque_txq_suspend_info;
+ __le32 last_suspend_reason;
+} __packed;
+
+struct ath12k_htt_tx_pdev_stats_urrn_tlv {
+ DECLARE_FLEX_ARRAY(__le32, urrn_stats);
+} __packed;
+
+struct ath12k_htt_tx_pdev_stats_flush_tlv {
+ DECLARE_FLEX_ARRAY(__le32, flush_errs);
+} __packed;
+
+struct ath12k_htt_tx_pdev_stats_phy_err_tlv {
+ DECLARE_FLEX_ARRAY(__le32, phy_errs);
+} __packed;
+
+struct ath12k_htt_tx_pdev_stats_sifs_tlv {
+ DECLARE_FLEX_ARRAY(__le32, sifs_status);
+} __packed;
+
+struct ath12k_htt_pdev_ctrl_path_tx_stats_tlv {
+ __le32 fw_tx_mgmt_subtype[ATH12K_HTT_STATS_SUBTYPE_MAX];
+} __packed;
+
+struct ath12k_htt_tx_pdev_stats_sifs_hist_tlv {
+ DECLARE_FLEX_ARRAY(__le32, sifs_hist_status);
+} __packed;
+
+enum ath12k_htt_stats_hw_mode {
+ ATH12K_HTT_STATS_HWMODE_AC = 0,
+ ATH12K_HTT_STATS_HWMODE_AX = 1,
+ ATH12K_HTT_STATS_HWMODE_BE = 2,
+};
+
+struct ath12k_htt_tx_pdev_mu_ppdu_dist_stats_tlv {
+ __le32 hw_mode;
+ __le32 num_seq_term_status[ATH12K_HTT_STATS_NUM_SCHED_STATUS_WORDS];
+ __le32 num_ppdu_cmpl_per_burst[ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS];
+ __le32 num_seq_posted[ATH12K_HTT_STATS_NUM_NR_BINS];
+ __le32 num_ppdu_posted_per_burst[ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS];
+} __packed;
+
+#define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID GENMASK(7, 0)
+#define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID GENMASK(15, 8)
+
+#define ATH12K_HTT_TX_PDEV_NUM_SCHED_ORDER_LOG 20
+
+struct ath12k_htt_stats_tx_sched_cmn_tlv {
+ __le32 mac_id__word;
+ __le32 current_timestamp;
+} __packed;
+
+struct ath12k_htt_tx_pdev_stats_sched_per_txq_tlv {
+ __le32 mac_id__word;
+ __le32 sched_policy;
+ __le32 last_sched_cmd_posted_timestamp;
+ __le32 last_sched_cmd_compl_timestamp;
+ __le32 sched_2_tac_lwm_count;
+ __le32 sched_2_tac_ring_full;
+ __le32 sched_cmd_post_failure;
+ __le32 num_active_tids;
+ __le32 num_ps_schedules;
+ __le32 sched_cmds_pending;
+ __le32 num_tid_register;
+ __le32 num_tid_unregister;
+ __le32 num_qstats_queried;
+ __le32 qstats_update_pending;
+ __le32 last_qstats_query_timestamp;
+ __le32 num_tqm_cmdq_full;
+ __le32 num_de_sched_algo_trigger;
+ __le32 num_rt_sched_algo_trigger;
+ __le32 num_tqm_sched_algo_trigger;
+ __le32 notify_sched;
+ __le32 dur_based_sendn_term;
+ __le32 su_notify2_sched;
+ __le32 su_optimal_queued_msdus_sched;
+ __le32 su_delay_timeout_sched;
+ __le32 su_min_txtime_sched_delay;
+ __le32 su_no_delay;
+ __le32 num_supercycles;
+ __le32 num_subcycles_with_sort;
+ __le32 num_subcycles_no_sort;
+} __packed;
+
+struct ath12k_htt_sched_txq_cmd_posted_tlv {
+ DECLARE_FLEX_ARRAY(__le32, sched_cmd_posted);
+} __packed;
+
+struct ath12k_htt_sched_txq_cmd_reaped_tlv {
+ DECLARE_FLEX_ARRAY(__le32, sched_cmd_reaped);
+} __packed;
+
+struct ath12k_htt_sched_txq_sched_order_su_tlv {
+ DECLARE_FLEX_ARRAY(__le32, sched_order_su);
+} __packed;
+
+struct ath12k_htt_sched_txq_sched_ineligibility_tlv {
+ DECLARE_FLEX_ARRAY(__le32, sched_ineligibility);
+} __packed;
+
+enum ath12k_htt_sched_txq_supercycle_triggers_tlv_enum {
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_NONE = 0,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_FORCED,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_LESS_NUM_TIDQ_ENTRIES,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_LESS_NUM_ACTIVE_TIDS,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX_ITR_REACHED,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_DUR_THRESHOLD_REACHED,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_TWT_TRIGGER,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX,
+};
+
+struct ath12k_htt_sched_txq_supercycle_triggers_tlv {
+ DECLARE_FLEX_ARRAY(__le32, supercycle_triggers);
+} __packed;
+
+struct ath12k_htt_hw_stats_pdev_errs_tlv {
+ __le32 mac_id__word;
+ __le32 tx_abort;
+ __le32 tx_abort_fail_count;
+ __le32 rx_abort;
+ __le32 rx_abort_fail_count;
+ __le32 warm_reset;
+ __le32 cold_reset;
+ __le32 tx_flush;
+ __le32 tx_glb_reset;
+ __le32 tx_txq_reset;
+ __le32 rx_timeout_reset;
+ __le32 mac_cold_reset_restore_cal;
+ __le32 mac_cold_reset;
+ __le32 mac_warm_reset;
+ __le32 mac_only_reset;
+ __le32 phy_warm_reset;
+ __le32 phy_warm_reset_ucode_trig;
+ __le32 mac_warm_reset_restore_cal;
+ __le32 mac_sfm_reset;
+ __le32 phy_warm_reset_m3_ssr;
+ __le32 phy_warm_reset_reason_phy_m3;
+ __le32 phy_warm_reset_reason_tx_hw_stuck;
+ __le32 phy_warm_reset_reason_num_rx_frame_stuck;
+ __le32 phy_warm_reset_reason_wal_rx_rec_rx_busy;
+ __le32 phy_warm_reset_reason_wal_rx_rec_mac_hng;
+ __le32 phy_warm_reset_reason_mac_conv_phy_reset;
+ __le32 wal_rx_recovery_rst_mac_hang_cnt;
+ __le32 wal_rx_recovery_rst_known_sig_cnt;
+ __le32 wal_rx_recovery_rst_no_rx_cnt;
+ __le32 wal_rx_recovery_rst_no_rx_consec_cnt;
+ __le32 wal_rx_recovery_rst_rx_busy_cnt;
+ __le32 wal_rx_recovery_rst_phy_mac_hang_cnt;
+ __le32 rx_flush_cnt;
+ __le32 phy_warm_reset_reason_tx_exp_cca_stuck;
+ __le32 phy_warm_reset_reason_tx_consec_flsh_war;
+ __le32 phy_warm_reset_reason_tx_hwsch_reset_war;
+ __le32 phy_warm_reset_reason_hwsch_cca_wdog_war;
+ __le32 fw_rx_rings_reset;
+ __le32 rx_dest_drain_rx_descs_leak_prevented;
+ __le32 rx_dest_drain_rx_descs_saved_cnt;
+ __le32 rx_dest_drain_rxdma2reo_leak_detected;
+ __le32 rx_dest_drain_rxdma2fw_leak_detected;
+ __le32 rx_dest_drain_rxdma2wbm_leak_detected;
+ __le32 rx_dest_drain_rxdma1_2sw_leak_detected;
+ __le32 rx_dest_drain_rx_drain_ok_mac_idle;
+ __le32 rx_dest_drain_ok_mac_not_idle;
+ __le32 rx_dest_drain_prerequisite_invld;
+ __le32 rx_dest_drain_skip_non_lmac_reset;
+ __le32 rx_dest_drain_hw_fifo_notempty_post_wait;
+} __packed;
+
+#define ATH12K_HTT_STATS_MAX_HW_INTR_NAME_LEN 8
+struct ath12k_htt_hw_stats_intr_misc_tlv {
+ u8 hw_intr_name[ATH12K_HTT_STATS_MAX_HW_INTR_NAME_LEN];
+ __le32 mask;
+ __le32 count;
+} __packed;
+
+struct ath12k_htt_hw_stats_whal_tx_tlv {
+ __le32 mac_id__word;
+ __le32 last_unpause_ppdu_id;
+ __le32 hwsch_unpause_wait_tqm_write;
+ __le32 hwsch_dummy_tlv_skipped;
+ __le32 hwsch_misaligned_offset_received;
+ __le32 hwsch_reset_count;
+ __le32 hwsch_dev_reset_war;
+ __le32 hwsch_delayed_pause;
+ __le32 hwsch_long_delayed_pause;
+ __le32 sch_rx_ppdu_no_response;
+ __le32 sch_selfgen_response;
+ __le32 sch_rx_sifs_resp_trigger;
+} __packed;
+
+struct ath12k_htt_hw_war_stats_tlv {
+ __le32 mac_id__word;
+ DECLARE_FLEX_ARRAY(__le32, hw_wars);
+} __packed;
+
+struct ath12k_htt_tx_tqm_cmn_stats_tlv {
+ __le32 mac_id__word;
+ __le32 max_cmdq_id;
+ __le32 list_mpdu_cnt_hist_intvl;
+ __le32 add_msdu;
+ __le32 q_empty;
+ __le32 q_not_empty;
+ __le32 drop_notification;
+ __le32 desc_threshold;
+ __le32 hwsch_tqm_invalid_status;
+ __le32 missed_tqm_gen_mpdus;
+ __le32 tqm_active_tids;
+ __le32 tqm_inactive_tids;
+ __le32 tqm_active_msduq_flows;
+ __le32 msduq_timestamp_updates;
+ __le32 msduq_updates_mpdu_head_info_cmd;
+ __le32 msduq_updates_emp_to_nonemp_status;
+ __le32 get_mpdu_head_info_cmds_by_query;
+ __le32 get_mpdu_head_info_cmds_by_tac;
+ __le32 gen_mpdu_cmds_by_query;
+ __le32 high_prio_q_not_empty;
+} __packed;
+
+struct ath12k_htt_tx_tqm_error_stats_tlv {
+ __le32 q_empty_failure;
+ __le32 q_not_empty_failure;
+ __le32 add_msdu_failure;
+ __le32 tqm_cache_ctl_err;
+ __le32 tqm_soft_reset;
+ __le32 tqm_reset_num_in_use_link_descs;
+ __le32 tqm_reset_num_lost_link_descs;
+ __le32 tqm_reset_num_lost_host_tx_buf_cnt;
+ __le32 tqm_reset_num_in_use_internal_tqm;
+ __le32 tqm_reset_num_in_use_idle_link_rng;
+ __le32 tqm_reset_time_to_tqm_hang_delta_ms;
+ __le32 tqm_reset_recovery_time_ms;
+ __le32 tqm_reset_num_peers_hdl;
+ __le32 tqm_reset_cumm_dirty_hw_mpduq_cnt;
+ __le32 tqm_reset_cumm_dirty_hw_msduq_proc;
+ __le32 tqm_reset_flush_cache_cmd_su_cnt;
+ __le32 tqm_reset_flush_cache_cmd_other_cnt;
+ __le32 tqm_reset_flush_cache_cmd_trig_type;
+ __le32 tqm_reset_flush_cache_cmd_trig_cfg;
+ __le32 tqm_reset_flush_cmd_skp_status_null;
+} __packed;
+
+struct ath12k_htt_tx_tqm_gen_mpdu_stats_tlv {
+ DECLARE_FLEX_ARRAY(__le32, gen_mpdu_end_reason);
+} __packed;
+
+#define ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_END_REASON 16
+#define ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS 16
+
+struct ath12k_htt_tx_tqm_list_mpdu_stats_tlv {
+ DECLARE_FLEX_ARRAY(__le32, list_mpdu_end_reason);
+} __packed;
+
+struct ath12k_htt_tx_tqm_list_mpdu_cnt_tlv {
+ DECLARE_FLEX_ARRAY(__le32, list_mpdu_cnt_hist);
+} __packed;
+
+struct ath12k_htt_tx_tqm_pdev_stats_tlv {
+ __le32 msdu_count;
+ __le32 mpdu_count;
+ __le32 remove_msdu;
+ __le32 remove_mpdu;
+ __le32 remove_msdu_ttl;
+ __le32 send_bar;
+ __le32 bar_sync;
+ __le32 notify_mpdu;
+ __le32 sync_cmd;
+ __le32 write_cmd;
+ __le32 hwsch_trigger;
+ __le32 ack_tlv_proc;
+ __le32 gen_mpdu_cmd;
+ __le32 gen_list_cmd;
+ __le32 remove_mpdu_cmd;
+ __le32 remove_mpdu_tried_cmd;
+ __le32 mpdu_queue_stats_cmd;
+ __le32 mpdu_head_info_cmd;
+ __le32 msdu_flow_stats_cmd;
+ __le32 remove_msdu_cmd;
+ __le32 remove_msdu_ttl_cmd;
+ __le32 flush_cache_cmd;
+ __le32 update_mpduq_cmd;
+ __le32 enqueue;
+ __le32 enqueue_notify;
+ __le32 notify_mpdu_at_head;
+ __le32 notify_mpdu_state_valid;
+ __le32 sched_udp_notify1;
+ __le32 sched_udp_notify2;
+ __le32 sched_nonudp_notify1;
+ __le32 sched_nonudp_notify2;
+} __packed;
+
+#endif
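
The TLV structures above that end in DECLARE_FLEX_ARRAY() carry a variable number of __le32 counters; how many is implied by the TLV length reported in the HTT header, not by the struct itself. A minimal sketch of how a consumer might walk one of them, assuming a hypothetical helper name and a tag length already extracted by the HTT TLV iterator:

/* Illustrative only: dump the counters of a flex-array TLV such as
 * ath12k_htt_tx_tqm_gen_mpdu_stats_tlv. "tag_len" is the payload length
 * reported by the HTT TLV header for this tag.
 */
static void example_parse_gen_mpdu_tlv(const void *ptr, u16 tag_len)
{
	const struct ath12k_htt_tx_tqm_gen_mpdu_stats_tlv *tlv = ptr;
	u16 num = tag_len / sizeof(__le32);	/* number of __le32 counters */
	u16 i;

	for (i = 0; i < num; i++)
		pr_debug("gen_mpdu_end_reason[%u] = %u\n", i,
			 le32_to_cpu(tlv->gen_mpdu_end_reason[i]));
}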
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index 7843c76a82c1..61aa78d8bd8c 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -132,7 +132,9 @@ static int ath12k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
enum hal_ring_type type, int ring_num)
{
+ const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
const u8 *grp_mask;
+ int i;
switch (type) {
case HAL_WBM2SW_RELEASE:
@@ -140,6 +142,14 @@ static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
grp_mask = &ab->hw_params->ring_mask->rx_wbm_rel[0];
ring_num = 0;
} else {
+ map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
+ for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
+ if (ring_num == map[i].wbm_ring_num) {
+ ring_num = i;
+ break;
+ }
+ }
+
grp_mask = &ab->hw_params->ring_mask->tx[0];
}
break;
@@ -457,8 +467,6 @@ static void ath12k_dp_srng_common_cleanup(struct ath12k_base *ab)
ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
}
- ath12k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
- ath12k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
ath12k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
}
@@ -479,20 +487,6 @@ static int ath12k_dp_srng_common_setup(struct ath12k_base *ab)
goto err;
}
- ret = ath12k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
- DP_TCL_CMD_RING_SIZE);
- if (ret) {
- ath12k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
- goto err;
- }
-
- ret = ath12k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
- 0, 0, DP_TCL_STATUS_RING_SIZE);
- if (ret) {
- ath12k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
- goto err;
- }
-
for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
tx_comp_ring_num = map[i].wbm_ring_num;
@@ -616,6 +610,7 @@ static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
int i;
int ret = 0;
u32 end_offset, cookie;
+ enum hal_rx_buf_return_buf_manager rbm = dp->idle_link_rbm;
n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
ath12k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
@@ -646,7 +641,8 @@ static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
paddr = link_desc_banks[i].paddr;
while (n_entries) {
cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
- ath12k_hal_set_link_desc_addr(scatter_buf, cookie, paddr);
+ ath12k_hal_set_link_desc_addr(scatter_buf, cookie,
+ paddr, rbm);
n_entries--;
paddr += HAL_LINK_DESC_SIZE;
if (rem_entries) {
@@ -790,6 +786,7 @@ int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
u32 paddr;
int i, ret;
u32 cookie;
+ enum hal_rx_buf_return_buf_manager rbm = ab->dp.idle_link_rbm;
tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
tot_mem_sz += HAL_LINK_DESC_ALIGN;
@@ -850,8 +847,7 @@ int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
while (n_entries &&
(desc = ath12k_hal_srng_src_get_next_entry(ab, srng))) {
cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
- ath12k_hal_set_link_desc_addr(desc,
- cookie, paddr);
+ ath12k_hal_set_link_desc_addr(desc, cookie, paddr, rbm);
n_entries--;
paddr += HAL_LINK_DESC_SIZE;
}
@@ -881,11 +877,9 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
enum dp_monitor_mode monitor_mode;
u8 ring_mask;
- while (i < ab->hw_params->max_tx_ring) {
- if (ab->hw_params->ring_mask->tx[grp_id] &
- BIT(ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[i].wbm_ring_num))
- ath12k_dp_tx_completion_handler(ab, i);
- i++;
+ if (ab->hw_params->ring_mask->tx[grp_id]) {
+ i = fls(ab->hw_params->ring_mask->tx[grp_id]) - 1;
+ ath12k_dp_tx_completion_handler(ab, i);
}
if (ab->hw_params->ring_mask->rx_err[grp_id]) {
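
In the reworked tx completion dispatch above, fls() returns the 1-based position of the most significant bit set in the group's tx ring mask, so fls(mask) - 1 is the index handed to ath12k_dp_tx_completion_handler() instead of looping over every ring. A standalone illustration of that arithmetic (userspace stand-in for the kernel's fls()):

#include <stdio.h>

/* Userspace equivalent of the kernel's fls(): 1-based index of the most
 * significant set bit, 0 when no bit is set.
 */
static int fls_example(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int tx_mask = 0x4;	/* only ring 2 flagged for this group */

	/* fls(0x4) == 3, so the handler services ring index 3 - 1 = 2 */
	printf("serviced tx ring: %d\n", fls_example(tx_mask) - 1);
	return 0;
}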
@@ -921,8 +915,8 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
for (i = 0; i < ab->num_radios; i++) {
- for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
- int id = i * ab->hw_params->num_rxmda_per_pdev + j;
+ for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
+ int id = i * ab->hw_params->num_rxdma_per_pdev + j;
if (ring_mask & BIT(id)) {
work_done =
@@ -942,8 +936,8 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
monitor_mode = ATH12K_DP_TX_MONITOR_MODE;
ring_mask = ab->hw_params->ring_mask->tx_mon_dest[grp_id];
for (i = 0; i < ab->num_radios; i++) {
- for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
- int id = i * ab->hw_params->num_rxmda_per_pdev + j;
+ for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
+ int id = i * ab->hw_params->num_rxdma_per_pdev + j;
if (ring_mask & BIT(id)) {
work_done =
@@ -1031,7 +1025,7 @@ static void ath12k_dp_service_mon_ring(struct timer_list *t)
struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer);
int i;
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
ath12k_dp_mon_process_ring(ab, i, NULL, DP_MON_SERVICE_BUDGET,
ATH12K_DP_RX_MONITOR_MODE);
@@ -1355,13 +1349,14 @@ static inline void *ath12k_dp_cc_get_desc_addr_ptr(struct ath12k_base *ab,
struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
u32 cookie)
{
+ struct ath12k_dp *dp = &ab->dp;
struct ath12k_rx_desc_info **desc_addr_ptr;
u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;
ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);
- start_ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET;
+ start_ppt_idx = dp->rx_ppt_base + ATH12K_RX_SPT_PAGE_OFFSET;
end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES;
if (ppt_idx < start_ppt_idx ||
@@ -1369,6 +1364,7 @@ struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
spt_idx > ATH12K_MAX_SPT_ENTRIES)
return NULL;
+ ppt_idx = ppt_idx - dp->rx_ppt_base;
desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
return *desc_addr_ptr;
@@ -1403,7 +1399,7 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr;
struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr;
u32 i, j, pool_id, tx_spt_page;
- u32 ppt_idx;
+ u32 ppt_idx, cookie_ppt_idx;
spin_lock_bh(&dp->rx_desc_lock);
@@ -1418,10 +1414,11 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
}
ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET + i;
+ cookie_ppt_idx = dp->rx_ppt_base + ppt_idx;
dp->spt_info->rxbaddr[i] = &rx_descs[0];
for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
- rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(ppt_idx, j);
+ rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(cookie_ppt_idx, j);
rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);
@@ -1482,6 +1479,7 @@ static int ath12k_dp_cmem_init(struct ath12k_base *ab,
end = start + ATH12K_NUM_TX_SPT_PAGES;
break;
case ATH12K_DP_RX_DESC:
+ cmem_base += ATH12K_PPT_ADDR_OFFSET(dp->rx_ppt_base);
start = ATH12K_RX_SPT_PAGE_OFFSET;
end = start + ATH12K_NUM_RX_SPT_PAGES;
break;
@@ -1524,6 +1522,8 @@ static int ath12k_dp_cc_init(struct ath12k_base *ab)
return -ENOMEM;
}
+ dp->rx_ppt_base = ab->device_id * ATH12K_NUM_RX_SPT_PAGES;
+
for (i = 0; i < dp->num_spt_pages; i++) {
dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev,
ATH12K_PAGE_SIZE,
@@ -1587,6 +1587,24 @@ static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
return 0;
}
+static enum hal_rx_buf_return_buf_manager
+ath12k_dp_get_idle_link_rbm(struct ath12k_base *ab)
+{
+ switch (ab->device_id) {
+ case 0:
+ return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST;
+ case 1:
+ return HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST;
+ case 2:
+ return HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST;
+ default:
+ ath12k_warn(ab, "invalid device id %d, so choosing the default rbm\n",
+ ab->device_id);
+ WARN_ON(1);
+ return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST;
+ }
+}
+
int ath12k_dp_alloc(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
@@ -1603,6 +1621,7 @@ int ath12k_dp_alloc(struct ath12k_base *ab)
spin_lock_init(&dp->reo_cmd_lock);
dp->reo_cmd_cache_flush_count = 0;
+ dp->idle_link_rbm = ath12k_dp_get_idle_link_rbm(ab);
ret = ath12k_wbm_idle_ring_setup(ab, &n_link_desc);
if (ret) {
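
Each device in a multi-device (MLO-capable) setup now claims its own window of RX SPT pages: dp->rx_ppt_base = ab->device_id * ATH12K_NUM_RX_SPT_PAGES is added when RX cookies are generated and stripped again in ath12k_dp_get_rx_desc() before indexing the local page table, and the CMEM base is shifted by the same amount. A standalone sketch of the offset arithmetic, with placeholder page counts (the real values come from dp.h):

#include <stdio.h>

#define EXAMPLE_NUM_RX_SPT_PAGES	512	/* placeholder, not the real value */
#define EXAMPLE_RX_SPT_PAGE_OFFSET	0	/* placeholder, not the real value */

int main(void)
{
	unsigned int device_id = 1;
	unsigned int rx_ppt_base = device_id * EXAMPLE_NUM_RX_SPT_PAGES;
	unsigned int local_ppt_idx = EXAMPLE_RX_SPT_PAGE_OFFSET + 7;

	/* Cookie generation embeds the device-global PPT index ... */
	unsigned int cookie_ppt_idx = rx_ppt_base + local_ppt_idx;

	/* ... and the RX descriptor lookup subtracts the base again before
	 * indexing this device's own SPT pages.
	 */
	printf("global ppt idx %u -> local ppt idx %u\n",
	       cookie_ppt_idx, cookie_ppt_idx - rx_ppt_base);
	return 0;
}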
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index 5cf0d21ef184..b77497c14ac4 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -325,15 +325,15 @@ struct ath12k_dp {
u8 htt_tgt_ver_major;
u8 htt_tgt_ver_minor;
struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
+ enum hal_rx_buf_return_buf_manager idle_link_rbm;
struct dp_srng wbm_idle_ring;
struct dp_srng wbm_desc_rel_ring;
- struct dp_srng tcl_cmd_ring;
- struct dp_srng tcl_status_ring;
struct dp_srng reo_reinject_ring;
struct dp_srng rx_rel_ring;
struct dp_srng reo_except_ring;
struct dp_srng reo_cmd_ring;
struct dp_srng reo_status_ring;
+ enum ath12k_peer_metadata_version peer_metadata_ver;
struct dp_srng reo_dst_ring[DP_REO_DST_RING_MAX];
struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
@@ -351,6 +351,7 @@ struct ath12k_dp {
struct ath12k_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX];
struct ath12k_spt_info *spt_info;
u32 num_spt_pages;
+ u32 rx_ppt_base;
struct list_head rx_desc_free_list;
/* protects the free desc list */
spinlock_t rx_desc_lock;
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
index 6b0b72477540..5c6749bc4039 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -1903,43 +1903,6 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
break;
}
- case HAL_MON_BUF_ADDR: {
- struct dp_rxdma_mon_ring *buf_ring = &ab->dp.tx_mon_buf_ring;
- struct dp_mon_packet_info *packet_info =
- (struct dp_mon_packet_info *)tlv_data;
- int buf_id = u32_get_bits(packet_info->cookie,
- DP_RXDMA_BUF_COOKIE_BUF_ID);
- struct sk_buff *msdu;
- struct dp_mon_mpdu *mon_mpdu = tx_ppdu_info->tx_mon_mpdu;
- struct ath12k_skb_rxcb *rxcb;
-
- spin_lock_bh(&buf_ring->idr_lock);
- msdu = idr_remove(&buf_ring->bufs_idr, buf_id);
- spin_unlock_bh(&buf_ring->idr_lock);
-
- if (unlikely(!msdu)) {
- ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
- buf_id);
- return DP_MON_TX_STATUS_PPDU_NOT_DONE;
- }
-
- rxcb = ATH12K_SKB_RXCB(msdu);
- dma_unmap_single(ab->dev, rxcb->paddr,
- msdu->len + skb_tailroom(msdu),
- DMA_FROM_DEVICE);
-
- if (!mon_mpdu->head)
- mon_mpdu->head = msdu;
- else if (mon_mpdu->tail)
- mon_mpdu->tail->next = msdu;
-
- mon_mpdu->tail = msdu;
-
- ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
- status = DP_MON_TX_BUFFER_ADDR;
- break;
- }
-
case HAL_TX_MPDU_END:
list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
&tx_ppdu_info->dp_tx_mon_mpdu_list);
@@ -2088,8 +2051,7 @@ int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, int *budget,
mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id];
buf_ring = &dp->rxdma_mon_buf_ring;
} else {
- mon_dst_ring = &pdev_dp->tx_mon_dst_ring[srng_id];
- buf_ring = &dp->tx_mon_buf_ring;
+ return 0;
}
srng = &ab->hal.srng_list[mon_dst_ring->ring_id];
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index 75df622f25d8..14236d0a0c89 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -17,6 +17,7 @@
#include "dp_tx.h"
#include "peer.h"
#include "dp_mon.h"
+#include "debugfs_htt_stats.h"
#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
@@ -422,8 +423,6 @@ static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring);
- ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->tx_mon_buf_ring);
-
return 0;
}
@@ -476,15 +475,6 @@ static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
"failed to setup HAL_RXDMA_MONITOR_BUF\n");
return ret;
}
-
- ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab,
- &dp->tx_mon_buf_ring,
- HAL_TX_MONITOR_BUF);
- if (ret) {
- ath12k_warn(ab,
- "failed to setup HAL_TX_MONITOR_BUF\n");
- return ret;
- }
}
return 0;
@@ -496,10 +486,8 @@ static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar)
struct ath12k_base *ab = ar->ab;
int i;
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]);
- ath12k_dp_srng_cleanup(ab, &dp->tx_mon_dst_ring[i]);
- }
}
void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab)
@@ -543,7 +531,7 @@ static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
int ret;
u32 mac_id = dp->mac_id;
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ret = ath12k_dp_srng_setup(ar->ab,
&dp->rxdma_mon_dst_ring[i],
HAL_RXDMA_MONITOR_DST,
@@ -554,17 +542,6 @@ static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
"failed to setup HAL_RXDMA_MONITOR_DST\n");
return ret;
}
-
- ret = ath12k_dp_srng_setup(ar->ab,
- &dp->tx_mon_dst_ring[i],
- HAL_TX_MONITOR_DST,
- 0, mac_id + i,
- DP_TX_MONITOR_DEST_RING_SIZE);
- if (ret) {
- ath12k_warn(ar->ab,
- "failed to setup HAL_TX_MONITOR_DST\n");
- return ret;
- }
}
return 0;
@@ -1292,10 +1269,10 @@ static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
return 0;
}
-static int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
- int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
- const void *ptr, void *data),
- void *data)
+int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data)
{
const struct htt_tlv *tlv;
const void *begin = ptr;
@@ -1765,6 +1742,7 @@ void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
ath12k_htt_pull_ppdu_stats(ab, skb);
break;
case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
+ ath12k_debugfs_htt_ext_stats_handler(ab, skb);
break;
case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
ath12k_htt_mlo_offset_event_handler(ab, skb);
@@ -2383,8 +2361,10 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
channel_num = meta_data;
center_freq = meta_data >> 16;
- if (center_freq >= 5935 && center_freq <= 7105) {
+ if (center_freq >= ATH12K_MIN_6G_FREQ &&
+ center_freq <= ATH12K_MAX_6G_FREQ) {
rx_status->band = NL80211_BAND_6GHZ;
+ rx_status->freq = center_freq;
} else if (channel_num >= 1 && channel_num <= 14) {
rx_status->band = NL80211_BAND_2GHZ;
} else if (channel_num >= 36 && channel_num <= 173) {
@@ -2402,8 +2382,9 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
rx_desc, sizeof(*rx_desc));
}
- rx_status->freq = ieee80211_channel_to_frequency(channel_num,
- rx_status->band);
+ if (rx_status->band != NL80211_BAND_6GHZ)
+ rx_status->freq = ieee80211_channel_to_frequency(channel_num,
+ rx_status->band);
ath12k_dp_rx_h_rate(ar, rx_desc, rx_status);
}
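
For 6 GHz the hunk above now takes the frequency straight from the descriptor metadata instead of translating the channel number, because 6 GHz channel numbers collide with 2.4/5 GHz ones; the other bands still go through ieee80211_channel_to_frequency(). A standalone sketch of that decision, with the 6 GHz bounds shown as assumed values rather than the driver's ATH12K_MIN/MAX_6G_FREQ definitions:

#include <stdio.h>

#define EXAMPLE_MIN_6G_FREQ	5925	/* assumed bound, see the driver headers */
#define EXAMPLE_MAX_6G_FREQ	7125	/* assumed bound, see the driver headers */

int main(void)
{
	unsigned int meta_data = (5975u << 16) | 5u;	/* channel 5 at 5975 MHz */
	unsigned int channel_num = meta_data & 0xffff;
	unsigned int center_freq = meta_data >> 16;

	if (center_freq >= EXAMPLE_MIN_6G_FREQ &&
	    center_freq <= EXAMPLE_MAX_6G_FREQ)
		/* 6 GHz: use the reported center frequency as-is */
		printf("6 GHz, freq %u MHz\n", center_freq);
	else
		/* other bands: the driver derives the frequency from the
		 * channel number via ieee80211_channel_to_frequency()
		 */
		printf("legacy band, channel %u\n", channel_num);
	return 0;
}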
@@ -2604,6 +2585,29 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
rcu_read_unlock();
}
+static u16 ath12k_dp_rx_get_peer_id(struct ath12k_base *ab,
+ enum ath12k_peer_metadata_version ver,
+ __le32 peer_metadata)
+{
+ switch (ver) {
+ default:
+ ath12k_warn(ab, "Unknown peer metadata version: %d", ver);
+ fallthrough;
+ case ATH12K_PEER_METADATA_V0:
+ return le32_get_bits(peer_metadata,
+ RX_MPDU_DESC_META_DATA_V0_PEER_ID);
+ case ATH12K_PEER_METADATA_V1:
+ return le32_get_bits(peer_metadata,
+ RX_MPDU_DESC_META_DATA_V1_PEER_ID);
+ case ATH12K_PEER_METADATA_V1A:
+ return le32_get_bits(peer_metadata,
+ RX_MPDU_DESC_META_DATA_V1A_PEER_ID);
+ case ATH12K_PEER_METADATA_V1B:
+ return le32_get_bits(peer_metadata,
+ RX_MPDU_DESC_META_DATA_V1B_PEER_ID);
+ }
+}
+
int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
struct napi_struct *napi, int budget)
{
@@ -2632,6 +2636,8 @@ try_again:
ath12k_hal_srng_access_begin(ab, srng);
while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
+ struct rx_mpdu_desc *mpdu_info;
+ struct rx_msdu_desc *msdu_info;
enum hal_reo_dest_ring_push_reason push_reason;
u32 cookie;
@@ -2649,7 +2655,8 @@ try_again:
if (!desc_info) {
desc_info = ath12k_dp_get_rx_desc(ab, cookie);
if (!desc_info) {
- ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
+ ath12k_warn(ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
+ cookie);
continue;
}
}
@@ -2678,16 +2685,19 @@ try_again:
continue;
}
- rxcb->is_first_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+ msdu_info = &desc->rx_msdu_info;
+ mpdu_info = &desc->rx_mpdu_info;
+
+ rxcb->is_first_msdu = !!(le32_to_cpu(msdu_info->info0) &
RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
- rxcb->is_last_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+ rxcb->is_last_msdu = !!(le32_to_cpu(msdu_info->info0) &
RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
- rxcb->is_continuation = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+ rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) &
RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
rxcb->mac_id = mac_id;
- rxcb->peer_id = le32_get_bits(desc->rx_mpdu_info.peer_meta_data,
- RX_MPDU_DESC_META_DATA_PEER_ID);
- rxcb->tid = le32_get_bits(desc->rx_mpdu_info.info0,
+ rxcb->peer_id = ath12k_dp_rx_get_peer_id(ab, dp->peer_metadata_ver,
+ mpdu_info->peer_meta_data);
+ rxcb->tid = le32_get_bits(mpdu_info->info0,
RX_MPDU_DESC_INFO0_TID);
__skb_queue_tail(&msdu_list, msdu);
@@ -2762,6 +2772,7 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev
peer = ath12k_peer_find(ab, vdev_id, peer_mac);
if (!peer) {
spin_unlock_bh(&ab->base_lock);
+ crypto_free_shash(tfm);
ath12k_warn(ab, "failed to find the peer to set up fragment info\n");
return -ENOENT;
}
@@ -2991,9 +3002,10 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
struct hal_srng *srng;
dma_addr_t link_paddr, buf_paddr;
u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info;
- u32 cookie, hal_rx_desc_sz, dest_ring_info0;
+ u32 cookie, hal_rx_desc_sz, dest_ring_info0, queue_addr_hi;
int ret;
struct ath12k_rx_desc_info *desc_info;
+ enum hal_rx_buf_return_buf_manager idle_link_rbm = dp->idle_link_rbm;
u8 dst_ind;
hal_rx_desc_sz = ab->hal.hal_desc_sz;
@@ -3027,7 +3039,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
buf_paddr = dma_map_single(ab->dev, defrag_skb->data,
defrag_skb->len + skb_tailroom(defrag_skb),
- DMA_FROM_DEVICE);
+ DMA_TO_DEVICE);
if (dma_mapping_error(ab->dev, buf_paddr))
return -ENOMEM;
@@ -3071,7 +3083,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr,
cookie,
- HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST);
+ idle_link_rbm);
mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) |
u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) |
@@ -3083,13 +3095,11 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
reo_ent_ring->rx_mpdu_info.peer_meta_data =
reo_dest_ring->rx_mpdu_info.peer_meta_data;
- /* Firmware expects physical address to be filled in queue_addr_lo in
- * the MLO scenario and in case of non MLO peer meta data needs to be
- * filled.
- * TODO: Need to handle for MLO scenario.
- */
- reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data;
- reo_ent_ring->info0 = le32_encode_bits(dst_ind,
+ reo_ent_ring->queue_addr_lo = cpu_to_le32(lower_32_bits(rx_tid->paddr));
+ queue_addr_hi = upper_32_bits(rx_tid->paddr);
+ reo_ent_ring->info0 = le32_encode_bits(queue_addr_hi,
+ HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) |
+ le32_encode_bits(dst_ind,
HAL_REO_ENTR_RING_INFO0_DEST_IND);
reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn,
@@ -3113,7 +3123,7 @@ err_free_desc:
spin_unlock_bh(&dp->rx_desc_lock);
err_unmap_dma:
dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
- DMA_FROM_DEVICE);
+ DMA_TO_DEVICE);
return ret;
}
@@ -3346,7 +3356,8 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
if (!desc_info) {
desc_info = ath12k_dp_get_rx_desc(ab, cookie);
if (!desc_info) {
- ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
+ ath12k_warn(ab, "Invalid cookie in DP rx error descriptor retrieval: 0x%x\n",
+ cookie);
return -EINVAL;
}
}
@@ -3421,7 +3432,7 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
struct ath12k *ar;
dma_addr_t paddr;
bool is_frag;
- bool drop = false;
+ bool drop;
int pdev_id;
tot_n_bufs_reaped = 0;
@@ -3439,7 +3450,9 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
while (budget &&
(reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
+ drop = false;
ab->soc_stats.err_ring_pkts++;
+
ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
&desc_bank);
if (ret) {
@@ -3451,7 +3464,7 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
(paddr - link_desc_banks[desc_bank].paddr);
ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
&rbm);
- if (rbm != HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST &&
+ if (rbm != dp->idle_link_rbm &&
rbm != HAL_RX_BUF_RBM_SW3_BM &&
rbm != ab->hw_params->hal_params->rx_buf_rbm) {
ab->soc_stats.invalid_rbm++;
@@ -3765,7 +3778,8 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
if (!desc_info) {
desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie);
if (!desc_info) {
- ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
+ ath12k_warn(ab, "Invalid cookie in DP WBM rx error descriptor retrieval: 0x%x\n",
+ err_info.cookie);
continue;
}
}
@@ -3961,7 +3975,7 @@ void ath12k_dp_rx_free(struct ath12k_base *ab)
ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
if (ab->hw_params->rx_mac_buf_ring)
ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
}
@@ -3970,7 +3984,6 @@ void ath12k_dp_rx_free(struct ath12k_base *ab)
ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
- ath12k_dp_srng_cleanup(ab, &dp->tx_mon_buf_ring.refill_buf_ring);
ath12k_dp_rxdma_buf_free(ab);
}
@@ -4028,7 +4041,7 @@ int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
struct ath12k_dp *dp = &ab->dp;
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 ring_id;
- int ret;
+ int ret = 0;
u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
int i;
@@ -4054,7 +4067,7 @@ int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
* and modify the rx_desc struct
*/
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ring_id = dp->rx_mac_buf_ring[i].ring_id;
ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i,
HAL_RXDMA_BUF,
@@ -4081,7 +4094,7 @@ int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
}
if (ab->hw_params->rx_mac_buf_ring) {
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ring_id = dp->rx_mac_buf_ring[i].ring_id;
ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
i, HAL_RXDMA_BUF);
@@ -4113,15 +4126,6 @@ int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
ret);
return ret;
}
-
- ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id;
- ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
- 0, HAL_TX_MONITOR_BUF);
- if (ret) {
- ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
- ret);
- return ret;
- }
}
ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
@@ -4141,9 +4145,6 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab)
idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
- idr_init(&dp->tx_mon_buf_ring.bufs_idr);
- spin_lock_init(&dp->tx_mon_buf_ring.idr_lock);
-
ret = ath12k_dp_srng_setup(ab,
&dp->rx_refill_buf_ring.refill_buf_ring,
HAL_RXDMA_BUF, 0, 0,
@@ -4154,7 +4155,7 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab)
}
if (ab->hw_params->rx_mac_buf_ring) {
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ret = ath12k_dp_srng_setup(ab,
&dp->rx_mac_buf_ring[i],
HAL_RXDMA_BUF, 1,
@@ -4186,15 +4187,6 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab)
ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
return ret;
}
-
- ret = ath12k_dp_srng_setup(ab,
- &dp->tx_mon_buf_ring.refill_buf_ring,
- HAL_TX_MONITOR_BUF, 0, 0,
- DP_TX_MONITOR_BUF_RING_SIZE);
- if (ret) {
- ath12k_warn(ab, "failed to setup DP_TX_MONITOR_BUF_RING_SIZE\n");
- return ret;
- }
}
ret = ath12k_dp_rxdma_buf_setup(ab);
@@ -4223,7 +4215,7 @@ int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
return ret;
}
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ring_id = dp->rxdma_mon_dst_ring[i].ring_id;
ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
mac_id + i,
@@ -4234,17 +4226,6 @@ int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
i, ret);
return ret;
}
-
- ring_id = dp->tx_mon_dst_ring[i].ring_id;
- ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
- mac_id + i,
- HAL_TX_MONITOR_DST);
- if (ret) {
- ath12k_warn(ab,
- "failed to configure tx_mon_dst_ring %d %d\n",
- i, ret);
- return ret;
- }
}
out:
return 0;
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h
index 2ff421160181..eb1f92559179 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.h
@@ -139,4 +139,8 @@ ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu);
int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab);
int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab);
+int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data);
#endif /* ATH12K_DP_RX_H */
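
ath12k_dp_htt_tlv_iter() is exported here so the new debugfs HTT stats code can reuse the same TLV walker that the PPDU stats path already uses. A hedged usage sketch (the callback and wrapper names are hypothetical; the real parser lives in debugfs_htt_stats.c):

/* Illustrative callback: called once per TLV with its tag, payload length
 * and payload pointer; here it only counts the TLVs seen.
 */
static int example_htt_stats_parse(struct ath12k_base *ab, u16 tag, u16 len,
				   const void *ptr, void *data)
{
	u32 *num_tlvs = data;

	(*num_tlvs)++;
	return 0;
}

static void example_handle_ext_stats(struct ath12k_base *ab,
				     const void *payload, size_t payload_len)
{
	u32 num_tlvs = 0;
	int ret;

	ret = ath12k_dp_htt_tlv_iter(ab, payload, payload_len,
				     example_htt_stats_parse, &num_tlvs);
	if (ret)
		ath12k_warn(ab, "failed to parse HTT stats TLVs: %d\n", ret);
}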
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
index 9b6d7d72f57c..d08c04343e90 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
@@ -124,6 +124,44 @@ static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab,
HAL_TX_MSDU_EXT_INFO1_ENCRYPT_TYPE);
}
+#define HTT_META_DATA_ALIGNMENT 0x8
+
+static void *ath12k_dp_metadata_align_skb(struct sk_buff *skb, u8 tail_len)
+{
+ struct sk_buff *tail;
+ void *metadata;
+
+ if (unlikely(skb_cow_data(skb, tail_len, &tail) < 0))
+ return NULL;
+
+ metadata = pskb_put(skb, tail, tail_len);
+ memset(metadata, 0, tail_len);
+ return metadata;
+}
+
+/* Prepare HTT metadata when used with an ext MSDU */
+static int ath12k_dp_prepare_htt_metadata(struct sk_buff *skb)
+{
+ struct hal_tx_msdu_metadata *desc_ext;
+ u8 htt_desc_size;
+ /* Size rounded up to a multiple of 8 bytes */
+ u8 htt_desc_size_aligned;
+
+ htt_desc_size = sizeof(struct hal_tx_msdu_metadata);
+ htt_desc_size_aligned = ALIGN(htt_desc_size, HTT_META_DATA_ALIGNMENT);
+
+ desc_ext = ath12k_dp_metadata_align_skb(skb, htt_desc_size_aligned);
+ if (!desc_ext)
+ return -ENOMEM;
+
+ desc_ext->info0 = le32_encode_bits(1, HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_FLAG) |
+ le32_encode_bits(0, HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_TYPE) |
+ le32_encode_bits(1,
+ HAL_TX_MSDU_METADATA_INFO0_HOST_TX_DESC_POOL);
+
+ return 0;
+}
+
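
The trailer appended by ath12k_dp_prepare_htt_metadata() is sizeof(struct hal_tx_msdu_metadata) = 7 * 4 = 28 bytes, which ALIGN() rounds up to the next multiple of HTT_META_DATA_ALIGNMENT (8), i.e. 32 bytes of tailroom requested through skb_cow_data(). A standalone check of that arithmetic:

#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_ALIGNMENT	0x8	/* HTT_META_DATA_ALIGNMENT */

/* Same rounding the kernel's ALIGN() performs for a power-of-two alignment */
static uint32_t align_up(uint32_t x, uint32_t a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	uint32_t metadata_size = 7 * 4;	/* one info word plus six reserved words */

	printf("%u bytes -> %u bytes of tailroom\n",
	       metadata_size, align_up(metadata_size, EXAMPLE_ALIGNMENT));
	return 0;
}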
int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
struct sk_buff *skb)
{
@@ -145,6 +183,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
u8 ring_selector, ring_map = 0;
bool tcl_ring_retry;
bool msdu_ext_desc = false;
+ bool add_htt_metadata = false;
if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
return -ESHUTDOWN;
@@ -248,6 +287,18 @@ tcl_ring_sel:
goto fail_remove_tx_buf;
}
+ if (!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
+ !(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) &&
+ !(skb_cb->flags & ATH12K_SKB_CIPHER_SET) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ /* Add metadata for sw encrypted vlan group traffic */
+ add_htt_metadata = true;
+ msdu_ext_desc = true;
+ ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TO_FW);
+ ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW;
+ ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+ }
+
tx_desc->skb = skb;
tx_desc->mac_id = ar->pdev_idx;
ti.desc_id = tx_desc->desc_id;
@@ -269,6 +320,15 @@ tcl_ring_sel:
msg = (struct hal_tx_msdu_ext_desc *)skb_ext_desc->data;
ath12k_hal_tx_cmd_ext_desc_setup(ab, msg, &ti);
+ if (add_htt_metadata) {
+ ret = ath12k_dp_prepare_htt_metadata(skb_ext_desc);
+ if (ret < 0) {
+ ath12k_dbg(ab, ATH12K_DBG_DP_TX,
+ "Failed to add HTT metadata, dropping packet\n");
+ goto fail_unmap_dma;
+ }
+ }
+
ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data,
skb_ext_desc->len, DMA_TO_DEVICE);
ret = dma_mapping_error(ab->dev, ti.paddr);
@@ -352,15 +412,15 @@ static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab,
u8 pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
skb_cb = ATH12K_SKB_CB(msdu);
+ ar = ab->pdevs[pdev_id].ar;
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
if (skb_cb->paddr_ext_desc)
dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
- dev_kfree_skb_any(msdu);
+ ieee80211_free_txskb(ar->ah->hw, msdu);
- ar = ab->pdevs[pdev_id].ar;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
}
@@ -393,8 +453,12 @@ ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
if (ts->acked) {
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
info->flags |= IEEE80211_TX_STAT_ACK;
- info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR +
- ts->ack_rssi;
+ info->status.ack_signal = ts->ack_rssi;
+
+ if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ab->wmi_ab.svc_map))
+ info->status.ack_signal += ATH12K_DEFAULT_NOISE_FLOOR;
+
info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
} else {
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
@@ -448,6 +512,7 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
struct hal_tx_status *ts)
{
struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ar->ah;
struct ieee80211_tx_info *info;
struct ath12k_skb_cb *skb_cb;
@@ -466,12 +531,12 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
rcu_read_lock();
if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
- dev_kfree_skb_any(msdu);
+ ieee80211_free_txskb(ah->hw, msdu);
goto exit;
}
if (!skb_cb->vif) {
- dev_kfree_skb_any(msdu);
+ ieee80211_free_txskb(ah->hw, msdu);
goto exit;
}
@@ -481,17 +546,39 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
/* skip tx rate update from ieee80211_status*/
info->status.rates[0].idx = -1;
- if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
- !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
- info->flags |= IEEE80211_TX_STAT_ACK;
- info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR +
- ts->ack_rssi;
- info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
- }
+ switch (ts->status) {
+ case HAL_WBM_TQM_REL_REASON_FRAME_ACKED:
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ack_signal = ts->ack_rssi;
- if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
- (info->flags & IEEE80211_TX_CTL_NO_ACK))
- info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ab->wmi_ab.svc_map))
+ info->status.ack_signal += ATH12K_DEFAULT_NOISE_FLOOR;
+
+ info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
+ }
+ break;
+ case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX:
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ break;
+ }
+ fallthrough;
+ case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU:
+ case HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD:
+ case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES:
+ /* The failure status is due to an internal firmware tx failure,
+ * hence drop the frame; do not report the frame status to the
+ * upper layer
+ */
+ ieee80211_free_txskb(ah->hw, msdu);
+ goto exit;
+ default:
+ ath12k_dbg(ab, ATH12K_DBG_DP_TX, "tx frame is not acked status %d\n",
+ ts->status);
+ break;
+ }
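
Both completion paths now report ts->ack_rssi unchanged when the firmware advertises WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT and otherwise add the default noise floor to turn the raw value into dBm, which is consistent with ack_rssi becoming signed (s8) in hal_tx.h and dp_tx.h below. A standalone sketch of that adjustment, with the noise-floor constant shown as a placeholder rather than the driver's ATH12K_DEFAULT_NOISE_FLOOR:

#include <stdio.h>
#include <stdbool.h>

#define EXAMPLE_NOISE_FLOOR	(-95)	/* dBm, placeholder value */

/* Mirror of the ack_signal handling: the raw RSSI is relative to the noise
 * floor unless firmware already converted it to dBm in hardware.
 */
static int example_ack_signal_dbm(int ack_rssi, bool hw_db2dbm)
{
	return hw_db2dbm ? ack_rssi : ack_rssi + EXAMPLE_NOISE_FLOOR;
}

int main(void)
{
	printf("raw 30 -> %d dBm\n", example_ack_signal_dbm(30, false));
	printf("hw-converted -40 -> %d dBm\n", example_ack_signal_dbm(-40, true));
	return 0;
}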
/* NOTE: Tx rate status reporting. Tx completion status does not have
* necessary information (for example nss) to build the tx rate.
@@ -669,14 +756,6 @@ ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
*htt_ring_type = HTT_SW_TO_HW_RING;
break;
- case HAL_TX_MONITOR_BUF:
- *htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING;
- *htt_ring_type = HTT_SW_TO_HW_RING;
- break;
- case HAL_TX_MONITOR_DST:
- *htt_ring_id = HTT_TX_MON_MON2HOST_DEST_RING;
- *htt_ring_type = HTT_HW_TO_SW_RING;
- break;
default:
ath12k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
ret = -EINVAL;
@@ -854,7 +933,7 @@ int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask)
int ret;
int i;
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
skb = ath12k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
@@ -1007,6 +1086,7 @@ ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
struct htt_ext_stats_cfg_cmd *cmd;
int len = sizeof(*cmd);
int ret;
+ u32 pdev_id;
skb = ath12k_htc_alloc_skb(ab, len);
if (!skb)
@@ -1018,7 +1098,8 @@ ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
memset(cmd, 0, sizeof(*cmd));
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
- cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id;
+ pdev_id = ath12k_mac_get_target_pdev_id(ar);
+ cmd->hdr.pdev_mask = 1 << pdev_id;
cmd->hdr.stats_type = type;
cmd->cfg_param0 = cpu_to_le32(cfg_params->cfg0);
@@ -1044,13 +1125,7 @@ int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset)
struct ath12k_base *ab = ar->ab;
int ret;
- ret = ath12k_dp_tx_htt_tx_monitor_mode_ring_config(ar, reset);
- if (ret) {
- ath12k_err(ab, "failed to setup tx monitor filter %d\n", ret);
- return ret;
- }
-
- ret = ath12k_dp_tx_htt_tx_monitor_mode_ring_config(ar, reset);
+ ret = ath12k_dp_tx_htt_rx_monitor_mode_ring_config(ar, reset);
if (ret) {
ath12k_err(ab, "failed to setup rx monitor filter %d\n", ret);
return ret;
@@ -1209,31 +1284,3 @@ err_free:
dev_kfree_skb_any(skb);
return ret;
}
-
-int ath12k_dp_tx_htt_tx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
-{
- struct ath12k_base *ab = ar->ab;
- struct ath12k_dp *dp = &ab->dp;
- struct htt_tx_ring_tlv_filter tlv_filter = {0};
- int ret, ring_id;
-
- ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id;
-
- /* TODO: Need to set upstream/downstream tlv filters
- * here
- */
-
- if (ab->hw_params->rxdma1_enable) {
- ret = ath12k_dp_tx_htt_tx_filter_setup(ar->ab, ring_id, 0,
- HAL_TX_MONITOR_BUF,
- DP_RXDMA_REFILL_RING_SIZE,
- &tlv_filter);
- if (ret) {
- ath12k_err(ab,
- "failed to setup filter for monitor buf %d\n", ret);
- return ret;
- }
- }
-
- return 0;
-}
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.h b/drivers/net/wireless/ath/ath12k/dp_tx.h
index 436d77e5e9ee..55ff8cc721e3 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_TX_H
@@ -12,7 +12,7 @@
struct ath12k_dp_htt_wbm_tx_status {
bool acked;
- int ack_rssi;
+ s8 ack_rssi;
};
int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab);
@@ -36,6 +36,5 @@ int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id,
int mac_id, enum hal_ring_type ring_type,
int tx_buf_size,
struct htt_tx_ring_tlv_filter *htt_tlv_filter);
-int ath12k_dp_tx_htt_tx_monitor_mode_ring_config(struct ath12k *ar, bool reset);
int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c
index 78310da8cfe8..ca04bfae8bdc 100644
--- a/drivers/net/wireless/ath/ath12k/hal.c
+++ b/drivers/net/wireless/ath/ath12k/hal.c
@@ -1969,14 +1969,15 @@ u32 ath12k_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc
}
void ath12k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
- dma_addr_t paddr)
+ dma_addr_t paddr,
+ enum hal_rx_buf_return_buf_manager rbm)
{
desc->buf_addr_info.info0 = le32_encode_bits((paddr & HAL_ADDR_LSB_REG_MASK),
BUFFER_ADDR_INFO0_ADDR);
desc->buf_addr_info.info1 =
le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT),
BUFFER_ADDR_INFO1_ADDR) |
- le32_encode_bits(1, BUFFER_ADDR_INFO1_RET_BUF_MGR) |
+ le32_encode_bits(rbm, BUFFER_ADDR_INFO1_RET_BUF_MGR) |
le32_encode_bits(cookie, BUFFER_ADDR_INFO1_SW_COOKIE);
}
diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h
index dbb9205bfa10..8a78bb9a10bc 100644
--- a/drivers/net/wireless/ath/ath12k/hal.h
+++ b/drivers/net/wireless/ath/ath12k/hal.h
@@ -770,12 +770,12 @@ struct hal_srng_config {
* enum hal_rx_buf_return_buf_manager - manager for returned rx buffers
*
* @HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list
- * @HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST: Descriptor returned to WBM idle
- * descriptor list, where the chip 0 WBM is chosen in case of a multi-chip config
- * @HAL_RX_BUF_RBM_WBM_CHIP1_IDLE_DESC_LIST: Descriptor returned to WBM idle
- * descriptor list, where the chip 1 WBM is chosen in case of a multi-chip config
- * @HAL_RX_BUF_RBM_WBM_CHIP2_IDLE_DESC_LIST: Descriptor returned to WBM idle
- * descriptor list, where the chip 2 WBM is chosen in case of a multi-chip config
+ * @HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list, where the device 0 WBM is chosen in case of a multi-device config
+ * @HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list, where the device 1 WBM is chosen in case of a multi-device config
+ * @HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list, where the device 2 WBM is chosen in case of a multi-device config
* @HAL_RX_BUF_RBM_FW_BM: Buffer returned to FW
* @HAL_RX_BUF_RBM_SW0_BM: For ring 0 -- returned to host
* @HAL_RX_BUF_RBM_SW1_BM: For ring 1 -- returned to host
@@ -788,9 +788,9 @@ struct hal_srng_config {
enum hal_rx_buf_return_buf_manager {
HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST,
- HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST,
- HAL_RX_BUF_RBM_WBM_CHIP1_IDLE_DESC_LIST,
- HAL_RX_BUF_RBM_WBM_CHIP2_IDLE_DESC_LIST,
+ HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST,
+ HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST,
+ HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST,
HAL_RX_BUF_RBM_FW_BM,
HAL_RX_BUF_RBM_SW0_BM,
HAL_RX_BUF_RBM_SW1_BM,
@@ -1113,7 +1113,8 @@ dma_addr_t ath12k_hal_srng_get_tp_addr(struct ath12k_base *ab,
dma_addr_t ath12k_hal_srng_get_hp_addr(struct ath12k_base *ab,
struct hal_srng *srng);
void ath12k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
- dma_addr_t paddr);
+ dma_addr_t paddr,
+ enum hal_rx_buf_return_buf_manager rbm);
u32 ath12k_hal_ce_get_desc_size(enum hal_ce_desc type);
void ath12k_hal_ce_src_set_desc(struct hal_ce_srng_src_desc *desc, dma_addr_t paddr,
u32 len, u32 id, u8 byte_swap_data);
diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h
index 63340256d3f6..739f73370015 100644
--- a/drivers/net/wireless/ath/ath12k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath12k/hal_desc.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -597,8 +597,30 @@ struct hal_tlv_64_hdr {
#define RX_MPDU_DESC_INFO0_MPDU_QOS_CTRL_VALID BIT(27)
#define RX_MPDU_DESC_INFO0_TID GENMASK(31, 28)
-/* TODO revisit after meta data is concluded */
-#define RX_MPDU_DESC_META_DATA_PEER_ID GENMASK(15, 0)
+/* Peer Metadata classification */
+
+/* Version 0 */
+#define RX_MPDU_DESC_META_DATA_V0_PEER_ID GENMASK(15, 0)
+#define RX_MPDU_DESC_META_DATA_V0_VDEV_ID GENMASK(23, 16)
+
+/* Version 1 */
+#define RX_MPDU_DESC_META_DATA_V1_PEER_ID GENMASK(13, 0)
+#define RX_MPDU_DESC_META_DATA_V1_LOGICAL_LINK_ID GENMASK(15, 14)
+#define RX_MPDU_DESC_META_DATA_V1_VDEV_ID GENMASK(23, 16)
+#define RX_MPDU_DESC_META_DATA_V1_LMAC_ID GENMASK(25, 24)
+#define RX_MPDU_DESC_META_DATA_V1_DEVICE_ID GENMASK(28, 26)
+
+/* Version 1A */
+#define RX_MPDU_DESC_META_DATA_V1A_PEER_ID GENMASK(13, 0)
+#define RX_MPDU_DESC_META_DATA_V1A_VDEV_ID GENMASK(21, 14)
+#define RX_MPDU_DESC_META_DATA_V1A_LOGICAL_LINK_ID GENMASK(25, 22)
+#define RX_MPDU_DESC_META_DATA_V1A_DEVICE_ID GENMASK(28, 26)
+
+/* Version 1B */
+#define RX_MPDU_DESC_META_DATA_V1B_PEER_ID GENMASK(13, 0)
+#define RX_MPDU_DESC_META_DATA_V1B_VDEV_ID GENMASK(21, 14)
+#define RX_MPDU_DESC_META_DATA_V1B_HW_LINK_ID GENMASK(25, 22)
+#define RX_MPDU_DESC_META_DATA_V1B_DEVICE_ID GENMASK(28, 26)
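
The four layouts above keep the peer ID and vdev ID in the same 32-bit metadata word but at different widths and positions; the rx path selects the right mask set through the negotiated peer_metadata_ver (see ath12k_dp_rx_get_peer_id() in dp_rx.c). A standalone decode of one example word under the V0 and V1 layouts, with the bit positions copied from the definitions above:

#include <stdio.h>
#include <stdint.h>

/* Field extraction equivalent to the kernel's GENMASK()/le32_get_bits() */
static uint32_t get_bits(uint32_t word, unsigned int hi, unsigned int lo)
{
	return (word >> lo) & ((1u << (hi - lo + 1)) - 1);
}

int main(void)
{
	uint32_t meta = 0x0045d234;	/* example peer metadata word */

	/* Version 0: peer_id in bits 15:0, vdev_id in bits 23:16 */
	printf("v0: peer_id %u vdev_id %u\n",
	       get_bits(meta, 15, 0), get_bits(meta, 23, 16));

	/* Version 1: peer_id shrinks to bits 13:0, vdev_id stays in 23:16 */
	printf("v1: peer_id %u vdev_id %u\n",
	       get_bits(meta, 13, 0), get_bits(meta, 23, 16));
	return 0;
}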
struct rx_mpdu_desc {
__le32 info0; /* %RX_MPDU_DESC_INFO */
@@ -2048,6 +2070,19 @@ struct hal_wbm_release_ring {
* fw with fw_reason2.
* @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3: Remove command initiated by
* fw with fw_reason3.
+ * @HAL_WBM_TQM_REL_REASON_CMD_DISABLE_QUEUE: Remove command initiated by
+ * fw with disable queue.
+ * @HAL_WBM_TQM_REL_REASON_CMD_TILL_NONMATCHING: Remove command initiated by
+ * fw to remove all MPDUs until the first non-match.
+ * @HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD: Dropped due to drop threshold
+ * criteria
+ * @HAL_WBM_TQM_REL_REASON_DROP_LINK_DESC_UNAVAIL: Dropped due to link desc
+ * not available
+ * @HAL_WBM_TQM_REL_REASON_DROP_OR_INVALID_MSDU: Dropped due to drop bit set or
+ * null flow
+ * @HAL_WBM_TQM_REL_REASON_MULTICAST_DROP: Dropped due to mcast drop set for VDEV
+ * @HAL_WBM_TQM_REL_REASON_VDEV_MISMATCH_DROP: Dropped due to being set with
+ * 'TCL_drop_reason'
*/
enum hal_wbm_tqm_rel_reason {
HAL_WBM_TQM_REL_REASON_FRAME_ACKED,
@@ -2058,6 +2093,13 @@ enum hal_wbm_tqm_rel_reason {
HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1,
HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2,
HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3,
+ HAL_WBM_TQM_REL_REASON_CMD_DISABLE_QUEUE,
+ HAL_WBM_TQM_REL_REASON_CMD_TILL_NONMATCHING,
+ HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD,
+ HAL_WBM_TQM_REL_REASON_DROP_LINK_DESC_UNAVAIL,
+ HAL_WBM_TQM_REL_REASON_DROP_OR_INVALID_MSDU,
+ HAL_WBM_TQM_REL_REASON_MULTICAST_DROP,
+ HAL_WBM_TQM_REL_REASON_VDEV_MISMATCH_DROP,
};
struct hal_wbm_buffer_ring {
@@ -2964,4 +3006,29 @@ struct hal_mon_dest_desc {
* updated by SRNG.
*/
+#define HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_FLAG BIT(8)
+#define HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_TYPE GENMASK(16, 15)
+#define HAL_TX_MSDU_METADATA_INFO0_HOST_TX_DESC_POOL BIT(31)
+
+struct hal_tx_msdu_metadata {
+ __le32 info0;
+ __le32 rsvd0[6];
+} __packed;
+
+/* hal_tx_msdu_metadata
+ * valid_encrypt_type
+ * if set, encrypt type is valid
+ * encrypt_type
+ * 0 = NO_ENCRYPT,
+ * 1 = ENCRYPT,
+ * 2 ~ 3 - Reserved
+ * host_tx_desc_pool
+ * If set, Firmware allocates tx_descriptors
+ * in WAL_BUFFERID_TX_HOST_DATA_EXP, instead
+ * of WAL_BUFFERID_TX_TCL_DATA_EXP.
+ * Use cases:
+ * Any time firmware uses TQM-BYPASS for Data
+ * TID, firmware expects the host to set this bit.
+ */
+
#endif /* ATH12K_HAL_DESC_H */
diff --git a/drivers/net/wireless/ath/ath12k/hal_tx.h b/drivers/net/wireless/ath/ath12k/hal_tx.h
index 7c837094a6f7..3cf5973771d7 100644
--- a/drivers/net/wireless/ath/ath12k/hal_tx.h
+++ b/drivers/net/wireless/ath/ath12k/hal_tx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HAL_TX_H
@@ -57,7 +57,7 @@ struct hal_tx_info {
struct hal_tx_status {
enum hal_wbm_rel_src_module buf_rel_source;
enum hal_wbm_tqm_rel_reason status;
- u8 ack_rssi;
+ s8 ack_rssi;
u32 flags; /* %HAL_TX_STATUS_FLAGS_ */
u32 ppdu_id;
u8 try_cnt;
diff --git a/drivers/net/wireless/ath/ath12k/hif.h b/drivers/net/wireless/ath/ath12k/hif.h
index 7f0926fe751d..0e53ec269fa4 100644
--- a/drivers/net/wireless/ath/ath12k/hif.h
+++ b/drivers/net/wireless/ath/ath12k/hif.h
@@ -30,6 +30,7 @@ struct ath12k_hif_ops {
void (*ce_irq_enable)(struct ath12k_base *ab);
void (*ce_irq_disable)(struct ath12k_base *ab);
void (*get_ce_msi_idx)(struct ath12k_base *ab, u32 ce_id, u32 *msi_idx);
+ int (*panic_handler)(struct ath12k_base *ab);
};
static inline int ath12k_hif_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
@@ -147,4 +148,12 @@ static inline void ath12k_hif_power_down(struct ath12k_base *ab, bool is_suspend
ab->hif.ops->power_down(ab, is_suspend);
}
+static inline int ath12k_hif_panic_handler(struct ath12k_base *ab)
+{
+ if (!ab->hif.ops->panic_handler)
+ return NOTIFY_DONE;
+
+ return ab->hif.ops->panic_handler(ab);
+}
+
#endif /* ATH12K_HIF_H */
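
The new panic_handler hif op gives the bus layer a hook on kernel panic, with callers falling back to NOTIFY_DONE when a bus does not implement it. A hedged sketch of how the core side might register such a hook on the panic notifier chain; the notifier_block member name (panic_nb) and the registration helper are assumptions for illustration, not the patch's actual core.c code:

#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include "core.h"
#include "hif.h"

/* Illustrative notifier: forwards the panic event to the bus layer via the
 * new hif op and returns its verdict to the notifier chain. Assumes a
 * "panic_nb" notifier_block member in struct ath12k_base.
 */
static int example_ath12k_panic_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct ath12k_base *ab = container_of(nb, struct ath12k_base, panic_nb);

	return ath12k_hif_panic_handler(ab);
}

static int example_ath12k_panic_register(struct ath12k_base *ab)
{
	ab->panic_nb.notifier_call = example_ath12k_panic_notifier;

	return atomic_notifier_chain_register(&panic_notifier_list,
					      &ab->panic_nb);
}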
diff --git a/drivers/net/wireless/ath/ath12k/htc.c b/drivers/net/wireless/ath/ath12k/htc.c
index 2f2230f565bb..d13616bf07f4 100644
--- a/drivers/net/wireless/ath/ath12k/htc.c
+++ b/drivers/net/wireless/ath/ath12k/htc.c
@@ -244,6 +244,11 @@ static void ath12k_htc_suspend_complete(struct ath12k_base *ab, bool ack)
complete(&ab->htc_suspend);
}
+static void ath12k_htc_wakeup_from_suspend(struct ath12k_base *ab)
+{
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot wakeup from suspend is received\n");
+}
+
void ath12k_htc_rx_completion_handler(struct ath12k_base *ab,
struct sk_buff *skb)
{
@@ -349,6 +354,7 @@ void ath12k_htc_rx_completion_handler(struct ath12k_base *ab,
ath12k_htc_suspend_complete(ab, false);
break;
case ATH12K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID:
+ ath12k_htc_wakeup_from_suspend(ab);
break;
default:
ath12k_warn(ab, "ignoring unsolicited htc ep0 event %u\n",
diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
index f4c827015821..2e11ea763574 100644
--- a/drivers/net/wireless/ath/ath12k/hw.c
+++ b/drivers/net/wireless/ath/ath12k/hw.c
@@ -544,9 +544,6 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = {
},
.rx_mon_dest = {
0, 0, 0,
- ATH12K_RX_MON_RING_MASK_0,
- ATH12K_RX_MON_RING_MASK_1,
- ATH12K_RX_MON_RING_MASK_2,
},
.rx = {
0, 0, 0, 0,
@@ -572,16 +569,15 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = {
ATH12K_HOST2RXDMA_RING_MASK_0,
},
.tx_mon_dest = {
- ATH12K_TX_MON_RING_MASK_0,
- ATH12K_TX_MON_RING_MASK_1,
+ 0, 0, 0,
},
};
static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850 = {
.tx = {
ATH12K_TX_RING_MASK_0,
+ ATH12K_TX_RING_MASK_1,
ATH12K_TX_RING_MASK_2,
- ATH12K_TX_RING_MASK_4,
},
.rx_mon_dest = {
},
@@ -884,14 +880,15 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.hal_params = &ath12k_hw_hal_params_qcn9274,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 1,
+ .num_rxdma_per_pdev = 1,
.num_rxdma_dst_ring = 0,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_MESH_POINT),
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+ BIT(NL80211_IFTYPE_AP_VLAN),
.supports_monitor = false,
.idle_ps = false,
@@ -926,6 +923,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.supports_sta_ps = false,
.acpi_guid = NULL,
+ .supports_dynamic_smps_6ghz = true,
},
{
.name = "wcn7850 hw2.0",
@@ -956,7 +954,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.hal_params = &ath12k_hw_hal_params_wcn7850,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 2,
+ .num_rxdma_per_pdev = 2,
.num_rxdma_dst_ring = 1,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
@@ -1001,6 +999,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.supports_sta_ps = true,
.acpi_guid = &wcn7850_uuid,
+ .supports_dynamic_smps_6ghz = false,
},
{
.name = "qcn9274 hw2.0",
@@ -1029,14 +1028,15 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.hal_params = &ath12k_hw_hal_params_qcn9274,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 1,
+ .num_rxdma_per_pdev = 1,
.num_rxdma_dst_ring = 0,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_MESH_POINT),
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+ BIT(NL80211_IFTYPE_AP_VLAN),
.supports_monitor = false,
.idle_ps = false,
@@ -1071,6 +1071,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.supports_sta_ps = false,
.acpi_guid = NULL,
+ .supports_dynamic_smps_6ghz = true,
},
};
diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
index 3f450ee93f34..e792eb6b249b 100644
--- a/drivers/net/wireless/ath/ath12k/hw.h
+++ b/drivers/net/wireless/ath/ath12k/hw.h
@@ -78,8 +78,7 @@
#define TARGET_NUM_WDS_ENTRIES 32
#define TARGET_DMA_BURST_SIZE 1
#define TARGET_RX_BATCHMODE 1
-#define TARGET_RX_PEER_METADATA_VER_V1A 2
-#define TARGET_RX_PEER_METADATA_VER_V1B 3
+#define TARGET_EMA_MAX_PROFILE_PERIOD 8
#define ATH12K_HW_DEFAULT_QUEUE 0
#define ATH12K_HW_MAX_QUEUES 4
@@ -174,7 +173,7 @@ struct ath12k_hw_params {
const struct ath12k_hw_hal_params *hal_params;
bool rxdma1_enable:1;
- int num_rxmda_per_pdev;
+ int num_rxdma_per_pdev;
int num_rxdma_dst_ring;
bool rx_mac_buf_ring:1;
bool vdev_start_delay:1;
@@ -215,6 +214,7 @@ struct ath12k_hw_params {
bool supports_sta_ps;
const guid_t *acpi_guid;
+ bool supports_dynamic_smps_6ghz;
};
struct ath12k_hw_ops {
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 805cb084484a..8106297f0bc1 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -6,6 +6,7 @@
#include <net/mac80211.h>
#include <linux/etherdevice.h>
+
#include "mac.h"
#include "core.h"
#include "debug.h"
@@ -15,6 +16,8 @@
#include "dp_rx.h"
#include "peer.h"
#include "debugfs.h"
+#include "hif.h"
+#include "wow.h"
#define CHAN2G(_channel, _freq, _flags) { \
.band = NL80211_BAND_2GHZ, \
@@ -91,6 +94,10 @@ static const struct ieee80211_channel ath12k_5ghz_channels[] = {
};
static const struct ieee80211_channel ath12k_6ghz_channels[] = {
+ /* Operating Class 136 */
+ CHAN6G(2, 5935, 0),
+
+ /* Operating Classes 131-135 */
CHAN6G(1, 5955, 0),
CHAN6G(5, 5975, 0),
CHAN6G(9, 5995, 0),
@@ -666,6 +673,82 @@ static struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
return NULL;
}
+static struct ath12k_vif *ath12k_mac_get_vif_up(struct ath12k *ar)
+{
+ struct ath12k_vif *arvif;
+
+ lockdep_assert_held(&ar->conf_mutex);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_up)
+ return arvif;
+ }
+
+ return NULL;
+}
+
+static bool ath12k_mac_band_match(enum nl80211_band band1, enum WMI_HOST_WLAN_BAND band2)
+{
+ switch (band1) {
+ case NL80211_BAND_2GHZ:
+ if (band2 & WMI_HOST_WLAN_2G_CAP)
+ return true;
+ break;
+ case NL80211_BAND_5GHZ:
+ case NL80211_BAND_6GHZ:
+ if (band2 & WMI_HOST_WLAN_5G_CAP)
+ return true;
+ break;
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+static u8 ath12k_mac_get_target_pdev_id_from_vif(struct ath12k_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+ struct ath12k_base *ab = ar->ab;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ u8 pdev_id = ab->fw_pdev[0].pdev_id;
+ int i;
+
+ if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+ return pdev_id;
+
+ band = def.chan->band;
+
+ for (i = 0; i < ab->fw_pdev_count; i++) {
+ if (ath12k_mac_band_match(band, ab->fw_pdev[i].supported_bands))
+ return ab->fw_pdev[i].pdev_id;
+ }
+
+ return pdev_id;
+}
+
+u8 ath12k_mac_get_target_pdev_id(struct ath12k *ar)
+{
+ struct ath12k_vif *arvif;
+ struct ath12k_base *ab = ar->ab;
+
+ if (!ab->hw_params->single_pdev_only)
+ return ar->pdev->pdev_id;
+
+ arvif = ath12k_mac_get_vif_up(ar);
+
+ /* fw_pdev array has pdev ids derived from phy capability
+ * service ready event (pdev_and_hw_link_ids).
+ * If no vif is active, return default first index.
+ */
+ if (!arvif)
+ return ar->ab->fw_pdev[0].pdev_id;
+
+ /* If active vif is found, return the pdev id matching chandef band */
+ return ath12k_mac_get_target_pdev_id_from_vif(arvif);
+}
+
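
On single-pdev firmware one host pdev fronts multiple firmware pdevs split by band, so ath12k_mac_get_target_pdev_id() resolves the firmware pdev whose supported_bands match the band of the currently active vif (falling back to fw_pdev[0]); the HTT ext-stats request in dp_tx.c above now derives its pdev_mask from this helper. A minimal usage sketch (the wrapper name is hypothetical):

/* Illustrative only: build the pdev mask for an HTT host-to-target message
 * so it reaches the firmware pdev serving the active vif's band.
 */
static u32 example_target_pdev_mask(struct ath12k *ar)
{
	u8 pdev_id = ath12k_mac_get_target_pdev_id(ar);

	return 1U << pdev_id;
}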
static void ath12k_pdev_caps_update(struct ath12k *ar)
{
struct ath12k_base *ab = ar->ab;
@@ -863,9 +946,12 @@ static int ath12k_mac_vdev_setup_sync(struct ath12k *ar)
static int ath12k_monitor_vdev_up(struct ath12k *ar, int vdev_id)
{
+ struct ath12k_wmi_vdev_up_params params = {};
int ret;
- ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+ params.vdev_id = vdev_id;
+ params.bssid = ar->mac_addr;
+ ret = ath12k_wmi_vdev_up(ar, &params);
if (ret) {
ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
vdev_id, ret);
@@ -882,6 +968,7 @@ static int ath12k_mac_monitor_vdev_start(struct ath12k *ar, int vdev_id,
{
struct ieee80211_channel *channel;
struct wmi_vdev_start_req_arg arg = {};
+ struct ath12k_wmi_vdev_up_params params = {};
int ret;
lockdep_assert_held(&ar->conf_mutex);
@@ -922,7 +1009,9 @@ static int ath12k_mac_monitor_vdev_start(struct ath12k *ar, int vdev_id,
return ret;
}
- ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+ params.vdev_id = vdev_id;
+ params.bssid = ar->mac_addr;
+ ret = ath12k_wmi_vdev_up(ar, &params);
if (ret) {
ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
vdev_id, ret);
@@ -1289,37 +1378,188 @@ static int ath12k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
return 0;
}
+static void ath12k_mac_set_arvif_ies(struct ath12k_vif *arvif, struct sk_buff *bcn,
+ u8 bssid_index, bool *nontx_profile_found)
+{
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)bcn->data;
+ const struct element *elem, *nontx, *index, *nie;
+ const u8 *start, *tail;
+ u16 rem_len;
+ u8 i;
+
+ start = bcn->data + ieee80211_get_hdrlen_from_skb(bcn) + sizeof(mgmt->u.beacon);
+ tail = skb_tail_pointer(bcn);
+ rem_len = tail - start;
+
+ arvif->rsnie_present = false;
+ arvif->wpaie_present = false;
+
+ if (cfg80211_find_ie(WLAN_EID_RSN, start, rem_len))
+ arvif->rsnie_present = true;
+ if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPA,
+ start, rem_len))
+ arvif->wpaie_present = true;
+
+ /* Return from here for the transmitted profile */
+ if (!bssid_index)
+ return;
+
+ /* Initial rsnie_present for the nontransmitted profile is set to be the same as that
+ * of the transmitted profile. It will be changed if security configurations are
+ * different.
+ */
+ *nontx_profile_found = false;
+ for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, start, rem_len) {
+ /* Fixed minimum MBSSID element length with at least one
+ * nontransmitted BSSID profile is 12 bytes, as given below:
+ * 1 (max BSSID indicator) +
+ * 2 (Nontransmitted BSSID profile: Subelement ID + length) +
+ * 4 (Nontransmitted BSSID Capabilities: tag + length + info) +
+ * 2 (Nontransmitted BSSID SSID: tag + length) +
+ * 3 (Nontransmitted BSSID Index: tag + length + BSSID index)
+ */
+ if (elem->datalen < 12 || elem->data[0] < 1)
+ continue; /* Max BSSID indicator must be >=1 */
+
+ for_each_element(nontx, elem->data + 1, elem->datalen - 1) {
+ start = nontx->data;
+
+ if (nontx->id != 0 || nontx->datalen < 4)
+ continue; /* Invalid nontransmitted profile */
+
+ if (nontx->data[0] != WLAN_EID_NON_TX_BSSID_CAP ||
+ nontx->data[1] != 2) {
+ continue; /* Missing nontransmitted BSS capabilities */
+ }
+
+ if (nontx->data[4] != WLAN_EID_SSID)
+ continue; /* Missing SSID for nontransmitted BSS */
+
+ index = cfg80211_find_elem(WLAN_EID_MULTI_BSSID_IDX,
+ start, nontx->datalen);
+ if (!index || index->datalen < 1 || index->data[0] == 0)
+ continue; /* Invalid MBSSID Index element */
+
+ if (index->data[0] == bssid_index) {
+ *nontx_profile_found = true;
+ if (cfg80211_find_ie(WLAN_EID_RSN,
+ nontx->data,
+ nontx->datalen)) {
+ arvif->rsnie_present = true;
+ return;
+ } else if (!arvif->rsnie_present) {
+ return; /* Both tx and nontx BSS are open */
+ }
+
+ nie = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
+ nontx->data,
+ nontx->datalen);
+ if (!nie || nie->datalen < 2)
+ return; /* Invalid non-inheritance element */
+
+ for (i = 1; i < nie->datalen - 1; i++) {
+ if (nie->data[i] == WLAN_EID_RSN) {
+ arvif->rsnie_present = false;
+ break;
+ }
+ }
+
+ return;
+ }
+ }
+ }
+}
+
+static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_vif *arvif)
+{
+ struct ieee80211_bss_conf *bss_conf = &arvif->vif->bss_conf;
+ struct ath12k_wmi_bcn_tmpl_ema_arg ema_args;
+ struct ieee80211_ema_beacons *beacons;
+ struct ath12k_vif *tx_arvif;
+ bool nontx_profile_found = false;
+ int ret = 0;
+ u8 i;
+
+ tx_arvif = ath12k_vif_to_arvif(arvif->vif->mbssid_tx_vif);
+ beacons = ieee80211_beacon_get_template_ema_list(ath12k_ar_to_hw(tx_arvif->ar),
+ tx_arvif->vif, 0);
+ if (!beacons || !beacons->cnt) {
+ ath12k_warn(arvif->ar->ab,
+ "failed to get ema beacon templates from mac80211\n");
+ return -EPERM;
+ }
+
+ if (tx_arvif == arvif)
+ ath12k_mac_set_arvif_ies(arvif, beacons->bcn[0].skb, 0, NULL);
+
+ for (i = 0; i < beacons->cnt; i++) {
+ if (tx_arvif != arvif && !nontx_profile_found)
+ ath12k_mac_set_arvif_ies(arvif, beacons->bcn[i].skb,
+ bss_conf->bssid_index,
+ &nontx_profile_found);
+
+ ema_args.bcn_cnt = beacons->cnt;
+ ema_args.bcn_index = i;
+ ret = ath12k_wmi_bcn_tmpl(tx_arvif->ar, tx_arvif->vdev_id,
+ &beacons->bcn[i].offs,
+ beacons->bcn[i].skb, &ema_args);
+ if (ret) {
+ ath12k_warn(tx_arvif->ar->ab,
+ "failed to set ema beacon template id %i error %d\n",
+ i, ret);
+ break;
+ }
+ }
+
+ if (tx_arvif != arvif && !nontx_profile_found)
+ ath12k_warn(arvif->ar->ab,
+ "nontransmitted bssid index %u not found in beacon template\n",
+ bss_conf->bssid_index);
+
+ ieee80211_beacon_free_ema_list(beacons);
+ return ret;
+}
+
static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif)
{
+ struct ath12k_vif *tx_arvif = arvif;
struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
- struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct ieee80211_vif *vif = arvif->vif;
struct ieee80211_mutable_offsets offs = {};
+ bool nontx_profile_found = false;
struct sk_buff *bcn;
- struct ieee80211_mgmt *mgmt;
- u8 *ies;
int ret;
if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
return 0;
- bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0);
+ if (vif->mbssid_tx_vif) {
+ tx_arvif = ath12k_vif_to_arvif(vif->mbssid_tx_vif);
+ if (tx_arvif != arvif && arvif->is_up)
+ return 0;
+
+ if (vif->bss_conf.ema_ap)
+ return ath12k_mac_setup_bcn_tmpl_ema(arvif);
+ }
+
+ bcn = ieee80211_beacon_get_template(ath12k_ar_to_hw(tx_arvif->ar), tx_arvif->vif,
+ &offs, 0);
if (!bcn) {
ath12k_warn(ab, "failed to get beacon template from mac80211\n");
return -EPERM;
}
- ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn);
- ies += sizeof(mgmt->u.beacon);
-
- if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies)))
- arvif->rsnie_present = true;
-
- if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
- WLAN_OUI_TYPE_MICROSOFT_WPA,
- ies, (skb_tail_pointer(bcn) - ies)))
- arvif->wpaie_present = true;
+ if (tx_arvif == arvif) {
+ ath12k_mac_set_arvif_ies(arvif, bcn, 0, NULL);
+ } else {
+ ath12k_mac_set_arvif_ies(arvif, bcn,
+ arvif->vif->bss_conf.bssid_index,
+ &nontx_profile_found);
+ if (!nontx_profile_found)
+ ath12k_warn(ab,
+ "nontransmitted profile not found in beacon template\n");
+ }
if (arvif->vif->type == NL80211_IFTYPE_AP && arvif->vif->p2p) {
ret = ath12k_mac_setup_bcn_p2p_ie(arvif, bcn);
@@ -1344,7 +1584,7 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif)
}
}
- ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
+ ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn, NULL);
if (ret)
ath12k_warn(ab, "failed to submit beacon template command: %d\n",
@@ -1358,6 +1598,7 @@ free_bcn_skb:
static void ath12k_control_beaconing(struct ath12k_vif *arvif,
struct ieee80211_bss_conf *info)
{
+ struct ath12k_wmi_vdev_up_params params = {};
struct ath12k *ar = arvif->ar;
int ret;
@@ -1385,8 +1626,15 @@ static void ath12k_control_beaconing(struct ath12k_vif *arvif,
ether_addr_copy(arvif->bssid, info->bssid);
- ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
- arvif->bssid);
+ params.vdev_id = arvif->vdev_id;
+ params.aid = arvif->aid;
+ params.bssid = arvif->bssid;
+ if (arvif->vif->mbssid_tx_vif) {
+ params.tx_bssid = ath12k_vif_to_arvif(arvif->vif->mbssid_tx_vif)->bssid;
+ params.nontx_profile_idx = info->bssid_index;
+ params.nontx_profile_cnt = 1 << info->bssid_indicator;
+ }
+ ret = ath12k_wmi_vdev_up(arvif->ar, &params);
if (ret) {
ath12k_warn(ar->ab, "failed to bring up vdev %d: %i\n",
arvif->vdev_id, ret);
@@ -1881,7 +2129,9 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
{
const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
int i;
- u8 ampdu_factor, rx_mcs_80, rx_mcs_160, max_nss;
+ u8 ampdu_factor, max_nss;
+ u8 rx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ u8 rx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
u16 mcs_160_map, mcs_80_map;
bool support_160;
u16 v;
@@ -2028,17 +2278,88 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
}
}
+static void ath12k_peer_assoc_h_he_6ghz(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ u8 ampdu_factor, mpdu_density;
+
+ if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+
+ if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->deflink.he_6ghz_capa.capa)
+ return;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ arg->bw_40 = true;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ arg->bw_80 = true;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ arg->bw_160 = true;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320)
+ arg->bw_320 = true;
+
+ arg->peer_he_caps_6ghz = le16_to_cpu(sta->deflink.he_6ghz_capa.capa);
+
+ mpdu_density = u32_get_bits(arg->peer_he_caps_6ghz,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
+ arg->peer_mpdu_density = ath12k_parse_mpdudensity(mpdu_density);
+
+ /* From IEEE Std 802.11ax-2021 - Section 10.12.2: An HE STA shall be capable of
+ * receiving A-MPDU where the A-MPDU pre-EOF padding length is up to the value
+ * indicated by the Maximum A-MPDU Length Exponent Extension field in the HE
+ * Capabilities element and the Maximum A-MPDU Length Exponent field in HE 6 GHz
+ * Band Capabilities element in the 6 GHz band.
+ *
+ * Here, we are extracting the Max A-MPDU Exponent Extension from HE caps and
+ * factor is the Maximum A-MPDU Length Exponent from HE 6 GHZ Band capability.
+ */
+ ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3],
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) +
+ u32_get_bits(arg->peer_he_caps_6ghz,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
+
+ arg->peer_max_mpdu = (1u << (IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1;
+}
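
As a worked example of the exponent math above (assuming mac80211's IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR is 13): an HE MAC cap extension of 2 plus a 6 GHz band exponent of 1 gives ampdu_factor 3, so peer_max_mpdu becomes (1 << 16) - 1 = 65535 octets. A small standalone check:

#include <stdio.h>

int main(void)
{
	unsigned int he_mac_ext = 2;   /* MAX_AMPDU_LEN_EXP from HE MAC cap info[3] */
	unsigned int he_6ghz_exp = 1;  /* MAX_AMPDU_LEN_EXP from the 6 GHz band caps */
	unsigned int ampdu_factor = he_mac_ext + he_6ghz_exp;
	unsigned int base_factor = 13; /* assumed IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR */
	unsigned int peer_max_mpdu = (1u << (base_factor + ampdu_factor)) - 1;

	printf("peer_max_mpdu = %u octets\n", peer_max_mpdu); /* prints 65535 */
	return 0;
}
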
+
+static int ath12k_get_smps_from_capa(const struct ieee80211_sta_ht_cap *ht_cap,
+ const struct ieee80211_he_6ghz_capa *he_6ghz_capa,
+ int *smps)
+{
+ if (ht_cap->ht_supported)
+ *smps = u16_get_bits(ht_cap->cap, IEEE80211_HT_CAP_SM_PS);
+ else
+ *smps = le16_get_bits(he_6ghz_capa->capa,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
+
+ if (*smps >= ARRAY_SIZE(ath12k_smps_map))
+ return -EINVAL;
+
+ return 0;
+}
+
static void ath12k_peer_assoc_h_smps(struct ieee80211_sta *sta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
+ const struct ieee80211_he_6ghz_capa *he_6ghz_capa = &sta->deflink.he_6ghz_capa;
const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
int smps;
- if (!ht_cap->ht_supported)
+ if (!ht_cap->ht_supported && !he_6ghz_capa->capa)
return;
- smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
- smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+ if (ath12k_get_smps_from_capa(ht_cap, he_6ghz_capa, &smps))
+ return;
switch (smps) {
case WLAN_HT_CAP_SM_PS_STATIC:
@@ -2500,6 +2821,7 @@ static void ath12k_peer_assoc_prepare(struct ath12k *ar,
ath12k_peer_assoc_h_ht(ar, vif, sta, arg);
ath12k_peer_assoc_h_vht(ar, vif, sta, arg);
ath12k_peer_assoc_h_he(ar, vif, sta, arg);
+ ath12k_peer_assoc_h_he_6ghz(ar, vif, sta, arg);
ath12k_peer_assoc_h_eht(ar, vif, sta, arg);
ath12k_peer_assoc_h_qos(ar, vif, sta, arg);
ath12k_peer_assoc_h_phymode(ar, vif, sta, arg);
@@ -2510,18 +2832,17 @@ static void ath12k_peer_assoc_prepare(struct ath12k *ar,
static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_vif *arvif,
const u8 *addr,
- const struct ieee80211_sta_ht_cap *ht_cap)
+ const struct ieee80211_sta_ht_cap *ht_cap,
+ const struct ieee80211_he_6ghz_capa *he_6ghz_capa)
{
- int smps;
+ int smps, ret = 0;
- if (!ht_cap->ht_supported)
+ if (!ht_cap->ht_supported && !he_6ghz_capa)
return 0;
- smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
- smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
-
- if (smps >= ARRAY_SIZE(ath12k_smps_map))
- return -EINVAL;
+ ret = ath12k_get_smps_from_capa(ht_cap, he_6ghz_capa, &smps);
+ if (ret < 0)
+ return ret;
return ath12k_wmi_set_peer_param(ar, addr, arvif->vdev_id,
WMI_PEER_MIMO_PS_STATE,
@@ -2533,6 +2854,7 @@ static void ath12k_bss_assoc(struct ath12k *ar,
struct ieee80211_bss_conf *bss_conf)
{
struct ieee80211_vif *vif = arvif->vif;
+ struct ath12k_wmi_vdev_up_params params = {};
struct ath12k_wmi_peer_assoc_arg peer_arg;
struct ieee80211_sta *ap_sta;
struct ath12k_peer *peer;
@@ -2572,7 +2894,8 @@ static void ath12k_bss_assoc(struct ath12k *ar,
}
ret = ath12k_setup_peer_smps(ar, arvif, bss_conf->bssid,
- &ap_sta->deflink.ht_cap);
+ &ap_sta->deflink.ht_cap,
+ &ap_sta->deflink.he_6ghz_capa);
if (ret) {
ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -2584,7 +2907,10 @@ static void ath12k_bss_assoc(struct ath12k *ar,
arvif->aid = vif->cfg.aid;
ether_addr_copy(arvif->bssid, bss_conf->bssid);
- ret = ath12k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
+ params.vdev_id = arvif->vdev_id;
+ params.aid = arvif->aid;
+ params.bssid = arvif->bssid;
+ ret = ath12k_wmi_vdev_up(ar, &params);
if (ret) {
ath12k_warn(ar->ab, "failed to set vdev %d up: %d\n",
arvif->vdev_id, ret);
@@ -2592,6 +2918,7 @@ static void ath12k_bss_assoc(struct ath12k *ar,
}
arvif->is_up = true;
+ arvif->rekey_data.enable_offload = false;
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"mac vdev %d up (associated) bssid %pM aid %d\n",
@@ -2639,6 +2966,8 @@ static void ath12k_bss_disassoc(struct ath12k *ar,
arvif->is_up = false;
+ memset(&arvif->rekey_data, 0, sizeof(arvif->rekey_data));
+
cancel_delayed_work(&arvif->connection_loss_work);
}
@@ -3130,7 +3459,7 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
static struct ath12k*
ath12k_mac_select_scan_device(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- struct ieee80211_scan_request *req)
+ u32 center_freq)
{
struct ath12k_hw *ah = hw->priv;
enum nl80211_band band;
@@ -3147,9 +3476,9 @@ ath12k_mac_select_scan_device(struct ieee80211_hw *hw,
* split the hw request and perform multiple scans
*/
- if (req->req.channels[0]->center_freq < ATH12K_MIN_5G_FREQ)
+ if (center_freq < ATH12K_MIN_5G_FREQ)
band = NL80211_BAND_2GHZ;
- else if (req->req.channels[0]->center_freq < ATH12K_MIN_6G_FREQ)
+ else if (center_freq < ATH12K_MIN_6G_FREQ)
band = NL80211_BAND_5GHZ;
else
band = NL80211_BAND_6GHZ;
@@ -3349,7 +3678,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
/* Since the targeted scan device could depend on the frequency
* requested in the hw_req, select the corresponding radio
*/
- ar = ath12k_mac_select_scan_device(hw, vif, hw_req);
+ ar = ath12k_mac_select_scan_device(hw, vif, hw_req->req.channels[0]->center_freq);
if (!ar)
return -EINVAL;
@@ -3845,6 +4174,11 @@ static int ath12k_station_assoc(struct ath12k *ar,
ath12k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc);
+ if (peer_arg.peer_nss < 1) {
+ ath12k_warn(ar->ab,
+ "invalid peer NSS %d\n", peer_arg.peer_nss);
+ return -EINVAL;
+ }
ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (ret) {
ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
@@ -3879,7 +4213,8 @@ static int ath12k_station_assoc(struct ath12k *ar,
return 0;
ret = ath12k_setup_peer_smps(ar, arvif, sta->addr,
- &sta->deflink.ht_cap);
+ &sta->deflink.ht_cap,
+ &sta->deflink.he_6ghz_capa);
if (ret) {
ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -5269,6 +5604,7 @@ static void ath12k_mac_setup_sband_iftype_data(struct ath12k *ar,
static int __ath12k_set_antenna(struct ath12k *ar, u32 tx_ant, u32 rx_ant)
{
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
int ret;
lockdep_assert_held(&ar->conf_mutex);
@@ -5289,8 +5625,8 @@ static int __ath12k_set_antenna(struct ath12k *ar, u32 tx_ant, u32 rx_ant)
ar->cfg_tx_chainmask = tx_ant;
ar->cfg_rx_chainmask = rx_ant;
- if (ar->state != ATH12K_STATE_ON &&
- ar->state != ATH12K_STATE_RESTARTED)
+ if (ah->state != ATH12K_HW_STATE_ON &&
+ ah->state != ATH12K_HW_STATE_RESTARTED)
return 0;
ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_TX_CHAIN_MASK,
@@ -5590,51 +5926,16 @@ static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable)
/* TODO: Need to support new monitor mode */
}
-static void ath12k_mac_wait_reconfigure(struct ath12k_base *ab)
-{
- int recovery_start_count;
-
- if (!ab->is_reset)
- return;
-
- recovery_start_count = atomic_inc_return(&ab->recovery_start_count);
-
- ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery start count %d\n", recovery_start_count);
-
- if (recovery_start_count == ab->num_radios) {
- complete(&ab->recovery_start);
- ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery started success\n");
- }
-
- ath12k_dbg(ab, ATH12K_DBG_MAC, "waiting reconfigure...\n");
-
- wait_for_completion_timeout(&ab->reconfigure_complete,
- ATH12K_RECONFIGURE_TIMEOUT_HZ);
-}
-
static int ath12k_mac_start(struct ath12k *ar)
{
+ struct ath12k_hw *ah = ar->ah;
struct ath12k_base *ab = ar->ab;
struct ath12k_pdev *pdev = ar->pdev;
int ret;
- mutex_lock(&ar->conf_mutex);
+ lockdep_assert_held(&ah->hw_mutex);
- switch (ar->state) {
- case ATH12K_STATE_OFF:
- ar->state = ATH12K_STATE_ON;
- break;
- case ATH12K_STATE_RESTARTING:
- ar->state = ATH12K_STATE_RESTARTED;
- ath12k_mac_wait_reconfigure(ab);
- break;
- case ATH12K_STATE_RESTARTED:
- case ATH12K_STATE_WEDGED:
- case ATH12K_STATE_ON:
- WARN_ON(1);
- ret = -EINVAL;
- goto err;
- }
+ mutex_lock(&ar->conf_mutex);
ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS,
1, pdev->pdev_id);
@@ -5726,7 +6027,6 @@ static int ath12k_mac_start(struct ath12k *ar)
return 0;
err:
- ar->state = ATH12K_STATE_OFF;
mutex_unlock(&ar->conf_mutex);
return ret;
@@ -5749,9 +6049,29 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw)
ath12k_drain_tx(ah);
+ guard(mutex)(&ah->hw_mutex);
+
+ switch (ah->state) {
+ case ATH12K_HW_STATE_OFF:
+ ah->state = ATH12K_HW_STATE_ON;
+ break;
+ case ATH12K_HW_STATE_RESTARTING:
+ ah->state = ATH12K_HW_STATE_RESTARTED;
+ break;
+ case ATH12K_HW_STATE_RESTARTED:
+ case ATH12K_HW_STATE_WEDGED:
+ case ATH12K_HW_STATE_ON:
+ ah->state = ATH12K_HW_STATE_OFF;
+
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
for_each_ar(ah, ar, i) {
ret = ath12k_mac_start(ar);
if (ret) {
+ ah->state = ATH12K_HW_STATE_OFF;
+
ath12k_err(ar->ab, "fail to start mac operations in pdev idx %d ret %d\n",
ar->pdev_idx, ret);
goto fail_start;
@@ -5759,11 +6079,13 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw)
}
return 0;
+
fail_start:
for (; i > 0; i--) {
ar = ath12k_ah_to_ar(ah, i - 1);
ath12k_mac_stop(ar);
}
+
return ret;
}
@@ -5826,9 +6148,12 @@ int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable)
static void ath12k_mac_stop(struct ath12k *ar)
{
+ struct ath12k_hw *ah = ar->ah;
struct htt_ppdu_stats_info *ppdu_stats, *tmp;
int ret;
+ lockdep_assert_held(&ah->hw_mutex);
+
mutex_lock(&ar->conf_mutex);
ret = ath12k_mac_config_mon_status_default(ar, false);
if (ret && (ret != -EOPNOTSUPP))
@@ -5836,7 +6161,6 @@ static void ath12k_mac_stop(struct ath12k *ar)
ret);
clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
- ar->state = ATH12K_STATE_OFF;
mutex_unlock(&ar->conf_mutex);
cancel_delayed_work_sync(&ar->scan.timeout);
@@ -5857,7 +6181,7 @@ static void ath12k_mac_stop(struct ath12k *ar)
atomic_set(&ar->num_pending_mgmt_tx, 0);
}
-static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
+static void ath12k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
@@ -5865,8 +6189,14 @@ static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
ath12k_drain_tx(ah);
+ mutex_lock(&ah->hw_mutex);
+
+ ah->state = ATH12K_HW_STATE_OFF;
+
for_each_ar(ah, ar, i)
ath12k_mac_stop(ar);
+
+ mutex_unlock(&ah->hw_mutex);
}
static u8
@@ -5892,17 +6222,59 @@ ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif)
return vdev_stats_id;
}
-static void ath12k_mac_setup_vdev_create_arg(struct ath12k_vif *arvif,
- struct ath12k_wmi_vdev_create_arg *arg)
+static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_vif *arvif,
+ u32 *flags, u32 *tx_vdev_id)
+{
+ struct ieee80211_vif *tx_vif = arvif->vif->mbssid_tx_vif;
+ struct ath12k *ar = arvif->ar;
+ struct ath12k_vif *tx_arvif;
+
+ if (!tx_vif)
+ return 0;
+
+ tx_arvif = ath12k_vif_to_arvif(tx_vif);
+
+ if (arvif->vif->bss_conf.nontransmitted) {
+ if (ar->ah->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy)
+ return -EINVAL;
+
+ *flags = WMI_VDEV_MBSSID_FLAGS_NON_TRANSMIT_AP;
+ *tx_vdev_id = tx_arvif->vdev_id;
+ } else if (tx_arvif == arvif) {
+ *flags = WMI_VDEV_MBSSID_FLAGS_TRANSMIT_AP;
+ } else {
+ return -EINVAL;
+ }
+
+ if (arvif->vif->bss_conf.ema_ap)
+ *flags |= WMI_VDEV_MBSSID_FLAGS_EMA_MODE;
+
+ return 0;
+}
+
+static int ath12k_mac_setup_vdev_create_arg(struct ath12k_vif *arvif,
+ struct ath12k_wmi_vdev_create_arg *arg)
{
struct ath12k *ar = arvif->ar;
struct ath12k_pdev *pdev = ar->pdev;
+ int ret;
arg->if_id = arvif->vdev_id;
arg->type = arvif->vdev_type;
arg->subtype = arvif->vdev_subtype;
arg->pdev_id = pdev->pdev_id;
+ arg->mbssid_flags = WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP;
+ arg->mbssid_tx_vdev_id = 0;
+ if (!test_bit(WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ ret = ath12k_mac_setup_vdev_params_mbssid(arvif,
+ &arg->mbssid_flags,
+ &arg->mbssid_tx_vdev_id);
+ if (ret)
+ return ret;
+ }
+
if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
arg->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
arg->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
@@ -5918,6 +6290,7 @@ static void ath12k_mac_setup_vdev_create_arg(struct ath12k_vif *arvif,
}
arg->if_stats_id = ath12k_mac_get_vdev_stats_id(arvif);
+ return 0;
}
static u32
@@ -6099,7 +6472,12 @@ static int ath12k_mac_vdev_create(struct ath12k *ar, struct ieee80211_vif *vif)
for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
vif->hw_queue[i] = i % (ATH12K_HW_MAX_QUEUES - 1);
- ath12k_mac_setup_vdev_create_arg(arvif, &vdev_arg);
+ ret = ath12k_mac_setup_vdev_create_arg(arvif, &vdev_arg);
+ if (ret) {
+ ath12k_warn(ab, "failed to create vdev parameters %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
ret = ath12k_wmi_vdev_create(ar, vif->addr, &vdev_arg);
if (ret) {
@@ -6492,7 +6870,6 @@ err_vdev_del:
/* Recalc txpower for remaining vdev */
ath12k_mac_txpower_recalc(ar);
- clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
/* TODO: recal traffic pause state based on the available vdevs */
arvif->is_created = false;
@@ -6563,15 +6940,9 @@ static void ath12k_mac_configure_filter(struct ath12k *ar,
reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC);
ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag);
- if (!ret) {
- if (!reset_flag)
- set_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
- else
- clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
- } else {
+ if (ret)
ath12k_warn(ar->ab,
"fail to set monitor filter: %d\n", ret);
- }
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"total_flags:0x%x, reset_flag:%d\n",
@@ -6848,10 +7219,16 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
arg.pref_tx_streams = ar->num_tx_chains;
arg.pref_rx_streams = ar->num_rx_chains;
- /* Fill the MBSSID flags to indicate AP is non MBSSID by default
- * Corresponding flags would be updated with MBSSID support.
- */
arg.mbssid_flags = WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP;
+ arg.mbssid_tx_vdev_id = 0;
+ if (test_bit(WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ ret = ath12k_mac_setup_vdev_params_mbssid(arvif,
+ &arg.mbssid_flags,
+ &arg.mbssid_tx_vdev_id);
+ if (ret)
+ return ret;
+ }
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
arg.ssid = arvif->u.ap.ssid;
@@ -7045,7 +7422,9 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
struct ieee80211_vif_chanctx_switch *vifs,
int n_vifs)
{
+ struct ath12k_wmi_vdev_up_params params = {};
struct ath12k_base *ab = ar->ab;
+ struct ieee80211_vif *vif;
struct ath12k_vif *arvif;
int ret;
int i;
@@ -7054,9 +7433,10 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
lockdep_assert_held(&ar->conf_mutex);
for (i = 0; i < n_vifs; i++) {
- arvif = ath12k_vif_to_arvif(vifs[i].vif);
+ vif = vifs[i].vif;
+ arvif = ath12k_vif_to_arvif(vif);
- if (vifs[i].vif->type == NL80211_IFTYPE_MONITOR)
+ if (vif->type == NL80211_IFTYPE_MONITOR)
monitor_vif = true;
ath12k_dbg(ab, ATH12K_DBG_MAC,
@@ -7070,29 +7450,6 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
if (WARN_ON(!arvif->is_started))
continue;
- if (WARN_ON(!arvif->is_up))
- continue;
-
- ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id);
- if (ret) {
- ath12k_warn(ab, "failed to down vdev %d: %d\n",
- arvif->vdev_id, ret);
- continue;
- }
- }
-
- /* All relevant vdevs are downed and associated channel resources
- * should be available for the channel switch now.
- */
-
- /* TODO: Update ar->rx_channel */
-
- for (i = 0; i < n_vifs; i++) {
- arvif = ath12k_vif_to_arvif(vifs[i].vif);
-
- if (WARN_ON(!arvif->is_started))
- continue;
-
arvif->punct_bitmap = vifs[i].new_ctx->def.punctured;
/* Firmware expect vdev_restart only if vdev is up.
@@ -7125,8 +7482,16 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
ath12k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
ret);
- ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
- arvif->bssid);
+ memset(&params, 0, sizeof(params));
+ params.vdev_id = arvif->vdev_id;
+ params.aid = arvif->aid;
+ params.bssid = arvif->bssid;
+ if (vif->mbssid_tx_vif) {
+ params.tx_bssid = ath12k_vif_to_arvif(vif->mbssid_tx_vif)->bssid;
+ params.nontx_profile_idx = vif->bss_conf.bssid_index;
+ params.nontx_profile_cnt = 1 << vif->bss_conf.bssid_indicator;
+ }
+ ret = ath12k_wmi_vdev_up(arvif->ar, &params);
if (ret) {
ath12k_warn(ab, "failed to bring vdev up %d: %d\n",
arvif->vdev_id, ret);
@@ -7259,7 +7624,6 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ath12k_base *ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
int ret;
- struct ath12k_wmi_peer_create_arg param;
/* For multi radio wiphy, the vdev was not created during add_interface
* create now since we have a channel ctx now to assign to a specific ar/fw
@@ -7295,21 +7659,6 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
- if (ab->hw_params->vdev_start_delay &&
- arvif->vdev_type != WMI_VDEV_TYPE_AP &&
- arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
- param.vdev_id = arvif->vdev_id;
- param.peer_type = WMI_PEER_TYPE_DEFAULT;
- param.peer_addr = ar->mac_addr;
-
- ret = ath12k_peer_create(ar, arvif, NULL, &param);
- if (ret) {
- ath12k_warn(ab, "failed to create peer after vdev start delay: %d",
- ret);
- goto out;
- }
- }
-
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath12k_mac_monitor_start(ar);
if (ret)
@@ -7371,11 +7720,6 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
WARN_ON(!arvif->is_started);
- if (ab->hw_params->vdev_start_delay &&
- arvif->vdev_type == WMI_VDEV_TYPE_MONITOR &&
- ath12k_peer_find_by_addr(ab, ar->mac_addr))
- ath12k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
-
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath12k_mac_monitor_stop(ar);
if (ret) {
@@ -7386,7 +7730,8 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
arvif->is_started = false;
}
- if (arvif->vdev_type != WMI_VDEV_TYPE_STA) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA &&
+ arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
ath12k_bss_disassoc(ar, arvif);
ret = ath12k_mac_vdev_stop(arvif);
if (ret)
@@ -7395,10 +7740,6 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
}
arvif->is_started = false;
- if (ab->hw_params->vdev_start_delay &&
- arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
- ath12k_wmi_vdev_down(ar, arvif->vdev_id);
-
if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
ar->num_started_vdevs == 1 && ar->monitor_vdev_created)
ath12k_mac_monitor_stop(ar);
@@ -7920,26 +8261,33 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
struct ath12k *ar;
struct ath12k_base *ab;
struct ath12k_vif *arvif;
- int recovery_count;
+ int recovery_count, i;
if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
return;
- ar = ath12k_ah_to_ar(ah, 0);
- ab = ar->ab;
+ guard(mutex)(&ah->hw_mutex);
- mutex_lock(&ar->conf_mutex);
+ if (ah->state != ATH12K_HW_STATE_RESTARTED)
+ return;
+
+ ah->state = ATH12K_HW_STATE_ON;
+ ieee80211_wake_queues(hw);
+
+ for_each_ar(ah, ar, i) {
+ mutex_lock(&ar->conf_mutex);
+
+ ab = ar->ab;
- if (ar->state == ATH12K_STATE_RESTARTED) {
ath12k_warn(ar->ab, "pdev %d successfully recovered\n",
ar->pdev->pdev_id);
- ar->state = ATH12K_STATE_ON;
- ieee80211_wake_queues(hw);
if (ab->is_reset) {
recovery_count = atomic_inc_return(&ab->recovery_count);
+
ath12k_dbg(ab, ATH12K_DBG_BOOT, "recovery count %d\n",
recovery_count);
+
/* When there are multiple radios in an SOC,
* the recovery has to be done for each radio
*/
@@ -7958,6 +8306,7 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
arvif->key_cipher,
arvif->is_up,
arvif->vdev_type);
+
/* After trigger disconnect, then upper layer will
* trigger connect again, then the PN number of
* upper layer will be reset to keep up with AP
@@ -7967,13 +8316,14 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
arvif->vdev_type == WMI_VDEV_TYPE_STA &&
arvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE) {
ieee80211_hw_restart_disconnect(arvif->vif);
+
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"restart disconnect\n");
}
}
- }
- mutex_unlock(&ar->conf_mutex);
+ mutex_unlock(&ar->conf_mutex);
+ }
}
static void
@@ -8026,6 +8376,13 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
if (!sband)
sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
+ if (sband && idx >= sband->n_channels) {
+ idx -= sband->n_channels;
+ sband = NULL;
+ }
+
+ if (!sband)
+ sband = hw->wiphy->bands[NL80211_BAND_6GHZ];
if (!sband || idx >= sband->n_channels)
return -ENOENT;
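
The hunk above extends the flat survey index walk from 2 GHz/5 GHz to also fall through into 6 GHz. A minimal sketch of that index arithmetic (the channel counts below are made-up placeholders):

#include <stdio.h>

int main(void)
{
	int n_2ghz = 14, n_5ghz = 28, n_6ghz = 59;
	int idx = 50;			/* flat index requested by mac80211 */

	if (idx < n_2ghz)
		printf("2 GHz channel %d\n", idx);
	else if (idx - n_2ghz < n_5ghz)
		printf("5 GHz channel %d\n", idx - n_2ghz);
	else if (idx - n_2ghz - n_5ghz < n_6ghz)
		printf("6 GHz channel %d\n", idx - n_2ghz - n_5ghz); /* 50 - 14 - 28 = 8 */
	else
		printf("-ENOENT\n");
	return 0;
}
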
@@ -8124,12 +8481,68 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k_wmi_scan_req_arg arg;
- struct ath12k *ar;
+ struct ath12k *ar, *prev_ar;
u32 scan_time_msec;
+ bool create = true;
int ret;
- ar = ath12k_ah_to_ar(ah, 0);
+ if (ah->num_radio == 1) {
+ WARN_ON(!arvif->is_created);
+ ar = ath12k_ah_to_ar(ah, 0);
+ goto scan;
+ }
+
+ ar = ath12k_mac_select_scan_device(hw, vif, chan->center_freq);
+ if (!ar)
+ return -EINVAL;
+
+ /* If the vif is already assigned to a specific vdev of an ar,
+ * check whether that vdev is already started; a vdev which is
+ * started is not allowed to switch to a new radio.
+ * If the vdev is not started but was earlier created on a
+ * different ar, delete that vdev and create a new one. We don't
+ * delete at scan stop, as an optimization to avoid redundant
+ * delete/create of vdevs for the same ar in case the requests
+ * for this vif always stay on the same band.
+ */
+ if (arvif->is_created) {
+ if (WARN_ON(!arvif->ar))
+ return -EINVAL;
+
+ if (ar != arvif->ar && arvif->is_started)
+ return -EBUSY;
+
+ if (ar != arvif->ar) {
+ /* Back up the previously used ar pointer, since the vdev
+ * delete below sets arvif->ar to NULL after the call.
+ */
+ prev_ar = arvif->ar;
+ mutex_lock(&prev_ar->conf_mutex);
+ ret = ath12k_mac_vdev_delete(prev_ar, vif);
+ mutex_unlock(&prev_ar->conf_mutex);
+ if (ret) {
+ ath12k_warn(prev_ar->ab,
+ "unable to delete scan vdev for roc: %d\n",
+ ret);
+ return ret;
+ }
+ } else {
+ create = false;
+ }
+ }
+ if (create) {
+ mutex_lock(&ar->conf_mutex);
+ ret = ath12k_mac_vdev_create(ar, vif);
+ mutex_unlock(&ar->conf_mutex);
+ if (ret) {
+ ath12k_warn(ar->ab, "unable to create scan vdev for roc: %d\n",
+ ret);
+ return -EINVAL;
+ }
+ }
+
+scan:
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
@@ -8211,6 +8624,40 @@ exit:
return ret;
}
+static void ath12k_mac_op_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_rekey_data *rekey_data = &arvif->rekey_data;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac set rekey data vdev %d\n",
+ arvif->vdev_id);
+
+ mutex_lock(&ar->conf_mutex);
+
+ memcpy(rekey_data->kck, data->kck, NL80211_KCK_LEN);
+ memcpy(rekey_data->kek, data->kek, NL80211_KEK_LEN);
+
+ /* The supplicant provides the replay counter in big endian; the
+ * firmware expects it in little endian.
+ */
+ rekey_data->replay_ctr = get_unaligned_be64(data->replay_ctr);
+
+ arvif->rekey_data.enable_offload = true;
+
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_MAC, "kck", NULL,
+ rekey_data->kck, NL80211_KCK_LEN);
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_MAC, "kek", NULL,
+ rekey_data->kek, NL80211_KEK_LEN);
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_MAC, "replay ctr", NULL,
+ &rekey_data->replay_ctr, sizeof(rekey_data->replay_ctr));
+
+ mutex_unlock(&ar->conf_mutex);
+}
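
The conversion above pairs with the GTK offload status event handler added later in this patch: the counter arrives big-endian from the supplicant, is kept CPU-native in rekey_data, and is converted back to big endian before ieee80211_gtk_rekey_notify(). A tiny userspace round-trip sketch (be64toh/htobe64 stand in for the kernel helpers):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t from_supplicant[8] = { 0, 0, 0, 0, 0, 0, 0x01, 0x02 };
	uint64_t raw, host_ctr, back;

	memcpy(&raw, from_supplicant, sizeof(raw));
	host_ctr = be64toh(raw);   /* like get_unaligned_be64(): BE wire -> host order */
	back = htobe64(host_ctr);  /* like cpu_to_be64(): host order -> BE for mac80211 */

	printf("host view: 0x%llx\n", (unsigned long long)host_ctr); /* prints 0x102 */
	return back == raw ? 0 : 1;
}
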
+
static const struct ieee80211_ops ath12k_ops = {
.tx = ath12k_mac_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
@@ -8226,6 +8673,7 @@ static const struct ieee80211_ops ath12k_ops = {
.hw_scan = ath12k_mac_op_hw_scan,
.cancel_hw_scan = ath12k_mac_op_cancel_hw_scan,
.set_key = ath12k_mac_op_set_key,
+ .set_rekey_data = ath12k_mac_op_set_rekey_data,
.sta_state = ath12k_mac_op_sta_state,
.sta_set_txpwr = ath12k_mac_op_sta_set_txpwr,
.sta_rc_update = ath12k_mac_op_sta_rc_update,
@@ -8247,6 +8695,12 @@ static const struct ieee80211_ops ath12k_ops = {
.sta_statistics = ath12k_mac_op_sta_statistics,
.remain_on_channel = ath12k_mac_op_remain_on_channel,
.cancel_remain_on_channel = ath12k_mac_op_cancel_remain_on_channel,
+
+#ifdef CONFIG_PM
+ .suspend = ath12k_wow_op_suspend,
+ .resume = ath12k_wow_op_resume,
+ .set_wakeup = ath12k_wow_op_set_wakeup,
+#endif
};
static void ath12k_mac_update_ch_list(struct ath12k *ar,
@@ -8488,19 +8942,23 @@ static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
static const u8 ath12k_if_types_ext_capa[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
};
static const u8 ath12k_if_types_ext_capa_sta[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
[9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
};
static const u8 ath12k_if_types_ext_capa_ap[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
[9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT,
+ [10] = WLAN_EXT_CAPA11_EMA_SUPPORT,
};
static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = {
@@ -8540,8 +8998,10 @@ static void ath12k_mac_hw_unregister(struct ath12k_hw *ah)
struct ath12k *ar;
int i;
- for_each_ar(ah, ar, i)
+ for_each_ar(ah, ar, i) {
cancel_work_sync(&ar->regd_update_work);
+ ath12k_debugfs_unregister(ar);
+ }
ieee80211_unregister_hw(hw);
@@ -8605,6 +9065,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
u32 ht_cap = U32_MAX, antennas_rx = 0, antennas_tx = 0;
bool is_6ghz = false, is_raw_mode = false, is_monitor_disable = false;
u8 *mac_addr = NULL;
+ u8 mbssid_max_interfaces = 0;
wiphy->max_ap_assoc_sta = 0;
@@ -8648,6 +9109,8 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
mac_addr = ar->mac_addr;
else
mac_addr = ab->mac_addr;
+
+ mbssid_max_interfaces += TARGET_NUM_VDEVS;
}
wiphy->available_antennas_rx = antennas_rx;
@@ -8685,7 +9148,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
ieee80211_hw_set(hw, SUPPORTS_TX_FRAG);
ieee80211_hw_set(hw, REPORTS_LOW_ACK);
- if (ht_cap & WMI_HT_CAP_ENABLED) {
+ if ((ht_cap & WMI_HT_CAP_ENABLED) || ar->supports_6ghz) {
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
@@ -8700,7 +9163,8 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
* for each band for a dual band capable radio. It will be tricky to
* handle it when the ht capability different for each band.
*/
- if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS)
+ if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS ||
+ (ar->supports_6ghz && ab->hw_params->supports_dynamic_smps_6ghz))
wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
@@ -8739,6 +9203,9 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
wiphy->iftype_ext_capab = ath12k_iftypes_ext_capa;
wiphy->num_iftype_ext_capab = ARRAY_SIZE(ath12k_iftypes_ext_capa);
+ wiphy->mbssid_max_interfaces = mbssid_max_interfaces;
+ wiphy->ema_max_profile_periodicity = TARGET_EMA_MAX_PROFILE_PERIOD;
+
if (is_6ghz) {
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_FILS_DISCOVERY);
@@ -8756,6 +9223,24 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
}
+ if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
+ wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
+ wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
+ wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;
+ wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS;
+ wiphy->max_sched_scan_plan_interval =
+ WMI_PNO_MAX_SCHED_SCAN_PLAN_INT;
+ wiphy->max_sched_scan_plan_iterations =
+ WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS;
+ wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
+ }
+
+ ret = ath12k_wow_init(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to init wow: %d\n", ret);
+ goto err_free_if_combs;
+ }
+
ret = ieee80211_register_hw(hw);
if (ret) {
ath12k_err(ab, "ieee80211 registration failed: %d\n", ret);
@@ -8777,13 +9262,16 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
ath12k_err(ar->ab, "ath12k regd update failed: %d\n", ret);
goto err_unregister_hw;
}
- }
- ath12k_debugfs_register(ar);
+ ath12k_debugfs_register(ar);
+ }
return 0;
err_unregister_hw:
+ for_each_ar(ah, ar, i)
+ ath12k_debugfs_unregister(ar);
+
ieee80211_unregister_hw(hw);
err_free_if_combs:
@@ -8842,7 +9330,6 @@ static void ath12k_mac_setup(struct ath12k *ar)
INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
- clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
}
int ath12k_mac_register(struct ath12k_base *ab)
@@ -8919,6 +9406,8 @@ static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab,
ah->hw = hw;
ah->num_radio = num_pdev_map;
+ mutex_init(&ah->hw_mutex);
+
for (i = 0; i < num_pdev_map; i++) {
ab = pdev_map[i].ab;
pdev_idx = pdev_map[i].pdev_idx;
@@ -8927,7 +9416,7 @@ static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab,
ar = ath12k_ah_to_ar(ah, i);
ar->ah = ah;
ar->ab = ab;
- ar->hw_link_id = i;
+ ar->hw_link_id = pdev->hw_link_id;
ar->pdev = pdev;
ar->pdev_idx = pdev_idx;
pdev->ar = ar;
@@ -9005,3 +9494,34 @@ err:
return ret;
}
+
+int ath12k_mac_vif_set_keepalive(struct ath12k_vif *arvif,
+ enum wmi_sta_keepalive_method method,
+ u32 interval)
+{
+ struct wmi_sta_keepalive_arg arg = {};
+ struct ath12k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
+ if (!test_bit(WMI_TLV_SERVICE_STA_KEEP_ALIVE, ar->ab->wmi_ab.svc_map))
+ return 0;
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.enabled = 1;
+ arg.method = method;
+ arg.interval = interval;
+
+ ret = ath12k_wmi_sta_keepalive(ar, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set keepalive on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
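
A hypothetical call site for the helper above might look like the fragment below; the method constant and the 30-second interval are assumptions borrowed from ath11k's WoWLAN keepalive path, not something this hunk defines.

	/* sketch only, not part of this patch */
	ret = ath12k_mac_vif_set_keepalive(arvif,
					   WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
					   30);
	if (ret)
		ath12k_warn(ar->ab, "failed to enable keepalive on vdev %i: %d\n",
			    arvif->vdev_id, ret);
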
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index 69fd282b9dd3..5efbb6822628 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -9,6 +9,7 @@
#include <net/mac80211.h>
#include <net/cfg80211.h>
+#include "wmi.h"
struct ath12k;
struct ath12k_base;
@@ -81,5 +82,9 @@ int ath12k_mac_rfkill_config(struct ath12k *ar);
int ath12k_mac_wait_tx_complete(struct ath12k *ar);
void ath12k_mac_handle_beacon(struct ath12k *ar, struct sk_buff *skb);
void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id);
+int ath12k_mac_vif_set_keepalive(struct ath12k_vif *arvif,
+ enum wmi_sta_keepalive_method method,
+ u32 interval);
+u8 ath12k_mac_get_target_pdev_id(struct ath12k *ar);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c
index fef2f7622033..df96b0f91f54 100644
--- a/drivers/net/wireless/ath/ath12k/mhi.c
+++ b/drivers/net/wireless/ath/ath12k/mhi.c
@@ -16,6 +16,7 @@
#define MHI_TIMEOUT_DEFAULT_MS 90000
#define OTP_INVALID_BOARD_ID 0xFFFF
#define OTP_VALID_DUALMAC_BOARD_ID_MASK 0x1000
+#define MHI_CB_INVALID 0xff
static const struct mhi_channel_config ath12k_mhi_channels_qcn9274[] = {
{
@@ -268,6 +269,7 @@ static void ath12k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
enum mhi_callback cb)
{
struct ath12k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev);
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
ath12k_dbg(ab, ATH12K_DBG_BOOT, "mhi notify status reason %s\n",
ath12k_mhi_op_callback_to_str(cb));
@@ -277,12 +279,20 @@ static void ath12k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
ath12k_warn(ab, "firmware crashed: MHI_CB_SYS_ERROR\n");
break;
case MHI_CB_EE_RDDM:
+ if (ab_pci->mhi_pre_cb == MHI_CB_EE_RDDM) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "do not queue again for consecutive RDDM event\n");
+ break;
+ }
+
if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags)))
queue_work(ab->workqueue_aux, &ab->reset_work);
break;
default:
break;
}
+
+ ab_pci->mhi_pre_cb = cb;
}
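
To illustrate the de-duplication above, here is a standalone sketch of the "remember the previous callback" logic; the enum values are placeholders, while the driver keys off MHI_CB_EE_RDDM and ab_pci->mhi_pre_cb.

#include <stdbool.h>
#include <stdio.h>

enum cb { CB_INVALID, CB_SYS_ERROR, CB_EE_RDDM };

int main(void)
{
	enum cb prev = CB_INVALID;
	enum cb events[] = { CB_EE_RDDM, CB_EE_RDDM, CB_SYS_ERROR, CB_EE_RDDM };

	for (unsigned int i = 0; i < sizeof(events) / sizeof(events[0]); i++) {
		/* only the first of consecutive RDDM callbacks queues a reset */
		bool queue_reset = events[i] == CB_EE_RDDM && prev != CB_EE_RDDM;

		printf("event %u: queue reset? %s\n", i, queue_reset ? "yes" : "no");
		prev = events[i];
	}
	return 0;	/* prints yes, no, no, yes */
}
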
static int ath12k_mhi_op_read_reg(struct mhi_controller *mhi_cntrl,
@@ -313,6 +323,7 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
if (!mhi_ctrl)
return -ENOMEM;
+ ab_pci->mhi_pre_cb = MHI_CB_INVALID;
ab_pci->mhi_ctrl = mhi_ctrl;
mhi_ctrl->cntrl_dev = ab->dev;
mhi_ctrl->regs = ab->mem;
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index 16af046c33d9..9e0b9e329bda 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -350,6 +350,7 @@ static void ath12k_pci_free_ext_irq(struct ath12k_base *ab)
free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
netif_napi_del(&irq_grp->napi);
+ free_netdev(irq_grp->napi_ndev);
}
}
@@ -472,7 +473,8 @@ static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
{
int i;
- clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
+ if (!test_and_clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
+ return;
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
@@ -560,8 +562,9 @@ static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg)
static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
{
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
- int i, j, ret, num_vectors = 0;
+ int i, j, n, ret, num_vectors = 0;
u32 user_base_data = 0, base_vector = 0, base_idx;
+ struct ath12k_ext_irq_grp *irq_grp;
base_idx = ATH12K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX;
ret = ath12k_pci_get_user_msi_assignment(ab, "DP",
@@ -572,13 +575,18 @@ static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
return ret;
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
- struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+ irq_grp = &ab->ext_irq_grp[i];
u32 num_irq = 0;
irq_grp->ab = ab;
irq_grp->grp_id = i;
- init_dummy_netdev(&irq_grp->napi_ndev);
- netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
+ irq_grp->napi_ndev = alloc_netdev_dummy(0);
+ if (!irq_grp->napi_ndev) {
+ ret = -ENOMEM;
+ goto fail_allocate;
+ }
+
+ netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
ath12k_pci_ext_grp_napi_poll);
if (ab->hw_params->ring_mask->tx[i] ||
@@ -611,13 +619,23 @@ static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
if (ret) {
ath12k_err(ab, "failed request irq %d: %d\n",
vector, ret);
- return ret;
+ goto fail_request;
}
}
ath12k_pci_ext_grp_disable(irq_grp);
}
return 0;
+
+fail_request:
+ /* napi_ndev for group i was allocated successfully; free it as well */
+ i += 1;
+fail_allocate:
+ for (n = 0; n < i; n++) {
+ irq_grp = &ab->ext_irq_grp[n];
+ free_netdev(irq_grp->napi_ndev);
+ }
+ return ret;
}
static int ath12k_pci_set_irq_affinity_hint(struct ath12k_pci *ab_pci,
@@ -1090,14 +1108,14 @@ void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
{
int i;
- set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
-
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
napi_enable(&irq_grp->napi);
ath12k_pci_ext_grp_enable(irq_grp);
}
+
+ set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
}
void ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
@@ -1285,6 +1303,13 @@ void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend)
ath12k_pci_sw_reset(ab_pci->ab, false);
}
+static int ath12k_pci_panic_handler(struct ath12k_base *ab)
+{
+ ath12k_pci_sw_reset(ab, false);
+
+ return NOTIFY_OK;
+}
+
static const struct ath12k_hif_ops ath12k_pci_hif_ops = {
.start = ath12k_pci_start,
.stop = ath12k_pci_stop,
@@ -1302,6 +1327,7 @@ static const struct ath12k_hif_ops ath12k_pci_hif_ops = {
.ce_irq_enable = ath12k_pci_hif_ce_irq_enable,
.ce_irq_disable = ath12k_pci_hif_ce_irq_disable,
.get_ce_msi_idx = ath12k_pci_get_ce_msi_idx,
+ .panic_handler = ath12k_pci_panic_handler,
};
static
diff --git a/drivers/net/wireless/ath/ath12k/pci.h b/drivers/net/wireless/ath/ath12k/pci.h
index 6186a78038cf..31584a7ad80e 100644
--- a/drivers/net/wireless/ath/ath12k/pci.h
+++ b/drivers/net/wireless/ath/ath12k/pci.h
@@ -104,6 +104,7 @@ struct ath12k_pci {
struct mhi_controller *mhi_ctrl;
const struct ath12k_msi_config *msi_config;
unsigned long mhi_state;
+ enum mhi_callback mhi_pre_cb;
u32 register_window;
/* protects register_window above */
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
index 5484112859a6..b93ce9f87f61 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.c
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -2041,7 +2041,7 @@ static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
req->mlo_capable_valid = 1;
req->mlo_capable = 1;
req->mlo_chip_id_valid = 1;
- req->mlo_chip_id = 0;
+ req->mlo_chip_id = ab->device_id;
req->mlo_group_id_valid = 1;
req->mlo_group_id = 0;
req->max_mlo_peer_valid = 1;
@@ -2053,7 +2053,7 @@ static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
req->mlo_num_chips = 1;
info = &req->mlo_chip_info[0];
- info->chip_id = 0;
+ info->chip_id = ab->device_id;
info->num_local_links = ab->qmi.num_radios;
for (i = 0; i < info->num_local_links; i++) {
@@ -2503,7 +2503,7 @@ static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
ab->qmi.dev_mem[i].size =
resp.dev_mem[i].size;
ath12k_dbg(ab, ATH12K_DBG_QMI,
- "devmem [%d] start ox%llx size %llu\n", i,
+ "devmem [%d] start 0x%llx size %llu\n", i,
ab->qmi.dev_mem[i].start,
ab->qmi.dev_mem[i].size);
}
@@ -2538,7 +2538,7 @@ static int ath12k_qmi_load_file_target_mem(struct ath12k_base *ab,
struct qmi_wlanfw_bdf_download_resp_msg_v01 resp = {};
struct qmi_txn txn;
const u8 *temp = data;
- int ret;
+ int ret = 0;
u32 remaining = len;
req = kzalloc(sizeof(*req), GFP_KERNEL);
diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c
index fbf38044938c..439d61f284d8 100644
--- a/drivers/net/wireless/ath/ath12k/reg.c
+++ b/drivers/net/wireless/ath/ath12k/reg.c
@@ -206,9 +206,9 @@ static void ath12k_copy_regd(struct ieee80211_regdomain *regd_orig,
int ath12k_regd_update(struct ath12k *ar, bool init)
{
- struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+ struct ieee80211_hw *hw = ah->hw;
struct ieee80211_regdomain *regd, *regd_copy = NULL;
- struct ath12k_hw *ah = ar->ah;
int ret, regd_len, pdev_id;
struct ath12k_base *ab;
int i;
@@ -286,19 +286,20 @@ int ath12k_regd_update(struct ath12k *ar, bool init)
if (ret)
goto err;
+ if (ah->state != ATH12K_HW_STATE_ON)
+ goto skip;
+
ah->regd_updated = true;
/* Apply the new regd to all the radios, this is expected to be received only once
* since we check for ah->regd_updated and allow here only once.
*/
for_each_ar(ah, ar, i) {
- if (ar->state == ATH12K_STATE_ON) {
- ab = ar->ab;
- ret = ath12k_reg_update_chan_list(ar);
- if (ret)
- goto err;
- }
+ ab = ar->ab;
+ ret = ath12k_reg_update_chan_list(ar);
+ if (ret)
+ goto err;
}
-
+skip:
return 0;
err:
ath12k_warn(ab, "failed to perform regd update : %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 7a52d2082b79..9f6be557365e 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -228,9 +228,12 @@ void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
config->peer_map_unmap_version = 0x32;
config->twt_ap_pdev_count = ab->num_radios;
config->twt_ap_sta_count = 1000;
+ config->ema_max_vap_cnt = ab->num_radios;
+ config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
+ config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;
if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
- config->dp_peer_meta_data_ver = TARGET_RX_PEER_METADATA_VER_V1B;
+ config->peer_metadata_ver = ATH12K_PEER_METADATA_V1B;
}
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -497,6 +500,7 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
mac_caps = wmi_mac_phy_caps + phy_idx;
pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
+ pdev->hw_link_id = ath12k_wmi_mac_phy_get_hw_link_id(mac_caps);
pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);
@@ -841,6 +845,8 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
cmd->vdev_subtype = cpu_to_le32(args->subtype);
cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
cmd->pdev_id = cpu_to_le32(args->pdev_id);
+ cmd->mbssid_flags = cpu_to_le32(args->mbssid_flags);
+ cmd->mbssid_tx_vdev_id = cpu_to_le32(args->mbssid_tx_vdev_id);
cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
@@ -1046,6 +1052,7 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
cmd->he_ops = cpu_to_le32(arg->he_ops);
cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags);
+ cmd->mbssid_tx_vdev_id = cpu_to_le32(arg->mbssid_tx_vdev_id);
if (!restart) {
if (arg->ssid) {
@@ -1097,7 +1104,7 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
return ret;
}
-int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
+int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_vdev_up_cmd *cmd;
@@ -1112,14 +1119,20 @@ int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD,
sizeof(*cmd));
- cmd->vdev_id = cpu_to_le32(vdev_id);
- cmd->vdev_assoc_id = cpu_to_le32(aid);
+ cmd->vdev_id = cpu_to_le32(params->vdev_id);
+ cmd->vdev_assoc_id = cpu_to_le32(params->aid);
+
+ ether_addr_copy(cmd->vdev_bssid.addr, params->bssid);
- ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+ if (params->tx_bssid) {
+ ether_addr_copy(cmd->tx_vdev_bssid.addr, params->tx_bssid);
+ cmd->nontx_profile_idx = cpu_to_le32(params->nontx_profile_idx);
+ cmd->nontx_profile_cnt = cpu_to_le32(params->nontx_profile_cnt);
+ }
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
- vdev_id, aid, bssid);
+ params->vdev_id, params->aid, params->bssid);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
if (ret) {
@@ -1776,13 +1789,15 @@ int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
- struct sk_buff *bcn)
+ struct sk_buff *bcn,
+ struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_bcn_tmpl_cmd *cmd;
struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
struct wmi_tlv *tlv;
struct sk_buff *skb;
+ u32 ema_params = 0;
void *ptr;
int ret, len;
size_t aligned_len = roundup(bcn->len, 4);
@@ -1801,6 +1816,16 @@ int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
cmd->csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[0]);
cmd->ext_csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[1]);
cmd->buf_len = cpu_to_le32(bcn->len);
+ cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off);
+ if (ema_args) {
+ u32p_replace_bits(&ema_params, ema_args->bcn_cnt, WMI_EMA_BEACON_CNT);
+ u32p_replace_bits(&ema_params, ema_args->bcn_index, WMI_EMA_BEACON_IDX);
+ if (ema_args->bcn_index == 0)
+ u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_FIRST);
+ if (ema_args->bcn_index + 1 == ema_args->bcn_cnt)
+ u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_LAST);
+ cmd->ema_params = cpu_to_le32(ema_params);
+ }
ptr = skb->data + sizeof(*cmd);
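
A hedged illustration of the ema_params packing above: the real bit positions come from the WMI_EMA_BEACON_* masks in wmi.h, while the layout assumed here (count in bits 0-7, index in bits 8-15, first/last flags in bits 16/17) mirrors ath11k and may differ.

#include <stdint.h>
#include <stdio.h>

static uint32_t ema_params(uint32_t cnt, uint32_t idx)
{
	uint32_t val = 0;

	val |= cnt & 0xff;		/* number of beacon templates in the EMA set */
	val |= (idx & 0xff) << 8;	/* index of this template */
	if (idx == 0)
		val |= 1u << 16;	/* first beacon of the EMA set */
	if (idx + 1 == cnt)
		val |= 1u << 17;	/* last beacon of the EMA set */
	return val;
}

int main(void)
{
	/* A 3-template EMA set: templates 0..2 */
	for (uint32_t i = 0; i < 3; i++)
		printf("idx %u -> ema_params 0x%05x\n", i, ema_params(3, i));
	return 0;	/* prints 0x10003, 0x00103, 0x20203 */
}
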
@@ -3473,11 +3498,13 @@ ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cf
wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
- wmi_cfg->flags2 = le32_encode_bits(tg_cfg->dp_peer_meta_data_ver,
+ wmi_cfg->flags2 = le32_encode_bits(tg_cfg->peer_metadata_ver,
WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
-
wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
+ wmi_cfg->ema_max_vap_cnt = cpu_to_le32(tg_cfg->ema_max_vap_cnt);
+ wmi_cfg->ema_max_profile_period = cpu_to_le32(tg_cfg->ema_max_profile_period);
+ wmi_cfg->flags2 |= cpu_to_le32(WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET);
}
static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
@@ -3690,6 +3717,7 @@ int ath12k_wmi_cmd_init(struct ath12k_base *ab)
arg.res_cfg.is_reg_cc_ext_event_supported = true;
ab->hw_params->wmi_init(ab, &arg.res_cfg);
+ ab->wow.wmi_conf_rx_decap_mode = arg.res_cfg.rx_decap_mode;
arg.num_mem_chunks = wmi_ab->num_mem_chunks;
arg.hw_mode_id = wmi_ab->preferred_hw_mode;
@@ -3701,6 +3729,8 @@ int ath12k_wmi_cmd_init(struct ath12k_base *ab)
arg.num_band_to_mac = ab->num_radios;
ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);
+ ab->dp.peer_metadata_ver = arg.res_cfg.peer_metadata_ver;
+
return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg);
}
@@ -3808,7 +3838,7 @@ int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
sizeof(*cmd));
- cmd->pdev_id = cpu_to_le32(DP_SW2HW_MACID(arg->pdev_id));
+ cmd->pdev_id = cpu_to_le32(arg->pdev_id);
cmd->module_id = cpu_to_le32(arg->module_id);
cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
@@ -5693,7 +5723,7 @@ static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *sk
* event. Otherwise, it goes to fallback.
*/
if (ab->hw_params->single_pdev_only &&
- pdev_idx < ab->hw_params->num_rxmda_per_pdev)
+ pdev_idx < ab->hw_params->num_rxdma_per_pdev)
goto mem_free;
else
goto fallback;
@@ -6022,8 +6052,10 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
status->flag |= RX_FLAG_MMIC_ERROR;
- if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ) {
+ if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ &&
+ rx_ev.chan_freq <= ATH12K_MAX_6G_FREQ) {
status->band = NL80211_BAND_6GHZ;
+ status->freq = rx_ev.chan_freq;
} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
status->band = NL80211_BAND_2GHZ;
} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) {
@@ -6044,8 +6076,10 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
sband = &ar->mac.sbands[status->band];
- status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
- status->band);
+ if (status->band != NL80211_BAND_6GHZ)
+ status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
+ status->band);
+
status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR;
status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
@@ -6999,6 +7033,116 @@ exit:
kfree(tb);
}
+static int ath12k_wmi_wow_wakeup_host_parse(struct ath12k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ const struct wmi_wow_ev_pg_fault_param *pf_param;
+ const struct wmi_wow_ev_param *param;
+ struct wmi_wow_ev_arg *arg = data;
+ int pf_len;
+
+ switch (tag) {
+ case WMI_TAG_WOW_EVENT_INFO:
+ param = ptr;
+ arg->wake_reason = le32_to_cpu(param->wake_reason);
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "wow wakeup host reason %d %s\n",
+ arg->wake_reason, wow_reason(arg->wake_reason));
+ break;
+
+ case WMI_TAG_ARRAY_BYTE:
+ if (arg && arg->wake_reason == WOW_REASON_PAGE_FAULT) {
+ pf_param = ptr;
+ pf_len = le32_to_cpu(pf_param->len);
+ if (pf_len > len - sizeof(pf_len) ||
+ pf_len < 0) {
+ ath12k_warn(ab, "invalid wo reason page fault buffer len %d\n",
+ pf_len);
+ return -EINVAL;
+ }
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "wow_reason_page_fault len %d\n",
+ pf_len);
+ ath12k_dbg_dump(ab, ATH12K_DBG_WMI,
+ "wow_reason_page_fault packet present",
+ "wow_pg_fault ",
+ pf_param->data,
+ pf_len);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void ath12k_wmi_event_wow_wakeup_host(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_wow_ev_arg arg = { };
+ int ret;
+
+ ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_wow_wakeup_host_parse,
+ &arg);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse wmi wow wakeup host event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ complete(&ab->wow.wakeup_completed);
+}
+
+static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const struct wmi_gtk_offload_status_event *ev;
+ struct ath12k_vif *arvif;
+ __be64 replay_ctr_be;
+ u64 replay_ctr;
+ const void **tb;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch gtk offload status ev");
+ kfree(tb);
+ return;
+ }
+
+ rcu_read_lock();
+ arvif = ath12k_mac_get_arvif_by_vdev_id(ab, le32_to_cpu(ev->vdev_id));
+ if (!arvif) {
+ rcu_read_unlock();
+ ath12k_warn(ab, "failed to get arvif for vdev_id:%d\n",
+ le32_to_cpu(ev->vdev_id));
+ kfree(tb);
+ return;
+ }
+
+ replay_ctr = le64_to_cpu(ev->replay_ctr);
+ arvif->rekey_data.replay_ctr = replay_ctr;
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi gtk offload event refresh_cnt %d replay_ctr %llu\n",
+ le32_to_cpu(ev->refresh_cnt), replay_ctr);
+
+ /* supplicant expects big-endian replay counter */
+ replay_ctr_be = cpu_to_be64(replay_ctr);
+
+ ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid,
+ (void *)&replay_ctr_be, GFP_ATOMIC);
+
+ rcu_read_unlock();
+
+ kfree(tb);
+}
+
static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -7119,6 +7263,12 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_DIAG_EVENTID:
ath12k_wmi_diag_event(ab, skb);
break;
+ case WMI_WOW_WAKEUP_HOST_EVENTID:
+ ath12k_wmi_event_wow_wakeup_host(ab, skb);
+ break;
+ case WMI_GTK_OFFLOAD_STATUS_EVENTID:
+ ath12k_wmi_gtk_offload_status_event(ab, skb);
+ break;
/* TODO: Add remaining events */
default:
ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
@@ -7328,3 +7478,608 @@ void ath12k_wmi_detach(struct ath12k_base *ab)
ath12k_wmi_free_dbring_caps(ab);
}
+
+int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_arg *arg)
+{
+ struct wmi_hw_data_filter_cmd *cmd;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HW_DATA_FILTER_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ cmd->enable = cpu_to_le32(arg->enable ? 1 : 0);
+
+ /* Set all modes in case of disable */
+ if (arg->enable)
+ cmd->hw_filter_bitmap = cpu_to_le32(arg->hw_filter_bitmap);
+ else
+ cmd->hw_filter_bitmap = cpu_to_le32((u32)~0U);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi hw data filter enable %d filter_bitmap 0x%x\n",
+ arg->enable, arg->hw_filter_bitmap);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
+}
+
+int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar)
+{
+ struct wmi_wow_host_wakeup_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_host_wakeup_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
+ sizeof(*cmd));
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
+}
+
+int ath12k_wmi_wow_enable(struct ath12k *ar)
+{
+ struct wmi_wow_enable_cmd *cmd;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_enable_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ENABLE_CMD,
+ sizeof(*cmd));
+
+ cmd->enable = cpu_to_le32(1);
+ cmd->pause_iface_config = cpu_to_le32(WOW_IFACE_PAUSE_ENABLED);
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow enable\n");
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
+}
+
+int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable)
+{
+ struct wmi_wow_add_del_event_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_DEL_EVT_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->is_add = cpu_to_le32(enable);
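+	/* one wakeup event is enabled or disabled per call, so the bitmap
+	 * carries a single bit
+	 */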
+ cmd->event_bitmap = cpu_to_le32((1 << event));
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
+ wow_wakeup_event(event), enable, vdev_id);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
+}
+
+int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id,
+ const u8 *pattern, const u8 *mask,
+ int pattern_len, int pattern_offset)
+{
+ struct wmi_wow_add_pattern_cmd *cmd;
+ struct wmi_wow_bitmap_pattern_params *bitmap;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) + /* array struct */
+ sizeof(*bitmap) + /* bitmap */
+ sizeof(*tlv) + /* empty ipv4 sync */
+ sizeof(*tlv) + /* empty ipv6 sync */
+ sizeof(*tlv) + /* empty magic */
+ sizeof(*tlv) + /* empty info timeout */
+ sizeof(*tlv) + sizeof(u32); /* ratelimit interval */
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ /* cmd */
+ ptr = skb->data;
+ cmd = ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_PATTERN_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->pattern_id = cpu_to_le32(pattern_id);
+ cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
+
+ ptr += sizeof(*cmd);
+
+ /* bitmap */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*bitmap));
+
+ ptr += sizeof(*tlv);
+
+ bitmap = ptr;
+ bitmap->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_BITMAP_PATTERN_T,
+ sizeof(*bitmap));
+ memcpy(bitmap->patternbuf, pattern, pattern_len);
+ memcpy(bitmap->bitmaskbuf, mask, pattern_len);
+ bitmap->pattern_offset = cpu_to_le32(pattern_offset);
+ bitmap->pattern_len = cpu_to_le32(pattern_len);
+ bitmap->bitmask_len = cpu_to_le32(pattern_len);
+ bitmap->pattern_id = cpu_to_le32(pattern_id);
+
+ ptr += sizeof(*bitmap);
+
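+	/* the remaining TLVs are emitted as empty placeholders; only the
+	 * bitmap pattern above is populated
+	 */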
+ /* ipv4 sync */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* ipv6 sync */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* magic */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* pattern info timeout */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* ratelimit interval */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d pattern_len %d\n",
+ vdev_id, pattern_id, pattern_offset, pattern_len);
+
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow pattern: ",
+ bitmap->patternbuf, pattern_len);
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow bitmask: ",
+ bitmap->bitmaskbuf, pattern_len);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
+}
+
+int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id)
+{
+ struct wmi_wow_del_pattern_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_DEL_PATTERN_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->pattern_id = cpu_to_le32(pattern_id);
+ cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
+ vdev_id, pattern_id);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
+}
+
+static struct sk_buff *
+ath12k_wmi_op_gen_config_pno_start(struct ath12k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req_arg *pno)
+{
+ struct nlo_configured_params *nlo_list;
+ size_t len, nlo_list_len, channel_list_len;
+ struct wmi_wow_nlo_config_cmd *cmd;
+ __le32 *channel_list;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ u32 i;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) +
+ /* TLV place holder for array of structures
+ * nlo_configured_params(nlo_list)
+ */
+ sizeof(*tlv);
+ /* TLV place holder for array of uint32 channel_list */
+
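+	/* every configured network shares the same channel list, so only the
+	 * first network's channels are carried in the command
+	 */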
+ channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
+ len += channel_list_len;
+
+ nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
+ len += nlo_list_len;
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = skb->data;
+ cmd = ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(pno->vdev_id);
+ cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);
+
+ /* current FW does not support min-max range for dwell time */
+ cmd->active_dwell_time = cpu_to_le32(pno->active_max_time);
+ cmd->passive_dwell_time = cpu_to_le32(pno->passive_max_time);
+
+ if (pno->do_passive_scan)
+ cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);
+
+ cmd->fast_scan_period = cpu_to_le32(pno->fast_scan_period);
+ cmd->slow_scan_period = cpu_to_le32(pno->slow_scan_period);
+ cmd->fast_scan_max_cycles = cpu_to_le32(pno->fast_scan_max_cycles);
+ cmd->delay_start_time = cpu_to_le32(pno->delay_start_time);
+
+ if (pno->enable_pno_scan_randomization) {
+ cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
+ WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
+ ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
+ ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
+ }
+
+ ptr += sizeof(*cmd);
+
+ /* nlo_configured_params(nlo_list) */
+ cmd->no_of_ssids = cpu_to_le32(pno->uc_networks_count);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, nlo_list_len);
+
+ ptr += sizeof(*tlv);
+ nlo_list = ptr;
+ for (i = 0; i < pno->uc_networks_count; i++) {
+ tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
+ tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
+ sizeof(*nlo_list));
+
+ nlo_list[i].ssid.valid = cpu_to_le32(1);
+ nlo_list[i].ssid.ssid.ssid_len =
+ cpu_to_le32(pno->a_networks[i].ssid.ssid_len);
+ memcpy(nlo_list[i].ssid.ssid.ssid,
+ pno->a_networks[i].ssid.ssid,
+ le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));
+
+ if (pno->a_networks[i].rssi_threshold &&
+ pno->a_networks[i].rssi_threshold > -300) {
+ nlo_list[i].rssi_cond.valid = cpu_to_le32(1);
+ nlo_list[i].rssi_cond.rssi =
+ cpu_to_le32(pno->a_networks[i].rssi_threshold);
+ }
+
+ nlo_list[i].bcast_nw_type.valid = cpu_to_le32(1);
+ nlo_list[i].bcast_nw_type.bcast_nw_type =
+ cpu_to_le32(pno->a_networks[i].bcast_nw_type);
+ }
+
+ ptr += nlo_list_len;
+ cmd->num_of_channels = cpu_to_le32(pno->a_networks[0].channel_count);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, channel_list_len);
+ ptr += sizeof(*tlv);
+ channel_list = ptr;
+
+ for (i = 0; i < pno->a_networks[0].channel_count; i++)
+ channel_list[i] = cpu_to_le32(pno->a_networks[0].channels[i]);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
+ vdev_id);
+
+ return skb;
+}
+
+static struct sk_buff *ath12k_wmi_op_gen_config_pno_stop(struct ath12k *ar,
+ u32 vdev_id)
+{
+ struct wmi_wow_nlo_config_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, len);
+
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_STOP);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi tlv stop pno config vdev_id %d\n", vdev_id);
+ return skb;
+}
+
+int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req_arg *pno_scan)
+{
+ struct sk_buff *skb;
+
+ if (pno_scan->enable)
+ skb = ath12k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
+ else
+ skb = ath12k_wmi_op_gen_config_pno_stop(ar, vdev_id);
+
+ if (IS_ERR_OR_NULL(skb))
+ return -ENOMEM;
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
+}
+
+static void ath12k_wmi_fill_ns_offload(struct ath12k *ar,
+ struct wmi_arp_ns_offload_arg *offload,
+ void **ptr,
+ bool enable,
+ bool ext)
+{
+ struct wmi_ns_offload_params *ns;
+ struct wmi_tlv *tlv;
+ void *buf_ptr = *ptr;
+ u32 ns_cnt, ns_ext_tuples;
+ int i, max_offloads;
+
+ ns_cnt = offload->ipv6_count;
+
+ tlv = buf_ptr;
+
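+	/* the first pass fills up to WMI_MAX_NS_OFFLOADS tuples; when more
+	 * IPv6 addresses are configured the caller invokes this again with
+	 * ext set to append the remaining tuples
+	 */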
+ if (ext) {
+ ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ ns_ext_tuples * sizeof(*ns));
+ i = WMI_MAX_NS_OFFLOADS;
+ max_offloads = offload->ipv6_count;
+ } else {
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ WMI_MAX_NS_OFFLOADS * sizeof(*ns));
+ i = 0;
+ max_offloads = WMI_MAX_NS_OFFLOADS;
+ }
+
+ buf_ptr += sizeof(*tlv);
+
+ for (; i < max_offloads; i++) {
+ ns = buf_ptr;
+ ns->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NS_OFFLOAD_TUPLE,
+ sizeof(*ns));
+
+ if (enable) {
+ if (i < ns_cnt)
+ ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_VALID);
+
+ memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
+ memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);
+
+ if (offload->ipv6_type[i])
+ ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_IS_IPV6_ANYCAST);
+
+ memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
+
+ if (!is_zero_ether_addr(ns->target_mac.addr))
+ ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_MAC_VALID);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi index %d ns_solicited %pI6 target %pI6",
+ i, ns->solicitation_ipaddr,
+ ns->target_ipaddr[0]);
+ }
+
+ buf_ptr += sizeof(*ns);
+ }
+
+ *ptr = buf_ptr;
+}
+
+static void ath12k_wmi_fill_arp_offload(struct ath12k *ar,
+ struct wmi_arp_ns_offload_arg *offload,
+ void **ptr,
+ bool enable)
+{
+ struct wmi_arp_offload_params *arp;
+ struct wmi_tlv *tlv;
+ void *buf_ptr = *ptr;
+ int i;
+
+ /* fill arp tuple */
+ tlv = buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
+ buf_ptr += sizeof(*tlv);
+
+ for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
+ arp = buf_ptr;
+ arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARP_OFFLOAD_TUPLE,
+ sizeof(*arp));
+
+ if (enable && i < offload->ipv4_count) {
+ /* Copy the target ip addr and flags */
+ arp->flags = cpu_to_le32(WMI_ARPOL_FLAGS_VALID);
+ memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi arp offload address %pI4",
+ arp->target_ipaddr);
+ }
+
+ buf_ptr += sizeof(*arp);
+ }
+
+ *ptr = buf_ptr;
+}
+
+int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
+ struct ath12k_vif *arvif,
+ struct wmi_arp_ns_offload_arg *offload,
+ bool enable)
+{
+ struct wmi_set_arp_ns_offload_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *buf_ptr;
+ size_t len;
+ u8 ns_cnt, ns_ext_tuples = 0;
+
+ ns_cnt = offload->ipv6_count;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) +
+ WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_params) +
+ sizeof(*tlv) +
+ WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_params);
+
+ if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
+ ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
+ len += sizeof(*tlv) +
+ ns_ext_tuples * sizeof(struct wmi_ns_offload_params);
+ }
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ buf_ptr = skb->data;
+ cmd = buf_ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
+ sizeof(*cmd));
+ cmd->flags = cpu_to_le32(0);
+ cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
+ cmd->num_ns_ext_tuples = cpu_to_le32(ns_ext_tuples);
+
+ buf_ptr += sizeof(*cmd);
+
+ ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 0);
+ ath12k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);
+
+ if (ns_ext_tuples)
+ ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 1);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
+}
+
+int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
+ struct ath12k_vif *arvif, bool enable)
+{
+ struct ath12k_rekey_data *rekey_data = &arvif->rekey_data;
+ struct wmi_gtk_rekey_offload_cmd *cmd;
+ struct sk_buff *skb;
+ __le64 replay_ctr;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
+
+ if (enable) {
+ cmd->flags = cpu_to_le32(GTK_OFFLOAD_ENABLE_OPCODE);
+
+ /* the length in rekey_data and cmd is equal */
+ memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
+ memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
+
+ replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
+ memcpy(cmd->replay_ctr, &replay_ctr,
+ sizeof(replay_ctr));
+ } else {
+ cmd->flags = cpu_to_le32(GTK_OFFLOAD_DISABLE_OPCODE);
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
+ arvif->vdev_id, enable);
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+}
+
+int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
+ struct ath12k_vif *arvif)
+{
+ struct wmi_gtk_rekey_offload_cmd *cmd;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
+ cmd->flags = cpu_to_le32(GTK_OFFLOAD_REQUEST_STATUS_OPCODE);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
+ arvif->vdev_id);
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+}
+
+int ath12k_wmi_sta_keepalive(struct ath12k *ar,
+ const struct wmi_sta_keepalive_arg *arg)
+{
+ struct wmi_sta_keepalive_arp_resp_params *arp;
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_sta_keepalive_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd) + sizeof(*arp);
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALIVE_CMD, sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ cmd->enabled = cpu_to_le32(arg->enabled);
+ cmd->interval = cpu_to_le32(arg->interval);
+ cmd->method = cpu_to_le32(arg->method);
+
+ arp = (struct wmi_sta_keepalive_arp_resp_params *)(cmd + 1);
+ arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
+ sizeof(*arp));
+ if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
+ arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
+ arp->src_ip4_addr = cpu_to_le32(arg->src_ip4_addr);
+ arp->dest_ip4_addr = cpu_to_le32(arg->dest_ip4_addr);
+ ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi sta keepalive vdev %d enabled %d method %d interval %d\n",
+ arg->vdev_id, arg->enabled, arg->method, arg->interval);
+
+ return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
+}
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index 496866673aea..f1f52175a52b 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -24,6 +24,7 @@
struct ath12k_base;
struct ath12k;
+struct ath12k_vif;
/* There is no signed version of __le32, so for a temporary solution come
* up with our own version. The idea is from fs/ntfs/endian.h.
@@ -2154,6 +2155,7 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213,
WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219,
WMI_TLV_SERVICE_EXT2_MSG = 220,
+ WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT = 253,
WMI_MAX_EXT_SERVICE = 256,
@@ -2292,6 +2294,13 @@ struct ath12k_wmi_host_mem_chunk_arg {
u32 req_id;
};
+enum ath12k_peer_metadata_version {
+ ATH12K_PEER_METADATA_V0,
+ ATH12K_PEER_METADATA_V1,
+ ATH12K_PEER_METADATA_V1A,
+ ATH12K_PEER_METADATA_V1B
+};
+
struct ath12k_wmi_resource_config_arg {
u32 num_vdevs;
u32 num_peers;
@@ -2354,8 +2363,10 @@ struct ath12k_wmi_resource_config_arg {
u32 sched_params;
u32 twt_ap_pdev_count;
u32 twt_ap_sta_count;
+ enum ath12k_peer_metadata_version peer_metadata_ver;
+ u32 ema_max_vap_cnt;
+ u32 ema_max_profile_period;
bool is_reg_cc_ext_event_supported;
- u8 dp_peer_meta_data_ver;
};
struct ath12k_wmi_init_cmd_arg {
@@ -2410,6 +2421,7 @@ struct wmi_init_cmd {
#define WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT 4
#define WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION GENMASK(5, 4)
#define WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 BIT(5)
+#define WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET BIT(9)
struct ath12k_wmi_resource_config_params {
__le32 tlv_header;
@@ -2726,6 +2738,8 @@ struct ath12k_wmi_vdev_create_arg {
} chains[NUM_NL80211_BANDS];
u32 pdev_id;
u8 if_stats_id;
+ u32 mbssid_flags;
+ u32 mbssid_tx_vdev_id;
};
#define ATH12K_MAX_VDEV_STATS_ID 0x30
@@ -2757,14 +2771,23 @@ struct wmi_vdev_delete_cmd {
__le32 vdev_id;
} __packed;
+struct ath12k_wmi_vdev_up_params {
+ u32 vdev_id;
+ u32 aid;
+ const u8 *bssid;
+ const u8 *tx_bssid;
+ u32 nontx_profile_idx;
+ u32 nontx_profile_cnt;
+};
+
struct wmi_vdev_up_cmd {
__le32 tlv_header;
__le32 vdev_id;
__le32 vdev_assoc_id;
struct ath12k_wmi_mac_addr_params vdev_bssid;
- struct ath12k_wmi_mac_addr_params trans_bssid;
- __le32 profile_idx;
- __le32 profile_num;
+ struct ath12k_wmi_mac_addr_params tx_vdev_bssid;
+ __le32 nontx_profile_idx;
+ __le32 nontx_profile_cnt;
} __packed;
struct wmi_vdev_stop_cmd {
@@ -2792,6 +2815,10 @@ struct ath12k_wmi_ssid_params {
enum wmi_vdev_mbssid_flags {
WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP = BIT(0),
+ WMI_VDEV_MBSSID_FLAGS_TRANSMIT_AP = BIT(1),
+ WMI_VDEV_MBSSID_FLAGS_NON_TRANSMIT_AP = BIT(2),
+ WMI_VDEV_MBSSID_FLAGS_EMA_MODE = BIT(3),
+ WMI_VDEV_MBSSID_FLAGS_SCAN_MODE_VAP = BIT(4),
};
struct wmi_vdev_start_request_cmd {
@@ -3514,6 +3541,16 @@ struct ath12k_wmi_p2p_noa_info {
#define WMI_BEACON_TX_BUFFER_SIZE 512
+#define WMI_EMA_BEACON_CNT GENMASK(7, 0)
+#define WMI_EMA_BEACON_IDX GENMASK(15, 8)
+#define WMI_EMA_BEACON_FIRST GENMASK(23, 16)
+#define WMI_EMA_BEACON_LAST GENMASK(31, 24)
+
+struct ath12k_wmi_bcn_tmpl_ema_arg {
+ u8 bcn_cnt;
+ u8 bcn_index;
+};
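+/* Illustrative sketch (not part of this change): the ema_params word in
+ * wmi_bcn_tmpl_cmd would typically be packed from these fields using the
+ * masks above, e.g.
+ *
+ *	params = le32_encode_bits(ema_arg->bcn_cnt, WMI_EMA_BEACON_CNT) |
+ *		 le32_encode_bits(ema_arg->bcn_index, WMI_EMA_BEACON_IDX);
+ *
+ * with WMI_EMA_BEACON_FIRST/LAST marking the first and last template of an
+ * EMA beacon set.
+ */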
+
struct wmi_bcn_tmpl_cmd {
__le32 tlv_header;
__le32 vdev_id;
@@ -3524,6 +3561,11 @@ struct wmi_bcn_tmpl_cmd {
__le32 csa_event_bitmap;
__le32 mbssid_ie_offset;
__le32 esp_ie_offset;
+ __le32 csc_switch_count_offset;
+ __le32 csc_event_bitmap;
+ __le32 mu_edca_ie_offset;
+ __le32 feature_enable_bitmap;
+ __le32 ema_params;
} __packed;
struct wmi_p2p_go_set_beacon_ie_cmd {
@@ -4770,7 +4812,7 @@ struct wmi_probe_tmpl_cmd {
__le32 buf_len;
} __packed;
-#define MAX_RADIOS 3
+#define MAX_RADIOS 2
#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
#define WMI_SEND_TIMEOUT_HZ (3 * HZ)
@@ -4868,6 +4910,556 @@ struct wmi_twt_disable_event {
__le32 status;
} __packed;
+/* WOW structures */
+enum wmi_wow_wakeup_event {
+ WOW_BMISS_EVENT = 0,
+ WOW_BETTER_AP_EVENT,
+ WOW_DEAUTH_RECVD_EVENT,
+ WOW_MAGIC_PKT_RECVD_EVENT,
+ WOW_GTK_ERR_EVENT,
+ WOW_FOURWAY_HSHAKE_EVENT,
+ WOW_EAPOL_RECVD_EVENT,
+ WOW_NLO_DETECTED_EVENT,
+ WOW_DISASSOC_RECVD_EVENT,
+ WOW_PATTERN_MATCH_EVENT,
+ WOW_CSA_IE_EVENT,
+ WOW_PROBE_REQ_WPS_IE_EVENT,
+ WOW_AUTH_REQ_EVENT,
+ WOW_ASSOC_REQ_EVENT,
+ WOW_HTT_EVENT,
+ WOW_RA_MATCH_EVENT,
+ WOW_HOST_AUTO_SHUTDOWN_EVENT,
+ WOW_IOAC_MAGIC_EVENT,
+ WOW_IOAC_SHORT_EVENT,
+ WOW_IOAC_EXTEND_EVENT,
+ WOW_IOAC_TIMER_EVENT,
+ WOW_DFS_PHYERR_RADAR_EVENT,
+ WOW_BEACON_EVENT,
+ WOW_CLIENT_KICKOUT_EVENT,
+ WOW_EVENT_MAX,
+};
+
+enum wmi_wow_interface_cfg {
+ WOW_IFACE_PAUSE_ENABLED,
+ WOW_IFACE_PAUSE_DISABLED
+};
+
+#define C2S(x) case x: return #x
+
+static inline const char *wow_wakeup_event(enum wmi_wow_wakeup_event ev)
+{
+ switch (ev) {
+ C2S(WOW_BMISS_EVENT);
+ C2S(WOW_BETTER_AP_EVENT);
+ C2S(WOW_DEAUTH_RECVD_EVENT);
+ C2S(WOW_MAGIC_PKT_RECVD_EVENT);
+ C2S(WOW_GTK_ERR_EVENT);
+ C2S(WOW_FOURWAY_HSHAKE_EVENT);
+ C2S(WOW_EAPOL_RECVD_EVENT);
+ C2S(WOW_NLO_DETECTED_EVENT);
+ C2S(WOW_DISASSOC_RECVD_EVENT);
+ C2S(WOW_PATTERN_MATCH_EVENT);
+ C2S(WOW_CSA_IE_EVENT);
+ C2S(WOW_PROBE_REQ_WPS_IE_EVENT);
+ C2S(WOW_AUTH_REQ_EVENT);
+ C2S(WOW_ASSOC_REQ_EVENT);
+ C2S(WOW_HTT_EVENT);
+ C2S(WOW_RA_MATCH_EVENT);
+ C2S(WOW_HOST_AUTO_SHUTDOWN_EVENT);
+ C2S(WOW_IOAC_MAGIC_EVENT);
+ C2S(WOW_IOAC_SHORT_EVENT);
+ C2S(WOW_IOAC_EXTEND_EVENT);
+ C2S(WOW_IOAC_TIMER_EVENT);
+ C2S(WOW_DFS_PHYERR_RADAR_EVENT);
+ C2S(WOW_BEACON_EVENT);
+ C2S(WOW_CLIENT_KICKOUT_EVENT);
+ C2S(WOW_EVENT_MAX);
+ default:
+ return NULL;
+ }
+}
+
+enum wmi_wow_wake_reason {
+ WOW_REASON_UNSPECIFIED = -1,
+ WOW_REASON_NLOD = 0,
+ WOW_REASON_AP_ASSOC_LOST,
+ WOW_REASON_LOW_RSSI,
+ WOW_REASON_DEAUTH_RECVD,
+ WOW_REASON_DISASSOC_RECVD,
+ WOW_REASON_GTK_HS_ERR,
+ WOW_REASON_EAP_REQ,
+ WOW_REASON_FOURWAY_HS_RECV,
+ WOW_REASON_TIMER_INTR_RECV,
+ WOW_REASON_PATTERN_MATCH_FOUND,
+ WOW_REASON_RECV_MAGIC_PATTERN,
+ WOW_REASON_P2P_DISC,
+ WOW_REASON_WLAN_HB,
+ WOW_REASON_CSA_EVENT,
+ WOW_REASON_PROBE_REQ_WPS_IE_RECV,
+ WOW_REASON_AUTH_REQ_RECV,
+ WOW_REASON_ASSOC_REQ_RECV,
+ WOW_REASON_HTT_EVENT,
+ WOW_REASON_RA_MATCH,
+ WOW_REASON_HOST_AUTO_SHUTDOWN,
+ WOW_REASON_IOAC_MAGIC_EVENT,
+ WOW_REASON_IOAC_SHORT_EVENT,
+ WOW_REASON_IOAC_EXTEND_EVENT,
+ WOW_REASON_IOAC_TIMER_EVENT,
+ WOW_REASON_ROAM_HO,
+ WOW_REASON_DFS_PHYERR_RADADR_EVENT,
+ WOW_REASON_BEACON_RECV,
+ WOW_REASON_CLIENT_KICKOUT_EVENT,
+ WOW_REASON_PAGE_FAULT = 0x3a,
+ WOW_REASON_DEBUG_TEST = 0xFF,
+};
+
+static inline const char *wow_reason(enum wmi_wow_wake_reason reason)
+{
+ switch (reason) {
+ C2S(WOW_REASON_UNSPECIFIED);
+ C2S(WOW_REASON_NLOD);
+ C2S(WOW_REASON_AP_ASSOC_LOST);
+ C2S(WOW_REASON_LOW_RSSI);
+ C2S(WOW_REASON_DEAUTH_RECVD);
+ C2S(WOW_REASON_DISASSOC_RECVD);
+ C2S(WOW_REASON_GTK_HS_ERR);
+ C2S(WOW_REASON_EAP_REQ);
+ C2S(WOW_REASON_FOURWAY_HS_RECV);
+ C2S(WOW_REASON_TIMER_INTR_RECV);
+ C2S(WOW_REASON_PATTERN_MATCH_FOUND);
+ C2S(WOW_REASON_RECV_MAGIC_PATTERN);
+ C2S(WOW_REASON_P2P_DISC);
+ C2S(WOW_REASON_WLAN_HB);
+ C2S(WOW_REASON_CSA_EVENT);
+ C2S(WOW_REASON_PROBE_REQ_WPS_IE_RECV);
+ C2S(WOW_REASON_AUTH_REQ_RECV);
+ C2S(WOW_REASON_ASSOC_REQ_RECV);
+ C2S(WOW_REASON_HTT_EVENT);
+ C2S(WOW_REASON_RA_MATCH);
+ C2S(WOW_REASON_HOST_AUTO_SHUTDOWN);
+ C2S(WOW_REASON_IOAC_MAGIC_EVENT);
+ C2S(WOW_REASON_IOAC_SHORT_EVENT);
+ C2S(WOW_REASON_IOAC_EXTEND_EVENT);
+ C2S(WOW_REASON_IOAC_TIMER_EVENT);
+ C2S(WOW_REASON_ROAM_HO);
+ C2S(WOW_REASON_DFS_PHYERR_RADADR_EVENT);
+ C2S(WOW_REASON_BEACON_RECV);
+ C2S(WOW_REASON_CLIENT_KICKOUT_EVENT);
+ C2S(WOW_REASON_PAGE_FAULT);
+ C2S(WOW_REASON_DEBUG_TEST);
+ default:
+ return NULL;
+ }
+}
+
+#undef C2S
+
+#define WOW_DEFAULT_BITMAP_PATTERN_SIZE 148
+#define WOW_DEFAULT_BITMASK_SIZE 148
+
+#define WOW_MIN_PATTERN_SIZE 1
+#define WOW_MAX_PATTERN_SIZE 148
+#define WOW_MAX_PKT_OFFSET 128
+#define WOW_HDR_LEN (sizeof(struct ieee80211_hdr_3addr) + \
+ sizeof(struct rfc1042_hdr))
+#define WOW_MAX_REDUCE (WOW_HDR_LEN - sizeof(struct ethhdr) - \
+ offsetof(struct ieee80211_hdr_3addr, addr1))
+
+struct wmi_wow_bitmap_pattern_params {
+ __le32 tlv_header;
+ u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE];
+ u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE];
+ __le32 pattern_offset;
+ __le32 pattern_len;
+ __le32 bitmask_len;
+ __le32 pattern_id;
+} __packed;
+
+struct wmi_wow_add_pattern_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 pattern_id;
+ __le32 pattern_type;
+} __packed;
+
+struct wmi_wow_del_pattern_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 pattern_id;
+ __le32 pattern_type;
+} __packed;
+
+enum wmi_tlv_pattern_type {
+ WOW_PATTERN_MIN = 0,
+ WOW_BITMAP_PATTERN = WOW_PATTERN_MIN,
+ WOW_IPV4_SYNC_PATTERN,
+ WOW_IPV6_SYNC_PATTERN,
+ WOW_WILD_CARD_PATTERN,
+ WOW_TIMER_PATTERN,
+ WOW_MAGIC_PATTERN,
+ WOW_IPV6_RA_PATTERN,
+ WOW_IOAC_PKT_PATTERN,
+ WOW_IOAC_TMR_PATTERN,
+ WOW_PATTERN_MAX
+};
+
+struct wmi_wow_add_del_event_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 is_add;
+ __le32 event_bitmap;
+} __packed;
+
+struct wmi_wow_enable_cmd {
+ __le32 tlv_header;
+ __le32 enable;
+ __le32 pause_iface_config;
+ __le32 flags;
+} __packed;
+
+struct wmi_wow_host_wakeup_cmd {
+ __le32 tlv_header;
+ __le32 reserved;
+} __packed;
+
+struct wmi_wow_ev_param {
+ __le32 vdev_id;
+ __le32 flag;
+ __le32 wake_reason;
+ __le32 data_len;
+} __packed;
+
+struct wmi_wow_ev_pg_fault_param {
+ __le32 len;
+ u8 data[];
+} __packed;
+
+struct wmi_wow_ev_arg {
+ enum wmi_wow_wake_reason wake_reason;
+};
+
+#define WMI_PNO_MAX_SCHED_SCAN_PLANS 2
+#define WMI_PNO_MAX_SCHED_SCAN_PLAN_INT 7200
+#define WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS 100
+#define WMI_PNO_MAX_NETW_CHANNELS 26
+#define WMI_PNO_MAX_NETW_CHANNELS_EX 60
+#define WMI_PNO_MAX_SUPP_NETWORKS WLAN_SCAN_PARAMS_MAX_SSID
+#define WMI_PNO_MAX_IE_LENGTH WLAN_SCAN_PARAMS_MAX_IE_LEN
+
+/* size based on the dot11 declaration without extra IEs, as we will not carry those for PNO */
+#define WMI_PNO_MAX_PB_REQ_SIZE 450
+
+#define WMI_PNO_24GHZ_DEFAULT_CH 1
+#define WMI_PNO_5GHZ_DEFAULT_CH 36
+
+#define WMI_ACTIVE_MAX_CHANNEL_TIME 40
+#define WMI_PASSIVE_MAX_CHANNEL_TIME 110
+
+/* SSID broadcast type */
+enum wmi_ssid_bcast_type {
+ BCAST_UNKNOWN = 0,
+ BCAST_NORMAL = 1,
+ BCAST_HIDDEN = 2,
+};
+
+#define WMI_NLO_MAX_SSIDS 16
+#define WMI_NLO_MAX_CHAN 48
+
+#define WMI_NLO_CONFIG_STOP BIT(0)
+#define WMI_NLO_CONFIG_START BIT(1)
+#define WMI_NLO_CONFIG_RESET BIT(2)
+#define WMI_NLO_CONFIG_SLOW_SCAN BIT(4)
+#define WMI_NLO_CONFIG_FAST_SCAN BIT(5)
+#define WMI_NLO_CONFIG_SSID_HIDE_EN BIT(6)
+
+/* This bit is used to indicate if EPNO or supplicant PNO is enabled.
+ * Only one of them can be enabled at a given time
+ */
+#define WMI_NLO_CONFIG_ENLO BIT(7)
+#define WMI_NLO_CONFIG_SCAN_PASSIVE BIT(8)
+#define WMI_NLO_CONFIG_ENLO_RESET BIT(9)
+#define WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ BIT(10)
+#define WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ BIT(11)
+#define WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ BIT(12)
+#define WMI_NLO_CONFIG_ENABLE_CNLO_RSSI_CONFIG BIT(13)
+
+struct wmi_nlo_ssid_params {
+ __le32 valid;
+ struct ath12k_wmi_ssid_params ssid;
+} __packed;
+
+struct wmi_nlo_enc_params {
+ __le32 valid;
+ __le32 enc_type;
+} __packed;
+
+struct wmi_nlo_auth_params {
+ __le32 valid;
+ __le32 auth_type;
+} __packed;
+
+struct wmi_nlo_bcast_nw_params {
+ __le32 valid;
+ __le32 bcast_nw_type;
+} __packed;
+
+struct wmi_nlo_rssi_params {
+ __le32 valid;
+ __le32 rssi;
+} __packed;
+
+struct nlo_configured_params {
+ /* TLV tag and len;*/
+ __le32 tlv_header;
+ struct wmi_nlo_ssid_params ssid;
+ struct wmi_nlo_enc_params enc_type;
+ struct wmi_nlo_auth_params auth_type;
+ struct wmi_nlo_rssi_params rssi_cond;
+
+ /* indicates if the SSID is hidden or not */
+ struct wmi_nlo_bcast_nw_params bcast_nw_type;
+} __packed;
+
+struct wmi_network_type_arg {
+ struct cfg80211_ssid ssid;
+ u32 authentication;
+ u32 encryption;
+ u32 bcast_nw_type;
+ u8 channel_count;
+ u16 channels[WMI_PNO_MAX_NETW_CHANNELS_EX];
+ s32 rssi_threshold;
+};
+
+struct wmi_pno_scan_req_arg {
+ u8 enable;
+ u8 vdev_id;
+ u8 uc_networks_count;
+ struct wmi_network_type_arg a_networks[WMI_PNO_MAX_SUPP_NETWORKS];
+ u32 fast_scan_period;
+ u32 slow_scan_period;
+ u8 fast_scan_max_cycles;
+
+ bool do_passive_scan;
+
+ u32 delay_start_time;
+ u32 active_min_time;
+ u32 active_max_time;
+ u32 passive_min_time;
+ u32 passive_max_time;
+
+ /* mac address randomization attributes */
+ u32 enable_pno_scan_randomization;
+ u8 mac_addr[ETH_ALEN];
+ u8 mac_addr_mask[ETH_ALEN];
+};
+
+struct wmi_wow_nlo_config_cmd {
+ __le32 tlv_header;
+ __le32 flags;
+ __le32 vdev_id;
+ __le32 fast_scan_max_cycles;
+ __le32 active_dwell_time;
+ __le32 passive_dwell_time;
+ __le32 probe_bundle_size;
+
+ /* ART = IRT */
+ __le32 rest_time;
+
+ /* max value that can be reached after scan_backoff_multiplier */
+ __le32 max_rest_time;
+
+ __le32 scan_backoff_multiplier;
+ __le32 fast_scan_period;
+
+ /* specific to windows */
+ __le32 slow_scan_period;
+
+ __le32 no_of_ssids;
+
+ __le32 num_of_channels;
+
+ /* NLO scan start delay time in milliseconds */
+ __le32 delay_start_time;
+
+ /* MAC Address to use in Probe Req as SA */
+ struct ath12k_wmi_mac_addr_params mac_addr;
+
+ /* Mask on which MAC has to be randomized */
+ struct ath12k_wmi_mac_addr_params mac_mask;
+
+ /* IE bitmap to use in Probe Req */
+ __le32 ie_bitmap[8];
+
+	/* Number of vendor OUIs; the vendor_oui[] TLV follows */
+ __le32 num_vendor_oui;
+
+ /* Number of connected NLO band preferences */
+ __le32 num_cnlo_band_pref;
+
+ /* The TLVs will follow.
+ * nlo_configured_params nlo_list[];
+ * u32 channel_list[num_of_channels];
+ */
+} __packed;
+
+/* Definition of HW data filtering */
+enum hw_data_filter_type {
+ WMI_HW_DATA_FILTER_DROP_NON_ARP_BC = BIT(0),
+ WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC = BIT(1),
+};
+
+struct wmi_hw_data_filter_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 enable;
+ __le32 hw_filter_bitmap;
+} __packed;
+
+struct wmi_hw_data_filter_arg {
+ u32 vdev_id;
+ bool enable;
+ u32 hw_filter_bitmap;
+};
+
+#define WMI_IPV6_UC_TYPE 0
+#define WMI_IPV6_AC_TYPE 1
+
+#define WMI_IPV6_MAX_COUNT 16
+#define WMI_IPV4_MAX_COUNT 2
+
+struct wmi_arp_ns_offload_arg {
+ u8 ipv4_addr[WMI_IPV4_MAX_COUNT][4];
+ u32 ipv4_count;
+ u32 ipv6_count;
+ u8 ipv6_addr[WMI_IPV6_MAX_COUNT][16];
+ u8 self_ipv6_addr[WMI_IPV6_MAX_COUNT][16];
+ u8 ipv6_type[WMI_IPV6_MAX_COUNT];
+ bool ipv6_valid[WMI_IPV6_MAX_COUNT];
+ u8 mac_addr[ETH_ALEN];
+};
+
+#define WMI_MAX_NS_OFFLOADS 2
+#define WMI_MAX_ARP_OFFLOADS 2
+
+#define WMI_ARPOL_FLAGS_VALID BIT(0)
+#define WMI_ARPOL_FLAGS_MAC_VALID BIT(1)
+#define WMI_ARPOL_FLAGS_REMOTE_IP_VALID BIT(2)
+
+struct wmi_arp_offload_params {
+ __le32 tlv_header;
+ __le32 flags;
+ u8 target_ipaddr[4];
+ u8 remote_ipaddr[4];
+ struct ath12k_wmi_mac_addr_params target_mac;
+} __packed;
+
+#define WMI_NSOL_FLAGS_VALID BIT(0)
+#define WMI_NSOL_FLAGS_MAC_VALID BIT(1)
+#define WMI_NSOL_FLAGS_REMOTE_IP_VALID BIT(2)
+#define WMI_NSOL_FLAGS_IS_IPV6_ANYCAST BIT(3)
+
+#define WMI_NSOL_MAX_TARGET_IPS 2
+
+struct wmi_ns_offload_params {
+ __le32 tlv_header;
+ __le32 flags;
+ u8 target_ipaddr[WMI_NSOL_MAX_TARGET_IPS][16];
+ u8 solicitation_ipaddr[16];
+ u8 remote_ipaddr[16];
+ struct ath12k_wmi_mac_addr_params target_mac;
+} __packed;
+
+struct wmi_set_arp_ns_offload_cmd {
+ __le32 tlv_header;
+ __le32 flags;
+ __le32 vdev_id;
+ __le32 num_ns_ext_tuples;
+ /* The TLVs follow:
+ * wmi_ns_offload_params ns[WMI_MAX_NS_OFFLOADS];
+ * wmi_arp_offload_params arp[WMI_MAX_ARP_OFFLOADS];
+ * wmi_ns_offload_params ns_ext[num_ns_ext_tuples];
+ */
+} __packed;
+
+#define GTK_OFFLOAD_OPCODE_MASK 0xFF000000
+#define GTK_OFFLOAD_ENABLE_OPCODE 0x01000000
+#define GTK_OFFLOAD_DISABLE_OPCODE 0x02000000
+#define GTK_OFFLOAD_REQUEST_STATUS_OPCODE 0x04000000
+
+#define GTK_OFFLOAD_KEK_BYTES 16
+#define GTK_OFFLOAD_KCK_BYTES 16
+#define GTK_REPLAY_COUNTER_BYTES 8
+#define WMI_MAX_KEY_LEN 32
+#define IGTK_PN_SIZE 6
+
+struct wmi_gtk_offload_status_event {
+ __le32 vdev_id;
+ __le32 flags;
+ __le32 refresh_cnt;
+ __le64 replay_ctr;
+ u8 igtk_key_index;
+ u8 igtk_key_length;
+ u8 igtk_key_rsc[IGTK_PN_SIZE];
+ u8 igtk_key[WMI_MAX_KEY_LEN];
+ u8 gtk_key_index;
+ u8 gtk_key_length;
+ u8 gtk_key_rsc[GTK_REPLAY_COUNTER_BYTES];
+ u8 gtk_key[WMI_MAX_KEY_LEN];
+} __packed;
+
+struct wmi_gtk_rekey_offload_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 flags;
+ u8 kek[GTK_OFFLOAD_KEK_BYTES];
+ u8 kck[GTK_OFFLOAD_KCK_BYTES];
+ u8 replay_ctr[GTK_REPLAY_COUNTER_BYTES];
+} __packed;
+
+struct wmi_sta_keepalive_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 enabled;
+
+ /* WMI_STA_KEEPALIVE_METHOD_ */
+ __le32 method;
+
+ /* in seconds */
+ __le32 interval;
+
+ /* following this structure is the TLV for struct
+ * wmi_sta_keepalive_arp_resp_params
+ */
+} __packed;
+
+struct wmi_sta_keepalive_arp_resp_params {
+ __le32 tlv_header;
+ __le32 src_ip4_addr;
+ __le32 dest_ip4_addr;
+ struct ath12k_wmi_mac_addr_params dest_mac_addr;
+} __packed;
+
+struct wmi_sta_keepalive_arg {
+ u32 vdev_id;
+ u32 enabled;
+ u32 method;
+ u32 interval;
+ u32 src_ip4_addr;
+ u32 dest_ip4_addr;
+ const u8 dest_mac_addr[ETH_ALEN];
+};
+
+enum wmi_sta_keepalive_method {
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME = 1,
+ WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE = 2,
+ WMI_STA_KEEPALIVE_METHOD_ETHERNET_LOOPBACK = 3,
+ WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST = 4,
+ WMI_STA_KEEPALIVE_METHOD_MGMT_VENDOR_ACTION = 5,
+};
+
+#define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30
+#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
+
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config);
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -4881,10 +5473,10 @@ int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
const u8 *p2p_ie);
int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
- struct sk_buff *bcn);
+ struct sk_buff *bcn,
+ struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args);
int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id);
-int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid,
- const u8 *bssid);
+int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params);
int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id);
int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
bool restart);
@@ -5020,4 +5612,28 @@ ath12k_wmi_mac_phy_get_hw_link_id(const struct ath12k_wmi_mac_phy_caps_params *p
WMI_CAPS_PARAMS_HW_LINK_ID);
}
+int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar);
+int ath12k_wmi_wow_enable(struct ath12k *ar);
+int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id);
+int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id,
+ const u8 *pattern, const u8 *mask,
+ int pattern_len, int pattern_offset);
+int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable);
+int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req_arg *pno_scan);
+int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar,
+ struct wmi_hw_data_filter_arg *arg);
+int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
+ struct ath12k_vif *arvif,
+ struct wmi_arp_ns_offload_arg *offload,
+ bool enable);
+int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
+ struct ath12k_vif *arvif, bool enable);
+int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
+ struct ath12k_vif *arvif);
+int ath12k_wmi_sta_keepalive(struct ath12k *ar,
+ const struct wmi_sta_keepalive_arg *arg);
+
#endif
diff --git a/drivers/net/wireless/ath/ath12k/wow.c b/drivers/net/wireless/ath/ath12k/wow.c
new file mode 100644
index 000000000000..9b8684abbe40
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wow.c
@@ -0,0 +1,1026 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/inetdevice.h>
+#include <net/addrconf.h>
+#include <net/if_inet6.h>
+#include <net/ipv6.h>
+
+#include "mac.h"
+
+#include <net/mac80211.h>
+#include "core.h"
+#include "hif.h"
+#include "debug.h"
+#include "wmi.h"
+#include "wow.h"
+
+static const struct wiphy_wowlan_support ath12k_wowlan_support = {
+ .flags = WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE,
+ .pattern_min_len = WOW_MIN_PATTERN_SIZE,
+ .pattern_max_len = WOW_MAX_PATTERN_SIZE,
+ .max_pkt_offset = WOW_MAX_PKT_OFFSET,
+};
+
+static inline bool ath12k_wow_is_p2p_vdev(struct ath12k_vif *arvif)
+{
+ return (arvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_DEVICE ||
+ arvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_CLIENT ||
+ arvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_GO);
+}
+
+int ath12k_wow_enable(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
+ int i, ret;
+
+ clear_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);
+
+ /* The firmware might be busy and it can not enter WoW immediately.
+ * In that case firmware notifies host with
+ * ATH12K_HTC_MSG_NACK_SUSPEND message, asking host to try again
+ * later. Per the firmware team there could be up to 10 loops.
+ */
+ for (i = 0; i < ATH12K_WOW_RETRY_NUM; i++) {
+ reinit_completion(&ab->htc_suspend);
+
+ ret = ath12k_wmi_wow_enable(ar);
+ if (ret) {
+ ath12k_warn(ab, "failed to issue wow enable: %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
+ if (ret == 0) {
+ ath12k_warn(ab,
+ "timed out while waiting for htc suspend completion\n");
+ return -ETIMEDOUT;
+ }
+
+ if (test_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags))
+ /* success, suspend complete received */
+ return 0;
+
+ ath12k_warn(ab, "htc suspend not complete, retrying (try %d)\n",
+ i);
+ msleep(ATH12K_WOW_RETRY_WAIT_MS);
+ }
+
+ ath12k_warn(ab, "htc suspend not complete, failing after %d tries\n", i);
+
+ return -ETIMEDOUT;
+}
+
+int ath12k_wow_wakeup(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
+ int ret;
+
+ reinit_completion(&ab->wow.wakeup_completed);
+
+ ret = ath12k_wmi_wow_host_wakeup_ind(ar);
+ if (ret) {
+ ath12k_warn(ab, "failed to send wow wakeup indication: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
+ if (ret == 0) {
+ ath12k_warn(ab, "timed out while waiting for wow wakeup completion\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_vif_cleanup(struct ath12k_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+ int i, ret;
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ ret = ath12k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < ar->wow.max_num_patterns; i++) {
+ ret = ath12k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n",
+ i, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_cleanup(struct ath12k *ar)
+{
+ struct ath12k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath12k_wow_vif_cleanup(arvif);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Convert a 802.3 format to a 802.11 format.
+ * +------------+-----------+--------+----------------+
+ * 802.3: |dest mac(6B)|src mac(6B)|type(2B)| body... |
+ * +------------+-----------+--------+----------------+
+ * |__ |_______ |____________ |________
+ * | | | |
+ * +--+------------+----+-----------+---------------+-----------+
+ * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... |
+ * +--+------------+----+-----------+---------------+-----------+
+ */
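+/* Worked example for the mapping below, assuming the 24-byte 3-address
+ * header and RFC 1042 encapsulation used in this conversion: a source-MAC
+ * match at 802.3 offset 6 maps to addr3 at 802.11 offset 16, and an
+ * EtherType match at 802.3 offset 12 maps to 802.11 offset 24 + 6 = 30
+ * (eth_type inside the LLC/SNAP header).
+ */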
+static void
+ath12k_wow_convert_8023_to_80211(struct ath12k *ar,
+ const struct cfg80211_pkt_pattern *eth_pattern,
+ struct ath12k_pkt_pattern *i80211_pattern)
+{
+ size_t r1042_eth_ofs = offsetof(struct rfc1042_hdr, eth_type);
+ size_t a1_ofs = offsetof(struct ieee80211_hdr_3addr, addr1);
+ size_t a3_ofs = offsetof(struct ieee80211_hdr_3addr, addr3);
+ size_t i80211_hdr_len = sizeof(struct ieee80211_hdr_3addr);
+ size_t prot_ofs = offsetof(struct ethhdr, h_proto);
+ size_t src_ofs = offsetof(struct ethhdr, h_source);
+ u8 eth_bytemask[WOW_MAX_PATTERN_SIZE] = {};
+ const u8 *eth_pat = eth_pattern->pattern;
+ size_t eth_pat_len = eth_pattern->pattern_len;
+ size_t eth_pkt_ofs = eth_pattern->pkt_offset;
+ u8 *bytemask = i80211_pattern->bytemask;
+ u8 *pat = i80211_pattern->pattern;
+ size_t pat_len = 0;
+ size_t pkt_ofs = 0;
+ size_t delta;
+ int i;
+
+ /* convert bitmask to bytemask */
+ for (i = 0; i < eth_pat_len; i++)
+ if (eth_pattern->mask[i / 8] & BIT(i % 8))
+ eth_bytemask[i] = 0xff;
+
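+	/* relocate the pattern depending on where it starts in the 802.3
+	 * frame: inside the destination MAC (maps into addr1), inside the
+	 * source MAC (maps into addr3), or at/after the EtherType (maps past
+	 * the 802.11 header into the RFC 1042 header)
+	 */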
+ if (eth_pkt_ofs < ETH_ALEN) {
+ pkt_ofs = eth_pkt_ofs + a1_ofs;
+
+ if (size_add(eth_pkt_ofs, eth_pat_len) < ETH_ALEN) {
+ memcpy(pat, eth_pat, eth_pat_len);
+ memcpy(bytemask, eth_bytemask, eth_pat_len);
+
+ pat_len = eth_pat_len;
+ } else if (eth_pkt_ofs + eth_pat_len < prot_ofs) {
+ memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs);
+ memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs);
+
+ delta = eth_pkt_ofs + eth_pat_len - src_ofs;
+ memcpy(pat + a3_ofs - pkt_ofs,
+ eth_pat + ETH_ALEN - eth_pkt_ofs,
+ delta);
+ memcpy(bytemask + a3_ofs - pkt_ofs,
+ eth_bytemask + ETH_ALEN - eth_pkt_ofs,
+ delta);
+
+ pat_len = a3_ofs - pkt_ofs + delta;
+ } else {
+ memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs);
+ memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs);
+
+ memcpy(pat + a3_ofs - pkt_ofs,
+ eth_pat + ETH_ALEN - eth_pkt_ofs,
+ ETH_ALEN);
+ memcpy(bytemask + a3_ofs - pkt_ofs,
+ eth_bytemask + ETH_ALEN - eth_pkt_ofs,
+ ETH_ALEN);
+
+ delta = eth_pkt_ofs + eth_pat_len - prot_ofs;
+ memcpy(pat + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
+ eth_pat + prot_ofs - eth_pkt_ofs,
+ delta);
+ memcpy(bytemask + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
+ eth_bytemask + prot_ofs - eth_pkt_ofs,
+ delta);
+
+ pat_len = i80211_hdr_len + r1042_eth_ofs - pkt_ofs + delta;
+ }
+ } else if (eth_pkt_ofs < prot_ofs) {
+ pkt_ofs = eth_pkt_ofs - ETH_ALEN + a3_ofs;
+
+ if (size_add(eth_pkt_ofs, eth_pat_len) < prot_ofs) {
+ memcpy(pat, eth_pat, eth_pat_len);
+ memcpy(bytemask, eth_bytemask, eth_pat_len);
+
+ pat_len = eth_pat_len;
+ } else {
+ memcpy(pat, eth_pat, prot_ofs - eth_pkt_ofs);
+ memcpy(bytemask, eth_bytemask, prot_ofs - eth_pkt_ofs);
+
+ delta = eth_pkt_ofs + eth_pat_len - prot_ofs;
+ memcpy(pat + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
+ eth_pat + prot_ofs - eth_pkt_ofs,
+ delta);
+ memcpy(bytemask + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
+ eth_bytemask + prot_ofs - eth_pkt_ofs,
+ delta);
+
+ pat_len = i80211_hdr_len + r1042_eth_ofs - pkt_ofs + delta;
+ }
+ } else {
+ pkt_ofs = eth_pkt_ofs - prot_ofs + i80211_hdr_len + r1042_eth_ofs;
+
+ memcpy(pat, eth_pat, eth_pat_len);
+ memcpy(bytemask, eth_bytemask, eth_pat_len);
+
+ pat_len = eth_pat_len;
+ }
+
+ i80211_pattern->pattern_len = pat_len;
+ i80211_pattern->pkt_offset = pkt_ofs;
+}
+
+static int
+ath12k_wow_pno_check_and_convert(struct ath12k *ar, u32 vdev_id,
+ const struct cfg80211_sched_scan_request *nd_config,
+ struct wmi_pno_scan_req_arg *pno)
+{
+ int i, j;
+ u8 ssid_len;
+
+ pno->enable = 1;
+ pno->vdev_id = vdev_id;
+ pno->uc_networks_count = nd_config->n_match_sets;
+
+ if (!pno->uc_networks_count ||
+ pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
+ return -EINVAL;
+
+ if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
+ return -EINVAL;
+
+ /* Filling per profile params */
+ for (i = 0; i < pno->uc_networks_count; i++) {
+ ssid_len = nd_config->match_sets[i].ssid.ssid_len;
+
+ if (ssid_len == 0 || ssid_len > 32)
+ return -EINVAL;
+
+ pno->a_networks[i].ssid.ssid_len = ssid_len;
+
+ memcpy(pno->a_networks[i].ssid.ssid,
+ nd_config->match_sets[i].ssid.ssid,
+ ssid_len);
+ pno->a_networks[i].authentication = 0;
+ pno->a_networks[i].encryption = 0;
+ pno->a_networks[i].bcast_nw_type = 0;
+
+ /* Copying list of valid channel into request */
+ pno->a_networks[i].channel_count = nd_config->n_channels;
+ pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;
+
+ for (j = 0; j < nd_config->n_channels; j++) {
+ pno->a_networks[i].channels[j] =
+ nd_config->channels[j]->center_freq;
+ }
+ }
+
+ /* set scan to passive if no SSIDs are specified in the request */
+ if (nd_config->n_ssids == 0)
+ pno->do_passive_scan = true;
+ else
+ pno->do_passive_scan = false;
+
+ for (i = 0; i < nd_config->n_ssids; i++) {
+ for (j = 0; j < pno->uc_networks_count; j++) {
+ if (pno->a_networks[j].ssid.ssid_len ==
+ nd_config->ssids[i].ssid_len &&
+ !memcmp(pno->a_networks[j].ssid.ssid,
+ nd_config->ssids[i].ssid,
+ pno->a_networks[j].ssid.ssid_len)) {
+ pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
+ break;
+ }
+ }
+ }
+
+ if (nd_config->n_scan_plans == 2) {
+ pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
+ pno->slow_scan_period =
+ nd_config->scan_plans[1].interval * MSEC_PER_SEC;
+ } else if (nd_config->n_scan_plans == 1) {
+ pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ pno->fast_scan_max_cycles = 1;
+ pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ } else {
+ ath12k_warn(ar->ab, "Invalid number of PNO scan plans: %d",
+ nd_config->n_scan_plans);
+ }
+
+ if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ /* enable mac randomization */
+ pno->enable_pno_scan_randomization = 1;
+ memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
+ memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
+ }
+
+ pno->delay_start_time = nd_config->delay;
+
+ /* Current FW does not support min-max range for dwell time */
+ pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
+ pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;
+
+ return 0;
+}
+
+static int ath12k_wow_vif_set_wakeups(struct ath12k_vif *arvif,
+ struct cfg80211_wowlan *wowlan)
+{
+ const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
+ struct ath12k *ar = arvif->ar;
+ unsigned long wow_mask = 0;
+ int pattern_id = 0;
+ int ret, i, j;
+
+ /* Setup requested WOW features */
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_IBSS:
+ __set_bit(WOW_BEACON_EVENT, &wow_mask);
+ fallthrough;
+ case WMI_VDEV_TYPE_AP:
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
+ __set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_HTT_EVENT, &wow_mask);
+ __set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
+ break;
+ case WMI_VDEV_TYPE_STA:
+ if (wowlan->disconnect) {
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_BMISS_EVENT, &wow_mask);
+ __set_bit(WOW_CSA_IE_EVENT, &wow_mask);
+ }
+
+ if (wowlan->magic_pkt)
+ __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
+
+ if (wowlan->nd_config) {
+ struct wmi_pno_scan_req_arg *pno;
+ int ret;
+
+ pno = kzalloc(sizeof(*pno), GFP_KERNEL);
+ if (!pno)
+ return -ENOMEM;
+
+ ar->nlo_enabled = true;
+
+ ret = ath12k_wow_pno_check_and_convert(ar, arvif->vdev_id,
+ wowlan->nd_config, pno);
+ if (!ret) {
+ ath12k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
+ __set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
+ }
+
+ kfree(pno);
+ }
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ const struct cfg80211_pkt_pattern *eth_pattern = &patterns[i];
+ struct ath12k_pkt_pattern new_pattern = {};
+
+ if (WARN_ON(eth_pattern->pattern_len > WOW_MAX_PATTERN_SIZE))
+ return -EINVAL;
+
+ if (ar->ab->wow.wmi_conf_rx_decap_mode ==
+ ATH12K_HW_TXRX_NATIVE_WIFI) {
+ ath12k_wow_convert_8023_to_80211(ar, eth_pattern,
+ &new_pattern);
+
+ if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
+ return -EINVAL;
+ } else {
+ memcpy(new_pattern.pattern, eth_pattern->pattern,
+ eth_pattern->pattern_len);
+
+ /* convert bitmask to bytemask */
+ for (j = 0; j < eth_pattern->pattern_len; j++)
+ if (eth_pattern->mask[j / 8] & BIT(j % 8))
+ new_pattern.bytemask[j] = 0xff;
+
+ new_pattern.pattern_len = eth_pattern->pattern_len;
+ new_pattern.pkt_offset = eth_pattern->pkt_offset;
+ }
+
+ ret = ath12k_wmi_wow_add_pattern(ar, arvif->vdev_id,
+ pattern_id,
+ new_pattern.pattern,
+ new_pattern.bytemask,
+ new_pattern.pattern_len,
+ new_pattern.pkt_offset);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n",
+ pattern_id,
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ pattern_id++;
+ __set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
+ }
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ if (!test_bit(i, &wow_mask))
+ continue;
+ ret = ath12k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_set_wakeups(struct ath12k *ar,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath12k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (ath12k_wow_is_p2p_vdev(arvif))
+ continue;
+ ret = ath12k_wow_vif_set_wakeups(arvif, wowlan);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_vdev_clean_nlo(struct ath12k *ar, u32 vdev_id)
+{
+ struct wmi_pno_scan_req_arg *pno;
+ int ret;
+
+ if (!ar->nlo_enabled)
+ return 0;
+
+ pno = kzalloc(sizeof(*pno), GFP_KERNEL);
+ if (!pno)
+ return -ENOMEM;
+
+ pno->enable = 0;
+ ret = ath12k_wmi_wow_config_pno(ar, vdev_id, pno);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to disable PNO: %d", ret);
+ goto out;
+ }
+
+ ar->nlo_enabled = false;
+
+out:
+ kfree(pno);
+ return ret;
+}
+
+static int ath12k_wow_vif_clean_nlo(struct ath12k_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_STA:
+ return ath12k_wow_vdev_clean_nlo(ar, arvif->vdev_id);
+ default:
+ return 0;
+ }
+}
+
+static int ath12k_wow_nlo_cleanup(struct ath12k *ar)
+{
+ struct ath12k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (ath12k_wow_is_p2p_vdev(arvif))
+ continue;
+
+ ret = ath12k_wow_vif_clean_nlo(arvif);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_set_hw_filter(struct ath12k *ar)
+{
+ struct wmi_hw_data_filter_arg arg;
+ struct ath12k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ continue;
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.enable = true;
+ arg.hw_filter_bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC;
+ ret = ath12k_wmi_hw_data_filter_cmd(ar, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_clear_hw_filter(struct ath12k *ar)
+{
+ struct wmi_hw_data_filter_arg arg;
+ struct ath12k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ continue;
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.enable = false;
+ arg.hw_filter_bitmap = 0;
+ ret = ath12k_wmi_hw_data_filter_cmd(ar, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ath12k_wow_generate_ns_mc_addr(struct ath12k_base *ab,
+ struct wmi_arp_ns_offload_arg *offload)
+{
+ int i;
+
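+	/* derive the IPv6 solicited-node multicast address (ff02::1:ffXX:XXXX)
+	 * from the low 24 bits of each configured address; neighbour
+	 * solicitations for our addresses are sent to these groups
+	 */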
+ for (i = 0; i < offload->ipv6_count; i++) {
+ offload->self_ipv6_addr[i][0] = 0xff;
+ offload->self_ipv6_addr[i][1] = 0x02;
+ offload->self_ipv6_addr[i][11] = 0x01;
+ offload->self_ipv6_addr[i][12] = 0xff;
+ offload->self_ipv6_addr[i][13] =
+ offload->ipv6_addr[i][13];
+ offload->self_ipv6_addr[i][14] =
+ offload->ipv6_addr[i][14];
+ offload->self_ipv6_addr[i][15] =
+ offload->ipv6_addr[i][15];
+ ath12k_dbg(ab, ATH12K_DBG_WOW, "NS solicited addr %pI6\n",
+ offload->self_ipv6_addr[i]);
+ }
+}
+
+static void ath12k_wow_prepare_ns_offload(struct ath12k_vif *arvif,
+ struct wmi_arp_ns_offload_arg *offload)
+{
+ struct net_device *ndev = ieee80211_vif_to_wdev(arvif->vif)->netdev;
+ struct ath12k_base *ab = arvif->ar->ab;
+ struct inet6_ifaddr *ifa6;
+ struct ifacaddr6 *ifaca6;
+ struct inet6_dev *idev;
+ u32 count = 0, scope;
+
+ if (!ndev)
+ return;
+
+ idev = in6_dev_get(ndev);
+ if (!idev)
+ return;
+
+ ath12k_dbg(ab, ATH12K_DBG_WOW, "wow prepare ns offload\n");
+
+ read_lock_bh(&idev->lock);
+
+ /* get unicast address */
+ list_for_each_entry(ifa6, &idev->addr_list, if_list) {
+ if (count >= WMI_IPV6_MAX_COUNT)
+ goto unlock;
+
+ if (ifa6->flags & IFA_F_DADFAILED)
+ continue;
+
+ scope = ipv6_addr_src_scope(&ifa6->addr);
+ if (scope != IPV6_ADDR_SCOPE_LINKLOCAL &&
+ scope != IPV6_ADDR_SCOPE_GLOBAL) {
+ ath12k_dbg(ab, ATH12K_DBG_WOW,
+ "Unsupported ipv6 scope: %d\n", scope);
+ continue;
+ }
+
+ memcpy(offload->ipv6_addr[count], &ifa6->addr.s6_addr,
+ sizeof(ifa6->addr.s6_addr));
+ offload->ipv6_type[count] = WMI_IPV6_UC_TYPE;
+ ath12k_dbg(ab, ATH12K_DBG_WOW, "mac count %d ipv6 uc %pI6 scope %d\n",
+ count, offload->ipv6_addr[count],
+ scope);
+ count++;
+ }
+
+ /* get anycast address */
+ rcu_read_lock();
+
+ for (ifaca6 = rcu_dereference(idev->ac_list); ifaca6;
+ ifaca6 = rcu_dereference(ifaca6->aca_next)) {
+ if (count >= WMI_IPV6_MAX_COUNT) {
+ rcu_read_unlock();
+ goto unlock;
+ }
+
+ scope = ipv6_addr_src_scope(&ifaca6->aca_addr);
+ if (scope != IPV6_ADDR_SCOPE_LINKLOCAL &&
+ scope != IPV6_ADDR_SCOPE_GLOBAL) {
+ ath12k_dbg(ab, ATH12K_DBG_WOW,
+				   "Unsupported ipv6 scope: %d\n", scope);
+ continue;
+ }
+
+ memcpy(offload->ipv6_addr[count], &ifaca6->aca_addr,
+ sizeof(ifaca6->aca_addr));
+ offload->ipv6_type[count] = WMI_IPV6_AC_TYPE;
+ ath12k_dbg(ab, ATH12K_DBG_WOW, "mac count %d ipv6 ac %pI6 scope %d\n",
+ count, offload->ipv6_addr[count],
+ scope);
+ count++;
+ }
+
+ rcu_read_unlock();
+
+unlock:
+ read_unlock_bh(&idev->lock);
+
+ in6_dev_put(idev);
+
+ offload->ipv6_count = count;
+ ath12k_wow_generate_ns_mc_addr(ab, offload);
+}
+
+static void ath12k_wow_prepare_arp_offload(struct ath12k_vif *arvif,
+ struct wmi_arp_ns_offload_arg *offload)
+{
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_vif_cfg vif_cfg = vif->cfg;
+ struct ath12k_base *ab = arvif->ar->ab;
+ u32 ipv4_cnt;
+
+ ath12k_dbg(ab, ATH12K_DBG_WOW, "wow prepare arp offload\n");
+
+ ipv4_cnt = min(vif_cfg.arp_addr_cnt, WMI_IPV4_MAX_COUNT);
+ memcpy(offload->ipv4_addr, vif_cfg.arp_addr_list, ipv4_cnt * sizeof(u32));
+ offload->ipv4_count = ipv4_cnt;
+
+ ath12k_dbg(ab, ATH12K_DBG_WOW,
+ "wow arp_addr_cnt %d vif->addr %pM, offload_addr %pI4\n",
+ vif_cfg.arp_addr_cnt, vif->addr, offload->ipv4_addr);
+}
+
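+/* Enable or disable ARP/NS offload on every STA vdev. */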
+static int ath12k_wow_arp_ns_offload(struct ath12k *ar, bool enable)
+{
+ struct wmi_arp_ns_offload_arg *offload;
+ struct ath12k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ offload = kmalloc(sizeof(*offload), GFP_KERNEL);
+ if (!offload)
+ return -ENOMEM;
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ continue;
+
+ memset(offload, 0, sizeof(*offload));
+
+ memcpy(offload->mac_addr, arvif->vif->addr, ETH_ALEN);
+ ath12k_wow_prepare_ns_offload(arvif, offload);
+ ath12k_wow_prepare_arp_offload(arvif, offload);
+
+ ret = ath12k_wmi_arp_ns_offload(ar, arvif, offload, enable);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n",
+ arvif->vdev_id, enable, ret);
+ kfree(offload);
+ return ret;
+ }
+ }
+
+ kfree(offload);
+
+ return 0;
+}
+
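+/* Hand GTK rekey material to the firmware on suspend; on resume, fetch the
+ * updated replay counters before turning the offload off.
+ */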
+static int ath12k_gtk_rekey_offload(struct ath12k *ar, bool enable)
+{
+ struct ath12k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA ||
+ !arvif->is_up ||
+ !arvif->rekey_data.enable_offload)
+ continue;
+
+ /* get rekey info before disabling rekey offload */
+ if (!enable) {
+ ret = ath12k_wmi_gtk_rekey_getinfo(ar, arvif);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ ret = ath12k_wmi_gtk_rekey_offload(ar, arvif, enable);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to offload gtk reky vdev %i: enable %d, ret %d\n",
+ arvif->vdev_id, enable, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
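+/* Toggle the ARP/NS and GTK rekey offloads together. */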
+static int ath12k_wow_protocol_offload(struct ath12k *ar, bool enable)
+{
+ int ret;
+
+ ret = ath12k_wow_arp_ns_offload(ar, enable);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to offload ARP and NS %d %d\n",
+ enable, ret);
+ return ret;
+ }
+
+ ret = ath12k_gtk_rekey_offload(ar, enable);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to offload gtk rekey %d %d\n",
+ enable, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
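+/* Program the firmware STA keepalive (NULL-frame method) on every vdev:
+ * the default interval on suspend, the disable value on resume.
+ */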
+static int ath12k_wow_set_keepalive(struct ath12k *ar,
+ enum wmi_sta_keepalive_method method,
+ u32 interval)
+{
+ struct ath12k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath12k_mac_vif_set_keepalive(arvif, method, interval);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
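+/* mac80211 suspend handler: program wakeup events and protocol offloads,
+ * drain TX, enable WoW in firmware and finally suspend the bus.
+ */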
+int ath12k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath12k_wow_cleanup(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clear wow wakeup events: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = ath12k_wow_set_wakeups(ar, wowlan);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set wow wakeup events: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath12k_wow_protocol_offload(ar, true);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set wow protocol offload events: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath12k_mac_wait_tx_complete(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = ath12k_wow_set_hw_filter(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set hw filter: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath12k_wow_set_keepalive(ar,
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
+ WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = ath12k_wow_enable(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to start wow: %d\n", ret);
+ goto cleanup;
+ }
+
+ ath12k_hif_irq_disable(ar->ab);
+ ath12k_hif_ce_irq_disable(ar->ab);
+
+ ret = ath12k_hif_suspend(ar->ab);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to suspend hif: %d\n", ret);
+ goto wakeup;
+ }
+
+ goto exit;
+
+wakeup:
+ ath12k_wow_wakeup(ar);
+
+cleanup:
+ ath12k_wow_cleanup(ar);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret ? 1 : 0;
+}
+
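+/* Propagate the wakeup enable/disable request to the platform device. */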
+void ath12k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
+
+ mutex_lock(&ar->conf_mutex);
+ device_set_wakeup_enable(ar->ab->dev, enabled);
+ mutex_unlock(&ar->conf_mutex);
+}
+
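+/* mac80211 resume handler: wake the bus and firmware, then tear down the
+ * filters and offloads that were programmed at suspend time.
+ */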
+int ath12k_wow_op_resume(struct ieee80211_hw *hw)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath12k_hif_resume(ar->ab);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to resume hif: %d\n", ret);
+ goto exit;
+ }
+
+ ath12k_hif_ce_irq_enable(ar->ab);
+ ath12k_hif_irq_enable(ar->ab);
+
+ ret = ath12k_wow_wakeup(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath12k_wow_nlo_cleanup(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath12k_wow_clear_hw_filter(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clear hw filter: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath12k_wow_protocol_offload(ar, false);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = ath12k_wow_set_keepalive(ar,
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
+ WMI_STA_KEEPALIVE_INTERVAL_DISABLE);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret);
+ goto exit;
+ }
+
+exit:
+ if (ret) {
+ switch (ah->state) {
+ case ATH12K_HW_STATE_ON:
+ ah->state = ATH12K_HW_STATE_RESTARTING;
+ ret = 1;
+ break;
+ case ATH12K_HW_STATE_OFF:
+ case ATH12K_HW_STATE_RESTARTING:
+ case ATH12K_HW_STATE_RESTARTED:
+ case ATH12K_HW_STATE_WEDGED:
+ ath12k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
+ ah->state);
+ ret = -EIO;
+ break;
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
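+/* Advertise WoWLAN support to mac80211 based on the firmware service map. */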
+int ath12k_wow_init(struct ath12k *ar)
+{
+ if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))
+ return 0;
+
+ ar->wow.wowlan_support = ath12k_wowlan_support;
+
+ if (ar->ab->wow.wmi_conf_rx_decap_mode == ATH12K_HW_TXRX_NATIVE_WIFI) {
+ ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
+ ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
+ }
+
+ if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
+ ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
+ ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
+ }
+
+ ar->wow.max_num_patterns = ATH12K_WOW_PATTERNS;
+ ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
+ ar->ah->hw->wiphy->wowlan = &ar->wow.wowlan_support;
+
+ device_set_wakeup_capable(ar->ab->dev, true);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath12k/wow.h b/drivers/net/wireless/ath/ath12k/wow.h
new file mode 100644
index 000000000000..af9be5fadcc3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wow.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_WOW_H
+#define ATH12K_WOW_H
+
+#define ATH12K_WOW_RETRY_NUM 10
+#define ATH12K_WOW_RETRY_WAIT_MS 200
+#define ATH12K_WOW_PATTERNS 22
+
+struct ath12k_wow {
+ u32 max_num_patterns;
+ struct completion wakeup_completed;
+ struct wiphy_wowlan_support wowlan_support;
+};
+
+struct ath12k_pkt_pattern {
+ u8 pattern[WOW_MAX_PATTERN_SIZE];
+ u8 bytemask[WOW_MAX_PATTERN_SIZE];
+ int pattern_len;
+ int pkt_offset;
+};
+
+struct rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 eth_type;
+} __packed;
+
+#ifdef CONFIG_PM
+
+int ath12k_wow_init(struct ath12k *ar);
+int ath12k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan);
+int ath12k_wow_op_resume(struct ieee80211_hw *hw);
+void ath12k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled);
+int ath12k_wow_enable(struct ath12k *ar);
+int ath12k_wow_wakeup(struct ath12k *ar);
+
+#else
+
+static inline int ath12k_wow_init(struct ath12k *ar)
+{
+ return 0;
+}
+
+static inline int ath12k_wow_enable(struct ath12k *ar)
+{
+ return 0;
+}
+
+static inline int ath12k_wow_wakeup(struct ath12k *ar)
+{
+ return 0;
+}
+#endif /* CONFIG_PM */
+#endif /* ATH12K_WOW_H */
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 9f534ed2fbb3..abe41330fb69 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -2847,7 +2847,7 @@ static void ath5k_stop_tasklets(struct ath5k_hw *ah)
* if another thread does a system call and the thread doing the
* stop is preempted).
*/
-void ath5k_stop(struct ieee80211_hw *hw)
+void ath5k_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath5k_hw *ah = hw->priv;
int ret;
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 97469d0fbad7..594e5b945cb7 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -92,7 +92,7 @@ void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif);
bool ath5k_any_vif_assoc(struct ath5k_hw *ah);
int ath5k_start(struct ieee80211_hw *hw);
-void ath5k_stop(struct ieee80211_hw *hw);
+void ath5k_stop(struct ieee80211_hw *hw, bool suspend);
void ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf);
int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index b389e19381c4..8a03bcc2789e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -973,7 +973,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
return ret;
}
-static void ath9k_htc_stop(struct ieee80211_hw *hw)
+static void ath9k_htc_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath9k_htc_priv *priv = hw->priv;
struct ath_hw *ah = priv->ah;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 01173aac3045..b92c89dad8de 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -895,7 +895,7 @@ static void ath9k_pending_key_del(struct ath_softc *sc, u8 keyix)
ath_key_delete(common, keyix);
}
-static void ath9k_stop(struct ieee80211_hw *hw)
+static void ath9k_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 7e7797bf44b7..755c068e4197 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -439,7 +439,7 @@ static void carl9170_cancel_worker(struct ar9170 *ar)
cancel_work_sync(&ar->ampdu_work);
}
-static void carl9170_op_stop(struct ieee80211_hw *hw)
+static void carl9170_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ar9170 *ar = hw->priv;
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index e760d8002e09..408776562a7e 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -278,7 +278,7 @@ out_err:
return ret;
}
-static void wcn36xx_stop(struct ieee80211_hw *hw)
+static void wcn36xx_stop(struct ieee80211_hw *hw, bool suspend)
{
struct wcn36xx *wcn = hw->priv;
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index ee7d7e9c2718..d5d364683c0e 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -453,16 +453,21 @@ int wil_if_add(struct wil6210_priv *wil)
return rc;
}
- init_dummy_netdev(&wil->napi_ndev);
+ wil->napi_ndev = alloc_netdev_dummy(0);
+ if (!wil->napi_ndev) {
+ wil_err(wil, "failed to allocate dummy netdev");
+ rc = -ENOMEM;
+ goto out_wiphy;
+ }
if (wil->use_enhanced_dma_hw) {
- netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
+ netif_napi_add(wil->napi_ndev, &wil->napi_rx,
wil6210_netdev_poll_rx_edma);
- netif_napi_add_tx(&wil->napi_ndev,
+ netif_napi_add_tx(wil->napi_ndev,
&wil->napi_tx, wil6210_netdev_poll_tx_edma);
} else {
- netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
+ netif_napi_add(wil->napi_ndev, &wil->napi_rx,
wil6210_netdev_poll_rx);
- netif_napi_add_tx(&wil->napi_ndev,
+ netif_napi_add_tx(wil->napi_ndev,
&wil->napi_tx, wil6210_netdev_poll_tx);
}
@@ -474,10 +479,12 @@ int wil_if_add(struct wil6210_priv *wil)
wiphy_unlock(wiphy);
rtnl_unlock();
if (rc < 0)
- goto out_wiphy;
+ goto free_dummy;
return 0;
+free_dummy:
+ free_netdev(wil->napi_ndev);
out_wiphy:
wiphy_unregister(wiphy);
return rc;
@@ -554,5 +561,7 @@ void wil_if_remove(struct wil6210_priv *wil)
netif_napi_del(&wil->napi_tx);
netif_napi_del(&wil->napi_rx);
+ free_netdev(wil->napi_ndev);
+
wiphy_unregister(wiphy);
}
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 22a6eb3e12b7..9bd1286d2857 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -983,7 +983,7 @@ struct wil6210_priv {
spinlock_t eap_lock; /* guarding access to eap rekey fields */
struct napi_struct napi_rx;
struct napi_struct napi_tx;
- struct net_device napi_ndev; /* dummy net_device serving all VIFs */
+ struct net_device *napi_ndev; /* dummy net_device serving all VIFs */
/* DMA related */
struct wil_ring ring_rx;