author     David S. Miller <davem@davemloft.net>  2017-11-04 12:07:50 +0300
committer  David S. Miller <davem@davemloft.net>  2017-11-04 12:07:50 +0300
commit     6e300769dcbaba7bfacbc02ec9c3fcc595eec755 (patch)
tree       f1505cff8d85ad454a021b97e9917c04f368e59e /drivers/net/wireless/intel/iwlwifi/mvm
parent     2a171788ba7bb61995e98e8163204fc7880f63b2 (diff)
parent     e226fb5affccca98c405de80527180224d93d251 (diff)
download   linux-6e300769dcbaba7bfacbc02ec9c3fcc595eec755.tar.xz
Merge tag 'wireless-drivers-next-for-davem-2017-11-03' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next
Kalle Valo says:

====================
wireless-drivers-next patches for 4.15

Mostly fixes this time, but also few new features.

Major changes:

wil6210

* remove ssid debugfs file

rsi

* add WOWLAN support for suspend, hibernate and shutdown states

ath10k

* add support for CCMP-256, GCMP and GCMP-256 ciphers on hardware
  where it's supported (QCA99x0 and QCA4019)
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/mvm')
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw.c     20
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mvm.h    43
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c   49
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c    44
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c     10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/utils.c  17
6 files changed, 132 insertions(+), 51 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index f476882291ae..0296df625cd5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -176,6 +176,7 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
struct iwl_lmac_alive *lmac1;
struct iwl_lmac_alive *lmac2 = NULL;
u16 status;
+ u32 umac_error_event_table;
if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
palive = (void *)pkt->data;
@@ -198,12 +199,25 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
mvm->sf_space.addr = le32_to_cpu(lmac1->st_fwrd_addr);
mvm->sf_space.size = le32_to_cpu(lmac1->st_fwrd_size);
- mvm->umac_error_event_table = le32_to_cpu(umac->error_info_addr);
+ umac_error_event_table = le32_to_cpu(umac->error_info_addr);
+
+ if (!umac_error_event_table) {
+ mvm->support_umac_log = false;
+ } else if (umac_error_event_table >=
+ mvm->trans->cfg->min_umac_error_event_table) {
+ mvm->support_umac_log = true;
+ mvm->umac_error_event_table = umac_error_event_table;
+ } else {
+ IWL_ERR(mvm,
+ "Not valid error log pointer 0x%08X for %s uCode\n",
+ mvm->umac_error_event_table,
+ (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ?
+ "Init" : "RT");
+ mvm->support_umac_log = false;
+ }
alive_data->scd_base_addr = le32_to_cpu(lmac1->scd_base_ptr);
alive_data->valid = status == IWL_ALIVE_STATUS_OK;
- if (mvm->umac_error_event_table)
- mvm->support_umac_log = true;
IWL_DEBUG_FW(mvm,
"Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index bf25c3ce7c95..e34b3eb8e08b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -585,13 +585,9 @@ enum iwl_mvm_tdls_cs_state {
* @head_sn: reorder window head sn
* @num_stored: number of mpdus stored in the buffer
* @buf_size: the reorder buffer size as set by the last addba request
- * @sta_id: sta id of this reorder buffer
* @queue: queue of this reorder buffer
* @last_amsdu: track last ASMDU SN for duplication detection
* @last_sub_index: track ASMDU sub frame index for duplication detection
- * @tid: the tid
- * @entries: list of skbs stored
- * @reorder_time: time the packet was stored in the reorder buffer
* @reorder_timer: timer for frames are in the reorder buffer. For AMSDU
* it is the time of last received sub-frame
* @removed: prevent timer re-arming
@@ -603,13 +599,9 @@ struct iwl_mvm_reorder_buffer {
u16 head_sn;
u16 num_stored;
u8 buf_size;
- u8 sta_id;
int queue;
u16 last_amsdu;
u8 last_sub_index;
- u8 tid;
- struct sk_buff_head entries[IEEE80211_MAX_AMPDU_BUF];
- unsigned long reorder_time[IEEE80211_MAX_AMPDU_BUF];
struct timer_list reorder_timer;
bool removed;
bool valid;
@@ -618,15 +610,38 @@ struct iwl_mvm_reorder_buffer {
} ____cacheline_aligned_in_smp;
/**
+ * struct _iwl_mvm_reorder_buf_entry - reorder buffer entry per-queue/per-seqno
+ * @frames: list of skbs stored
+ * @reorder_time: time the packet was stored in the reorder buffer
+ */
+struct _iwl_mvm_reorder_buf_entry {
+ struct sk_buff_head frames;
+ unsigned long reorder_time;
+};
+
+/* make this indirection to get the aligned thing */
+struct iwl_mvm_reorder_buf_entry {
+ struct _iwl_mvm_reorder_buf_entry e;
+}
+#ifndef __CHECKER__
+/* sparse doesn't like this construct: "bad integer constant expression" */
+__aligned(roundup_pow_of_two(sizeof(struct _iwl_mvm_reorder_buf_entry)))
+#endif
+;
+
+/**
* struct iwl_mvm_baid_data - BA session data
* @sta_id: station id
* @tid: tid of the session
* @baid baid of the session
* @timeout: the timeout set in the addba request
+ * @entries_per_queue: # of buffers per queue, this actually gets
+ * aligned up to avoid cache line sharing between queues
* @last_rx: last rx jiffies, updated only if timeout passed from last update
* @session_timer: timer to check if BA session expired, runs at 2 * timeout
* @mvm: mvm pointer, needed for timer context
* @reorder_buf: reorder buffer, allocated per queue
+ * @reorder_buf_data: data
*/
struct iwl_mvm_baid_data {
struct rcu_head rcu_head;
@@ -634,12 +649,22 @@ struct iwl_mvm_baid_data {
u8 tid;
u8 baid;
u16 timeout;
+ u16 entries_per_queue;
unsigned long last_rx;
struct timer_list session_timer;
struct iwl_mvm *mvm;
- struct iwl_mvm_reorder_buffer reorder_buf[];
+ struct iwl_mvm_reorder_buffer reorder_buf[IWL_MAX_RX_HW_QUEUES];
+ struct iwl_mvm_reorder_buf_entry entries[];
};
+static inline struct iwl_mvm_baid_data *
+iwl_mvm_baid_data_from_reorder_buf(struct iwl_mvm_reorder_buffer *buf)
+{
+ return (void *)((u8 *)buf -
+ offsetof(struct iwl_mvm_baid_data, reorder_buf) -
+ sizeof(*buf) * buf->queue);
+}
+
/*
* enum iwl_mvm_queue_status - queue status
* @IWL_MVM_QUEUE_FREE: the queue is not allocated nor reserved
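The mvm.h changes above pull the per-frame skb lists and timestamps out of struct iwl_mvm_reorder_buffer into a single entries[] flexible array at the end of struct iwl_mvm_baid_data, sliced per RX queue, and add iwl_mvm_baid_data_from_reorder_buf() so the timer callback can walk back from a per-queue reorder buffer to the BA data that owns it. A standalone sketch of that offsetof() back-pointer arithmetic, with illustrative names rather than the driver's types:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct elem {
	int queue;			/* index of this element in the array */
};

struct container {
	int id;
	struct elem bufs[4];		/* stands in for reorder_buf[IWL_MAX_RX_HW_QUEUES] */
};

/* same arithmetic as iwl_mvm_baid_data_from_reorder_buf() */
static struct container *container_from_elem(struct elem *e)
{
	return (void *)((char *)e -
			offsetof(struct container, bufs) -
			sizeof(*e) * e->queue);
}

int main(void)
{
	struct container c = { .id = 42 };
	int q;

	for (q = 0; q < 4; q++)
		c.bufs[q].queue = q;

	assert(container_from_elem(&c.bufs[2]) == &c);
	printf("recovered id = %d\n", container_from_elem(&c.bufs[2])->id);
	return 0;
}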
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 5e679859f948..b84756dc9d6c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -409,9 +409,13 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct napi_struct *napi,
+ struct iwl_mvm_baid_data *baid_data,
struct iwl_mvm_reorder_buffer *reorder_buf,
u16 nssn)
{
+ struct iwl_mvm_reorder_buf_entry *entries =
+ &baid_data->entries[reorder_buf->queue *
+ baid_data->entries_per_queue];
u16 ssn = reorder_buf->head_sn;
lockdep_assert_held(&reorder_buf->lock);
@@ -422,7 +426,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
int index = ssn % reorder_buf->buf_size;
- struct sk_buff_head *skb_list = &reorder_buf->entries[index];
+ struct sk_buff_head *skb_list = &entries[index].e.frames;
struct sk_buff *skb;
ssn = ieee80211_sn_inc(ssn);
@@ -445,11 +449,11 @@ set_timer:
if (reorder_buf->num_stored && !reorder_buf->removed) {
u16 index = reorder_buf->head_sn % reorder_buf->buf_size;
- while (skb_queue_empty(&reorder_buf->entries[index]))
+ while (skb_queue_empty(&entries[index].e.frames))
index = (index + 1) % reorder_buf->buf_size;
/* modify timer to match next frame's expiration time */
mod_timer(&reorder_buf->reorder_timer,
- reorder_buf->reorder_time[index] + 1 +
+ entries[index].e.reorder_time + 1 +
RX_REORDER_BUF_TIMEOUT_MQ);
} else {
del_timer(&reorder_buf->reorder_timer);
@@ -459,6 +463,10 @@ set_timer:
void iwl_mvm_reorder_timer_expired(unsigned long data)
{
struct iwl_mvm_reorder_buffer *buf = (void *)data;
+ struct iwl_mvm_baid_data *baid_data =
+ iwl_mvm_baid_data_from_reorder_buf(buf);
+ struct iwl_mvm_reorder_buf_entry *entries =
+ &baid_data->entries[buf->queue * baid_data->entries_per_queue];
int i;
u16 sn = 0, index = 0;
bool expired = false;
@@ -474,7 +482,7 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
for (i = 0; i < buf->buf_size ; i++) {
index = (buf->head_sn + i) % buf->buf_size;
- if (skb_queue_empty(&buf->entries[index])) {
+ if (skb_queue_empty(&entries[index].e.frames)) {
/*
* If there is a hole and the next frame didn't expire
* we want to break and not advance SN
@@ -482,7 +490,8 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
cont = false;
continue;
}
- if (!cont && !time_after(jiffies, buf->reorder_time[index] +
+ if (!cont &&
+ !time_after(jiffies, entries[index].e.reorder_time +
RX_REORDER_BUF_TIMEOUT_MQ))
break;
@@ -495,18 +504,19 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
if (expired) {
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
+ u8 sta_id = baid_data->sta_id;
rcu_read_lock();
- sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[buf->sta_id]);
+ sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[sta_id]);
mvmsta = iwl_mvm_sta_from_mac80211(sta);
/* SN is set to the last expired frame + 1 */
IWL_DEBUG_HT(buf->mvm,
"Releasing expired frames for sta %u, sn %d\n",
- buf->sta_id, sn);
+ sta_id, sn);
iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif,
- sta, buf->tid);
- iwl_mvm_release_frames(buf->mvm, sta, NULL, buf, sn);
+ sta, baid_data->tid);
+ iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data, buf, sn);
rcu_read_unlock();
} else {
/*
@@ -515,7 +525,7 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
* accordingly to this frame.
*/
mod_timer(&buf->reorder_timer,
- buf->reorder_time[index] +
+ entries[index].e.reorder_time +
1 + RX_REORDER_BUF_TIMEOUT_MQ);
}
spin_unlock(&buf->lock);
@@ -546,7 +556,7 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
/* release all frames that are in the reorder buffer to the stack */
spin_lock_bh(&reorder_buf->lock);
- iwl_mvm_release_frames(mvm, sta, NULL, reorder_buf,
+ iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf,
ieee80211_sn_add(reorder_buf->head_sn,
reorder_buf->buf_size));
spin_unlock_bh(&reorder_buf->lock);
@@ -610,6 +620,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
u8 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
u8 sub_frame_idx = desc->amsdu_info &
IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+ struct iwl_mvm_reorder_buf_entry *entries;
int index;
u16 nssn, sn;
u8 baid;
@@ -660,6 +671,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
IWL_RX_MPDU_REORDER_SN_SHIFT;
buffer = &baid_data->reorder_buf[queue];
+ entries = &baid_data->entries[queue * baid_data->entries_per_queue];
spin_lock_bh(&buffer->lock);
@@ -672,7 +684,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
}
if (ieee80211_is_back_req(hdr->frame_control)) {
- iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
+ iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn);
goto drop;
}
@@ -688,7 +700,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
!ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) {
u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
- iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn);
+ iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer,
+ min_sn);
}
/* drop any oudated packets */
@@ -716,7 +729,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
* If it is the same SN then if the subframe index is incrementing it
* is the same AMSDU - otherwise it is a retransmission.
*/
- tail = skb_peek_tail(&buffer->entries[index]);
+ tail = skb_peek_tail(&entries[index].e.frames);
if (tail && !amsdu)
goto drop;
else if (tail && (sn != buffer->last_amsdu ||
@@ -724,9 +737,9 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
goto drop;
/* put in reorder buffer */
- __skb_queue_tail(&buffer->entries[index], skb);
+ __skb_queue_tail(&entries[index].e.frames, skb);
buffer->num_stored++;
- buffer->reorder_time[index] = jiffies;
+ entries[index].e.reorder_time = jiffies;
if (amsdu) {
buffer->last_amsdu = sn;
@@ -745,7 +758,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
* release notification with up to date NSSN.
*/
if (!amsdu || last_subframe)
- iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
+ iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn);
spin_unlock_bh(&buffer->lock);
return true;
@@ -1065,7 +1078,7 @@ void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
reorder_buf = &ba_data->reorder_buf[queue];
spin_lock_bh(&reorder_buf->lock);
- iwl_mvm_release_frames(mvm, sta, napi, reorder_buf,
+ iwl_mvm_release_frames(mvm, sta, napi, ba_data, reorder_buf,
le16_to_cpu(release->nssn));
spin_unlock_bh(&reorder_buf->lock);
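Throughout the rxmq.c changes, the per-queue storage is now found by slicing the shared array, entries = &baid_data->entries[queue * baid_data->entries_per_queue], and an individual frame slot inside that slice is sn % buf_size. A toy sketch of the addressing with made-up sizes (the real entries hold an skb list and a timestamp, not an integer):

#include <stdio.h>

int main(void)
{
	unsigned int entries_per_queue = 12;	/* illustrative, not the driver's value */
	unsigned int buf_size = 10;		/* reorder window size from the ADDBA request */
	unsigned int flat[4 * 12] = { 0 };	/* one allocation shared by 4 RX queues */

	unsigned int queue = 2, sn = 27;
	unsigned int *slice = &flat[queue * entries_per_queue];
	unsigned int index = sn % buf_size;

	slice[index] = sn;			/* where the driver would queue the skb */
	printf("queue %u, sn %u lands at flat[%u]\n",
	       queue, sn, queue * entries_per_queue + index);
	return 0;
}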
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 282424f40c43..23787cc9c89e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2104,6 +2104,8 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
int j;
struct iwl_mvm_reorder_buffer *reorder_buf =
&data->reorder_buf[i];
+ struct iwl_mvm_reorder_buf_entry *entries =
+ &data->entries[i * data->entries_per_queue];
spin_lock_bh(&reorder_buf->lock);
if (likely(!reorder_buf->num_stored)) {
@@ -2119,7 +2121,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
WARN_ON(1);
for (j = 0; j < reorder_buf->buf_size; j++)
- __skb_queue_purge(&reorder_buf->entries[j]);
+ __skb_queue_purge(&entries[j].e.frames);
/*
* Prevent timer re-arm. This prevents a very far fetched case
* where we timed out on the notification. There may be prior
@@ -2135,7 +2137,6 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
}
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
- u32 sta_id,
struct iwl_mvm_baid_data *data,
u16 ssn, u8 buf_size)
{
@@ -2144,6 +2145,8 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
for (i = 0; i < mvm->trans->num_rx_queues; i++) {
struct iwl_mvm_reorder_buffer *reorder_buf =
&data->reorder_buf[i];
+ struct iwl_mvm_reorder_buf_entry *entries =
+ &data->entries[i * data->entries_per_queue];
int j;
reorder_buf->num_stored = 0;
@@ -2157,11 +2160,9 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
spin_lock_init(&reorder_buf->lock);
reorder_buf->mvm = mvm;
reorder_buf->queue = i;
- reorder_buf->sta_id = sta_id;
- reorder_buf->tid = data->tid;
reorder_buf->valid = false;
for (j = 0; j < reorder_buf->buf_size; j++)
- __skb_queue_head_init(&reorder_buf->entries[j]);
+ __skb_queue_head_init(&entries[j].e.frames);
}
}
@@ -2182,16 +2183,44 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
if (iwl_mvm_has_new_rx_api(mvm) && start) {
+ u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
+
+ /* sparse doesn't like the __align() so don't check */
+#ifndef __CHECKER__
+ /*
+ * The division below will be OK if either the cache line size
+ * can be divided by the entry size (ALIGN will round up) or if
+ * if the entry size can be divided by the cache line size, in
+ * which case the ALIGN() will do nothing.
+ */
+ BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
+ sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
+#endif
+
+ /*
+ * Upward align the reorder buffer size to fill an entire cache
+ * line for each queue, to avoid sharing cache lines between
+ * different queues.
+ */
+ reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
+
/*
* Allocate here so if allocation fails we can bail out early
* before starting the BA session in the firmware
*/
baid_data = kzalloc(sizeof(*baid_data) +
mvm->trans->num_rx_queues *
- sizeof(baid_data->reorder_buf[0]),
+ reorder_buf_size,
GFP_KERNEL);
if (!baid_data)
return -ENOMEM;
+
+ /*
+ * This division is why we need the above BUILD_BUG_ON(),
+ * if that doesn't hold then this will not be right.
+ */
+ baid_data->entries_per_queue =
+ reorder_buf_size / sizeof(baid_data->entries[0]);
}
cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
@@ -2262,8 +2291,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
mod_timer(&baid_data->session_timer,
TU_TO_EXP_TIME(timeout * 2));
- iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
- baid_data, ssn, buf_size);
+ iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
/*
* protect the BA data with RCU to cover a case where our
* internal RX sync mechanism will timeout (not that it's
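The comments added to sta.c above carry the sizing argument: the byte size of one queue's slice of entries[] is rounded up to a whole number of cache lines so that RX queues never share a line, and entries_per_queue is then derived from the rounded size. A small userspace sketch of that arithmetic, using example values in place of the driver's SMP_CACHE_BYTES and entry size:

#include <stdio.h>

#define SMP_CACHE_BYTES 64		/* example cache line size */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int entry_size = 16;	/* assume one reorder entry takes 16 bytes */
	unsigned int buf_size = 10;	/* reorder window size from the ADDBA request */

	unsigned int per_queue_bytes = ALIGN(buf_size * entry_size, SMP_CACHE_BYTES);
	unsigned int entries_per_queue = per_queue_bytes / entry_size;

	/* 10 * 16 = 160 bytes rounds up to 192, i.e. 12 entries per queue */
	printf("per-queue bytes = %u, entries_per_queue = %u\n",
	       per_queue_bytes, entries_per_queue);
	return 0;
}

The BUILD_BUG_ON() in the hunk guarantees that one of the two sizes divides the other, so the final division is exact and no partial entry is ever counted.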
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 6f2e2af23219..00a0efab20e3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1746,6 +1746,7 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
if (iwl_mvm_has_new_tx_api(mvm)) {
struct iwl_mvm_compressed_ba_notif *ba_res =
(void *)pkt->data;
+ u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info);
int i;
sta_id = ba_res->sta_id;
@@ -1759,11 +1760,18 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
if (!le16_to_cpu(ba_res->tfd_cnt))
goto out;
+ rcu_read_lock();
+
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
+ if (!mvmsta)
+ goto out_unlock;
+
/* Free per TID */
for (i = 0; i < le16_to_cpu(ba_res->tfd_cnt); i++) {
struct iwl_mvm_compressed_ba_tfd *ba_tfd =
&ba_res->tfd[i];
+ mvmsta->tid_data[i].lq_color = lq_color;
iwl_mvm_tx_reclaim(mvm, sta_id, ba_tfd->tid,
(int)(le16_to_cpu(ba_tfd->q_num)),
le16_to_cpu(ba_tfd->tfd_index),
@@ -1771,6 +1779,8 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
le32_to_cpu(ba_res->tx_rate));
}
+out_unlock:
+ rcu_read_unlock();
out:
IWL_DEBUG_TX_REPLY(mvm,
"BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 2da1b088ac01..d46115e2d69e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -455,20 +455,12 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
{
struct iwl_trans *trans = mvm->trans;
struct iwl_umac_error_event_table table;
- u32 base;
- base = mvm->umac_error_event_table;
-
- if (base < 0x800000) {
- IWL_ERR(mvm,
- "Not valid error log pointer 0x%08X for %s uCode\n",
- base,
- (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT)
- ? "Init" : "RT");
+ if (!mvm->support_umac_log)
return;
- }
- iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+ iwl_trans_read_mem_bytes(trans, mvm->umac_error_event_table, &table,
+ sizeof(table));
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
@@ -608,8 +600,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
if (mvm->error_event_table[1])
iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[1]);
- if (mvm->support_umac_log)
- iwl_mvm_dump_umac_error_log(mvm);
+ iwl_mvm_dump_umac_error_log(mvm);
}
int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq)