author		Benjamin Berg <benjamin.berg@intel.com>	2024-02-18 19:51:47 +0200
committer	Johannes Berg <johannes.berg@intel.com>	2024-02-21 14:40:49 +0100
commit		78f65fbf421a61894c14a1b91fe2fb4437b3fe5f
tree		8125269426a068e3b6edb782f5d24ec29229dea6 /drivers/net/wireless/intel/iwlwifi/mvm/sta.c
parent		f78c1375339a291cba492a70eaf12ec501d28a8e
wifi: iwlwifi: mvm: ensure offloading TID queue exists
The resume code path assumes that the TX queue for the offloading TID has been configured. At resume time it then tries to sync the write pointer, as it may have been updated by the firmware. In the unusual event that no packets have been sent on TID 0, the queue will not have been allocated, and this causes a crash. Fix this by ensuring the queue exists at suspend time.

Signed-off-by: Benjamin Berg <benjamin.berg@intel.com>
Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
Link: https://msgid.link/20240218194912.6632e6dc7b35.Ie6e6a7488c9c7d4529f13d48f752b5439d8ac3c4@changeid
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
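For context, a minimal sketch of how a suspend path might use the new helper before handing TID 0 over to the firmware. The call site is not part of this file's diff, so the function name example_prepare_offload_queue and the choice of ap_sta->txq[0] as the offloading TID's queue are illustrative assumptions, not the commit's actual d3.c hunk.

/* Hypothetical caller sketch (not taken from this diff): make sure the
 * TID 0 (offloading TID) queue is allocated before suspending, so the
 * resume path can safely sync its write pointer with the firmware's.
 */
static int example_prepare_offload_queue(struct iwl_mvm *mvm,
					 struct ieee80211_sta *ap_sta)
{
	/* mac80211 exposes one TX queue per TID via sta->txq[tid];
	 * iwl_mvm_sta_ensure_queue() returns 0 once the queue exists.
	 */
	return iwl_mvm_sta_ensure_queue(mvm, ap_sta->txq[0]);
}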
Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/mvm/sta.c')
 drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+), 0 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 2a3ca9785974..c2e0cff740e9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1502,6 +1502,34 @@ out_err:
 	return ret;
 }
 
+int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm,
+			     struct ieee80211_txq *txq)
+{
+	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+	int ret = -EINVAL;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
+	    !txq->sta) {
+		return 0;
+	}
+
+	if (!iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, txq->tid)) {
+		set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+		ret = 0;
+	}
+
+	local_bh_disable();
+	spin_lock(&mvm->add_stream_lock);
+	if (!list_empty(&mvmtxq->list))
+		list_del_init(&mvmtxq->list);
+	spin_unlock(&mvm->add_stream_lock);
+	local_bh_enable();
+
+	return ret;
+}
+
 void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
 {
 	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,