diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1870,6 +1870,7 @@ struct ath10k_htt {
spinlock_t tx_lock;
int max_num_pending_tx;
int num_pending_tx;
+ int pending_per_queue[IEEE80211_MAX_QUEUES];
int num_pending_mgmt_tx;
struct idr pending_tx;
wait_queue_head_t empty_tx_wq;
@@ -2447,8 +2448,10 @@ void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
struct ieee80211_txq *txq);
void ath10k_htt_tx_txq_sync(struct ath10k *ar);
-void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
-int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt);
+void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
+ struct ieee80211_txq *txq);
+int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
+ struct ieee80211_txq *txq);
void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt);
int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
bool is_presp);
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -140,19 +140,26 @@ void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->htt.tx_lock);
}
-void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
+void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
+ struct ieee80211_txq *txq)
{
+ int num_txq = -1;
+
lockdep_assert_held(&htt->tx_lock);
htt->num_pending_tx--;
if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
- if (htt->num_pending_tx == 0)
+ if (txq)
+ num_txq = --htt->pending_per_queue[txq->vif->hw_queue[txq->ac]];
+
+ if (htt->num_pending_tx == 0 || num_txq == 0)
wake_up(&htt->empty_tx_wq);
}
-int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
+int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
+ struct ieee80211_txq *txq)
{
lockdep_assert_held(&htt->tx_lock);
@@ -163,6 +170,11 @@ int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
if (htt->num_pending_tx == htt->max_num_pending_tx)
ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
+ if (!txq)
+ return 0;
+
+ htt->pending_per_queue[txq->vif->hw_queue[txq->ac]]++;
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -4385,7 +4385,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
u16 airtime;
spin_lock_bh(&ar->htt.tx_lock);
- ret = ath10k_htt_tx_inc_pending(htt);
+ ret = ath10k_htt_tx_inc_pending(htt, txq);
spin_unlock_bh(&ar->htt.tx_lock);
if (ret)
@@ -4394,7 +4394,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
skb = ieee80211_tx_dequeue_ni(hw, txq);
if (!skb) {
spin_lock_bh(&ar->htt.tx_lock);
- ath10k_htt_tx_dec_pending(htt);
+ ath10k_htt_tx_dec_pending(htt, txq);
spin_unlock_bh(&ar->htt.tx_lock);
return -ENOENT;
@@ -4416,7 +4416,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
if (ret) {
- ath10k_htt_tx_dec_pending(htt);
+ ath10k_htt_tx_dec_pending(htt, txq);
spin_unlock_bh(&ar->htt.tx_lock);
return ret;
}
@@ -4430,7 +4430,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
ath10k_warn(ar, "failed to push frame: %d\n", ret);
spin_lock_bh(&ar->htt.tx_lock);
- ath10k_htt_tx_dec_pending(htt);
+ ath10k_htt_tx_dec_pending(htt, txq);
if (is_mgmt)
ath10k_htt_tx_mgmt_dec_pending(htt);
spin_unlock_bh(&ar->htt.tx_lock);
@@ -4693,7 +4693,7 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
is_presp = ieee80211_is_probe_resp(hdr->frame_control);
}
- ret = ath10k_htt_tx_inc_pending(htt);
+ ret = ath10k_htt_tx_inc_pending(htt, txq);
if (ret) {
ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
ret);
@@ -4706,7 +4706,7 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
if (ret) {
ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n",
ret);
- ath10k_htt_tx_dec_pending(htt);
+ ath10k_htt_tx_dec_pending(htt, txq);
spin_unlock_bh(&ar->htt.tx_lock);
ieee80211_free_txskb(ar->hw, skb);
return;
@@ -4719,7 +4719,7 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
if (is_htt) {
spin_lock_bh(&ar->htt.tx_lock);
- ath10k_htt_tx_dec_pending(htt);
+ ath10k_htt_tx_dec_pending(htt, txq);
if (is_mgmt)
ath10k_htt_tx_mgmt_dec_pending(htt);
spin_unlock_bh(&ar->htt.tx_lock);
@@ -8046,10 +8046,12 @@ static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
return -EOPNOTSUPP;
}
-void ath10k_mac_wait_tx_complete(struct ath10k *ar)
+static void _ath10k_mac_wait_tx_complete(struct ath10k *ar,
+ unsigned long queues)
{
bool skip;
long time_left;
+ unsigned int q;
/* mac80211 doesn't care if we really xmit queued frames or not
* we'll collect those frames either way if we stop/delete vdevs
@@ -8062,7 +8064,11 @@ void ath10k_mac_wait_tx_complete(struct ath10k *ar)
bool empty;
spin_lock_bh(&ar->htt.tx_lock);
- empty = (ar->htt.num_pending_tx == 0);
+ for_each_set_bit(q, &queues, ar->hw->queues) {
+ empty = (ar->htt.pending_per_queue[q] == 0);
+ if (!empty)
+ break;
+ }
spin_unlock_bh(&ar->htt.tx_lock);
skip = (ar->state == ATH10K_STATE_WEDGED) ||
@@ -8077,6 +8083,13 @@ void ath10k_mac_wait_tx_complete(struct ath10k *ar)
skip, ar->state, time_left);
}
+void ath10k_mac_wait_tx_complete(struct ath10k *ar)
+{
+ unsigned int queues = GENMASK(ar->hw->queues - 1, 0);
+
+ _ath10k_mac_wait_tx_complete(ar, queues);
+}
+
static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
@@ -8098,7 +8111,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
mutex_lock(&ar->conf_mutex);
- ath10k_mac_wait_tx_complete(ar);
+ _ath10k_mac_wait_tx_complete(ar, queues);
mutex_unlock(&ar->conf_mutex);
}
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -82,7 +82,7 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
flags = skb_cb->flags;
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
- ath10k_htt_tx_dec_pending(htt);
+ ath10k_htt_tx_dec_pending(htt, txq);
spin_unlock_bh(&htt->tx_lock);
rcu_read_lock();
The ieee80211 flush callback can be called to flush only a subset of the hw
queues. The ath10k flush implementation (i.e. ath10k_flush()) was waiting for
all pending frames of all queues to be flushed, ignoring the queues parameter.
Because mac80211 only stops the queues being flushed, skbs can still be queued
to the other queues in the meantime. Thus ath10k_flush() could fail (and wait
5 seconds while holding ar->conf_mutex) even though the requested queues were
flushed correctly.

A way to reproduce the issue is to use two different APs, because each vdev
has its own hw queue in ath10k. Connect STA0 to AP0 and STA1 to AP1, then
generate traffic from AP0 to STA0 and kill STA0 without a clean disassociation
frame (e.g. unplug the power cable, reboot -f, ...). Now if AP1's queue is
flushed, ath10k_flush() fails (and effectively blocks for 5 seconds with
ar->conf_mutex or even the wiphy lock held) with the following warning:

  ath10k_pci 0000:01:00.0: failed to flush transmit queue (skip 0 ar-state 2): 0

Wait only for the pending frames of the requested queues to be flushed in
ath10k_flush() to avoid that long blocking.

Reported-by: Cedric Veilleux <veilleux.cedric@gmail.com>
Signed-off-by: Remi Pommarel <repk@triplefau.lt>
---
 drivers/net/wireless/ath/ath10k/htt.h    |  7 +++--
 drivers/net/wireless/ath/ath10k/htt_tx.c | 18 ++++++++++---
 drivers/net/wireless/ath/ath10k/mac.c    | 33 +++++++++++++++++-------
 drivers/net/wireless/ath/ath10k/txrx.c   |  2 +-
 4 files changed, 44 insertions(+), 16 deletions(-)
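As an aside, here is a minimal user-space sketch of the per-queue check this
patch introduces in _ath10k_mac_wait_tx_complete(): only the queues selected
by the flush bitmask are inspected, so frames pending on an unrelated vdev
queue no longer stall the flush. pending_per_queue mirrors the new driver
counter; MAX_QUEUES and queues_empty() are illustrative names, not driver
symbols.

/* Build with: cc -std=c99 -Wall sketch.c */
#include <stdbool.h>
#include <stdio.h>

#define MAX_QUEUES 16

/* Per-hw-queue count of frames still owned by the firmware (model only). */
static int pending_per_queue[MAX_QUEUES];

/* Return true only if every queue selected by the bitmask is drained. */
static bool queues_empty(unsigned long queues)
{
	for (int q = 0; q < MAX_QUEUES; q++) {
		if (!(queues & (1UL << q)))
			continue;		/* queue not part of this flush request */
		if (pending_per_queue[q] != 0)
			return false;		/* this queue still has frames in flight */
	}
	return true;
}

int main(void)
{
	pending_per_queue[0] = 3;	/* AP0's vdev queue still draining */

	/* Flushing only queue 1 reports empty even though queue 0 is busy ... */
	printf("flush q1:  %s\n", queues_empty(1UL << 1) ? "empty" : "busy");
	/* ... while a flush covering all queues still sees pending frames. */
	printf("flush all: %s\n",
	       queues_empty((1UL << MAX_QUEUES) - 1) ? "empty" : "busy");
	return 0;
}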