@@ -408,13 +408,17 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
goto err;
}
- ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
- HAL_WBM2SW_RELEASE, wbm_num, 0,
- DP_TX_COMP_RING_SIZE);
- if (ret) {
- ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
- i, ret);
- goto err;
+ if (wbm_num != ATH11K_HW_INVALID_WBM_RING_NUM) {
+ ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
+ HAL_WBM2SW_RELEASE, wbm_num, 0,
+ DP_TX_COMP_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
+ i, ret);
+ goto err;
+ }
+ } else {
+ dp->wbm2sw_txring_reuse = true;
}
srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
@@ -1018,6 +1022,38 @@ static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
return 0;
}
+static void ath11k_dp_tx_idr_resources_deinit(struct ath11k_base *ab, int ring_id)
+{
+ struct dp_tx_ring *tx_ring = &ab->dp.tx_ring[ring_id];
+
+ spin_lock_bh(&tx_ring->tx_idr_lock);
+ idr_for_each(&tx_ring->txbuf_idr,
+ ath11k_dp_tx_pending_cleanup, ab);
+ idr_destroy(&tx_ring->txbuf_idr);
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+}
+
+static void ath11k_dp_tx_idr_resources_init(struct ath11k_base *ab, int ring_id)
+{
+ struct dp_tx_ring *tx_ring = &ab->dp.tx_ring[ring_id];
+ int idr_start;
+ int idr_end;
+
+ if (ab->dp.wbm2sw_txring_reuse) {
+ idr_start = ring_id * DP_TX_IDR_SIZE;
+ idr_end = idr_start + DP_TX_IDR_SIZE - 1;
+ } else {
+ idr_start = 0;
+ idr_end = DP_TX_IDR_SIZE - 1;
+ }
+
+ tx_ring->idr_start = idr_start;
+ tx_ring->idr_end = idr_end;
+
+ idr_init_base(&tx_ring->txbuf_idr, idr_start);
+ spin_lock_init(&tx_ring->tx_idr_lock);
+}
+
void ath11k_dp_free(struct ath11k_base *ab)
{
struct ath11k_dp *dp = &ab->dp;
@@ -1031,11 +1067,7 @@ void ath11k_dp_free(struct ath11k_base *ab)
ath11k_dp_reo_cmd_list_cleanup(ab);
for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
- spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
- idr_for_each(&dp->tx_ring[i].txbuf_idr,
- ath11k_dp_tx_pending_cleanup, ab);
- idr_destroy(&dp->tx_ring[i].txbuf_idr);
- spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
+ ath11k_dp_tx_idr_resources_deinit(ab, i);
kfree(dp->tx_ring[i].tx_status);
}
@@ -1082,8 +1114,7 @@ int ath11k_dp_alloc(struct ath11k_base *ab)
size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;
for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
- idr_init(&dp->tx_ring[i].txbuf_idr);
- spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
+ ath11k_dp_tx_idr_resources_init(ab, i);
dp->tx_ring[i].tcl_data_ring_id = i;
dp->tx_ring[i].tx_status_head = 0;
@@ -88,6 +88,8 @@ struct dp_tx_ring {
struct hal_wbm_release_ring *tx_status;
int tx_status_head;
int tx_status_tail;
+ int idr_start;
+ int idr_end;
};
enum dp_mon_status_buf_state {
@@ -286,6 +288,7 @@ struct ath11k_dp {
spinlock_t reo_cmd_lock;
struct ath11k_hp_update_timer reo_cmd_timer;
struct ath11k_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX];
+ bool wbm2sw_txring_reuse;
};
/* HTT definitions */
@@ -120,8 +120,8 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
tx_ring = &dp->tx_ring[ti.ring_id];
spin_lock_bh(&tx_ring->tx_idr_lock);
- ret = idr_alloc(&tx_ring->txbuf_idr, skb, 0,
- DP_TX_IDR_SIZE - 1, GFP_ATOMIC);
+ ret = idr_alloc(&tx_ring->txbuf_idr, skb, tx_ring->idr_start,
+ tx_ring->idr_end, GFP_ATOMIC);
spin_unlock_bh(&tx_ring->tx_idr_lock);
if (unlikely(ret < 0)) {
@@ -283,6 +283,22 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
return ret;
}
+static inline struct sk_buff*
+ath11k_dp_tx_fetch_msdu(struct ath11k_base *ab, struct dp_tx_ring *tx_ring,
+ int msdu_id)
+{
+ struct sk_buff *msdu;
+
+ if (ab->dp.wbm2sw_txring_reuse)
+ tx_ring = &ab->dp.tx_ring[msdu_id / DP_TX_IDR_SIZE];
+
+ spin_lock(&tx_ring->tx_idr_lock);
+ msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
+ spin_unlock(&tx_ring->tx_idr_lock);
+
+ return msdu;
+}
+
static void ath11k_dp_tx_free_txbuf(struct ath11k_base *ab, u8 mac_id,
int msdu_id,
struct dp_tx_ring *tx_ring)
@@ -291,10 +307,7 @@ static void ath11k_dp_tx_free_txbuf(struct ath11k_base *ab, u8 mac_id,
struct sk_buff *msdu;
struct ath11k_skb_cb *skb_cb;
- spin_lock(&tx_ring->tx_idr_lock);
- msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
- spin_unlock(&tx_ring->tx_idr_lock);
-
+ msdu = ath11k_dp_tx_fetch_msdu(ab, tx_ring, msdu_id);
if (unlikely(!msdu)) {
ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
msdu_id);
@@ -321,10 +334,7 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
struct ath11k_skb_cb *skb_cb;
struct ath11k *ar;
- spin_lock(&tx_ring->tx_idr_lock);
- msdu = idr_remove(&tx_ring->txbuf_idr, ts->msdu_id);
- spin_unlock(&tx_ring->tx_idr_lock);
-
+ msdu = ath11k_dp_tx_fetch_msdu(ab, tx_ring, ts->msdu_id);
if (unlikely(!msdu)) {
ath11k_warn(ab, "htt tx completion for unknown msdu_id %d\n",
ts->msdu_id);
@@ -703,17 +713,13 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
continue;
}
- spin_lock(&tx_ring->tx_idr_lock);
- msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
+ msdu = ath11k_dp_tx_fetch_msdu(ab, tx_ring, msdu_id);
if (unlikely(!msdu)) {
ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
msdu_id);
- spin_unlock(&tx_ring->tx_idr_lock);
continue;
}
- spin_unlock(&tx_ring->tx_idr_lock);
-
ar = ab->pdevs[mac_id].ar;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
@@ -2025,8 +2025,6 @@ const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_wcn6750 = {
ATH11K_TX_RING_MASK_0,
0,
ATH11K_TX_RING_MASK_2,
- 0,
- ATH11K_TX_RING_MASK_4,
},
.rx_mon_status = {
0, 0, 0, 0, 0, 0,
@@ -2720,8 +2718,8 @@ static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_wcn6750[
},
{
.tcl_ring_num = 1,
- .wbm_ring_num = 4,
- .rbm_id = HAL_RX_BUF_RBM_SW4_BM,
+ .wbm_ring_num = ATH11K_HW_INVALID_WBM_RING_NUM,
+ .rbm_id = HAL_RX_BUF_RBM_SW0_BM,
},
{
.tcl_ring_num = 2,
@@ -80,6 +80,8 @@
#define ATH11K_M3_FILE "m3.bin"
#define ATH11K_REGDB_FILE_NAME "regdb.bin"
+#define ATH11K_HW_INVALID_WBM_RING_NUM 0xF
+
#define ATH11K_CE_OFFSET(ab) (ab->mem_ce - ab->mem)
enum ath11k_hw_rate_cck {
On WCN6750, ageout flush on WBM2SW4 SRNG does not happen if there is only one TX completion pending in the FIFO and all other WBM2SW release SRNGs are idle. Due to this limitation, TX completion for the lone packet in WBM2SW4 SRNG will never reach the driver. This is a case where the TX packet has been acked in the air but the completion status has not been reported to the driver. In turn, the pending TX completions will prevent the device from entering suspend. The following error log gets printed during suspend, and the suspend fails. Failure log: ath11k 17a10040.wifi: failed to flush transmit queue, data pkts pending 1 Fix this by not using WBM2SW4 SRNG for TX completions on WCN6750 and instead reuse WBM2SW0. Tested-on: WCN6750 hw1.0 AHB WLAN.MSL.1.0.1-00887-QCAMSLSWPLZ-1 Signed-off-by: Manikanta Pubbisetty <quic_mpubbise@quicinc.com> --- drivers/net/wireless/ath/ath11k/dp.c | 59 +++++++++++++++++++------ drivers/net/wireless/ath/ath11k/dp.h | 3 ++ drivers/net/wireless/ath/ath11k/dp_tx.c | 36 ++++++++------- drivers/net/wireless/ath/ath11k/hw.c | 6 +-- drivers/net/wireless/ath/ath11k/hw.h | 2 + 5 files changed, 73 insertions(+), 33 deletions(-) base-commit: bea046575a2e6d7d1cf63cc7ab032647a3585de5