@@ -1265,15 +1265,23 @@ static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
if (!ab->hw_params->reoq_lut_support)
return;
- if (!dp->reoq_lut.vaddr)
- return;
+ if (dp->reoq_lut.vaddr) {
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_REO_REG +
+ HAL_REO1_QDESC_LUT_BASE0(ab), 0);
+ dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
+ dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
+ dp->reoq_lut.vaddr = NULL;
+ }
- dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
- dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
- dp->reoq_lut.vaddr = NULL;
-
- ath12k_hif_write32(ab,
- HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab), 0);
+ if (dp->ml_reoq_lut.vaddr) {
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_REO_REG +
+ HAL_REO1_QDESC_LUT_BASE1(ab), 0);
+ dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
+ dp->ml_reoq_lut.vaddr, dp->ml_reoq_lut.paddr);
+ dp->ml_reoq_lut.vaddr = NULL;
+ }
}
void ath12k_dp_free(struct ath12k_base *ab)
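The teardown above clears the LUT base register before freeing the DMA memory, so the REO hardware is never left holding a pointer into freed memory, and the NULL assignment makes the cleanup safe to call twice. A minimal standalone sketch of that ordering, with hw_write_lut_base() and struct lut standing in for the real register write and DMA buffer (illustrative names, not the driver's API):

#include <stdio.h>
#include <stdlib.h>

struct lut { void *vaddr; };

/* Stands in for the ath12k_hif_write32() call that programs the
 * REO LUT base register; illustrative only.
 */
static void hw_write_lut_base(unsigned long paddr)
{
	printf("LUT base register <- %#lx\n", paddr);
}

static void lut_cleanup(struct lut *lut)
{
	if (!lut->vaddr)
		return;

	hw_write_lut_base(0);	/* 1: hardware stops referencing the table */
	free(lut->vaddr);	/* 2: only now is the memory safe to free */
	lut->vaddr = NULL;	/* 3: NULL it so a second cleanup is a no-op */
}

int main(void)
{
	struct lut lut = { .vaddr = malloc(64) };

	lut_cleanup(&lut);
	lut_cleanup(&lut);	/* idempotent, like the guarded code above */
	return 0;
}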
@@ -1599,8 +1607,23 @@ static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
return -ENOMEM;
}
+ dp->ml_reoq_lut.vaddr = dma_alloc_coherent(ab->dev,
+ DP_REOQ_LUT_SIZE,
+ &dp->ml_reoq_lut.paddr,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!dp->ml_reoq_lut.vaddr) {
+ ath12k_warn(ab, "failed to allocate memory for ML reoq table\n");
+ dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
+ dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
+ dp->reoq_lut.vaddr = NULL;
+ return -ENOMEM;
+ }
+
ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab),
dp->reoq_lut.paddr);
+ ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE1(ab),
+ dp->ml_reoq_lut.paddr >> 8);
+
return 0;
}
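The setup path now makes two allocations that must succeed together: if the ML table allocation fails, the already-allocated per-link table is freed and -ENOMEM is propagated, so the caller never sees a half-initialized pair. A compilable sketch of that unwind pattern, with calloc() standing in for dma_alloc_coherent() (names are illustrative, not the driver's):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct lut { void *vaddr; };

static int luts_setup(struct lut *reoq, struct lut *ml_reoq, size_t size)
{
	reoq->vaddr = calloc(1, size);
	if (!reoq->vaddr)
		return -ENOMEM;

	ml_reoq->vaddr = calloc(1, size);
	if (!ml_reoq->vaddr) {
		free(reoq->vaddr);	/* roll back the first allocation */
		reoq->vaddr = NULL;
		return -ENOMEM;
	}

	return 0;	/* both tables valid, or neither */
}

int main(void)
{
	struct lut a = { 0 }, b = { 0 };

	printf("setup: %d\n", luts_setup(&a, &b, 4096));
	free(a.vaddr);
	free(b.vaddr);
	return 0;
}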
@@ -368,6 +368,7 @@ struct ath12k_dp {
struct dp_rxdma_mon_ring rxdma_mon_buf_ring;
struct dp_rxdma_mon_ring tx_mon_buf_ring;
struct ath12k_reo_q_addr_lut reoq_lut;
+ struct ath12k_reo_q_addr_lut ml_reoq_lut;
};
/* HTT definitions */
@@ -740,15 +740,22 @@ static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u
{
struct ath12k_reo_queue_ref *qref;
struct ath12k_dp *dp = &ab->dp;
+ bool ml_peer = false;
if (!ab->hw_params->reoq_lut_support)
return;
- /* TODO: based on ML peer or not, select the LUT. below assumes non
- * ML peer
- */
- qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
- (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ if (peer_id & ATH12K_PEER_ML_ID_VALID) {
+ peer_id &= ~ATH12K_PEER_ML_ID_VALID;
+ ml_peer = true;
+ }
+
+ if (ml_peer)
+ qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ else
+ qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
qref->info0 = u32_encode_bits(lower_32_bits(paddr),
BUFFER_ADDR_INFO0_ADDR);
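The qref helper above treats the peer id as carrying a flag bit: when ATH12K_PEER_ML_ID_VALID is set, the id is an ML peer id, the bit is stripped, and the ML LUT is indexed instead of the per-link one. Each peer owns IEEE80211_NUM_TIDS + 1 slots, so the slot for a given (peer, tid) is a flat multiply-add. A standalone sketch of that arithmetic (the 0x2000 flag value is an assumption for illustration; the real ATH12K_PEER_ML_ID_VALID comes from the driver headers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IEEE80211_NUM_TIDS	16	/* as in mac80211 */
#define PEER_ML_ID_VALID	0x2000	/* illustrative flag bit only */

static unsigned int qref_index(uint16_t peer_id, uint8_t tid, bool *ml_peer)
{
	*ml_peer = peer_id & PEER_ML_ID_VALID;	/* flag selects the ML LUT */
	peer_id &= ~PEER_ML_ID_VALID;		/* strip it before indexing */

	/* one slot per TID plus one extra, laid out as a flat array */
	return peer_id * (IEEE80211_NUM_TIDS + 1) + tid;
}

int main(void)
{
	bool ml;
	unsigned int idx = qref_index(5 | PEER_ML_ID_VALID, 3, &ml);

	printf("ml=%d index=%u\n", ml, idx);	/* prints: ml=1 index=88 */
	return 0;
}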
@@ -761,15 +768,22 @@ static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u
{
struct ath12k_reo_queue_ref *qref;
struct ath12k_dp *dp = &ab->dp;
+ bool ml_peer = false;
if (!ab->hw_params->reoq_lut_support)
return;
- /* TODO: based on ML peer or not, select the LUT. below assumes non
- * ML peer
- */
- qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
- (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ if (peer_id & ATH12K_PEER_ML_ID_VALID) {
+ peer_id &= ~ATH12K_PEER_ML_ID_VALID;
+ ml_peer = true;
+ }
+
+ if (ml_peer)
+ qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ else
+ qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
@@ -802,7 +816,10 @@ void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
rx_tid->vaddr = NULL;
}
- ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
+ if (peer->mlo)
+ ath12k_peer_rx_tid_qref_reset(ar->ab, peer->ml_id, tid);
+ else
+ ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
rx_tid->active = false;
}
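After this change, ath12k_peer_rx_tid_qref_setup() and ath12k_peer_rx_tid_qref_reset() duplicate the flag test and LUT selection. A possible follow-up (not part of this patch) is to hoist the lookup into a shared helper; sketched below as a fragment that assumes the driver's existing types (struct ath12k_dp, struct ath12k_reo_queue_ref, ATH12K_PEER_ML_ID_VALID), so it is not standalone:

static struct ath12k_reo_queue_ref *
ath12k_dp_get_qref(struct ath12k_dp *dp, u16 peer_id, u16 tid)
{
	bool ml_peer = peer_id & ATH12K_PEER_ML_ID_VALID;
	void *vaddr = ml_peer ? dp->ml_reoq_lut.vaddr : dp->reoq_lut.vaddr;

	peer_id &= ~ATH12K_PEER_ML_ID_VALID;

	return (struct ath12k_reo_queue_ref *)vaddr +
	       (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
}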
@@ -945,7 +962,8 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
return 0;
}
- if (ab->hw_params->reoq_lut_support && !dp->reoq_lut.vaddr) {
+ if (ab->hw_params->reoq_lut_support &&
+ (!dp->reoq_lut.vaddr || !dp->ml_reoq_lut.vaddr)) {
spin_unlock_bh(&ab->base_lock);
ath12k_warn(ab, "reo qref table is not setup\n");
return -EINVAL;
@@ -1026,7 +1044,11 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
/* Update the REO queue LUT at the corresponding peer id
* and tid with qaddr.
*/
- ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
+ if (peer->mlo)
+ ath12k_peer_rx_tid_qref_setup(ab, peer->ml_id, tid, paddr);
+ else
+ ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
+
spin_unlock_bh(&ab->base_lock);
} else {
spin_unlock_bh(&ab->base_lock);
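Both call sites that feed the qref helpers (the delete path above and this setup path) pick the key the same way: peer->ml_id for an ML peer, peer->peer_id otherwise, where ml_id is expected to carry ATH12K_PEER_ML_ID_VALID so the helpers select the ML LUT. A hypothetical one-line helper (not in the patch, assuming only the fields this patch introduces) would keep that choice in one place:

static inline u16 ath12k_peer_qref_id(const struct ath12k_peer *peer)
{
	/* ml_id carries ATH12K_PEER_ML_ID_VALID, which the qref
	 * helpers strip before indexing the ML LUT.
	 */
	return peer->mlo ? peer->ml_id : peer->peer_id;
}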
@@ -396,9 +396,11 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
/* the assoc link is considered primary for now */
peer->primary_link = arsta->is_assoc_link;
+ peer->mlo = true;
} else {
peer->ml_id = ATH12K_MLO_PEER_ID_INVALID;
peer->primary_link = true;
+ peer->mlo = false;
}
}
@@ -46,7 +46,7 @@ struct ath12k_peer {
struct ppdu_user_delayba ppdu_stats_delayba;
bool delayba_flag;
bool is_authorized;
-
+ bool mlo;
/* protected by ab->data_lock */
bool dp_setup_done;