@@ -847,6 +847,7 @@ int iwl_mld_add_chanctx(struct ieee80211_hw *hw,
if (fw_id < 0)
return fw_id;
+ phy->mld = mld;
phy->fw_id = fw_id;
phy->chandef = *iwl_mld_get_chandef_from_chanctx(ctx);
@@ -3,6 +3,7 @@
* Copyright (C) 2024-2025 Intel Corporation
*/
#include "mlo.h"
+#include "phy.h"
/* Block reasons helper */
#define HANDLE_EMLSR_BLOCKED_REASONS(HOW) \
@@ -177,6 +178,19 @@ static void iwl_mld_check_emlsr_prevention(struct iwl_mld *mld,
&mld_vif->emlsr.prevent_done_wk, delay);
}
+static void iwl_mld_clear_avg_chan_load_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ void *dat)
+{
+ struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
+
+ /* Chanctx iterator: reset the accumulated "not by us" load average.
+ * Clearing every chanctx (not only those of the EMLSR vif) is safe,
+ * since EMLSR is not allowed if there is another vif.
+ */
+ phy->avg_channel_load_not_by_us = 0;
+}
+
static int _iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
enum iwl_mld_emlsr_exit exit, u8 link_to_keep,
bool sync)
@@ -215,6 +229,13 @@ static int _iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
/* Update latest exit reason and check EMLSR prevention */
iwl_mld_check_emlsr_prevention(mld, mld_vif, exit);
+ /* The FW's channel_load_not_by_us statistic is invalid while in EMLSR.
+ * Clear the accumulated average so stale values won't be used.
+ */
+ ieee80211_iter_chan_contexts_atomic(mld->hw,
+ iwl_mld_clear_avg_chan_load_iter,
+ NULL);
+
return ret;
}
@@ -15,6 +15,9 @@
* with. Used to detect a no-op when the chanctx changes.
* @channel_load_by_us: channel load on this channel caused by
* the NIC itself, as indicated by firmware
+ * @avg_channel_load_not_by_us: averaged channel load on this channel caused by
+ * others. This value is invalid when in EMLSR (due to FW limitations)
+ * @mld: pointer to the MLD context
*/
struct iwl_mld_phy {
/* Add here fields that need clean up on hw restart */
@@ -24,6 +27,8 @@ struct iwl_mld_phy {
);
/* And here fields that survive a hw restart */
u32 channel_load_by_us;
+ u32 avg_channel_load_not_by_us;
+ struct iwl_mld *mld;
};
static inline struct iwl_mld_phy *
@@ -461,6 +461,7 @@ static void iwl_mld_fill_chanctx_stats(struct ieee80211_hw *hw,
{
struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
const struct iwl_stats_ntfy_per_phy *per_phy = data;
+ u32 new_load;
if (WARN_ON(phy->fw_id >= IWL_STATS_MAX_PHY_OPERATIONAL))
return;
@@ -468,7 +469,14 @@ static void iwl_mld_fill_chanctx_stats(struct ieee80211_hw *hw,
phy->channel_load_by_us =
le32_to_cpu(per_phy[phy->fw_id].channel_load_by_us);
- /* TODO: channel load not by us (task=statistics) */
+ new_load = le32_to_cpu(per_phy[phy->fw_id].channel_load_not_by_us);
+ if (IWL_FW_CHECK(phy->mld, new_load > 100, "Invalid channel load %u\n",
+ new_load))
+ return;
+
+ /* Exponential moving average: give a weight of 0.5 to the old value */
+ phy->avg_channel_load_not_by_us =
+ (new_load >> 1) + (phy->avg_channel_load_not_by_us >> 1);
}
static void
@@ -168,6 +168,7 @@ iwlmld_kunit_add_chanctx_from_def(struct cfg80211_chan_def *def)
KUNIT_ASSERT_GE(test, fw_id, 0);
phy->fw_id = fw_id;
+ phy->mld = mld;
phy->chandef = *iwl_mld_get_chandef_from_chanctx(ctx);
return ctx;