new file mode 100644
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This file contains the Rx (receive) path routines.
+ *
+ * Copyright (C) [2022-2025] Renesas Electronics Corporation and/or its affiliates.
+ */
+
+#include <linux/err.h>
+#include <linux/kthread.h>
+
+#include "core.h"
+#include "dev.h"
+#include "params.h"
+#include "dbg.h"
+#include "cfg80211.h"
+#include "rx.h"
+
+#define RA6W_RX_THREAD_NAME "ra6w_rx_thread"
+
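+/*
+ * Update the per-station Rx counters, cache the extended Rx header of the
+ * most recently received frame and fold the result into the vif statistics.
+ */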
+static void ra6w_rx_update_stats(struct ra6w_cfg80211_vif *vif, const struct ra6w_rx_buf *rx_buf)
+{
+ struct ra6w_cfg80211_sta *sta = NULL;
+ struct ra6w_cfg80211_sta_stats *stats = NULL;
+
+ sta = ra6w_cfg80211_sta_get(vif->priv, rx_buf->ext_hdr.sta_idx);
+ if (!sta)
+ return;
+
+ stats = &sta->stats;
+
+ stats->last_active_time = jiffies;
+ stats->rx_packets++;
+ stats->rx_bytes += le16_to_cpu(rx_buf->data_len);
+ stats->last_rx_data_ext = rx_buf->ext_hdr;
+
+ if (stats->last_rx_data_ext.format_mod > 1)
+ stats->last_stats = rx_buf->ext_hdr;
+
+ ra6w_stats_rx_update(&vif->stats, stats);
+}
+
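+/*
+ * Hand a received management frame to cfg80211. On AP and P2P interfaces,
+ * beacons are reported as OBSS beacons and deauth/disassoc frames carrying
+ * a class 2/class 3 reason code go through the unprotected MLME path;
+ * everything else is forwarded with cfg80211_rx_mgmt().
+ */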
+static int ra6w_rx_mgmt_set(struct ra6w_cfg80211_vif *vif, struct sk_buff *skb)
+{
+ struct net_device *ndev = vif->ndev;
+ struct ra6w_rx_buf *rx_buf = (struct ra6w_rx_buf *)skb->data;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)rx_buf->data;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ if (ieee80211_is_beacon(mgmt->frame_control)) {
+ struct wiphy *wiphy = vif->wdev.wiphy;
+
+ if (!wiphy)
+ return -ENOENT;
+
+ cfg80211_report_obss_beacon(wiphy, rx_buf->data,
+ le16_to_cpu(rx_buf->data_len),
+ rx_buf->ext_hdr.prim20_freq,
+ rx_buf->ext_hdr.rssi1);
+
+ return 0;
+ }
+
+ if (!ieee80211_is_deauth(mgmt->frame_control) &&
+ !ieee80211_is_disassoc(mgmt->frame_control))
+ break;
+
+ if (mgmt->u.deauth.reason_code ==
+ cpu_to_le16(WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA) ||
+ mgmt->u.deauth.reason_code ==
+ cpu_to_le16(WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA)) {
+ cfg80211_rx_unprot_mlme_mgmt(ndev, rx_buf->data,
+ le16_to_cpu(rx_buf->data_len));
+
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+
+ cfg80211_rx_mgmt(&vif->wdev, rx_buf->ext_hdr.prim20_freq, rx_buf->ext_hdr.rssi1,
+ rx_buf->data, le16_to_cpu(rx_buf->data_len), 0);
+
+ return 0;
+}
+
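+/*
+ * Resolve the destination vif of a management frame, falling back to the
+ * first vif in the vif map when the Rx buffer carries an invalid vif index,
+ * update the Rx statistics and pass the frame to ra6w_rx_mgmt_set().
+ */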
+static void ra6w_rx_mgmt(struct ra6w_cfg80211_priv *priv, struct sk_buff *skb)
+{
+ struct ra6w_cfg80211_vif *vif = NULL;
+ struct net_device *ndev = NULL;
+ struct ra6w_rx_buf *rx_buf = (struct ra6w_rx_buf *)skb->data;
+ u8 vif_idx = rx_buf->ext_hdr.vif_idx;
+ int ret;
+ int len;
+
+ if (vif_idx == RA6W_CFG80211_VIF_IDX_INVALID) {
+ u8 n = 0;
+
+ n = find_first_bit(priv->vif_map, RA6W_CFG80211_VIF_MAX);
+ if (n >= RA6W_CFG80211_VIF_MAX)
+ return;
+
+ vif_idx = n;
+ }
+
+ vif = ra6w_cfg80211_vif_get(priv, vif_idx);
+ if (!vif)
+ return;
+
+ ndev = vif->ndev;
+
+ ra6w_rx_update_stats(vif, rx_buf);
+
+ len = le16_to_cpu(rx_buf->data_len);
+ ret = ra6w_rx_mgmt_set(vif, skb);
+ if (ret) {
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_dropped++;
+ } else {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+ }
+}
+
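+/*
+ * Deliver a received data frame to the network stack: strip the ra6w Rx
+ * header, fill in the skb metadata and hand the frame to netif_rx(),
+ * updating the netdev Rx counters according to the result.
+ */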
+static void ra6w_rx_sta(struct ra6w_cfg80211_priv *priv, struct sk_buff *skb)
+{
+ struct ra6w_cfg80211_vif *vif = NULL;
+ struct net_device *ndev = NULL;
+ struct ra6w_rx_buf *rx_buf = (struct ra6w_rx_buf *)skb->data;
+ int ret;
+ int len;
+
+ vif = ra6w_cfg80211_vif_get(priv, rx_buf->ext_hdr.vif_idx);
+ if (!vif) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ ndev = vif->ndev;
+
+ skb->len = RA6W_GET_DATA_SIZE(rx_buf->ext_len, le16_to_cpu(rx_buf->data_len));
+ skb_pull(skb, rx_buf->ext_len + RA6W_BASE_HDR_SIZE);
+ skb->dev = ndev;
+ skb->priority = rx_buf->ext_hdr.priority;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ ra6w_rx_update_stats(vif, rx_buf);
+
+ len = skb->len;
+ ret = netif_rx(skb);
+ if (ret) {
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_dropped++;
+ } else {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+ }
+}
+
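+/*
+ * Dispatch one received buffer to the data path (ext_hdr.mpdu == 0) or to
+ * the management path; management frames are freed here once processed.
+ */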
+static void ra6w_rx_skb(struct ra6w_rx *rx, struct sk_buff *skb)
+{
+ struct ra6w_core *core = container_of(rx, struct ra6w_core, rx);
+ struct ra6w_cfg80211_priv *priv = core->priv;
+ struct ra6w_rx_buf *rx_buf = (struct ra6w_rx_buf *)skb->data;
+
+ core->stats.rx.packets++;
+
+ if (rx_buf->ext_hdr.mpdu == 0) {
+ ra6w_rx_sta(priv, skb);
+ return;
+ }
+
+ ra6w_rx_mgmt(priv, skb);
+ dev_kfree_skb(skb);
+}
+
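+/* Drain the Rx queue, one skb at a time, until it is empty or the thread is stopped. */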
+static void ra6w_rx_worker(struct ra6w_rx *rx)
+{
+ struct sk_buff *skb = NULL;
+
+ while (!kthread_should_stop() && (skb = ra6w_q_pop(&rx->q)))
+ ra6w_rx_skb(rx, skb);
+}
+
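+/*
+ * Rx thread main loop: wait for an Rx event, drain the queue on a data
+ * event and exit on a reset event or when the thread is asked to stop.
+ */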
+static int ra6w_rx_thread_handler(void *arg)
+{
+ struct ra6w_rx *rx = arg;
+ int event = 0;
+
+ while (!kthread_should_stop()) {
+ event = ra6w_q_wait(&rx->event, RA6W_RX_EVENT_MASK);
+ if (event & BIT(RA6W_RX_DATA_EVENT))
+ ra6w_rx_worker(rx);
+
+ if (event & BIT(RA6W_RX_EVENT_RESET))
+ break;
+
+ atomic_set(&rx->event.condition, 0);
+ }
+
+ return 0;
+}
+
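+/*
+ * Allocate the Rx queue, initialise the event wait queue and start the Rx
+ * thread. The queue is released again if the thread cannot be started.
+ */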
+static int _ra6w_rx_init(struct ra6w_rx *rx, size_t rx_buf_num)
+{
+ int ret;
+
+ if (rx_buf_num == 0) {
+ ra6w_err("[%s] rx queue size must be greater than zero\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = ra6w_q_init(&rx->q, rx_buf_num, sizeof(struct ra6w_rx_buf *));
+ if (ret)
+ return ret;
+
+ atomic_set(&rx->event.condition, 0);
+ init_waitqueue_head(&rx->event.wait_queue);
+
+ rx->task = kthread_run(ra6w_rx_thread_handler, rx, RA6W_RX_THREAD_NAME);
+ if (IS_ERR(rx->task)) {
+ ra6w_err("[%s] kthread_run %s failed\n", __func__, RA6W_RX_THREAD_NAME);
+ ret = PTR_ERR(rx->task);
+ rx->task = NULL;
+
+ goto rx_buf_free;
+ }
+
+ return 0;
+
+rx_buf_free:
+ ra6w_q_deinit(&rx->q);
+
+ return ret;
+}
+
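+/* Initialise the Rx path with the default Rx queue depth. */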
+int ra6w_rx_init(struct ra6w_rx *rx)
+{
+ return _ra6w_rx_init(rx, RA6W_RX_BUF_Q_MAX);
+}
+
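+/*
+ * Queue one received buffer for the Rx thread. The extended header and
+ * payload lengths are validated, the core status is refreshed from the
+ * buffer, and a data event is raised to wake the Rx thread.
+ */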
+int ra6w_rx_event_post(struct ra6w_rx *rx, struct sk_buff *skb)
+{
+ struct ra6w_core *core = container_of(rx, struct ra6w_core, rx);
+ struct ra6w_rx_buf *rx_buf = (struct ra6w_rx_buf *)skb->data;
+ int ret;
+
+ if (rx_buf->ext_len != RA6W_RX_EXT_LEN) {
+ core->stats.rx.err++;
+ return -EINVAL;
+ }
+
+ ra6w_status_set(&core->status, rx_buf->ext_hdr.status);
+
+ if (rx_buf->data_len == 0) {
+ core->stats.rx.err++;
+ return -EINVAL;
+ }
+
+ ret = ra6w_q_push(&rx->q, skb);
+ if (!ret || !ra6w_q_empty(&rx->q))
+ ra6w_q_event_set(&rx->event, BIT(RA6W_RX_DATA_EVENT));
+
+ return ret;
+}
+
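+/* Stop the Rx thread (if it was started) and release the Rx queue. */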
+void ra6w_rx_deinit(struct ra6w_rx *rx)
+{
+ if (rx->task) {
+ atomic_set(&rx->event.condition, BIT(RA6W_RX_EVENT_RESET));
+ kthread_stop(rx->task);
+ }
+
+ ra6w_q_deinit(&rx->q);
+}