@@ -42,8 +42,12 @@ struct xgmac_mtl_ops;
#define XGMAC_RX_QUEUES 16
/* Max/Min RI Watchdog Timer count value */
-#define XGMAC_MAX_DMA_RIWT 0xff
-#define XGMAC_MIN_DMA_RIWT 0x20
+/* Calculated based on how long it takes to fill 256KB of Rx memory
+ * at 10Gb speed with a 156MHz clock rate, set to slightly less than
+ * the actual value.
+ */
+#define XGMAC_MAX_DMA_RIWT 0x70
+#define XGMAC_MIN_DMA_RIWT 0x01
/* Tx coalesce parameters */
#define XGMAC_COAL_TX_TIMER 40000
@@ -206,6 +210,20 @@ enum dma_irq_status {
#define XGMAC_FOR_EACH_QUEUE(max_queues, queue_num) \
for (queue_num = 0; queue_num < max_queues; queue_num++)
+#define DRV_VERSION "1.0.0"
+
+#define XGMAC_MAX_RX_CHANNELS 16
+#define XGMAC_MAX_TX_CHANNELS 16
+
+#define START_MAC_REG_OFFSET 0x0000
+#define MAX_MAC_REG_OFFSET 0x0DFC
+#define START_MTL_REG_OFFSET 0x1000
+#define MAX_MTL_REG_OFFSET 0x18FC
+#define START_DMA_REG_OFFSET 0x3000
+#define MAX_DMA_REG_OFFSET 0x38FC
+
+#define REG_SPACE_SIZE 0x2000
+
/* xgmac statistics counters */
struct xgmac_extra_stats {
/* TX/RX IRQ events */
@@ -488,7 +506,8 @@ struct xgmac_priv_data {
int oldlink;
int speed;
int oldduplex;
- unsigned int flow_ctrl;
+ u8 rx_pause;
+ u8 tx_pause;
unsigned int pause;
struct mii_bus *mii;
int mii_irq[PHY_MAX_ADDR];
@@ -508,6 +527,7 @@ struct xgmac_priv_data {
u32 adv_ts;
int use_riwt;
spinlock_t ptp_lock;
+ struct ptp_clock *ptp_clock;
/* EEE-LPI specific members */
struct timer_list eee_ctrl_timer;
@@ -546,4 +566,6 @@ extern const struct xgmac_mtl_ops *xgmac_get_mtl_ops(void);
void xgmac_disable_eee_mode(struct xgmac_priv_data * const priv);
bool xgmac_eee_init(struct xgmac_priv_data * const priv);
+int xgmac_set_flow_ctrl(struct xgmac_priv_data *priv, int rx, int tx);
+
#endif /* __XGMAC_COMMON_H__ */
@@ -9,12 +9,17 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
#include <linux/phy.h>
+#include <linux/ptp_clock_kernel.h>
#include "xgmac_common.h"
+#include "xgmac_reg.h"
+#include "xgmac_dma.h"
struct xgmac_stats {
char stat_string[ETH_GSTRING_LEN];
@@ -27,16 +32,105 @@ struct xgmac_stats {
offsetof(struct xgmac_priv_data, xstats.m)}
static const struct xgmac_stats xgmac_gstrings_stats[] = {
+ /* TX/RX IRQ events */
+ XGMAC_STAT(tx_process_stopped_irq),
+ XGMAC_STAT(tx_ctxt_desc_err),
+ XGMAC_STAT(tx_threshold),
+ XGMAC_STAT(rx_threshold),
+ XGMAC_STAT(tx_pkt_n),
+ XGMAC_STAT(rx_pkt_n),
+ XGMAC_STAT(normal_irq_n),
+ XGMAC_STAT(tx_normal_irq_n),
+ XGMAC_STAT(rx_normal_irq_n),
+ XGMAC_STAT(napi_poll),
+ XGMAC_STAT(tx_clean),
+ XGMAC_STAT(tx_reset_ic_bit),
+ XGMAC_STAT(rx_process_stopped_irq),
+ XGMAC_STAT(rx_underflow_irq),
+
+ /* Bus access errors */
+ XGMAC_STAT(fatal_bus_error_irq),
+ XGMAC_STAT(tx_read_transfer_err),
+ XGMAC_STAT(tx_write_transfer_err),
+ XGMAC_STAT(tx_desc_access_err),
+ XGMAC_STAT(tx_buffer_access_err),
+ XGMAC_STAT(tx_data_transfer_err),
+ XGMAC_STAT(rx_read_transfer_err),
+ XGMAC_STAT(rx_write_transfer_err),
+ XGMAC_STAT(rx_desc_access_err),
+ XGMAC_STAT(rx_buffer_access_err),
+ XGMAC_STAT(rx_data_transfer_err),
+ XGMAC_STAT(pmt_irq_event_n),
+
+ /* EEE-LPI stats */
XGMAC_STAT(tx_lpi_entry_n),
XGMAC_STAT(tx_lpi_exit_n),
XGMAC_STAT(rx_lpi_entry_n),
XGMAC_STAT(rx_lpi_exit_n),
XGMAC_STAT(eee_wakeup_error_n),
- XGMAC_STAT(pmt_irq_event_n),
+
+ /* RX specific */
+ /* L2 error */
+ XGMAC_STAT(rx_code_gmii_err),
+ XGMAC_STAT(rx_watchdog_err),
+ XGMAC_STAT(rx_crc_err),
+ XGMAC_STAT(rx_gaint_pkt_err),
+ XGMAC_STAT(ip_hdr_err),
+ XGMAC_STAT(ip_payload_err),
+ XGMAC_STAT(overflow_error),
+
+ /* L2 Pkt type */
+ XGMAC_STAT(len_pkt),
+ XGMAC_STAT(mac_ctl_pkt),
+ XGMAC_STAT(dcb_ctl_pkt),
+ XGMAC_STAT(arp_pkt),
+ XGMAC_STAT(oam_pkt),
+ XGMAC_STAT(untag_okt),
+ XGMAC_STAT(other_pkt),
+ XGMAC_STAT(svlan_tag_pkt),
+ XGMAC_STAT(cvlan_tag_pkt),
+ XGMAC_STAT(dvlan_ocvlan_icvlan_pkt),
+ XGMAC_STAT(dvlan_osvlan_isvlan_pkt),
+ XGMAC_STAT(dvlan_osvlan_icvlan_pkt),
+ XGMAC_STAT(dvan_ocvlan_icvlan_pkt),
+
+ /* L3/L4 Pkt type */
+ XGMAC_STAT(not_ip_pkt),
+ XGMAC_STAT(ip4_tcp_pkt),
+ XGMAC_STAT(ip4_udp_pkt),
+ XGMAC_STAT(ip4_icmp_pkt),
+ XGMAC_STAT(ip4_unknown_pkt),
+ XGMAC_STAT(ip6_tcp_pkt),
+ XGMAC_STAT(ip6_udp_pkt),
+ XGMAC_STAT(ip6_icmp_pkt),
+ XGMAC_STAT(ip6_unknown_pkt),
+
+ /* Filter specific */
+ XGMAC_STAT(vlan_filter_match),
+ XGMAC_STAT(sa_filter_fail),
+ XGMAC_STAT(da_filter_fail),
+ XGMAC_STAT(hash_filter_pass),
+ XGMAC_STAT(l3_filter_match),
+ XGMAC_STAT(l4_filter_match),
+
+ /* RX context specific */
+ XGMAC_STAT(timestamp_dropped),
+ XGMAC_STAT(rx_msg_type_no_ptp),
+ XGMAC_STAT(rx_ptp_type_sync),
+ XGMAC_STAT(rx_ptp_type_follow_up),
+ XGMAC_STAT(rx_ptp_type_delay_req),
+ XGMAC_STAT(rx_ptp_type_delay_resp),
+ XGMAC_STAT(rx_ptp_type_pdelay_req),
+ XGMAC_STAT(rx_ptp_type_pdelay_resp),
+ XGMAC_STAT(rx_ptp_type_pdelay_follow_up),
+ XGMAC_STAT(rx_ptp_announce),
+ XGMAC_STAT(rx_ptp_mgmt),
+ XGMAC_STAT(rx_ptp_signal),
+ XGMAC_STAT(rx_ptp_resv_msg_type),
};
#define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats)
-static int xgmac_ethtool_get_eee(struct net_device *dev,
+static int xgmac_get_eee(struct net_device *dev,
struct ethtool_eee *edata)
{
struct xgmac_priv_data *priv = netdev_priv(dev);
@@ -51,7 +145,7 @@ static int xgmac_ethtool_get_eee(struct net_device *dev,
return phy_ethtool_get_eee(priv->phydev, edata);
}
-static int xgmac_ethtool_set_eee(struct net_device *dev,
+static int xgmac_set_eee(struct net_device *dev,
struct ethtool_eee *edata)
{
struct xgmac_priv_data *priv = netdev_priv(dev);
@@ -106,7 +200,7 @@ static int xgmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return -EOPNOTSUPP;
if (wol->wolopts) {
- pr_info("stmmac: wakeup enable\n");
+ pr_info("xgmac: wakeup enable\n");
device_set_wakeup_enable(priv->device, true);
enable_irq_wake(priv->wol_irq);
} else {
@@ -119,9 +213,411 @@ static int xgmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return 0;
}
+static void xgmac_getdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+}
+
+static int xgmac_getsettings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct xgmac_priv_data *priv = netdev_priv(dev);
+
+ if (priv->phydev)
+ return phy_ethtool_gset(priv->phydev, cmd);
+
+ return -ENODEV;
+}
+
+static int xgmac_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct xgmac_priv_data *priv = netdev_priv(dev);
+
+ if (priv->phydev)
+ return phy_ethtool_sset(priv->phydev, cmd);
+
+ return -ENODEV;
+}
+
+static u32 xgmac_getmsglevel(struct net_device *dev)
+{
+ struct xgmac_priv_data *priv = netdev_priv(dev);
+ return priv->msg_enable;
+}
+
+static void xgmac_setmsglevel(struct net_device *dev, u32 level)
+{
+ struct xgmac_priv_data *priv = netdev_priv(dev);
+ priv->msg_enable = level;
+
+}
+
+static int xgmac_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct xgmac_priv_data *priv = netdev_priv(dev);
+
+ if (priv->hw_cap.atime_stamp) {
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (priv->ptp_clock)
+ info->phc_index = ptp_clock_index(priv->ptp_clock);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON)
+ | (1 << HWTSTAMP_TX_ONESTEP_SYNC);
+
+ info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_ALL));
+ return 0;
+ } else
+ return ethtool_op_get_ts_info(dev, info);
+}
+
+int xgmac_set_flow_ctrl(struct xgmac_priv_data *priv, int rx, int tx)
+{
+ return 0;
+}
+
+static void xgmac_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct xgmac_priv_data *priv = netdev_priv(netdev);
+
+ pause->rx_pause = priv->rx_pause;
+ pause->tx_pause = priv->tx_pause;
+}
+
+static int xgmac_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct xgmac_priv_data *priv = netdev_priv(netdev);
+
+ if (pause->autoneg)
+ return -EINVAL;
+
+ return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause);
+}
+
+
+
+static void xgmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int i;
+ u8 *p = data;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < XGMAC_STATS_LEN; i++) {
+ memcpy(p, xgmac_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static int xgmac_get_sset_count(struct net_device *netdev, int sset)
+{
+ int len;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ len = XGMAC_STATS_LEN;
+ return len;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void xgmac_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *dummy, u64 *data)
+{
+ struct xgmac_priv_data *priv = netdev_priv(dev);
+ int i, j = 0;
+ char *p;
+
+ if (priv->eee_enabled) {
+ int val = phy_get_eee_err(priv->phydev);
+ if (val)
+ priv->xstats.eee_wakeup_error_n = val;
+ }
+
+ for (i = 0; i < XGMAC_STATS_LEN; i++) {
+ p = (char *)priv + xgmac_gstrings_stats[i].stat_offset;
+ data[j++] = (xgmac_gstrings_stats[i].sizeof_stat == sizeof(u64))
+ ? (*(u64 *)p) : (*(u32 *)p);
+ }
+}
+
+static void xgmac_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ channel->max_rx = XGMAC_MAX_RX_CHANNELS;
+ channel->max_tx = XGMAC_MAX_TX_CHANNELS;
+ channel->rx_count = XGMAC_RX_QUEUES;
+ channel->tx_count = XGMAC_TX_QUEUES;
+}
+
+static u32 xgmac_riwt2usec(u32 riwt, struct xgmac_priv_data *priv)
+{
+ unsigned long clk = clk_get_rate(priv->xgmac_clk);
+
+ if (!clk)
+ return 0;
+
+ return (riwt * 256) / (clk / 1000000);
+}
+
+static u32 xgmac_usec2riwt(u32 usec, struct xgmac_priv_data *priv)
+{
+ unsigned long clk = clk_get_rate(priv->xgmac_clk);
+
+ if (!clk)
+ return 0;
+
+ return (usec * (clk / 1000000)) / 256;
+}
+
+static int xgmac_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct xgmac_priv_data *priv = netdev_priv(dev);
+
+ if (priv->use_riwt)
+ ec->rx_coalesce_usecs = xgmac_riwt2usec(priv->rx_riwt, priv);
+
+ return 0;
+}
+
+static int xgmac_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct xgmac_priv_data *priv = netdev_priv(dev);
+ unsigned int rx_riwt;
+
+ rx_riwt = xgmac_usec2riwt(ec->rx_coalesce_usecs, priv);
+
+ if ((rx_riwt > XGMAC_MAX_DMA_RIWT) || (rx_riwt < XGMAC_MIN_DMA_RIWT))
+ return -EINVAL;
+ else if (!priv->use_riwt)
+ return -EOPNOTSUPP;
+
+ priv->rx_riwt = rx_riwt;
+ priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt);
+
+ return 0;
+}
+
+static int xgmac_get_rss_hash_opts(struct xgmac_priv_data *priv,
+ struct ethtool_rxnfc *cmd)
+{
+ cmd->data = 0;
+
+ /* Report default options for RSS on xgmac */
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int xgmac_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct xgmac_priv_data *priv = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXFH:
+ ret = xgmac_get_rss_hash_opts(priv, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int xgmac_set_rss_hash_opt(struct xgmac_priv_data *priv,
+ struct ethtool_rxnfc *cmd)
+{
+ u32 reg_val = 0;
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (!(cmd->data & RXH_IP_SRC) ||
+ !(cmd->data & RXH_IP_DST) ||
+ !(cmd->data & RXH_L4_B_0_1) ||
+ !(cmd->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ reg_val = XGMAC_CORE_RSS_CTL_TCP4TE;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ if (!(cmd->data & RXH_IP_SRC) ||
+ !(cmd->data & RXH_IP_DST) ||
+ !(cmd->data & RXH_L4_B_0_1) ||
+ !(cmd->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ reg_val = XGMAC_CORE_RSS_CTL_UDP4TE;
+ break;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ if (!(cmd->data & RXH_IP_SRC) ||
+ !(cmd->data & RXH_IP_DST) ||
+ (cmd->data & RXH_L4_B_0_1) ||
+ (cmd->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ reg_val = XGMAC_CORE_RSS_CTL_IP2TE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Read XGMAC RSS control register and update */
+ reg_val |= readl(priv->ioaddr + XGMAC_CORE_RSS_CTL_REG);
+ writel(reg_val, priv->ioaddr + XGMAC_CORE_RSS_CTL_REG);
+ readl(priv->ioaddr + XGMAC_CORE_RSS_CTL_REG);
+
+ return 0;
+}
+
+static int xgmac_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct xgmac_priv_data *priv = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = xgmac_set_rss_hash_opt(priv, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void xgmac_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *space)
+{
+ struct xgmac_priv_data *priv = netdev_priv(dev);
+ u32 *reg_space = (u32 *)space;
+ int reg_offset;
+ int reg_ix = 0;
+ void __iomem *ioaddr = priv->ioaddr;
+
+ memset(reg_space, 0x0, REG_SPACE_SIZE);
+
+ /* MAC registers */
+ for (reg_offset = START_MAC_REG_OFFSET;
+ reg_offset <= MAX_MAC_REG_OFFSET; reg_offset += 4) {
+ reg_space[reg_ix] = readl(ioaddr + reg_offset);
+ reg_ix++;
+ }
+
+ /* MTL registers */
+ for (reg_offset = START_MTL_REG_OFFSET;
+ reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) {
+ reg_space[reg_ix] = readl(ioaddr + reg_offset);
+ reg_ix++;
+ }
+
+ /* DMA registers */
+ for (reg_offset = START_DMA_REG_OFFSET;
+ reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
+ reg_space[reg_ix] = readl(ioaddr + reg_offset);
+ reg_ix++;
+ }
+}
+
+static int xgmac_get_regs_len(struct net_device *dev)
+{
+ return REG_SPACE_SIZE;
+}
+
static const struct ethtool_ops xgmac_ethtool_ops = {
- .get_eee = xgmac_ethtool_get_eee,
- .set_eee = xgmac_ethtool_set_eee,
+ .get_drvinfo = xgmac_getdrvinfo,
+ .get_settings = xgmac_getsettings,
+ .set_settings = xgmac_setsettings,
+ .get_msglevel = xgmac_getmsglevel,
+ .set_msglevel = xgmac_setmsglevel,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = xgmac_get_ts_info,
+ .get_pauseparam = xgmac_get_pauseparam,
+ .set_pauseparam = xgmac_set_pauseparam,
+ .get_strings = xgmac_get_strings,
+ .get_ethtool_stats = xgmac_get_ethtool_stats,
+ .get_sset_count = xgmac_get_sset_count,
+ .get_channels = xgmac_get_channels,
+ .get_coalesce = xgmac_get_coalesce,
+ .set_coalesce = xgmac_set_coalesce,
+ .get_rxnfc = xgmac_get_rxnfc,
+ .set_rxnfc = xgmac_set_rxnfc,
+ .get_regs = xgmac_get_regs,
+ .get_regs_len = xgmac_get_regs_len,
+ .get_eee = xgmac_get_eee,
+ .set_eee = xgmac_set_eee,
.get_wol = xgmac_get_wol,
.set_wol = xgmac_set_wol,
};
@@ -56,7 +56,6 @@ static int debug = -1;
static int xgmac_phyaddr = -1;
static int dma_txsize = DMA_TX_SIZE;
static int dma_rxsize = DMA_RX_SIZE;
-static int flow_ctrl = XGMAC_FLOW_OFF;
static int pause = XGMAC_PAUSE_TIME;
static int tx_tc = TC_DEFAULT;
static int rx_tc = TC_DEFAULT;
@@ -68,7 +67,6 @@ module_param(debug, int, S_IRUGO | S_IWUSR);
module_param(xgmac_phyaddr, int, S_IRUGO);
module_param(dma_txsize, int, S_IRUGO | S_IWUSR);
module_param(dma_rxsize, int, S_IRUGO | S_IWUSR);
-module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
module_param(pause, int, S_IRUGO | S_IWUSR);
module_param(tx_tc, int, S_IRUGO | S_IWUSR);
module_param(rx_tc, int, S_IRUGO | S_IWUSR);
@@ -102,10 +100,6 @@ static void xgmac_verify_args(void)
dma_txsize = DMA_TX_SIZE;
if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB)))
buf_sz = DMA_BUFFER_SIZE;
- if (unlikely(flow_ctrl > 1))
- flow_ctrl = XGMAC_FLOW_AUTO;
- else if (likely(flow_ctrl < 0))
- flow_ctrl = XGMAC_FLOW_OFF;
if (unlikely((pause < 0) || (pause > 0xffff)))
pause = XGMAC_PAUSE_TIME;
if (unlikely(eee_timer < 0))
@@ -2219,9 +2213,6 @@ struct xgmac_priv_data *xgmac_dvr_probe(struct device *device,
priv->msg_enable = netif_msg_init(debug, default_msg_level);
- if (flow_ctrl)
- priv->flow_ctrl = XGMAC_FLOW_AUTO; /* RX/TX pause on */
-
/* Enable TCP segmentation offload for all DMA channels */
if (priv->hw_cap.tcpseg_offload) {
XGMAC_FOR_EACH_QUEUE(XGMAC_TX_QUEUES, queue_num) {
@@ -2235,6 +2226,11 @@ struct xgmac_priv_data *xgmac_dvr_probe(struct device *device,
priv->rxcsum_insertion = true;
}
+ /* Initialise pause frame settings */
+ priv->rx_pause = 1;
+ priv->tx_pause = 1;
+ xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause);
+
/* Rx Watchdog is available, enable depend on platform data */
if (!priv->plat->riwt_off) {
priv->use_riwt = 1;
@@ -195,6 +195,12 @@
#define XGMAC_CORE_RSS_ADD_REG 0x0C88
#define XGMAC_CORE_RSS_DATA_REG 0x0C8C
+/* RSS control register bits */
+#define XGMAC_CORE_RSS_CTL_UDP4TE BIT(3)
+#define XGMAC_CORE_RSS_CTL_TCP4TE BIT(2)
+#define XGMAC_CORE_RSS_CTL_IP2TE BIT(1)
+#define XGMAC_CORE_RSS_CTL_RSSE BIT(0)
+
/* IEEE 1588 registers */
#define XGMAC_CORE_TSTAMP_CTL_REG 0x0D00
#define XGMAC_CORE_SUBSEC_INC_REG 0x0D04