Diffstat (limited to 'drivers/net/ethernet')
371 files changed, 30488 insertions, 4975 deletions
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c index 65f56a98c0a0..1a34da07c0db 100644 --- a/drivers/net/ethernet/8390/ne2k-pci.c +++ b/drivers/net/ethernet/8390/ne2k-pci.c @@ -186,17 +186,6 @@ static void ne2k_pci_block_output(struct net_device *dev, const int count, static const struct ethtool_ops ne2k_pci_ethtool_ops; - -/* There is no room in the standard 8390 structure for extra info we need, - * so we build a meta/outer-wrapper structure.. - */ -struct ne2k_pci_card { - struct net_device *dev; - struct pci_dev *pci_dev; -}; - - - /* NEx000-clone boards have a Station Address (SA) PROM (SAPROM) in the packet * buffer memory space. By-the-spec NE2000 clones have 0x57,0x57 in bytes * 0x0e,0x0f of the SAPROM, while other supposed NE2000 clones must be diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 6a19b5393ed1..0baac25db4f8 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -122,6 +122,7 @@ source "drivers/net/ethernet/litex/Kconfig" source "drivers/net/ethernet/marvell/Kconfig" source "drivers/net/ethernet/mediatek/Kconfig" source "drivers/net/ethernet/mellanox/Kconfig" +source "drivers/net/ethernet/meta/Kconfig" source "drivers/net/ethernet/micrel/Kconfig" source "drivers/net/ethernet/microchip/Kconfig" source "drivers/net/ethernet/mscc/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 0d872d4efcd1..c03203439c0e 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -59,6 +59,7 @@ obj-$(CONFIG_NET_VENDOR_LITEX) += litex/ obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/ obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/ obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ +obj-$(CONFIG_NET_VENDOR_META) += meta/ obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/ obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/ obj-$(CONFIG_NET_VENDOR_MICROSEMI) += mscc/ diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c index 857361c74f5d..e1b8794b14c9 100644 --- a/drivers/net/ethernet/adaptec/starfire.c +++ b/drivers/net/ethernet/adaptec/starfire.c @@ -441,14 +441,6 @@ enum rx_desc_bits { }; /* Completion queue entry. */ -struct short_rx_done_desc { - __le32 status; /* Low 16 bits is length. */ -}; -struct basic_rx_done_desc { - __le32 status; /* Low 16 bits is length. */ - __le16 vlanid; - __le16 status2; -}; struct csum_rx_done_desc { __le32 status; /* Low 16 bits is length. 
*/ __le16 csum; /* Partial checksum */ diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c index ef512cf89abf..27792a52b6cf 100644 --- a/drivers/net/ethernet/amd/7990.c +++ b/drivers/net/ethernet/amd/7990.c @@ -667,4 +667,5 @@ void lance_poll(struct net_device *dev) EXPORT_SYMBOL_GPL(lance_poll); #endif +MODULE_DESCRIPTION("LANCE Ethernet IC generic routines"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c index 68983b717145..1ca26a8c40eb 100644 --- a/drivers/net/ethernet/amd/a2065.c +++ b/drivers/net/ethernet/amd/a2065.c @@ -781,4 +781,5 @@ static void __exit a2065_cleanup_module(void) module_init(a2065_init_module); module_exit(a2065_cleanup_module); +MODULE_DESCRIPTION("Commodore A2065 Ethernet driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c index 38153e633231..fa201da567ed 100644 --- a/drivers/net/ethernet/amd/ariadne.c +++ b/drivers/net/ethernet/amd/ariadne.c @@ -790,4 +790,5 @@ static void __exit ariadne_cleanup_module(void) module_init(ariadne_init_module); module_exit(ariadne_cleanup_module); +MODULE_DESCRIPTION("Ariadne Ethernet Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c index 751454d305c6..8c8cc7d0f42d 100644 --- a/drivers/net/ethernet/amd/atarilance.c +++ b/drivers/net/ethernet/amd/atarilance.c @@ -79,6 +79,7 @@ static int lance_debug = 1; #endif module_param(lance_debug, int, 0); MODULE_PARM_DESC(lance_debug, "atarilance debug level (0-3)"); +MODULE_DESCRIPTION("Atari LANCE Ethernet driver"); MODULE_LICENSE("GPL"); /* Print debug messages on probing? */ diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c index 055fda11c572..df42294530cb 100644 --- a/drivers/net/ethernet/amd/hplance.c +++ b/drivers/net/ethernet/amd/hplance.c @@ -234,4 +234,5 @@ static void __exit hplance_cleanup_module(void) module_init(hplance_init_module); module_exit(hplance_cleanup_module); +MODULE_DESCRIPTION("HP300 on-board LANCE Ethernet driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c index 6cf38180cc01..b1e6620ad41d 100644 --- a/drivers/net/ethernet/amd/lance.c +++ b/drivers/net/ethernet/amd/lance.c @@ -385,6 +385,7 @@ static void __exit lance_cleanup_module(void) } module_exit(lance_cleanup_module); #endif /* MODULE */ +MODULE_DESCRIPTION("AMD LANCE/PCnet Ethernet driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c index 410c7b67eba4..c156566c0906 100644 --- a/drivers/net/ethernet/amd/mvme147.c +++ b/drivers/net/ethernet/amd/mvme147.c @@ -178,6 +178,7 @@ static int m147lance_close(struct net_device *dev) return 0; } +MODULE_DESCRIPTION("MVME147 LANCE Ethernet driver"); MODULE_LICENSE("GPL"); static struct net_device *dev_mvme147_lance; diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c index 246f34c43765..c60df4a21158 100644 --- a/drivers/net/ethernet/amd/sun3lance.c +++ b/drivers/net/ethernet/amd/sun3lance.c @@ -74,6 +74,7 @@ static int lance_debug = 1; #endif module_param(lance_debug, int, 0); MODULE_PARM_DESC(lance_debug, "SUN3 Lance debug level (0-3)"); +MODULE_DESCRIPTION("Sun3/Sun3x on-board LANCE Ethernet driver"); MODULE_LICENSE("GPL"); #define DPRINTK(n,a) \ diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c 
index 58e7e88aae5b..21407a26f806 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -577,7 +577,7 @@ static int xgbe_set_rxfh(struct net_device *netdev, } static int xgbe_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *ts_info) + struct kernel_ethtool_ts_info *ts_info) { struct xgbe_prv_data *pdata = netdev_priv(netdev); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index a2606ee3b0a5..d0aecd1d7357 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -652,7 +652,7 @@ static int aq_ethtool_set_wol(struct net_device *ndev, } static int aq_ethtool_get_ts_info(struct net_device *ndev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct aq_nic_s *aq_nic = netdev_priv(ndev); diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig index 0a67612af228..0d400a7d8d91 100644 --- a/drivers/net/ethernet/arc/Kconfig +++ b/drivers/net/ethernet/arc/Kconfig @@ -23,16 +23,6 @@ config ARC_EMAC_CORE select PHYLIB select CRC32 -config ARC_EMAC - tristate "ARC EMAC support" - select ARC_EMAC_CORE - depends on OF_IRQ - depends on ARC || COMPILE_TEST - help - On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x - non-standard on-chip ethernet device ARC EMAC 10/100 is used. - Say Y here if you have such a board. If unsure, say N. - config EMAC_ROCKCHIP tristate "Rockchip EMAC support" select ARC_EMAC_CORE diff --git a/drivers/net/ethernet/arc/Makefile b/drivers/net/ethernet/arc/Makefile index d63ada577c8e..23586eefec44 100644 --- a/drivers/net/ethernet/arc/Makefile +++ b/drivers/net/ethernet/arc/Makefile @@ -5,5 +5,4 @@ arc_emac-objs := emac_main.o emac_mdio.o obj-$(CONFIG_ARC_EMAC_CORE) += arc_emac.o -obj-$(CONFIG_ARC_EMAC) += emac_arc.o obj-$(CONFIG_EMAC_ROCKCHIP) += emac_rockchip.o diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c deleted file mode 100644 index a3afddb23ee8..000000000000 --- a/drivers/net/ethernet/arc/emac_arc.c +++ /dev/null @@ -1,88 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/** - * DOC: emac_arc.c - ARC EMAC specific glue layer - * - * Copyright (C) 2014 Romain Perier - * - * Romain Perier <romain.perier@gmail.com> - */ - -#include <linux/etherdevice.h> -#include <linux/module.h> -#include <linux/of_net.h> -#include <linux/platform_device.h> - -#include "emac.h" - -#define DRV_NAME "emac_arc" - -static int emac_arc_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct arc_emac_priv *priv; - phy_interface_t interface; - struct net_device *ndev; - int err; - - if (!dev->of_node) - return -ENODEV; - - ndev = alloc_etherdev(sizeof(struct arc_emac_priv)); - if (!ndev) - return -ENOMEM; - platform_set_drvdata(pdev, ndev); - SET_NETDEV_DEV(ndev, dev); - - priv = netdev_priv(ndev); - priv->drv_name = DRV_NAME; - - err = of_get_phy_mode(dev->of_node, &interface); - if (err) { - if (err == -ENODEV) - interface = PHY_INTERFACE_MODE_MII; - else - goto out_netdev; - } - - priv->clk = devm_clk_get(dev, "hclk"); - if (IS_ERR(priv->clk)) { - dev_err(dev, "failed to retrieve host clock from device tree\n"); - err = -EINVAL; - goto out_netdev; - } - - err = arc_emac_probe(ndev, interface); -out_netdev: - if (err) - free_netdev(ndev); - return err; -} - -static void emac_arc_remove(struct platform_device *pdev) -{ - struct net_device *ndev = 
platform_get_drvdata(pdev); - - arc_emac_remove(ndev); - free_netdev(ndev); -} - -static const struct of_device_id emac_arc_dt_ids[] = { - { .compatible = "snps,arc-emac" }, - { /* Sentinel */ } -}; -MODULE_DEVICE_TABLE(of, emac_arc_dt_ids); - -static struct platform_driver emac_arc_driver = { - .probe = emac_arc_probe, - .remove_new = emac_arc_remove, - .driver = { - .name = DRV_NAME, - .of_match_table = emac_arc_dt_ids, - }, -}; - -module_platform_driver(emac_arc_driver); - -MODULE_AUTHOR("Romain Perier <romain.perier@gmail.com>"); -MODULE_DESCRIPTION("ARC EMAC platform driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 58956ed8f531..c7b56a5e5425 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -3634,7 +3634,7 @@ static int bnx2x_set_channels(struct net_device *dev, } static int bnx2x_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct bnx2x *bp = netdev_priv(dev); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 43952689bfb0..bb3be33c1bbd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -456,8 +456,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) dma_addr_t mapping; unsigned int length, pad = 0; u32 len, free_size, vlan_tag_flags, cfa_action, flags; - u16 prod, last_frag; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; struct pci_dev *pdev = bp->pdev; + u16 prod, last_frag, txts_prod; struct bnxt_tx_ring_info *txr; struct bnxt_sw_tx_bd *tx_buf; __le32 lflags = 0; @@ -509,23 +510,29 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT; } - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { - struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp && + ptp->tx_tstamp_en) { + if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) { + lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP); + tx_buf->is_ts_pkt = 1; + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + } else if (!skb_is_gso(skb)) { + u16 seq_id, hdr_off; - if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) && - atomic_dec_if_positive(&ptp->tx_avail) >= 0) { - if (!bnxt_ptp_parse(skb, &ptp->tx_seqid, - &ptp->tx_hdr_off)) { + if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) && + !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) { if (vlan_tag_flags) - ptp->tx_hdr_off += VLAN_HLEN; + hdr_off += VLAN_HLEN; lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP); + tx_buf->is_ts_pkt = 1; skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - } else { - atomic_inc(&bp->ptp_cfg->tx_avail); + + ptp->txts_req[txts_prod].tx_seqid = seq_id; + ptp->txts_req[txts_prod].tx_hdr_off = hdr_off; + tx_buf->txts_prod = txts_prod; } } } - if (unlikely(skb->no_fcs)) lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC); @@ -753,8 +760,13 @@ tx_dma_error: tx_free: dev_kfree_skb_any(skb); tx_kick_pending: - if (BNXT_TX_PTP_IS_SET(lflags)) - atomic_inc(&bp->ptp_cfg->tx_avail); + if (BNXT_TX_PTP_IS_SET(lflags)) { + txr->tx_buf_ring[txr->tx_prod].is_ts_pkt = 0; + atomic64_inc(&bp->ptp_cfg->stats.ts_err); + if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) + /* set SKB to err so PTP worker will clean up */ + ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO); + } if (txr->kick_pending) bnxt_txr_db_kick(bp, txr, 
txr->tx_prod); txr->tx_buf_ring[txr->tx_prod].skb = NULL; @@ -762,7 +774,8 @@ tx_kick_pending: return NETDEV_TX_OK; } -static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr, +/* Returns true if some remaining TX packets not processed. */ +static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr, int budget) { struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index); @@ -771,24 +784,33 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr, unsigned int tx_bytes = 0; u16 cons = txr->tx_cons; int tx_pkts = 0; + bool rc = false; while (RING_TX(bp, cons) != hw_cons) { struct bnxt_sw_tx_bd *tx_buf; struct sk_buff *skb; + bool is_ts_pkt; int j, last; tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)]; - cons = NEXT_TX(cons); skb = tx_buf->skb; - tx_buf->skb = NULL; if (unlikely(!skb)) { bnxt_sched_reset_txr(bp, txr, cons); - return; + return rc; + } + + is_ts_pkt = tx_buf->is_ts_pkt; + if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) { + rc = true; + break; } + cons = NEXT_TX(cons); tx_pkts++; tx_bytes += skb->len; + tx_buf->skb = NULL; + tx_buf->is_ts_pkt = 0; if (tx_buf->is_push) { tx_buf->is_push = 0; @@ -808,13 +830,11 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr, skb_frag_size(&skb_shinfo(skb)->frags[j]), DMA_TO_DEVICE); } - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { + if (unlikely(is_ts_pkt)) { if (BNXT_CHIP_P5(bp)) { /* PTP worker takes ownership of the skb */ - if (!bnxt_get_tx_ts_p5(bp, skb)) - skb = NULL; - else - atomic_inc(&bp->ptp_cfg->tx_avail); + bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod); + skb = NULL; } } @@ -829,18 +849,22 @@ next_tx_int: __netif_txq_completed_wake(txq, tx_pkts, tx_bytes, bnxt_tx_avail(bp, txr), bp->tx_wake_thresh, READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING); + + return rc; } static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) { struct bnxt_tx_ring_info *txr; + bool more = false; int i; bnxt_for_each_napi_tx(i, bnapi, txr) { if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons)) - __bnxt_tx_int(bp, txr, budget); + more |= __bnxt_tx_int(bp, txr, budget); } - bnapi->events &= ~BNXT_TX_CMP_EVENT; + if (!more) + bnapi->events &= ~BNXT_TX_CMP_EVENT; } static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, @@ -2906,6 +2930,8 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, cpr->has_more_work = 1; break; } + } else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) { + bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp); } else if (cmp_type >= CMP_TYPE_RX_L2_CMP && cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) { if (likely(budget)) @@ -2937,8 +2963,10 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } } - if (event & BNXT_REDIRECT_EVENT) + if (event & BNXT_REDIRECT_EVENT) { xdp_do_flush(); + event &= ~BNXT_REDIRECT_EVENT; + } if (event & BNXT_TX_EVENT) { struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0]; @@ -2948,6 +2976,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, wmb(); bnxt_db_write_relaxed(bp, &txr->tx_db, prod); + event &= ~BNXT_TX_EVENT; } cpr->cp_raw_cons = raw_cons; @@ -2965,13 +2994,14 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi, struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); + bnapi->events &= ~BNXT_RX_EVENT; } if (bnapi->events & BNXT_AGG_EVENT) { struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; bnxt_db_write(bp, 
&rxr->rx_agg_db, rxr->rx_agg_prod); + bnapi->events &= ~BNXT_AGG_EVENT; } - bnapi->events &= BNXT_TX_CMP_EVENT; } static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, @@ -3308,37 +3338,12 @@ static void bnxt_free_tx_skbs(struct bnxt *bp) } } -static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) +static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) { - struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; struct pci_dev *pdev = bp->pdev; - struct bnxt_tpa_idx_map *map; - int i, max_idx, max_agg_idx; + int i, max_idx; max_idx = bp->rx_nr_pages * RX_DESC_CNT; - max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; - if (!rxr->rx_tpa) - goto skip_rx_tpa_free; - - for (i = 0; i < bp->max_tpa; i++) { - struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i]; - u8 *data = tpa_info->data; - - if (!data) - continue; - - dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping, - bp->rx_buf_use_size, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); - - tpa_info->data = NULL; - - skb_free_frag(data); - } - -skip_rx_tpa_free: - if (!rxr->rx_buf_ring) - goto skip_rx_buf_free; for (i = 0; i < max_idx; i++) { struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; @@ -3358,12 +3363,15 @@ skip_rx_tpa_free: skb_free_frag(data); } } +} -skip_rx_buf_free: - if (!rxr->rx_agg_ring) - goto skip_rx_agg_free; +static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) +{ + int i, max_idx; - for (i = 0; i < max_agg_idx; i++) { + max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; + + for (i = 0; i < max_idx; i++) { struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; struct page *page = rx_agg_buf->page; @@ -3375,6 +3383,45 @@ skip_rx_buf_free: page_pool_recycle_direct(rxr->page_pool, page); } +} + +static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) +{ + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; + struct pci_dev *pdev = bp->pdev; + struct bnxt_tpa_idx_map *map; + int i; + + if (!rxr->rx_tpa) + goto skip_rx_tpa_free; + + for (i = 0; i < bp->max_tpa; i++) { + struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i]; + u8 *data = tpa_info->data; + + if (!data) + continue; + + dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping, + bp->rx_buf_use_size, bp->rx_dir, + DMA_ATTR_WEAK_ORDERING); + + tpa_info->data = NULL; + + skb_free_frag(data); + } + +skip_rx_tpa_free: + if (!rxr->rx_buf_ring) + goto skip_rx_buf_free; + + bnxt_free_one_rx_ring(bp, rxr); + +skip_rx_buf_free: + if (!rxr->rx_agg_ring) + goto skip_rx_agg_free; + + bnxt_free_one_rx_agg_ring(bp, rxr); skip_rx_agg_free: map = rxr->rx_tpa_idx_map; @@ -3971,6 +4018,62 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp) return 0; } +static void bnxt_init_rx_ring_struct(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr) +{ + struct bnxt_ring_mem_info *rmem; + struct bnxt_ring_struct *ring; + + ring = &rxr->rx_ring_struct; + rmem = &ring->ring_mem; + rmem->nr_pages = bp->rx_nr_pages; + rmem->page_size = HW_RXBD_RING_SIZE; + rmem->pg_arr = (void **)rxr->rx_desc_ring; + rmem->dma_arr = rxr->rx_desc_mapping; + rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; + rmem->vmem = (void **)&rxr->rx_buf_ring; + + ring = &rxr->rx_agg_ring_struct; + rmem = &ring->ring_mem; + rmem->nr_pages = bp->rx_agg_nr_pages; + rmem->page_size = HW_RXBD_RING_SIZE; + rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; + rmem->dma_arr = rxr->rx_agg_desc_mapping; + rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; + rmem->vmem = (void **)&rxr->rx_agg_ring; +} + +static void 
bnxt_reset_rx_ring_struct(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr) +{ + struct bnxt_ring_mem_info *rmem; + struct bnxt_ring_struct *ring; + int i; + + rxr->page_pool->p.napi = NULL; + rxr->page_pool = NULL; + + ring = &rxr->rx_ring_struct; + rmem = &ring->ring_mem; + rmem->pg_tbl = NULL; + rmem->pg_tbl_map = 0; + for (i = 0; i < rmem->nr_pages; i++) { + rmem->pg_arr[i] = NULL; + rmem->dma_arr[i] = 0; + } + *rmem->vmem = NULL; + + ring = &rxr->rx_agg_ring_struct; + rmem = &ring->ring_mem; + rmem->pg_tbl = NULL; + rmem->pg_tbl_map = 0; + for (i = 0; i < rmem->nr_pages; i++) { + rmem->pg_arr[i] = NULL; + rmem->dma_arr[i] = 0; + } + *rmem->vmem = NULL; +} + static void bnxt_init_ring_struct(struct bnxt *bp) { int i, j; @@ -4053,37 +4156,55 @@ static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) } } -static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) +static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + int ring_nr) { - struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; - struct net_device *dev = bp->dev; u32 prod; int i; prod = rxr->rx_prod; for (i = 0; i < bp->rx_ring_size; i++) { if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) { - netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", + netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n", ring_nr, i, bp->rx_ring_size); break; } prod = NEXT_RX(prod); } rxr->rx_prod = prod; +} - if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) - return 0; +static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + int ring_nr) +{ + u32 prod; + int i; prod = rxr->rx_agg_prod; for (i = 0; i < bp->rx_agg_ring_size; i++) { if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) { - netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", + netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n", ring_nr, i, bp->rx_ring_size); break; } prod = NEXT_RX_AGG(prod); } rxr->rx_agg_prod = prod; +} + +static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) +{ + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; + int i; + + bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr); + + if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) + return 0; + + bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr); if (rxr->rx_tpa) { dma_addr_t mapping; @@ -4102,9 +4223,9 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) return 0; } -static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) +static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr) { - struct bnxt_rx_ring_info *rxr; struct bnxt_ring_struct *ring; u32 type; @@ -4114,28 +4235,43 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) if (NET_IP_ALIGN == 2) type |= RX_BD_FLAGS_SOP; - rxr = &bp->rx_ring[ring_nr]; ring = &rxr->rx_ring_struct; bnxt_init_rxbd_pages(ring, type); - - netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX, - &rxr->bnapi->napi); - - if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { - bpf_prog_add(bp->xdp_prog, 1); - rxr->xdp_prog = bp->xdp_prog; - } ring->fw_ring_id = INVALID_HW_RING_ID; +} + +static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr) +{ + struct bnxt_ring_struct *ring; + u32 type; ring = &rxr->rx_agg_ring_struct; ring->fw_ring_id = INVALID_HW_RING_ID; - if ((bp->flags & BNXT_FLAG_AGG_RINGS)) { type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; bnxt_init_rxbd_pages(ring, type); } +} + +static int 
bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) +{ + struct bnxt_rx_ring_info *rxr; + + rxr = &bp->rx_ring[ring_nr]; + bnxt_init_one_rx_ring_rxbd(bp, rxr); + + netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX, + &rxr->bnapi->napi); + + if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { + bpf_prog_add(bp->xdp_prog, 1); + rxr->xdp_prog = bp->xdp_prog; + } + + bnxt_init_one_rx_agg_ring_rxbd(bp, rxr); return bnxt_alloc_one_rx_ring(bp, ring_nr); } @@ -5834,17 +5970,20 @@ bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp, struct hwrm_cfa_ntuple_filter_alloc_input *req, struct bnxt_ntuple_filter *fltr) { - struct bnxt_rss_ctx *rss_ctx, *tmp; u16 rxq = fltr->base.rxq; if (fltr->base.flags & BNXT_ACT_RSS_CTX) { - list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) { - if (rss_ctx->index == fltr->base.fw_vnic_id) { - struct bnxt_vnic_info *vnic = &rss_ctx->vnic; + struct ethtool_rxfh_context *ctx; + struct bnxt_rss_ctx *rss_ctx; + struct bnxt_vnic_info *vnic; - req->dst_id = cpu_to_le16(vnic->fw_vnic_id); - break; - } + ctx = xa_load(&bp->dev->ethtool->rss_ctx, + fltr->base.fw_vnic_id); + if (ctx) { + rss_ctx = ethtool_rxfh_context_priv(ctx); + vnic = &rss_ctx->vnic; + + req->dst_id = cpu_to_le16(vnic->fw_vnic_id); } return; } @@ -6083,10 +6222,9 @@ static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); } -int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx) +static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp) { int entries; - u16 *tbl; if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; @@ -6094,22 +6232,19 @@ int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx) entries = HW_HASH_INDEX_SIZE; bp->rss_indir_tbl_entries = entries; - tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL); - if (!tbl) + bp->rss_indir_tbl = + kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL); + if (!bp->rss_indir_tbl) return -ENOMEM; - if (rss_ctx) - rss_ctx->rss_indir_tbl = tbl; - else - bp->rss_indir_tbl = tbl; - return 0; } -void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx) +void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, + struct ethtool_rxfh_context *rss_ctx) { u16 max_rings, max_entries, pad, i; - u16 *rss_indir_tbl; + u32 *rss_indir_tbl; if (!bp->rx_nr_rings) return; @@ -6121,7 +6256,7 @@ void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx) max_entries = bnxt_get_rxfh_indir_size(bp->dev); if (rss_ctx) - rss_indir_tbl = &rss_ctx->rss_indir_tbl[0]; + rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx); else rss_indir_tbl = &bp->rss_indir_tbl[0]; @@ -6130,12 +6265,12 @@ void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx) pad = bp->rss_indir_tbl_entries - max_entries; if (pad) - memset(&rss_indir_tbl[i], 0, pad * sizeof(u16)); + memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl)); } static u16 bnxt_get_max_rss_ring(struct bnxt *bp) { - u16 i, tbl_size, max_ring = 0; + u32 i, tbl_size, max_ring = 0; if (!bp->rss_indir_tbl) return 0; @@ -6146,24 +6281,6 @@ static u16 bnxt_get_max_rss_ring(struct bnxt *bp) return max_ring; } -u16 bnxt_get_max_rss_ctx_ring(struct bnxt *bp) -{ - u16 i, tbl_size, max_ring = 0; - struct bnxt_rss_ctx *rss_ctx; - - if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) - return 0; - - tbl_size = bnxt_get_rxfh_indir_size(bp->dev); - - list_for_each_entry(rss_ctx, &bp->rss_ctx_list, list) { - for (i = 0; i < tbl_size; i++) - 
max_ring = max(max_ring, rss_ctx->rss_indir_tbl[i]); - } - - return max_ring; -} - int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { @@ -6205,7 +6322,7 @@ static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG) j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings); else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG) - j = vnic->rss_ctx->rss_indir_tbl[i]; + j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i]; else j = bp->rss_indir_tbl[i]; rxr = &bp->rx_ring[j]; @@ -6692,6 +6809,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, switch (ring_type) { case HWRM_RING_ALLOC_TX: { struct bnxt_tx_ring_info *txr; + u16 flags = 0; txr = container_of(ring, struct bnxt_tx_ring_info, tx_ring_struct); @@ -6705,6 +6823,9 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, if (bp->flags & BNXT_FLAG_TX_COAL_CMPL) req->cmpl_coal_cnt = RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64; + if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg) + flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE; + req->flags = cpu_to_le16(flags); break; } case HWRM_RING_ALLOC_RX: @@ -6878,6 +6999,48 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, bnxt_set_db_mask(bp, db, ring_type); } +static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr) +{ + struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; + struct bnxt_napi *bnapi = rxr->bnapi; + u32 type = HWRM_RING_ALLOC_RX; + u32 map_idx = bnapi->index; + int rc; + + rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); + if (rc) + return rc; + + bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); + bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; + + return 0; +} + +static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr) +{ + struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; + u32 type = HWRM_RING_ALLOC_AGG; + u32 grp_idx = ring->grp_idx; + u32 map_idx; + int rc; + + map_idx = grp_idx + bp->rx_nr_rings; + rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); + if (rc) + return rc; + + bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, + ring->fw_ring_id); + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); + bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; + + return 0; +} + static int bnxt_hwrm_ring_alloc(struct bnxt *bp) { bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); @@ -6943,24 +7106,21 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id); } - type = HWRM_RING_ALLOC_RX; for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; - struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; - struct bnxt_napi *bnapi = rxr->bnapi; - u32 map_idx = bnapi->index; - rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); + rc = bnxt_hwrm_rx_ring_alloc(bp, rxr); if (rc) goto err_out; - bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); /* If we have agg rings, post agg buffers first. 
*/ if (!agg_rings) bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); - bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr; + struct bnxt_napi *bnapi = rxr->bnapi; u32 type2 = HWRM_RING_ALLOC_CMPL; + struct bnxt_ring_struct *ring; + u32 map_idx = bnapi->index; ring = &cpr2->cp_ring_struct; ring->handle = BNXT_SET_NQ_HDL(cpr2); @@ -6974,23 +7134,10 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) } if (agg_rings) { - type = HWRM_RING_ALLOC_AGG; for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; - struct bnxt_ring_struct *ring = - &rxr->rx_agg_ring_struct; - u32 grp_idx = ring->grp_idx; - u32 map_idx = grp_idx + bp->rx_nr_rings; - - rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); + rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]); if (rc) goto err_out; - - bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, - ring->fw_ring_id); - bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); - bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); - bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; } } err_out: @@ -7030,6 +7177,50 @@ exit: return 0; } +static void bnxt_hwrm_rx_ring_free(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + bool close_path) +{ + struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; + u32 grp_idx = rxr->bnapi->index; + u32 cmpl_ring_id; + + if (ring->fw_ring_id == INVALID_HW_RING_ID) + return; + + cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); + hwrm_ring_free_send_msg(bp, ring, + RING_FREE_REQ_RING_TYPE_RX, + close_path ? cmpl_ring_id : + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID; +} + +static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + bool close_path) +{ + struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; + u32 grp_idx = rxr->bnapi->index; + u32 type, cmpl_ring_id; + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + type = RING_FREE_REQ_RING_TYPE_RX_AGG; + else + type = RING_FREE_REQ_RING_TYPE_RX; + + if (ring->fw_ring_id == INVALID_HW_RING_ID) + return; + + cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); + hwrm_ring_free_send_msg(bp, ring, type, + close_path ? cmpl_ring_id : + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID; +} + static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) { u32 type; @@ -7054,42 +7245,8 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) } for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; - struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; - u32 grp_idx = rxr->bnapi->index; - - if (ring->fw_ring_id != INVALID_HW_RING_ID) { - u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); - - hwrm_ring_free_send_msg(bp, ring, - RING_FREE_REQ_RING_TYPE_RX, - close_path ? 
cmpl_ring_id : - INVALID_HW_RING_ID); - ring->fw_ring_id = INVALID_HW_RING_ID; - bp->grp_info[grp_idx].rx_fw_ring_id = - INVALID_HW_RING_ID; - } - } - - if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) - type = RING_FREE_REQ_RING_TYPE_RX_AGG; - else - type = RING_FREE_REQ_RING_TYPE_RX; - for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; - struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; - u32 grp_idx = rxr->bnapi->index; - - if (ring->fw_ring_id != INVALID_HW_RING_ID) { - u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); - - hwrm_ring_free_send_msg(bp, ring, type, - close_path ? cmpl_ring_id : - INVALID_HW_RING_ID); - ring->fw_ring_id = INVALID_HW_RING_ID; - bp->grp_info[grp_idx].agg_fw_ring_id = - INVALID_HW_RING_ID; - } + bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path); + bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path); } /* The completion rings are about to be freed. After that the @@ -8849,7 +9006,7 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) u8 flags; int rc; - if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5(bp)) { + if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) { rc = -ENODEV; goto no_ptp; } @@ -8865,7 +9022,8 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) goto exit; flags = resp->flags; - if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) { + if (BNXT_CHIP_P5_AND_MINUS(bp) && + !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) { rc = -ENODEV; goto exit; } @@ -8878,10 +9036,13 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) ptp->bp = bp; bp->ptp_cfg = ptp; } - if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) { + + if (flags & + (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK | + PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) { ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower); ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper); - } else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + } else if (BNXT_CHIP_P5(bp)) { ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER; ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER; } else { @@ -8963,6 +9124,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS; if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED) bp->flags |= BNXT_FLAG_UDP_GSO_CAP; + if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP; bp->tx_push_thresh = 0; if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && @@ -10031,10 +10194,12 @@ void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, struct bnxt_ntuple_filter *ntp_fltr; int i; - bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic); - for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) { - if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID) - bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i); + if (netif_running(bp->dev)) { + bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic); + for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) { + if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID) + bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i); + } } if (!all) return; @@ -10055,19 +10220,17 @@ void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size, vnic->rss_table, vnic->rss_table_dma_addr); - kfree(rss_ctx->rss_indir_tbl); - list_del(&rss_ctx->list); bp->num_rss_ctx--; - clear_bit(rss_ctx->index, bp->rss_ctx_bmap); - kfree(rss_ctx); } static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp) { bool set_tpa = 
!!(bp->flags & BNXT_FLAG_TPA); - struct bnxt_rss_ctx *rss_ctx, *tmp; + struct ethtool_rxfh_context *ctx; + unsigned long context; - list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) { + xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) { + struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx); struct bnxt_vnic_info *vnic = &rss_ctx->vnic; if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) || @@ -10076,42 +10239,20 @@ static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp) netdev_err(bp->dev, "Failed to restore RSS ctx %d\n", rss_ctx->index); bnxt_del_one_rss_ctx(bp, rss_ctx, true); + ethtool_rxfh_context_lost(bp->dev, rss_ctx->index); } } } -struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp) +void bnxt_clear_rss_ctxs(struct bnxt *bp) { - struct bnxt_rss_ctx *rss_ctx = NULL; + struct ethtool_rxfh_context *ctx; + unsigned long context; - rss_ctx = kzalloc(sizeof(*rss_ctx), GFP_KERNEL); - if (rss_ctx) { - rss_ctx->vnic.rss_ctx = rss_ctx; - list_add_tail(&rss_ctx->list, &bp->rss_ctx_list); - bp->num_rss_ctx++; - } - return rss_ctx; -} + xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) { + struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx); -void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all) -{ - struct bnxt_rss_ctx *rss_ctx, *tmp; - - list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) - bnxt_del_one_rss_ctx(bp, rss_ctx, all); - - if (all) - bitmap_free(bp->rss_ctx_bmap); -} - -static void bnxt_init_multi_rss_ctx(struct bnxt *bp) -{ - bp->rss_ctx_bmap = bitmap_zalloc(BNXT_RSS_CTX_BMAP_LEN, GFP_KERNEL); - if (bp->rss_ctx_bmap) { - /* burn index 0 since we cannot have context 0 */ - __set_bit(0, bp->rss_ctx_bmap); - INIT_LIST_HEAD(&bp->rss_ctx_list); - bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX; + bnxt_del_one_rss_ctx(bp, rss_ctx, false); } } @@ -12004,8 +12145,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) /* VF-reps may need to be re-opened after the PF is re-opened */ if (BNXT_PF(bp)) bnxt_vf_reps_open(bp); - if (bp->ptp_cfg) - atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS); + if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) + WRITE_ONCE(bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS); bnxt_ptp_init_rtc(bp, true); bnxt_ptp_cfg_tstamp_filters(bp); if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) @@ -12158,7 +12299,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, msleep(20); if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) - bnxt_clear_rss_ctxs(bp, false); + bnxt_clear_rss_ctxs(bp); /* Flush rings and disable interrupts */ bnxt_shutdown_nic(bp, irq_re_init); @@ -14842,6 +14983,220 @@ static const struct netdev_stat_ops bnxt_stat_ops = { .get_base_stats = bnxt_get_base_stats, }; +static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) +{ + u16 mem_size; + + rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; + mem_size = rxr->rx_agg_bmap_size / 8; + rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); + if (!rxr->rx_agg_bmap) + return -ENOMEM; + + return 0; +} + +static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx) +{ + struct bnxt_rx_ring_info *rxr, *clone; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_ring_struct *ring; + int rc; + + rxr = &bp->rx_ring[idx]; + clone = qmem; + memcpy(clone, rxr, sizeof(*rxr)); + bnxt_init_rx_ring_struct(bp, clone); + bnxt_reset_rx_ring_struct(bp, clone); + + clone->rx_prod = 0; + clone->rx_agg_prod = 0; + clone->rx_sw_agg_prod = 0; + clone->rx_next_cons = 0; + + rc = bnxt_alloc_rx_page_pool(bp, clone, 
rxr->page_pool->p.nid); + if (rc) + return rc; + + ring = &clone->rx_ring_struct; + rc = bnxt_alloc_ring(bp, &ring->ring_mem); + if (rc) + goto err_free_rx_ring; + + if (bp->flags & BNXT_FLAG_AGG_RINGS) { + ring = &clone->rx_agg_ring_struct; + rc = bnxt_alloc_ring(bp, &ring->ring_mem); + if (rc) + goto err_free_rx_agg_ring; + + rc = bnxt_alloc_rx_agg_bmap(bp, clone); + if (rc) + goto err_free_rx_agg_ring; + } + + bnxt_init_one_rx_ring_rxbd(bp, clone); + bnxt_init_one_rx_agg_ring_rxbd(bp, clone); + + bnxt_alloc_one_rx_ring_skb(bp, clone, idx); + if (bp->flags & BNXT_FLAG_AGG_RINGS) + bnxt_alloc_one_rx_ring_page(bp, clone, idx); + + return 0; + +err_free_rx_agg_ring: + bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem); +err_free_rx_ring: + bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem); + clone->page_pool->p.napi = NULL; + page_pool_destroy(clone->page_pool); + clone->page_pool = NULL; + return rc; +} + +static void bnxt_queue_mem_free(struct net_device *dev, void *qmem) +{ + struct bnxt_rx_ring_info *rxr = qmem; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_ring_struct *ring; + + bnxt_free_one_rx_ring(bp, rxr); + bnxt_free_one_rx_agg_ring(bp, rxr); + + page_pool_destroy(rxr->page_pool); + rxr->page_pool = NULL; + + ring = &rxr->rx_ring_struct; + bnxt_free_ring(bp, &ring->ring_mem); + + ring = &rxr->rx_agg_ring_struct; + bnxt_free_ring(bp, &ring->ring_mem); + + kfree(rxr->rx_agg_bmap); + rxr->rx_agg_bmap = NULL; +} + +static void bnxt_copy_rx_ring(struct bnxt *bp, + struct bnxt_rx_ring_info *dst, + struct bnxt_rx_ring_info *src) +{ + struct bnxt_ring_mem_info *dst_rmem, *src_rmem; + struct bnxt_ring_struct *dst_ring, *src_ring; + int i; + + dst_ring = &dst->rx_ring_struct; + dst_rmem = &dst_ring->ring_mem; + src_ring = &src->rx_ring_struct; + src_rmem = &src_ring->ring_mem; + + WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages); + WARN_ON(dst_rmem->page_size != src_rmem->page_size); + WARN_ON(dst_rmem->flags != src_rmem->flags); + WARN_ON(dst_rmem->depth != src_rmem->depth); + WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size); + WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem); + + dst_rmem->pg_tbl = src_rmem->pg_tbl; + dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map; + *dst_rmem->vmem = *src_rmem->vmem; + for (i = 0; i < dst_rmem->nr_pages; i++) { + dst_rmem->pg_arr[i] = src_rmem->pg_arr[i]; + dst_rmem->dma_arr[i] = src_rmem->dma_arr[i]; + } + + if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) + return; + + dst_ring = &dst->rx_agg_ring_struct; + dst_rmem = &dst_ring->ring_mem; + src_ring = &src->rx_agg_ring_struct; + src_rmem = &src_ring->ring_mem; + + WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages); + WARN_ON(dst_rmem->page_size != src_rmem->page_size); + WARN_ON(dst_rmem->flags != src_rmem->flags); + WARN_ON(dst_rmem->depth != src_rmem->depth); + WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size); + WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem); + WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size); + + dst_rmem->pg_tbl = src_rmem->pg_tbl; + dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map; + *dst_rmem->vmem = *src_rmem->vmem; + for (i = 0; i < dst_rmem->nr_pages; i++) { + dst_rmem->pg_arr[i] = src_rmem->pg_arr[i]; + dst_rmem->dma_arr[i] = src_rmem->dma_arr[i]; + } + + dst->rx_agg_bmap = src->rx_agg_bmap; +} + +static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_rx_ring_info *rxr, *clone; + struct bnxt_cp_ring_info *cpr; + int rc; + + rxr = &bp->rx_ring[idx]; + clone = qmem; + + rxr->rx_prod = 
clone->rx_prod; + rxr->rx_agg_prod = clone->rx_agg_prod; + rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod; + rxr->rx_next_cons = clone->rx_next_cons; + rxr->page_pool = clone->page_pool; + + bnxt_copy_rx_ring(bp, rxr, clone); + + rc = bnxt_hwrm_rx_ring_alloc(bp, rxr); + if (rc) + return rc; + rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr); + if (rc) + goto err_free_hwrm_rx_ring; + + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); + if (bp->flags & BNXT_FLAG_AGG_RINGS) + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + + napi_enable(&rxr->bnapi->napi); + + cpr = &rxr->bnapi->cp_ring; + cpr->sw_stats->rx.rx_resets++; + + return 0; + +err_free_hwrm_rx_ring: + bnxt_hwrm_rx_ring_free(bp, rxr, false); + return rc; +} + +static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_rx_ring_info *rxr; + + rxr = &bp->rx_ring[idx]; + napi_disable(&rxr->bnapi->napi); + bnxt_hwrm_rx_ring_free(bp, rxr, false); + bnxt_hwrm_rx_agg_ring_free(bp, rxr, false); + rxr->rx_next_cons = 0; + page_pool_disable_direct_recycling(rxr->page_pool); + + memcpy(qmem, rxr, sizeof(*rxr)); + bnxt_init_rx_ring_struct(bp, qmem); + + return 0; +} + +static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = { + .ndo_queue_mem_size = sizeof(struct bnxt_rx_ring_info), + .ndo_queue_mem_alloc = bnxt_queue_mem_alloc, + .ndo_queue_mem_free = bnxt_queue_mem_free, + .ndo_queue_start = bnxt_queue_start, + .ndo_queue_stop = bnxt_queue_stop, +}; + static void bnxt_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); @@ -14859,8 +15214,7 @@ static void bnxt_remove_one(struct pci_dev *pdev) bnxt_free_l2_filters(bp, true); bnxt_free_ntp_fltrs(bp, true); - if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) - bnxt_clear_rss_ctxs(bp, true); + WARN_ON(bp->num_rss_ctx); clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); /* Flush any pending tasks */ cancel_work_sync(&bp->sp_task); @@ -15307,6 +15661,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->stat_ops = &bnxt_stat_ops; dev->watchdog_timeo = BNXT_TX_TIMEOUT; dev->ethtool_ops = &bnxt_ethtool_ops; + dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops; pci_set_drvdata(pdev, dev); rc = bnxt_alloc_hwrm_resources(bp); @@ -15329,7 +15684,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bp->flags |= BNXT_FLAG_CHIP_P7; } - rc = bnxt_alloc_rss_indir_tbl(bp, NULL); + rc = bnxt_alloc_rss_indir_tbl(bp); if (rc) goto init_err_pci_clean; @@ -15486,8 +15841,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_LIST_HEAD(&bp->usr_fltr_list); if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) - bnxt_init_multi_rss_ctx(bp); - + bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX; rc = register_netdev(dev); if (rc) @@ -15510,8 +15864,6 @@ init_err_dl: bnxt_clear_int_mode(bp); init_err_pci_clean: - if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) - bnxt_clear_rss_ctxs(bp, true); bnxt_hwrm_func_drv_unrgtr(bp); bnxt_free_hwrm_resources(bp); bnxt_hwmon_uninit(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 6b10a09ee1af..6bbdc718c3a7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -181,6 +181,32 @@ struct tx_cmp { #define TX_CMP_SQ_CONS_IDX(txcmp) \ (le32_to_cpu((txcmp)->sq_cons_idx) & TX_CMP_SQ_CONS_IDX_MASK) +struct tx_ts_cmp { + __le32 tx_ts_cmp_flags_type; + #define TX_TS_CMP_FLAGS_ERROR (1 << 6) + #define TX_TS_CMP_FLAGS_TS_TYPE (1 << 7) + #define 
TX_TS_CMP_FLAGS_TS_TYPE_PM (0 << 7) + #define TX_TS_CMP_FLAGS_TS_TYPE_PA (1 << 7) + #define TX_TS_CMP_FLAGS_TS_FALLBACK (1 << 8) + #define TX_TS_CMP_TS_SUB_NS (0xf << 12) + #define TX_TS_CMP_TS_NS_MID (0xffff << 16) + #define TX_TS_CMP_TS_NS_MID_SFT 16 + u32 tx_ts_cmp_opaque; + __le32 tx_ts_cmp_errors_v; + #define TX_TS_CMP_V (1 << 0) + #define TX_TS_CMP_TS_INVALID_ERR (1 << 10) + __le32 tx_ts_cmp_ts_ns_lo; +}; + +#define BNXT_GET_TX_TS_48B_NS(tscmp) \ + (le32_to_cpu((tscmp)->tx_ts_cmp_ts_ns_lo) | \ + ((u64)(le32_to_cpu((tscmp)->tx_ts_cmp_flags_type) & \ + TX_TS_CMP_TS_NS_MID) << TX_TS_CMP_TS_NS_MID_SFT)) + +#define BNXT_TX_TS_ERR(tscmp) \ + (((tscmp)->tx_ts_cmp_flags_type & cpu_to_le32(TX_TS_CMP_FLAGS_ERROR)) &&\ + ((tscmp)->tx_ts_cmp_errors_v & cpu_to_le32(TX_TS_CMP_TS_INVALID_ERR))) + struct rx_cmp { __le32 rx_cmp_len_flags_type; #define RX_CMP_CMP_TYPE (0x3f << 0) @@ -848,11 +874,14 @@ struct bnxt_sw_tx_bd { DEFINE_DMA_UNMAP_ADDR(mapping); DEFINE_DMA_UNMAP_LEN(len); struct page *page; - u8 is_gso; + u8 is_ts_pkt; u8 is_push; u8 action; unsigned short nr_frags; - u16 rx_prod; + union { + u16 rx_prod; + u16 txts_prod; + }; }; struct bnxt_sw_rx_bd { @@ -1257,19 +1286,16 @@ struct bnxt_vnic_info { #define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10 #define BNXT_VNIC_NTUPLE_FLAG 0x20 #define BNXT_VNIC_RSSCTX_FLAG 0x40 - struct bnxt_rss_ctx *rss_ctx; + struct ethtool_rxfh_context *rss_ctx; u32 vnic_id; }; struct bnxt_rss_ctx { - struct list_head list; struct bnxt_vnic_info vnic; - u16 *rss_indir_tbl; u8 index; }; #define BNXT_MAX_ETH_RSS_CTX 32 -#define BNXT_RSS_CTX_BMAP_LEN (BNXT_MAX_ETH_RSS_CTX + 1) #define BNXT_VNIC_ID_INVALID 0xffffffff struct bnxt_hw_rings { @@ -2237,9 +2263,17 @@ struct bnxt { (BNXT_CHIP_NUM_58700((bp)->chip_num) && \ !BNXT_CHIP_TYPE_NITRO_A0(bp))) +/* Chip class phase 3.x */ +#define BNXT_CHIP_P3(bp) \ + (BNXT_CHIP_NUM_57X0X((bp)->chip_num) || \ + BNXT_CHIP_TYPE_NITRO_A0(bp)) + #define BNXT_CHIP_P4_PLUS(bp) \ (BNXT_CHIP_P4(bp) || BNXT_CHIP_P5_PLUS(bp)) +#define BNXT_CHIP_P5_AND_MINUS(bp) \ + (BNXT_CHIP_P3(bp) || BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp)) + struct bnxt_aux_priv *aux_priv; struct bnxt_en_dev *edev; @@ -2294,11 +2328,9 @@ struct bnxt { /* grp_info indexed by completion ring index */ struct bnxt_ring_grp_info *grp_info; struct bnxt_vnic_info *vnic_info; - struct list_head rss_ctx_list; - unsigned long *rss_ctx_bmap; u32 num_rss_ctx; int nr_vnics; - u16 *rss_indir_tbl; + u32 *rss_indir_tbl; u16 rss_indir_tbl_entries; u32 rss_hash_cfg; u32 rss_hash_delta; @@ -2384,6 +2416,7 @@ struct bnxt { #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 BIT_ULL(16) #define BNXT_FW_CAP_PCIE_STATS_SUPPORTED BIT_ULL(17) #define BNXT_FW_CAP_EXT_STATS_SUPPORTED BIT_ULL(18) + #define BNXT_FW_CAP_TX_TS_CMP BIT_ULL(19) #define BNXT_FW_CAP_ERR_RECOVER_RELOAD BIT_ULL(20) #define BNXT_FW_CAP_HOT_RESET BIT_ULL(21) #define BNXT_FW_CAP_PTP_RTC BIT_ULL(22) @@ -2774,9 +2807,8 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic, u32 tpa_flags); void bnxt_fill_ipv6_mask(__be32 mask[4]); -int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx); -void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx); -u16 bnxt_get_max_rss_ctx_ring(struct bnxt *bp); +void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, + struct ethtool_rxfh_context *rss_ctx); int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings); int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic); int 
bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic, @@ -2810,8 +2842,7 @@ int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic); int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic); void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, bool all); -struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp); -void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all); +void bnxt_clear_rss_ctxs(struct bnxt *bp); int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_half_open_nic(struct bnxt *bp); void bnxt_half_close_nic(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 79c09c1cdf93..d00ef0063820 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -961,12 +961,6 @@ static int bnxt_set_channels(struct net_device *dev, return rc; } - if (req_rx_rings < bp->rx_nr_rings && - req_rx_rings <= bnxt_get_max_rss_ctx_ring(bp)) { - netdev_warn(dev, "Can't deactivate rings used by RSS contexts\n"); - return -EINVAL; - } - if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) != bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) && netif_is_rxfh_configured(dev)) { @@ -976,7 +970,7 @@ static int bnxt_set_channels(struct net_device *dev, bnxt_clear_usr_fltrs(bp, true); if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) - bnxt_clear_rss_ctxs(bp, false); + bnxt_clear_rss_ctxs(bp); if (netif_running(dev)) { if (BNXT_PF(bp)) { /* TODO CHIMP_FW: Send message to all VF's @@ -1216,19 +1210,18 @@ fltr_err: static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp, u32 index) { - struct bnxt_rss_ctx *rss_ctx, *tmp; + struct ethtool_rxfh_context *ctx; - list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) - if (rss_ctx->index == index) - return rss_ctx; - return NULL; + ctx = xa_load(&bp->dev->ethtool->rss_ctx, index); + if (!ctx) + return NULL; + return ethtool_rxfh_context_priv(ctx); } -static int bnxt_alloc_rss_ctx_rss_table(struct bnxt *bp, - struct bnxt_rss_ctx *rss_ctx) +static int bnxt_alloc_vnic_rss_table(struct bnxt *bp, + struct bnxt_vnic_info *vnic) { int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); - struct bnxt_vnic_info *vnic = &rss_ctx->vnic; vnic->rss_table_size = size + HW_HASH_KEY_SIZE; vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev, @@ -1807,10 +1800,9 @@ static u32 bnxt_get_rxfh_key_size(struct net_device *dev) static int bnxt_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh) { - u32 rss_context = rxfh->rss_context; struct bnxt_rss_ctx *rss_ctx = NULL; struct bnxt *bp = netdev_priv(dev); - u16 *indir_tbl = bp->rss_indir_tbl; + u32 *indir_tbl = bp->rss_indir_tbl; struct bnxt_vnic_info *vnic; u32 i, tbl_size; @@ -1821,10 +1813,13 @@ static int bnxt_get_rxfh(struct net_device *dev, vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; if (rxfh->rss_context) { - rss_ctx = bnxt_get_rss_ctx_from_index(bp, rss_context); - if (!rss_ctx) + struct ethtool_rxfh_context *ctx; + + ctx = xa_load(&bp->dev->ethtool->rss_ctx, rxfh->rss_context); + if (!ctx) return -EINVAL; - indir_tbl = rss_ctx->rss_indir_tbl; + indir_tbl = ethtool_rxfh_context_indir(ctx); + rss_ctx = ethtool_rxfh_context_priv(ctx); vnic = &rss_ctx->vnic; } @@ -1840,8 +1835,9 @@ static int bnxt_get_rxfh(struct net_device *dev, return 0; } -static void bnxt_modify_rss(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, - struct ethtool_rxfh_param *rxfh) +static void bnxt_modify_rss(struct bnxt *bp, struct ethtool_rxfh_context 
*ctx, + struct bnxt_rss_ctx *rss_ctx, + const struct ethtool_rxfh_param *rxfh) { if (rxfh->key) { if (rss_ctx) { @@ -1854,29 +1850,21 @@ static void bnxt_modify_rss(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, } if (rxfh->indir) { u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(bp->dev); - u16 *indir_tbl = bp->rss_indir_tbl; + u32 *indir_tbl = bp->rss_indir_tbl; if (rss_ctx) - indir_tbl = rss_ctx->rss_indir_tbl; + indir_tbl = ethtool_rxfh_context_indir(ctx); for (i = 0; i < tbl_size; i++) indir_tbl[i] = rxfh->indir[i]; pad = bp->rss_indir_tbl_entries - tbl_size; if (pad) - memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); + memset(&indir_tbl[i], 0, pad * sizeof(*indir_tbl)); } } -static int bnxt_set_rxfh_context(struct bnxt *bp, - struct ethtool_rxfh_param *rxfh, - struct netlink_ext_ack *extack) +static int bnxt_rxfh_context_check(struct bnxt *bp, + struct netlink_ext_ack *extack) { - u32 *rss_context = &rxfh->rss_context; - struct bnxt_rss_ctx *rss_ctx; - struct bnxt_vnic_info *vnic; - bool modify = false; - int bit_id; - int rc; - if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) { NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported"); return -EOPNOTSUPP; @@ -1887,21 +1875,22 @@ static int bnxt_set_rxfh_context(struct bnxt *bp, return -EAGAIN; } - if (*rss_context != ETH_RXFH_CONTEXT_ALLOC) { - rss_ctx = bnxt_get_rss_ctx_from_index(bp, *rss_context); - if (!rss_ctx) { - NL_SET_ERR_MSG_FMT_MOD(extack, "RSS context %u not found", - *rss_context); - return -EINVAL; - } - if (*rss_context && rxfh->rss_delete) { - bnxt_del_one_rss_ctx(bp, rss_ctx, true); - return 0; - } - modify = true; - vnic = &rss_ctx->vnic; - goto modify_context; - } + return 0; +} + +static int bnxt_create_rxfh_context(struct net_device *dev, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_rss_ctx *rss_ctx; + struct bnxt_vnic_info *vnic; + int rc; + + rc = bnxt_rxfh_context_check(bp, extack); + if (rc) + return rc; if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) { NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u", @@ -1914,22 +1903,19 @@ static int bnxt_set_rxfh_context(struct bnxt *bp, return -ENOMEM; } - rss_ctx = bnxt_alloc_rss_ctx(bp); - if (!rss_ctx) - return -ENOMEM; + rss_ctx = ethtool_rxfh_context_priv(ctx); + + bp->num_rss_ctx++; vnic = &rss_ctx->vnic; + vnic->rss_ctx = ctx; vnic->flags |= BNXT_VNIC_RSSCTX_FLAG; vnic->vnic_id = BNXT_VNIC_ID_INVALID; - rc = bnxt_alloc_rss_ctx_rss_table(bp, rss_ctx); + rc = bnxt_alloc_vnic_rss_table(bp, vnic); if (rc) goto out; - rc = bnxt_alloc_rss_indir_tbl(bp, rss_ctx); - if (rc) - goto out; - - bnxt_set_dflt_rss_indir_tbl(bp, rss_ctx); + bnxt_set_dflt_rss_indir_tbl(bp, ctx); memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE); rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings); @@ -1943,11 +1929,7 @@ static int bnxt_set_rxfh_context(struct bnxt *bp, NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA"); goto out; } -modify_context: - bnxt_modify_rss(bp, rss_ctx, rxfh); - - if (modify) - return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic); + bnxt_modify_rss(bp, ctx, rss_ctx, rxfh); rc = __bnxt_setup_vnic_p5(bp, vnic); if (rc) { @@ -1955,21 +1937,47 @@ modify_context: goto out; } - bit_id = bitmap_find_free_region(bp->rss_ctx_bmap, - BNXT_RSS_CTX_BMAP_LEN, 0); - if (bit_id < 0) { - rc = -ENOMEM; - goto out; - } - rss_ctx->index = (u16)bit_id; - *rss_context = rss_ctx->index; - + rss_ctx->index = rxfh->rss_context; return 0; out: 
bnxt_del_one_rss_ctx(bp, rss_ctx, true); return rc; } +static int bnxt_modify_rxfh_context(struct net_device *dev, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_rss_ctx *rss_ctx; + int rc; + + rc = bnxt_rxfh_context_check(bp, extack); + if (rc) + return rc; + + rss_ctx = ethtool_rxfh_context_priv(ctx); + + bnxt_modify_rss(bp, ctx, rss_ctx, rxfh); + + return bnxt_hwrm_vnic_rss_cfg_p5(bp, &rss_ctx->vnic); +} + +static int bnxt_remove_rxfh_context(struct net_device *dev, + struct ethtool_rxfh_context *ctx, + u32 rss_context, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_rss_ctx *rss_ctx; + + rss_ctx = ethtool_rxfh_context_priv(ctx); + + bnxt_del_one_rss_ctx(bp, rss_ctx, true); + return 0; +} + static int bnxt_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh, struct netlink_ext_ack *extack) @@ -1980,10 +1988,7 @@ static int bnxt_set_rxfh(struct net_device *dev, if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - if (rxfh->rss_context) - return bnxt_set_rxfh_context(bp, rxfh, extack); - - bnxt_modify_rss(bp, NULL, rxfh); + bnxt_modify_rss(bp, NULL, NULL, rxfh); bnxt_clear_usr_fltrs(bp, false); if (netif_running(bp->dev)) { @@ -5019,7 +5024,7 @@ static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump, } static int bnxt_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct bnxt *bp = netdev_priv(dev); struct bnxt_ptp_cfg *ptp; @@ -5239,6 +5244,19 @@ static void bnxt_get_rmon_stats(struct net_device *dev, *ranges = bnxt_rmon_ranges; } +static void bnxt_get_ptp_stats(struct net_device *dev, + struct ethtool_ts_stats *ts_stats) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + + if (ptp) { + ts_stats->pkts = ptp->stats.ts_pkts; + ts_stats->lost = ptp->stats.ts_lost; + ts_stats->err = atomic64_read(&ptp->stats.ts_err); + } +} + static void bnxt_get_link_ext_stats(struct net_device *dev, struct ethtool_link_ext_stats *stats) { @@ -5262,6 +5280,9 @@ void bnxt_ethtool_free(struct bnxt *bp) const struct ethtool_ops bnxt_ethtool_ops = { .cap_link_lanes_supported = 1, .cap_rss_ctx_supported = 1, + .rxfh_max_context_id = BNXT_MAX_ETH_RSS_CTX, + .rxfh_indir_space = BNXT_MAX_RSS_TABLE_ENTRIES_P5, + .rxfh_priv_size = sizeof(struct bnxt_rss_ctx), .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USECS_IRQ | @@ -5299,6 +5320,9 @@ const struct ethtool_ops bnxt_ethtool_ops = { .get_rxfh_key_size = bnxt_get_rxfh_key_size, .get_rxfh = bnxt_get_rxfh, .set_rxfh = bnxt_set_rxfh, + .create_rxfh_context = bnxt_create_rxfh_context, + .modify_rxfh_context = bnxt_modify_rxfh_context, + .remove_rxfh_context = bnxt_remove_rxfh_context, .flash_device = bnxt_flash_device, .get_eeprom_len = bnxt_get_eeprom_len, .get_eeprom = bnxt_get_eeprom, @@ -5322,4 +5346,5 @@ const struct ethtool_ops bnxt_ethtool_ops = { .get_eth_mac_stats = bnxt_get_eth_mac_stats, .get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats, .get_rmon_stats = bnxt_get_rmon_stats, + .get_ts_stats = bnxt_get_ptp_stats, }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index e661ab154d6b..37d42423459c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -110,7 +110,7 @@ static void 
bnxt_ptp_get_current_time(struct bnxt *bp) } static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts, - u32 txts_tmo) + u32 txts_tmo, int slot) { struct hwrm_port_ts_query_output *resp; struct hwrm_port_ts_query_input *req; @@ -123,11 +123,12 @@ static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts, req->flags = cpu_to_le32(flags); if ((flags & PORT_TS_QUERY_REQ_FLAGS_PATH) == PORT_TS_QUERY_REQ_FLAGS_PATH_TX) { + struct bnxt_ptp_tx_req *txts_req = &bp->ptp_cfg->txts_req[slot]; u32 tmo_us = txts_tmo * 1000; req->enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES); - req->ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid); - req->ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off); + req->ptp_seq_id = cpu_to_le32(txts_req->tx_seqid); + req->ptp_hdr_offset = cpu_to_le16(txts_req->tx_hdr_off); if (!tmo_us) tmo_us = BNXT_PTP_QTS_TIMEOUT; tmo_us = min(tmo_us, BNXT_PTP_QTS_MAX_TMO_US); @@ -656,6 +657,14 @@ static int bnxt_map_ptp_regs(struct bnxt *bp) (ptp->refclk_regs[i] & BNXT_GRC_OFFSET_MASK); return 0; } + if (bp->flags & BNXT_FLAG_CHIP_P7) { + for (i = 0; i < 2; i++) { + if (reg_arr[i] & BNXT_GRC_BASE_MASK) + return -EINVAL; + ptp->refclk_mapped_regs[i] = reg_arr[i]; + } + return 0; + } return -ENODEV; } @@ -674,41 +683,44 @@ static u64 bnxt_cc_read(const struct cyclecounter *cc) return ns; } -static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb) +static int bnxt_stamp_tx_skb(struct bnxt *bp, int slot) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; struct skb_shared_hwtstamps timestamp; + struct bnxt_ptp_tx_req *txts_req; unsigned long now = jiffies; u64 ts = 0, ns = 0; u32 tmo = 0; int rc; - if (!ptp->txts_pending) - ptp->abs_txts_tmo = now + msecs_to_jiffies(ptp->txts_tmo); - if (!time_after_eq(now, ptp->abs_txts_tmo)) - tmo = jiffies_to_msecs(ptp->abs_txts_tmo - now); + txts_req = &ptp->txts_req[slot]; + /* make sure bnxt_get_tx_ts_p5() has updated abs_txts_tmo */ + smp_rmb(); + if (!time_after_eq(now, txts_req->abs_txts_tmo)) + tmo = jiffies_to_msecs(txts_req->abs_txts_tmo - now); rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts, - tmo); + tmo, slot); if (!rc) { memset(&timestamp, 0, sizeof(timestamp)); spin_lock_bh(&ptp->ptp_lock); ns = timecounter_cyc2time(&ptp->tc, ts); spin_unlock_bh(&ptp->ptp_lock); timestamp.hwtstamp = ns_to_ktime(ns); - skb_tstamp_tx(ptp->tx_skb, &timestamp); + skb_tstamp_tx(txts_req->tx_skb, &timestamp); + ptp->stats.ts_pkts++; } else { - if (!time_after_eq(jiffies, ptp->abs_txts_tmo)) { - ptp->txts_pending = true; - return; - } + if (!time_after_eq(jiffies, txts_req->abs_txts_tmo)) + return -EAGAIN; + + ptp->stats.ts_lost++; netdev_warn_once(bp->dev, "TS query for TX timer failed rc = %x\n", rc); } - dev_kfree_skb_any(ptp->tx_skb); - ptp->tx_skb = NULL; - atomic_inc(&ptp->tx_avail); - ptp->txts_pending = false; + dev_kfree_skb_any(txts_req->tx_skb); + txts_req->tx_skb = NULL; + + return 0; } static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info) @@ -717,12 +729,30 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info) ptp_info); unsigned long now = jiffies; struct bnxt *bp = ptp->bp; + u16 cons = ptp->txts_cons; + u32 num_requests; + int rc = 0; + + num_requests = BNXT_MAX_TX_TS - READ_ONCE(ptp->tx_avail); + while (num_requests--) { + if (IS_ERR(ptp->txts_req[cons].tx_skb)) + goto next_slot; + if (!ptp->txts_req[cons].tx_skb) + break; + rc = bnxt_stamp_tx_skb(bp, cons); + if (rc == -EAGAIN) + break; +next_slot: + BNXT_PTP_INC_TX_AVAIL(ptp); + cons = NEXT_TXTS(cons); + } + ptp->txts_cons = cons; -
if (ptp->tx_skb) - bnxt_stamp_tx_skb(bp, ptp->tx_skb); - - if (!time_after_eq(now, ptp->next_period)) + if (!time_after_eq(now, ptp->next_period)) { + if (rc == -EAGAIN) + return 0; return ptp->next_period - now; + } bnxt_ptp_get_current_time(bp); ptp->next_period = now + HZ; @@ -732,22 +762,37 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info) spin_unlock_bh(&ptp->ptp_lock); ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD; } - if (ptp->txts_pending) + if (rc == -EAGAIN) return 0; return HZ; } -int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb) +int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod) +{ + spin_lock_bh(&ptp->ptp_tx_lock); + if (ptp->tx_avail) { + *prod = ptp->txts_prod; + ptp->txts_prod = NEXT_TXTS(*prod); + ptp->tx_avail--; + spin_unlock_bh(&ptp->ptp_tx_lock); + return 0; + } + spin_unlock_bh(&ptp->ptp_tx_lock); + atomic64_inc(&ptp->stats.ts_err); + return -ENOSPC; +} + +void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + struct bnxt_ptp_tx_req *txts_req; - if (ptp->tx_skb) { - netdev_err(bp->dev, "deferring skb:one SKB is still outstanding\n"); - return -EBUSY; - } - ptp->tx_skb = skb; + txts_req = &ptp->txts_req[prod]; + txts_req->abs_txts_tmo = jiffies + msecs_to_jiffies(ptp->txts_tmo); + /* make sure abs_txts_tmo is written first */ + smp_wmb(); + txts_req->tx_skb = skb; ptp_schedule_worker(ptp->ptp_clock, 0); - return 0; } int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts) @@ -766,6 +811,38 @@ int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts) return 0; } +void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi, + struct tx_ts_cmp *tscmp) +{ + struct skb_shared_hwtstamps timestamp = {}; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + u32 opaque = tscmp->tx_ts_cmp_opaque; + struct bnxt_tx_ring_info *txr; + struct bnxt_sw_tx_bd *tx_buf; + u64 ts, ns; + u16 cons; + + txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)]; + ts = BNXT_GET_TX_TS_48B_NS(tscmp); + cons = TX_OPAQUE_IDX(opaque); + tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)]; + if (tx_buf->is_ts_pkt) { + if (BNXT_TX_TS_ERR(tscmp)) { + netdev_err(bp->dev, + "timestamp completion error 0x%x 0x%x\n", + le32_to_cpu(tscmp->tx_ts_cmp_flags_type), + le32_to_cpu(tscmp->tx_ts_cmp_errors_v)); + } else { + spin_lock_bh(&ptp->ptp_lock); + ns = timecounter_cyc2time(&ptp->tc, ts); + spin_unlock_bh(&ptp->ptp_lock); + timestamp.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(tx_buf->skb, &timestamp); + } + tx_buf->is_ts_pkt = 0; + } +} + static const struct ptp_clock_info bnxt_ptp_caps = { .owner = THIS_MODULE, .name = "bnxt clock", @@ -912,7 +989,7 @@ int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg) return rc; } else { rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME, - &ns, 0); + &ns, 0, 0); if (rc) return rc; } @@ -952,8 +1029,9 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg) bnxt_ptp_free(bp); - atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS); + WRITE_ONCE(ptp->tx_avail, BNXT_MAX_TX_TS); spin_lock_init(&ptp->ptp_lock); + spin_lock_init(&ptp->ptp_tx_lock); if (BNXT_PTP_USE_RTC(bp)) { bnxt_ptp_timecounter_init(bp, false); @@ -979,7 +1057,12 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg) rc = err; goto out; } - if (BNXT_CHIP_P5(bp)) { + + ptp->stats.ts_pkts = 0; + ptp->stats.ts_lost = 0; + atomic64_set(&ptp->stats.ts_err, 0); + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { spin_lock_bh(&ptp->ptp_lock); bnxt_refclk_read(bp, NULL, &ptp->current_time); WRITE_ONCE(ptp->old_time,
ptp->current_time); @@ -998,6 +1081,7 @@ out: void bnxt_ptp_clear(struct bnxt *bp) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + int i; if (!ptp) return; @@ -1009,9 +1093,12 @@ void bnxt_ptp_clear(struct bnxt *bp) kfree(ptp->ptp_info.pin_config); ptp->ptp_info.pin_config = NULL; - if (ptp->tx_skb) { - dev_kfree_skb_any(ptp->tx_skb); - ptp->tx_skb = NULL; + for (i = 0; i < BNXT_MAX_TX_TS; i++) { + if (ptp->txts_req[i].tx_skb) { + dev_kfree_skb_any(ptp->txts_req[i].tx_skb); + ptp->txts_req[i].tx_skb = NULL; + } } + bnxt_unmap_ptp_regs(bp); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h index 2c3415c8fc03..a9a2f9a18c9c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h @@ -79,6 +79,22 @@ struct bnxt_pps { struct pps_pin pins[BNXT_MAX_TSIO_PINS]; }; +struct bnxt_ptp_stats { + u64 ts_pkts; + u64 ts_lost; + atomic64_t ts_err; +}; + +#define BNXT_MAX_TX_TS 4 +#define NEXT_TXTS(idx) (((idx) + 1) & (BNXT_MAX_TX_TS - 1)) + +struct bnxt_ptp_tx_req { + struct sk_buff *tx_skb; + u16 tx_seqid; + u16 tx_hdr_off; + unsigned long abs_txts_tmo; +}; + struct bnxt_ptp_cfg { struct ptp_clock_info ptp_info; struct ptp_clock *ptp_clock; @@ -87,7 +103,8 @@ struct bnxt_ptp_cfg { struct bnxt_pps pps_info; /* serialize timecounter access */ spinlock_t ptp_lock; - struct sk_buff *tx_skb; + /* serialize ts tx request queuing */ + spinlock_t ptp_tx_lock; u64 current_time; u64 old_time; unsigned long next_period; @@ -96,11 +113,10 @@ struct bnxt_ptp_cfg { /* a 23b shift cyclecounter will overflow in ~36 mins. Check overflow every 18 mins. */ #define BNXT_PHC_OVERFLOW_PERIOD (18 * 60 * HZ) - u16 tx_seqid; - u16 tx_hdr_off; + struct bnxt_ptp_tx_req txts_req[BNXT_MAX_TX_TS]; + struct bnxt *bp; - atomic_t tx_avail; -#define BNXT_MAX_TX_TS 1 + u32 tx_avail; u16 rxctl; #define BNXT_PTP_MSG_SYNC (1 << 0) #define BNXT_PTP_MSG_DELAY_REQ (1 << 1) @@ -117,14 +133,16 @@ struct bnxt_ptp_cfg { BNXT_PTP_MSG_PDELAY_REQ | \ BNXT_PTP_MSG_PDELAY_RESP) u8 tx_tstamp_en:1; - u8 txts_pending:1; int rx_filter; u32 tstamp_filters; u32 refclk_regs[2]; u32 refclk_mapped_regs[2]; u32 txts_tmo; - unsigned long abs_txts_tmo; + u16 txts_prod; + u16 txts_cons; + + struct bnxt_ptp_stats stats; }; #if BITS_PER_LONG == 32 @@ -139,6 +157,13 @@ do { \ ((dst) = READ_ONCE(src)) #endif +#define BNXT_PTP_INC_TX_AVAIL(ptp) \ +do { \ + spin_lock_bh(&(ptp)->ptp_tx_lock); \ + (ptp)->tx_avail++; \ + spin_unlock_bh(&(ptp)->ptp_tx_lock); \ +} while (0) + int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off); void bnxt_ptp_update_current_time(struct bnxt *bp); void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2); @@ -146,8 +171,11 @@ int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp); void bnxt_ptp_reapply_pps(struct bnxt *bp); int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr); int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr); -int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb); +int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod); +void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod); int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts); +void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi, + struct tx_ts_cmp *tscmp); void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns); int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg); int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg); diff --git a/drivers/net/ethernet/broadcom/tg3.c 
b/drivers/net/ethernet/broadcom/tg3.c index 1589a49b876c..0ec5f01551f9 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -6141,7 +6141,7 @@ static void tg3_refclk_write(struct tg3 *tp, u64 newval) static inline void tg3_full_lock(struct tg3 *tp, int irq_sync); static inline void tg3_full_unlock(struct tg3 *tp); -static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) +static int tg3_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info) { struct tg3 *tp = netdev_priv(dev); diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h index a5ebd7110e07..986f43d27711 100644 --- a/drivers/net/ethernet/brocade/bna/bna_types.h +++ b/drivers/net/ethernet/brocade/bna/bna_types.h @@ -416,7 +416,7 @@ struct bna_ib { /* Tx object */ /* Tx datapath control structure */ -#define BNA_Q_NAME_SIZE 16 +#define BNA_Q_NAME_SIZE (IFNAMSIZ + 6) struct bna_tcb { /* Fast path */ void **sw_qpt; diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index fe121d36112d..ece6f3b48327 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -1534,8 +1534,9 @@ bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info, for (i = 0; i < num_txqs; i++) { vector_num = tx_info->tcb[i]->intr_vector; - sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name, - tx_id + tx_info->tcb[i]->id); + snprintf(tx_info->tcb[i]->name, BNA_Q_NAME_SIZE, "%s TXQ %d", + bnad->netdev->name, + tx_id + tx_info->tcb[i]->id); err = request_irq(bnad->msix_table[vector_num].vector, (irq_handler_t)bnad_msix_tx, 0, tx_info->tcb[i]->name, @@ -1585,9 +1586,9 @@ bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info, for (i = 0; i < num_rxps; i++) { vector_num = rx_info->rx_ctrl[i].ccb->intr_vector; - sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d", - bnad->netdev->name, - rx_id + rx_info->rx_ctrl[i].ccb->id); + snprintf(rx_info->rx_ctrl[i].ccb->name, BNA_Q_NAME_SIZE, + "%s CQ %d", bnad->netdev->name, + rx_id + rx_info->rx_ctrl[i].ccb->id); err = request_irq(bnad->msix_table[vector_num].vector, (irq_handler_t)bnad_msix_rx, 0, rx_info->rx_ctrl[i].ccb->name, diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index aa5700ac9c00..ea71612f6b36 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -645,6 +645,10 @@ #define GEM_T2OFST_OFFSET 0 /* offset value */ #define GEM_T2OFST_SIZE 7 +/* Bitfields in queue pointer registers */ +#define MACB_QUEUE_DISABLE_OFFSET 0 /* disable queue */ +#define MACB_QUEUE_DISABLE_SIZE 1 + /* Offset for screener type 2 compare values (T2CMPOFST). * Note the offset is applied after the specified point, * e.g. 
GEM_T2COMPOFST_ETYPE denotes the EtherType field, so an offset @@ -733,6 +737,7 @@ #define MACB_CAPS_NEEDS_RSTONUBR 0x00000100 #define MACB_CAPS_MIIONRGMII 0x00000200 #define MACB_CAPS_NEED_TSUCLK 0x00000400 +#define MACB_CAPS_QUEUE_DISABLE 0x00000800 #define MACB_CAPS_PCS 0x01000000 #define MACB_CAPS_HIGH_SPEED 0x02000000 #define MACB_CAPS_CLK_HW_CHG 0x04000000 @@ -1163,7 +1168,7 @@ struct macb_ptp_info { s32 (*get_ptp_max_adj)(void); unsigned int (*get_tsu_rate)(struct macb *bp); int (*get_ts_info)(struct net_device *dev, - struct ethtool_ts_info *info); + struct kernel_ethtool_ts_info *info); int (*get_hwtst)(struct net_device *netdev, struct kernel_hwtstamp_config *tstamp_config); int (*set_hwtst)(struct net_device *netdev, @@ -1254,6 +1259,8 @@ struct macb { u32 (*macb_reg_readl)(struct macb *bp, int offset); void (*macb_reg_writel)(struct macb *bp, int offset, u32 value); + struct macb_dma_desc *rx_ring_tieoff; + dma_addr_t rx_ring_tieoff_dma; size_t rx_buffer_size; unsigned int rx_ring_size; @@ -1299,6 +1306,7 @@ struct macb { unsigned int jumbo_max_len; u32 wol; + u32 wolopts; /* holds value of rx watermark value for pbuf_rxcutthru register */ u32 rx_watermark; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 241ce9a2fa99..11665be3a22c 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -38,6 +38,7 @@ #include <linux/ptp_classify.h> #include <linux/reset.h> #include <linux/firmware/xlnx-zynqmp.h> +#include <linux/inetdevice.h> #include "macb.h" /* This structure is only used for MACB on SiFive FU540 devices */ @@ -84,8 +85,7 @@ struct sifive_fu540_macb_mgmt { #define GEM_MTU_MIN_SIZE ETH_MIN_MTU #define MACB_NETIF_LSO NETIF_F_TSO -#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) -#define MACB_WOL_ENABLED (0x1 << 1) +#define MACB_WOL_ENABLED BIT(0) #define HS_SPEED_10000M 4 #define MACB_SERDES_RATE_10G 1 @@ -2477,6 +2477,12 @@ static void macb_free_consistent(struct macb *bp) unsigned int q; int size; + if (bp->rx_ring_tieoff) { + dma_free_coherent(&bp->pdev->dev, macb_dma_desc_get_size(bp), + bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma); + bp->rx_ring_tieoff = NULL; + } + bp->macbgem_ops.mog_free_rx_buffers(bp); for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { @@ -2568,6 +2574,16 @@ static int macb_alloc_consistent(struct macb *bp) if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) goto out_err; + /* Required for tie off descriptor for PM cases */ + if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE)) { + bp->rx_ring_tieoff = dma_alloc_coherent(&bp->pdev->dev, + macb_dma_desc_get_size(bp), + &bp->rx_ring_tieoff_dma, + GFP_KERNEL); + if (!bp->rx_ring_tieoff) + goto out_err; + } + return 0; out_err: @@ -2575,6 +2591,19 @@ out_err: return -ENOMEM; } +static void macb_init_tieoff(struct macb *bp) +{ + struct macb_dma_desc *desc = bp->rx_ring_tieoff; + + if (bp->caps & MACB_CAPS_QUEUE_DISABLE) + return; + /* Setup a wrapping descriptor with no free slots + * (WRAP and USED) to tie off/disable unused RX queues. 
+ */ + macb_set_addr(bp, desc, MACB_BIT(RX_WRAP) | MACB_BIT(RX_USED)); + desc->ctrl = 0; +} + static void gem_init_rings(struct macb *bp) { struct macb_queue *queue; @@ -2598,6 +2627,7 @@ static void gem_init_rings(struct macb *bp) gem_rx_refill(queue); } + macb_init_tieoff(bp); } static void macb_init_rings(struct macb *bp) @@ -2615,6 +2645,8 @@ static void macb_init_rings(struct macb *bp) bp->queues[0].tx_head = 0; bp->queues[0].tx_tail = 0; desc->ctrl |= MACB_BIT(TX_WRAP); + + macb_init_tieoff(bp); } static void macb_reset_hw(struct macb *bp) @@ -3246,13 +3278,11 @@ static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct macb *bp = netdev_priv(netdev); - if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { - phylink_ethtool_get_wol(bp->phylink, wol); - wol->supported |= WAKE_MAGIC; + phylink_ethtool_get_wol(bp->phylink, wol); + wol->supported |= (WAKE_MAGIC | WAKE_ARP); - if (bp->wol & MACB_WOL_ENABLED) - wol->wolopts |= WAKE_MAGIC; - } + /* Add macb wolopts to phy wolopts */ + wol->wolopts |= bp->wolopts; } static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) @@ -3262,22 +3292,15 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) /* Pass the order to phylink layer */ ret = phylink_ethtool_set_wol(bp->phylink, wol); - /* Don't manage WoL on MAC if handled by the PHY - * or if there's a failure in talking to the PHY - */ - if (!ret || ret != -EOPNOTSUPP) + /* Don't manage WoL on MAC, if PHY set_wol() fails */ + if (ret && ret != -EOPNOTSUPP) return ret; - if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || - (wol->wolopts & ~WAKE_MAGIC)) - return -EOPNOTSUPP; - - if (wol->wolopts & WAKE_MAGIC) - bp->wol |= MACB_WOL_ENABLED; - else - bp->wol &= ~MACB_WOL_ENABLED; + bp->wolopts = (wol->wolopts & WAKE_MAGIC) ? WAKE_MAGIC : 0; + bp->wolopts |= (wol->wolopts & WAKE_ARP) ? WAKE_ARP : 0; + bp->wol = (wol->wolopts) ? 
MACB_WOL_ENABLED : 0; - device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED); + device_set_wakeup_enable(&bp->pdev->dev, bp->wol); return 0; } @@ -3376,7 +3399,7 @@ static s32 gem_get_ptp_max_adj(void) } static int gem_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct macb *bp = netdev_priv(dev); @@ -3417,7 +3440,7 @@ static struct macb_ptp_info gem_ptp_info = { #endif static int macb_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct macb *bp = netdev_priv(netdev); @@ -4917,7 +4940,8 @@ static const struct macb_config sama7g5_emac_config = { static const struct macb_config versal_config = { .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO | - MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH | MACB_CAPS_NEED_TSUCLK, + MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH | MACB_CAPS_NEED_TSUCLK | + MACB_CAPS_QUEUE_DISABLE, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = init_reset_optional, @@ -5053,9 +5077,7 @@ static int macb_probe(struct platform_device *pdev) bp->max_tx_length = GEM_MAX_TX_LEN; bp->wol = 0; - if (of_property_read_bool(np, "magic-packet")) - bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; - device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); + device_set_wakeup_capable(&pdev->dev, 1); bp->usrio = macb_config->usrio; @@ -5211,10 +5233,13 @@ static int __maybe_unused macb_suspend(struct device *dev) { struct net_device *netdev = dev_get_drvdata(dev); struct macb *bp = netdev_priv(netdev); + struct in_ifaddr *ifa = NULL; struct macb_queue *queue; + struct in_device *idev; unsigned long flags; unsigned int q; int err; + u32 tmp; if (!device_may_wakeup(&bp->dev->dev)) phy_exit(bp->sgmii_phy); @@ -5223,18 +5248,54 @@ static int __maybe_unused macb_suspend(struct device *dev) return 0; if (bp->wol & MACB_WOL_ENABLED) { + /* Check for IP address in WOL ARP mode */ + idev = __in_dev_get_rcu(bp->dev); + if (idev && idev->ifa_list) + ifa = rcu_access_pointer(idev->ifa_list); + if ((bp->wolopts & WAKE_ARP) && !ifa) { + netdev_err(netdev, "IP address not assigned as required by WoL walk ARP\n"); + return -EOPNOTSUPP; + } spin_lock_irqsave(&bp->lock, flags); - /* Flush all status bits */ - macb_writel(bp, TSR, -1); - macb_writel(bp, RSR, -1); + + /* Disable Tx and Rx engines before disabling the queues, + * this is mandatory as per the IP spec sheet + */ + tmp = macb_readl(bp, NCR); + macb_writel(bp, NCR, tmp & ~(MACB_BIT(TE) | MACB_BIT(RE))); for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { + /* Disable RX queues */ + if (bp->caps & MACB_CAPS_QUEUE_DISABLE) { + queue_writel(queue, RBQP, MACB_BIT(QUEUE_DISABLE)); + } else { + /* Tie off RX queues */ + queue_writel(queue, RBQP, + lower_32_bits(bp->rx_ring_tieoff_dma)); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + queue_writel(queue, RBQPH, + upper_32_bits(bp->rx_ring_tieoff_dma)); +#endif + } /* Disable all interrupts */ queue_writel(queue, IDR, -1); queue_readl(queue, ISR); if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) queue_writel(queue, ISR, -1); } + /* Enable Receive engine */ + macb_writel(bp, NCR, tmp | MACB_BIT(RE)); + /* Flush all status bits */ + macb_writel(bp, TSR, -1); + macb_writel(bp, RSR, -1); + + tmp = (bp->wolopts & WAKE_MAGIC) ? 
MACB_BIT(MAG) : 0; + if (bp->wolopts & WAKE_ARP) { + tmp |= MACB_BIT(ARP); + /* write IP address into register */ + tmp |= MACB_BFEXT(IP, be32_to_cpu(ifa->ifa_local)); + } + /* Change interrupt handler and * Enable WoL IRQ on queue 0 */ @@ -5250,7 +5311,7 @@ static int __maybe_unused macb_suspend(struct device *dev) return err; } queue_writel(bp->queues, IER, GEM_BIT(WOL)); - gem_writel(bp, WOL, MACB_BIT(MAG)); + gem_writel(bp, WOL, tmp); } else { err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt, IRQF_SHARED, netdev->name, bp->queues); @@ -5262,7 +5323,7 @@ static int __maybe_unused macb_suspend(struct device *dev) return err; } queue_writel(bp->queues, IER, MACB_BIT(WOL)); - macb_writel(bp, WOL, MACB_BIT(MAG)); + macb_writel(bp, WOL, tmp); } spin_unlock_irqrestore(&bp->lock, flags); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index d3e07b6ed5e1..5835965dbc32 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -2497,7 +2497,7 @@ ret_intrmod: } static int lio_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct lio *lio = GET_LIO(netdev); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 34f02a8ec2ca..1d79f6eaa41f 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -92,12 +92,6 @@ static int octeon_console_debug_enabled(u32 console) /* time to wait for possible in-flight requests in milliseconds */ #define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000) -struct oct_link_status_resp { - u64 rh; - struct oct_link_info link_info; - u64 status; -}; - struct oct_timestamp_resp { u64 rh; u64 timestamp; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c index 0d6ee30affb9..eef12fdd246d 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c @@ -30,11 +30,6 @@ #include "cn23xx_pf_device.h" #include "cn23xx_vf_device.h" -struct niclist { - struct list_head list; - void *ptr; -}; - struct __dispatch { struct list_head list; struct octeon_recv_info *rinfo; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index 34125b8cd935..6a04d2530176 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -836,7 +836,7 @@ static int nicvf_set_pauseparam(struct net_device *dev, } static int nicvf_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct nicvf *nic = netdev_priv(netdev); diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index a317feb8decb..a40c266c37f2 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -54,7 +54,7 @@ struct lmac { bool link_up; int lmacid; /* ID within BGX */ int lmacid_bd; /* ID on board */ - struct net_device netdev; + struct net_device *netdev; struct phy_device *phydev; unsigned int last_duplex; unsigned int last_link; @@ -590,10 +590,12 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac) static void bgx_lmac_handler(struct net_device *netdev) { - struct lmac 
*lmac = container_of(netdev, struct lmac, netdev); struct phy_device *phydev; + struct lmac *lmac, **priv; int link_changed = 0; + priv = netdev_priv(netdev); + lmac = *priv; phydev = lmac->phydev; if (!phydev->link && lmac->last_link) @@ -1052,12 +1054,18 @@ static int phy_interface_mode(u8 lmac_type) static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) { - struct lmac *lmac; + struct lmac *lmac, **priv; u64 cfg; lmac = &bgx->lmac[lmacid]; lmac->bgx = bgx; + lmac->netdev = alloc_netdev_dummy(sizeof(struct lmac *)); + if (!lmac->netdev) + return -ENOMEM; + priv = netdev_priv(lmac->netdev); + *priv = lmac; + if ((lmac->lmac_type == BGX_MODE_SGMII) || (lmac->lmac_type == BGX_MODE_QSGMII) || (lmac->lmac_type == BGX_MODE_RGMII)) { @@ -1116,7 +1124,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) } lmac->phydev->dev_flags = 0; - if (phy_connect_direct(&lmac->netdev, lmac->phydev, + if (phy_connect_direct(lmac->netdev, lmac->phydev, bgx_lmac_handler, phy_interface_mode(lmac->lmac_type))) return -ENODEV; @@ -1183,6 +1191,7 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid) (lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev) phy_disconnect(lmac->phydev); + free_netdev(lmac->netdev); lmac->phydev = NULL; } @@ -1414,7 +1423,7 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle, acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac); - SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev); + SET_NETDEV_DEV(bgx->lmac[bgx->acpi_lmac_idx].netdev, dev); bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx; bgx->acpi_lmac_idx++; /* move to next LMAC */ @@ -1483,7 +1492,7 @@ static int bgx_init_of_phy(struct bgx *bgx) of_get_mac_address(node, bgx->lmac[lmac].mac); - SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev); + SET_NETDEV_DEV(bgx->lmac[lmac].netdev, &bgx->pdev->dev); bgx->lmac[lmac].lmacid = lmac; phy_np = of_parse_phandle(node, "phy-handle", 0); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 47eecde36285..3d091947ae00 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -1550,7 +1550,7 @@ out_free_fw: return ret; } -static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info) +static int get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *ts_info) { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c index 887876f35f10..84b300fee2bb 100644 --- a/drivers/net/ethernet/cirrus/mac89x0.c +++ b/drivers/net/ethernet/cirrus/mac89x0.c @@ -554,6 +554,7 @@ static int set_mac_address(struct net_device *dev, void *addr) return 0; } +MODULE_DESCRIPTION("Macintosh CS89x0-based Ethernet driver"); MODULE_LICENSE("GPL"); static void mac89x0_device_remove(struct platform_device *pdev) diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 241906697019..f2f1055880b2 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -599,7 +599,7 @@ static int enic_set_rxfh(struct net_device *netdev, } static int enic_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | @@ -608,6 +608,28 @@ static int 
enic_get_ts_info(struct net_device *netdev, return 0; } +static void enic_get_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct enic *enic = netdev_priv(netdev); + + switch (vnic_dev_get_intr_mode(enic->vdev)) { + case VNIC_DEV_INTR_MODE_MSIX: + channels->max_rx = ENIC_RQ_MAX; + channels->max_tx = ENIC_WQ_MAX; + channels->rx_count = enic->rq_count; + channels->tx_count = enic->wq_count; + break; + case VNIC_DEV_INTR_MODE_MSI: + case VNIC_DEV_INTR_MODE_INTX: + channels->max_combined = 1; + channels->combined_count = 1; + break; + default: + break; + } +} + static const struct ethtool_ops enic_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX | @@ -632,6 +654,7 @@ static const struct ethtool_ops enic_ethtool_ops = { .set_rxfh = enic_set_rxfh, .get_link_ksettings = enic_get_ksettings, .get_ts_info = enic_get_ts_info, + .get_channels = enic_get_channels, }; void enic_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 5f0c9e1771db..73e1c71c5092 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -79,7 +79,8 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); #define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT) #define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \ - NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM) + NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \ + NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6) /** * struct gmac_queue_page - page buffer per-page info @@ -287,13 +288,13 @@ static void gmac_set_flow_control(struct net_device *netdev, bool tx, bool rx) spin_unlock_irqrestore(&port->config_lock, flags); } -static void gmac_speed_set(struct net_device *netdev) +static void gmac_adjust_link(struct net_device *netdev) { struct gemini_ethernet_port *port = netdev_priv(netdev); struct phy_device *phydev = netdev->phydev; union gmac_status status, old_status; - int pause_tx = 0; - int pause_rx = 0; + bool pause_tx = false; + bool pause_rx = false; status.bits32 = readl(port->gmac_base + GMAC_STATUS); old_status.bits32 = status.bits32; @@ -328,14 +329,9 @@ static void gmac_speed_set(struct net_device *netdev) } if (phydev->duplex == DUPLEX_FULL) { - u16 lcladv = phy_read(phydev, MII_ADVERTISE); - u16 rmtadv = phy_read(phydev, MII_LPA); - u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); - - if (cap & FLOW_CTRL_RX) - pause_rx = 1; - if (cap & FLOW_CTRL_TX) - pause_tx = 1; + phy_get_pause(phydev, &pause_tx, &pause_rx); + netdev_dbg(netdev, "set negotiated pause params pause TX = %s, pause RX = %s\n", + pause_tx ? "ON" : "OFF", pause_rx ? "ON" : "OFF"); } gmac_set_flow_control(netdev, pause_tx, pause_rx); @@ -366,7 +362,7 @@ static int gmac_setup_phy(struct net_device *netdev) phy = of_phy_get_and_connect(netdev, dev->of_node, - gmac_speed_set); + gmac_adjust_link); if (!phy) return -ENODEV; netdev->phydev = phy; @@ -1148,13 +1144,25 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, skb_frag_t *skb_frag; dma_addr_t mapping; void *buffer; + u16 mss; int ret; - /* TODO: implement proper TSO using MTU in word3 */ word1 = skb->len; word3 = SOF_BIT; - if (skb->len >= ETH_FRAME_LEN) { + mss = skb_shinfo(skb)->gso_size; + if (mss) { + /* This means we are dealing with TCP and skb->len is the + * sum total of all the segments. The TSO will deal with + * chopping this up for us. 
+ */ + /* The accelerator needs the full frame size here */ + mss += skb_tcp_all_headers(skb); + netdev_dbg(netdev, "segment offloading mss = %04x len=%04x\n", + mss, skb->len); + word1 |= TSS_MTU_ENABLE_BIT; + word3 |= mss; + } else if (skb->len >= ETH_FRAME_LEN) { /* Hardware offloaded checksumming isn't working on frames * bigger than 1514 bytes. A hypothesis about this is that the * checksum buffer is only 1518 bytes, so when the frames get @@ -1169,7 +1177,9 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, return ret; } word1 |= TSS_BYPASS_BIT; - } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) { int tcp = 0; /* We do not switch off the checksumming on non TCP/UDP @@ -2116,6 +2126,19 @@ static void gmac_get_pauseparam(struct net_device *netdev, pparam->autoneg = true; } +static int gmac_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pparam) +{ + struct phy_device *phydev = netdev->phydev; + + if (!pparam->autoneg) + return -EOPNOTSUPP; + + phy_set_asym_pause(phydev, pparam->rx_pause, pparam->tx_pause); + + return 0; +} + static void gmac_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *rp, struct kernel_ethtool_ringparam *kernel_rp, @@ -2236,6 +2259,7 @@ static const struct ethtool_ops gmac_351x_ethtool_ops = { .set_link_ksettings = gmac_set_ksettings, .nway_reset = gmac_nway_reset, .get_pauseparam = gmac_get_pauseparam, + .set_pauseparam = gmac_set_pauseparam, .get_ringparam = gmac_get_ringparam, .set_ringparam = gmac_set_ringparam, .get_coalesce = gmac_get_coalesce, diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c index 65ec1abc9442..9aa286ba1f00 100644 --- a/drivers/net/ethernet/engleder/tsnep_ethtool.c +++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c @@ -305,7 +305,7 @@ static void tsnep_ethtool_get_channels(struct net_device *netdev, } static int tsnep_ethtool_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct tsnep_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index baa0b3c2ce6f..cfe6b57b1da0 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -371,6 +371,7 @@ static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type, void *type_data) { struct dpaa_priv *priv = netdev_priv(net_dev); + int num_txqs_per_tc = dpaa_num_txqs_per_tc(); struct tc_mqprio_qopt *mqprio = type_data; u8 num_tc; int i; @@ -398,12 +399,12 @@ static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type, netdev_set_num_tc(net_dev, num_tc); for (i = 0; i < num_tc; i++) - netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM, - i * DPAA_TC_TXQ_NUM); + netdev_set_tc_queue(net_dev, i, num_txqs_per_tc, + i * num_txqs_per_tc); out: priv->num_tc = num_tc ? 
: 1; - netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM); + netif_set_real_num_tx_queues(net_dev, priv->num_tc * num_txqs_per_tc); return 0; } @@ -649,7 +650,7 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx) fq->wq = 6; break; case FQ_TYPE_TX: - switch (idx / DPAA_TC_TXQ_NUM) { + switch (idx / dpaa_num_txqs_per_tc()) { case 0: /* Low priority (best effort) */ fq->wq = 6; @@ -667,8 +668,8 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx) fq->wq = 0; break; default: - WARN(1, "Too many TX FQs: more than %d!\n", - DPAA_ETH_TXQ_NUM); + WARN(1, "Too many TX FQs: more than %zu!\n", + dpaa_max_num_txqs()); } break; default: @@ -740,7 +741,8 @@ static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list, port_fqs->rx_pcdq = &dpaa_fq[0]; - if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ)) + if (!dpaa_fq_alloc(dev, 0, dpaa_max_num_txqs(), list, + FQ_TYPE_TX_CONF_MQ)) goto fq_alloc_failed; dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR); @@ -755,7 +757,7 @@ static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list, port_fqs->tx_defq = &dpaa_fq[0]; - if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX)) + if (!dpaa_fq_alloc(dev, 0, dpaa_max_num_txqs(), list, FQ_TYPE_TX)) goto fq_alloc_failed; return 0; @@ -931,14 +933,18 @@ static inline void dpaa_setup_egress(const struct dpaa_priv *priv, } } -static void dpaa_fq_setup(struct dpaa_priv *priv, - const struct dpaa_fq_cbs *fq_cbs, - struct fman_port *tx_port) +static int dpaa_fq_setup(struct dpaa_priv *priv, + const struct dpaa_fq_cbs *fq_cbs, + struct fman_port *tx_port) { int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu; const cpumask_t *affine_cpus = qman_affine_cpus(); - u16 channels[NR_CPUS]; struct dpaa_fq *fq; + u16 *channels; + + channels = kcalloc(num_possible_cpus(), sizeof(u16), GFP_KERNEL); + if (!channels) + return -ENOMEM; for_each_cpu_and(cpu, affine_cpus, cpu_online_mask) channels[num_portals++] = qman_affine_channel(cpu); @@ -965,11 +971,7 @@ static void dpaa_fq_setup(struct dpaa_priv *priv, case FQ_TYPE_TX: dpaa_setup_egress(priv, fq, tx_port, &fq_cbs->egress_ern); - /* If we have more Tx queues than the number of cores, - * just ignore the extra ones. - */ - if (egress_cnt < DPAA_ETH_TXQ_NUM) - priv->egress_fqs[egress_cnt++] = &fq->fq_base; + priv->egress_fqs[egress_cnt++] = &fq->fq_base; break; case FQ_TYPE_TX_CONF_MQ: priv->conf_fqs[conf_cnt++] = &fq->fq_base; @@ -987,16 +989,9 @@ static void dpaa_fq_setup(struct dpaa_priv *priv, } } - /* Make sure all CPUs receive a corresponding Tx queue. 
*/ - while (egress_cnt < DPAA_ETH_TXQ_NUM) { - list_for_each_entry(fq, &priv->dpaa_fq_list, list) { - if (fq->fq_type != FQ_TYPE_TX) - continue; - priv->egress_fqs[egress_cnt++] = &fq->fq_base; - if (egress_cnt == DPAA_ETH_TXQ_NUM) - break; - } - } + kfree(channels); + + return 0; } static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv, @@ -1004,7 +999,7 @@ static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv, { int i; - for (i = 0; i < DPAA_ETH_TXQ_NUM; i++) + for (i = 0; i < dpaa_max_num_txqs(); i++) if (priv->egress_fqs[i] == tx_fq) return i; @@ -3324,7 +3319,7 @@ static int dpaa_eth_probe(struct platform_device *pdev) /* Allocate this early, so we can store relevant information in * the private area */ - net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM); + net_dev = alloc_etherdev_mq(sizeof(*priv), dpaa_max_num_txqs()); if (!net_dev) { dev_err(dev, "alloc_etherdev_mq() failed\n"); return -ENOMEM; @@ -3339,6 +3334,22 @@ static int dpaa_eth_probe(struct platform_device *pdev) priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT); + priv->egress_fqs = devm_kcalloc(dev, dpaa_max_num_txqs(), + sizeof(*priv->egress_fqs), + GFP_KERNEL); + if (!priv->egress_fqs) { + err = -ENOMEM; + goto free_netdev; + } + + priv->conf_fqs = devm_kcalloc(dev, dpaa_max_num_txqs(), + sizeof(*priv->conf_fqs), + GFP_KERNEL); + if (!priv->conf_fqs) { + err = -ENOMEM; + goto free_netdev; + } + mac_dev = dpaa_mac_dev_get(pdev); if (IS_ERR(mac_dev)) { netdev_err(net_dev, "dpaa_mac_dev_get() failed\n"); @@ -3416,7 +3427,9 @@ static int dpaa_eth_probe(struct platform_device *pdev) */ dpaa_eth_add_channel(priv->channel, &pdev->dev); - dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]); + err = dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]); + if (err) + goto free_dpaa_bps; /* Create a congestion group for this netdev, with * dynamically-allocated CGR ID. 
@@ -3462,7 +3475,8 @@ static int dpaa_eth_probe(struct platform_device *pdev) } priv->num_tc = 1; - netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM); + netif_set_real_num_tx_queues(net_dev, + priv->num_tc * dpaa_num_txqs_per_tc()); /* Initialize NAPI */ err = dpaa_napi_add(net_dev); diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h index ac3c8ed57bbe..7ed659eb08de 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h @@ -18,10 +18,6 @@ /* Number of prioritised traffic classes */ #define DPAA_TC_NUM 4 -/* Number of Tx queues per traffic class */ -#define DPAA_TC_TXQ_NUM NR_CPUS -/* Total number of Tx queues */ -#define DPAA_ETH_TXQ_NUM (DPAA_TC_NUM * DPAA_TC_TXQ_NUM) /* More detailed FQ types - used for fine-grained WQ assignments */ enum dpaa_fq_type { @@ -142,8 +138,8 @@ struct dpaa_priv { struct mac_device *mac_dev; struct device *rx_dma_dev; struct device *tx_dma_dev; - struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM]; - struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM]; + struct qman_fq **egress_fqs; + struct qman_fq **conf_fqs; u16 channel; struct list_head dpaa_fq_list; @@ -185,4 +181,16 @@ extern const struct ethtool_ops dpaa_ethtool_ops; /* from dpaa_eth_sysfs.c */ void dpaa_eth_sysfs_remove(struct device *dev); void dpaa_eth_sysfs_init(struct device *dev); + +static inline size_t dpaa_num_txqs_per_tc(void) +{ + return num_possible_cpus(); +} + +/* Total number of Tx queues */ +static inline size_t dpaa_max_num_txqs(void) +{ + return DPAA_TC_NUM * dpaa_num_txqs_per_tc(); +} + #endif /* __DPAA_H */ diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c index 4fee74c024bd..aad470e9caea 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c @@ -35,7 +35,6 @@ static ssize_t dpaa_eth_show_fqids(struct device *dev, u32 last_fqid = 0; ssize_t bytes = 0; char *str; - int i = 0; list_for_each_entry_safe(fq, tmp, &priv->dpaa_fq_list, list) { switch (fq->fq_type) { @@ -85,7 +84,6 @@ static ssize_t dpaa_eth_show_fqids(struct device *dev, prev = fq; prevstr = str; - i++; } if (prev) { diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 5bd0b36d1feb..b0060cf96090 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -394,7 +394,7 @@ static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) } static int dpaa_get_ts_info(struct net_device *net_dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct device *dev = net_dev->dev.parent; struct device_node *mac_node = dev->of_node; @@ -457,12 +457,16 @@ static int dpaa_set_coalesce(struct net_device *dev, struct netlink_ext_ack *extack) { const cpumask_t *cpus = qman_affine_cpus(); - bool needs_revert[NR_CPUS] = {false}; struct qman_portal *portal; u32 period, prev_period; u8 thresh, prev_thresh; + bool *needs_revert; int cpu, res; + needs_revert = kcalloc(num_possible_cpus(), sizeof(bool), GFP_KERNEL); + if (!needs_revert) + return -ENOMEM; + period = c->rx_coalesce_usecs; thresh = c->rx_max_coalesced_frames; @@ -485,6 +489,8 @@ static int dpaa_set_coalesce(struct net_device *dev, needs_revert[cpu] = true; } + kfree(needs_revert); + return 0; revert_values: @@ -498,6 +504,8 @@ revert_values: 
qman_dqrr_set_ithresh(portal, prev_thresh); } + kfree(needs_revert); + return res; } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index e80e9388c71f..7f476519b7ad 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -794,7 +794,7 @@ int dpaa2_phc_index = -1; EXPORT_SYMBOL(dpaa2_phc_index); static int dpaa2_eth_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { if (!dpaa2_ptp) return ethtool_op_get_ts_info(dev, info); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index f7753ea5b57e..5e684b23c5f5 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -841,7 +841,7 @@ static int enetc_set_coalesce(struct net_device *ndev, } static int enetc_get_ts_info(struct net_device *ndev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { int *phc_idx; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 881ece735dcf..a923cb95cdc6 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1361,6 +1361,12 @@ fec_stop(struct net_device *ndev) writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL); writel(rmii_mode, fep->hwp + FEC_R_CNTRL); } + + if (fep->bufdesc_ex) { + val = readl(fep->hwp + FEC_ECNTRL); + val |= FEC_ECR_EN1588; + writel(val, fep->hwp + FEC_ECNTRL); + } } static void @@ -2762,7 +2768,7 @@ static void fec_enet_get_regs(struct net_device *ndev, } static int fec_enet_get_ts_info(struct net_device *ndev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct fec_enet_private *fep = netdev_priv(ndev); diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 92b8f4ab26f1..796e6f4e583d 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -1066,7 +1066,6 @@ int memac_initialization(struct mac_device *mac_dev, struct fman_mac_params *params) { int err; - struct device_node *fixed; struct phylink_pcs *pcs; struct fman_mac *memac; unsigned long capabilities; @@ -1222,18 +1221,15 @@ int memac_initialization(struct mac_device *mac_dev, memac->rgmii_no_half_duplex = true; /* Most boards should use MLO_AN_INBAND, but existing boards don't have - * a managed property. Default to MLO_AN_INBAND if nothing else is - * specified. We need to be careful and not enable this if we have a - * fixed link or if we are using MII or RGMII, since those - * configurations modes don't use in-band autonegotiation. + * a managed property. Default to MLO_AN_INBAND rather than MLO_AN_PHY. + * Phylink will allow this to be overriden by a fixed link. We need to + * be careful and not enable this if we are using MII or RGMII, since + * those configurations modes don't use in-band autonegotiation. 
*/ - fixed = of_get_child_by_name(mac_node, "fixed-link"); - if (!fixed && !of_property_read_bool(mac_node, "fixed-link") && - !of_property_read_bool(mac_node, "managed") && + if (!of_property_read_bool(mac_node, "managed") && mac_dev->phy_if != PHY_INTERFACE_MODE_MII && !phy_interface_mode_is_rgmii(mac_dev->phy_if)) - mac_dev->phylink_config.ovr_an_inband = true; - of_node_put(fixed); + mac_dev->phylink_config.default_an_inband = true; err = memac_init(mac_dev->fman_mac); if (err < 0) diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 7a15b9245698..f581402ad740 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -1448,7 +1448,7 @@ static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, } static int gfar_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct gfar_private *priv = netdev_priv(dev); struct platform_device *ptp_dev; diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c index 4edd0adfc6c7..7f081e6e8c87 100644 --- a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c +++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c @@ -1040,7 +1040,7 @@ static int fun_set_rxfh(struct net_device *netdev, } static int fun_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_RX_HARDWARE | diff --git a/drivers/net/ethernet/google/gve/Makefile b/drivers/net/ethernet/google/gve/Makefile index b9a6be76531b..9ed07080b38a 100644 --- a/drivers/net/ethernet/google/gve/Makefile +++ b/drivers/net/ethernet/google/gve/Makefile @@ -1,4 +1,4 @@ # Makefile for the Google virtual Ethernet (gve) driver obj-$(CONFIG_GVE) += gve.o -gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o +gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o gve_flow_rule.o diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h index ae1e21c9b0a5..84ac004d3953 100644 --- a/drivers/net/ethernet/google/gve/gve.h +++ b/drivers/net/ethernet/google/gve/gve.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0 OR MIT) * Google virtual Ethernet (gve) driver * - * Copyright (C) 2015-2021 Google, Inc. 
+ * Copyright (C) 2015-2024 Google LLC */ #ifndef _GVE_H_ @@ -60,6 +60,11 @@ #define GVE_DEFAULT_RX_BUFFER_OFFSET 2048 +#define GVE_FLOW_RULES_CACHE_SIZE \ + (GVE_ADMINQ_BUFFER_SIZE / sizeof(struct gve_adminq_queried_flow_rule)) +#define GVE_FLOW_RULE_IDS_CACHE_SIZE \ + (GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location)) + #define GVE_XDP_ACTIONS 5 #define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182 @@ -678,6 +683,39 @@ enum gve_queue_format { GVE_DQO_QPL_FORMAT = 0x4, }; +struct gve_flow_spec { + __be32 src_ip[4]; + __be32 dst_ip[4]; + union { + struct { + __be16 src_port; + __be16 dst_port; + }; + __be32 spi; + }; + union { + u8 tos; + u8 tclass; + }; +}; + +struct gve_flow_rule { + u32 location; + u16 flow_type; + u16 action; + struct gve_flow_spec key; + struct gve_flow_spec mask; +}; + +struct gve_flow_rules_cache { + bool rules_cache_synced; /* False if the driver's rules_cache is outdated */ + struct gve_adminq_queried_flow_rule *rules_cache; + __be32 *rule_ids_cache; + /* The total number of queried rules that stored in the caches */ + u32 rules_cache_num; + u32 rule_ids_cache_num; +}; + struct gve_priv { struct net_device *dev; struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */ @@ -724,6 +762,7 @@ struct gve_priv { union gve_adminq_command *adminq; dma_addr_t adminq_bus_addr; struct dma_pool *adminq_pool; + struct mutex adminq_lock; /* Protects adminq command execution */ u32 adminq_mask; /* masks prod_cnt to adminq size */ u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */ u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */ @@ -743,6 +782,8 @@ struct gve_priv { u32 adminq_report_link_speed_cnt; u32 adminq_get_ptype_map_cnt; u32 adminq_verify_driver_compatibility_cnt; + u32 adminq_query_flow_rules_cnt; + u32 adminq_cfg_flow_rule_cnt; /* Global stats */ u32 interface_up_cnt; /* count of times interface turned up since last reset */ @@ -785,6 +826,11 @@ struct gve_priv { u16 header_buf_size; /* device configured, header-split supported if non-zero */ bool header_split_enabled; /* True if the header split is enabled by the user */ + + u32 max_flow_rules; + u32 num_flow_rules; + + struct gve_flow_rules_cache flow_rules_cache; }; enum gve_service_task_flags_bit { @@ -1124,6 +1170,12 @@ int gve_adjust_config(struct gve_priv *priv, int gve_adjust_queues(struct gve_priv *priv, struct gve_queue_config new_rx_config, struct gve_queue_config new_tx_config); +/* flow steering rule */ +int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd); +int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs); +int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd); +int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd); +int gve_flow_rules_reset(struct gve_priv *priv); /* report stats handling */ void gve_handle_report_stats(struct gve_priv *priv); /* exported by ethtool.c */ diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c index 8ca0def176ef..c5bbc1b7524e 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.c +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -44,6 +44,7 @@ void gve_parse_device_option(struct gve_priv *priv, struct gve_device_option_jumbo_frames **dev_op_jumbo_frames, struct gve_device_option_dqo_qpl **dev_op_dqo_qpl, struct gve_device_option_buffer_sizes **dev_op_buffer_sizes, + struct gve_device_option_flow_steering **dev_op_flow_steering, struct gve_device_option_modify_ring 
**dev_op_modify_ring) { u32 req_feat_mask = be32_to_cpu(option->required_features_mask); @@ -189,6 +190,23 @@ void gve_parse_device_option(struct gve_priv *priv, if (option_length == GVE_DEVICE_OPTION_NO_MIN_RING_SIZE) priv->default_min_ring_size = true; break; + case GVE_DEV_OPT_ID_FLOW_STEERING: + if (option_length < sizeof(**dev_op_flow_steering) || + req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING) { + dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, + "Flow Steering", + (int)sizeof(**dev_op_flow_steering), + GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING, + option_length, req_feat_mask); + break; + } + + if (option_length > sizeof(**dev_op_flow_steering)) + dev_warn(&priv->pdev->dev, + GVE_DEVICE_OPTION_TOO_BIG_FMT, + "Flow Steering"); + *dev_op_flow_steering = (void *)(option + 1); + break; default: /* If we don't recognize the option just continue * without doing anything. @@ -208,6 +226,7 @@ gve_process_device_options(struct gve_priv *priv, struct gve_device_option_jumbo_frames **dev_op_jumbo_frames, struct gve_device_option_dqo_qpl **dev_op_dqo_qpl, struct gve_device_option_buffer_sizes **dev_op_buffer_sizes, + struct gve_device_option_flow_steering **dev_op_flow_steering, struct gve_device_option_modify_ring **dev_op_modify_ring) { const int num_options = be16_to_cpu(descriptor->num_device_options); @@ -230,7 +249,7 @@ gve_process_device_options(struct gve_priv *priv, dev_op_gqi_rda, dev_op_gqi_qpl, dev_op_dqo_rda, dev_op_jumbo_frames, dev_op_dqo_qpl, dev_op_buffer_sizes, - dev_op_modify_ring); + dev_op_flow_steering, dev_op_modify_ring); dev_opt = next_opt; } @@ -268,6 +287,8 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv) priv->adminq_report_stats_cnt = 0; priv->adminq_report_link_speed_cnt = 0; priv->adminq_get_ptype_map_cnt = 0; + priv->adminq_query_flow_rules_cnt = 0; + priv->adminq_cfg_flow_rule_cnt = 0; /* Setup Admin queue with the device */ if (priv->pdev->revision < 0x1) { @@ -284,6 +305,7 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv) &priv->reg_bar0->adminq_base_address_lo); iowrite32be(GVE_DRIVER_STATUS_RUN_MASK, &priv->reg_bar0->driver_status); } + mutex_init(&priv->adminq_lock); gve_set_admin_queue_ok(priv); return 0; } @@ -460,6 +482,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv, memcpy(cmd, cmd_orig, sizeof(*cmd_orig)); opcode = be32_to_cpu(READ_ONCE(cmd->opcode)); + if (opcode == GVE_ADMINQ_EXTENDED_COMMAND) + opcode = be32_to_cpu(cmd->extended_command.inner_opcode); switch (opcode) { case GVE_ADMINQ_DESCRIBE_DEVICE: @@ -504,6 +528,12 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv, case GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY: priv->adminq_verify_driver_compatibility_cnt++; break; + case GVE_ADMINQ_QUERY_FLOW_RULES: + priv->adminq_query_flow_rules_cnt++; + break; + case GVE_ADMINQ_CONFIGURE_FLOW_RULE: + priv->adminq_cfg_flow_rule_cnt++; + break; default: dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode); } @@ -511,28 +541,58 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv, return 0; } -/* This function is not threadsafe - the caller is responsible for any - * necessary locks. - * The caller is also responsible for making sure there are no commands - * waiting to be executed. 
- */ static int gve_adminq_execute_cmd(struct gve_priv *priv, union gve_adminq_command *cmd_orig) { u32 tail, head; int err; + mutex_lock(&priv->adminq_lock); tail = ioread32be(&priv->reg_bar0->adminq_event_counter); head = priv->adminq_prod_cnt; - if (tail != head) - // This is not a valid path - return -EINVAL; + if (tail != head) { + err = -EINVAL; + goto out; + } err = gve_adminq_issue_cmd(priv, cmd_orig); if (err) - return err; + goto out; - return gve_adminq_kick_and_wait(priv); + err = gve_adminq_kick_and_wait(priv); + +out: + mutex_unlock(&priv->adminq_lock); + return err; +} + +static int gve_adminq_execute_extended_cmd(struct gve_priv *priv, u32 opcode, + size_t cmd_size, void *cmd_orig) +{ + union gve_adminq_command cmd; + dma_addr_t inner_cmd_bus; + void *inner_cmd; + int err; + + inner_cmd = dma_alloc_coherent(&priv->pdev->dev, cmd_size, + &inner_cmd_bus, GFP_KERNEL); + if (!inner_cmd) + return -ENOMEM; + + memcpy(inner_cmd, cmd_orig, cmd_size); + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = cpu_to_be32(GVE_ADMINQ_EXTENDED_COMMAND); + cmd.extended_command = (struct gve_adminq_extended_command) { + .inner_opcode = cpu_to_be32(opcode), + .inner_length = cpu_to_be32(cmd_size), + .inner_command_addr = cpu_to_be64(inner_cmd_bus), + }; + + err = gve_adminq_execute_cmd(priv, &cmd); + + dma_free_coherent(&priv->pdev->dev, cmd_size, inner_cmd, inner_cmd_bus); + return err; } /* The device specifies that the management vector can either be the first irq @@ -805,6 +865,8 @@ static void gve_enable_supported_features(struct gve_priv *priv, *dev_op_dqo_qpl, const struct gve_device_option_buffer_sizes *dev_op_buffer_sizes, + const struct gve_device_option_flow_steering + *dev_op_flow_steering, const struct gve_device_option_modify_ring *dev_op_modify_ring) { @@ -857,10 +919,23 @@ static void gve_enable_supported_features(struct gve_priv *priv, priv->min_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_tx_ring_size); } } + + if (dev_op_flow_steering && + (supported_features_mask & GVE_SUP_FLOW_STEERING_MASK)) { + if (dev_op_flow_steering->max_flow_rules) { + priv->max_flow_rules = + be32_to_cpu(dev_op_flow_steering->max_flow_rules); + priv->dev->hw_features |= NETIF_F_NTUPLE; + dev_info(&priv->pdev->dev, + "FLOW STEERING device option enabled with max rule limit of %u.\n", + priv->max_flow_rules); + } + } } int gve_adminq_describe_device(struct gve_priv *priv) { + struct gve_device_option_flow_steering *dev_op_flow_steering = NULL; struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL; struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL; struct gve_device_option_modify_ring *dev_op_modify_ring = NULL; @@ -897,6 +972,7 @@ int gve_adminq_describe_device(struct gve_priv *priv) &dev_op_gqi_qpl, &dev_op_dqo_rda, &dev_op_jumbo_frames, &dev_op_dqo_qpl, &dev_op_buffer_sizes, + &dev_op_flow_steering, &dev_op_modify_ring); if (err) goto free_device_descriptor; @@ -958,7 +1034,8 @@ int gve_adminq_describe_device(struct gve_priv *priv) gve_enable_supported_features(priv, supported_features_mask, dev_op_jumbo_frames, dev_op_dqo_qpl, - dev_op_buffer_sizes, dev_op_modify_ring); + dev_op_buffer_sizes, dev_op_flow_steering, + dev_op_modify_ring); free_device_descriptor: dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus); @@ -1121,3 +1198,130 @@ err: ptype_map_bus); return err; } + +static int +gve_adminq_configure_flow_rule(struct gve_priv *priv, + struct gve_adminq_configure_flow_rule *flow_rule_cmd) +{ + int err = gve_adminq_execute_extended_cmd(priv, + 
GVE_ADMINQ_CONFIGURE_FLOW_RULE, + sizeof(struct gve_adminq_configure_flow_rule), + flow_rule_cmd); + + if (err) { + dev_err(&priv->pdev->dev, "Timeout to configure the flow rule, trigger reset"); + gve_reset(priv, true); + } else { + priv->flow_rules_cache.rules_cache_synced = false; + } + + return err; +} + +int gve_adminq_add_flow_rule(struct gve_priv *priv, struct gve_adminq_flow_rule *rule, u32 loc) +{ + struct gve_adminq_configure_flow_rule flow_rule_cmd = { + .opcode = cpu_to_be16(GVE_FLOW_RULE_CFG_ADD), + .location = cpu_to_be32(loc), + .rule = *rule, + }; + + return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd); +} + +int gve_adminq_del_flow_rule(struct gve_priv *priv, u32 loc) +{ + struct gve_adminq_configure_flow_rule flow_rule_cmd = { + .opcode = cpu_to_be16(GVE_FLOW_RULE_CFG_DEL), + .location = cpu_to_be32(loc), + }; + + return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd); +} + +int gve_adminq_reset_flow_rules(struct gve_priv *priv) +{ + struct gve_adminq_configure_flow_rule flow_rule_cmd = { + .opcode = cpu_to_be16(GVE_FLOW_RULE_CFG_RESET), + }; + + return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd); +} + +/* In the dma memory that the driver allocated for the device to query the flow rules, the device + * will first write it with a struct of gve_query_flow_rules_descriptor. Next to it, the device + * will write an array of rules or rule ids with the count that specified in the descriptor. + * For GVE_FLOW_RULE_QUERY_STATS, the device will only write the descriptor. + */ +static int gve_adminq_process_flow_rules_query(struct gve_priv *priv, u16 query_opcode, + struct gve_query_flow_rules_descriptor *descriptor) +{ + struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache; + u32 num_queried_rules, total_memory_len, rule_info_len; + void *rule_info; + + total_memory_len = be32_to_cpu(descriptor->total_length); + num_queried_rules = be32_to_cpu(descriptor->num_queried_rules); + rule_info = (void *)(descriptor + 1); + + switch (query_opcode) { + case GVE_FLOW_RULE_QUERY_RULES: + rule_info_len = num_queried_rules * sizeof(*flow_rules_cache->rules_cache); + if (sizeof(*descriptor) + rule_info_len != total_memory_len) { + dev_err(&priv->dev->dev, "flow rules query is out of memory.\n"); + return -ENOMEM; + } + + memcpy(flow_rules_cache->rules_cache, rule_info, rule_info_len); + flow_rules_cache->rules_cache_num = num_queried_rules; + break; + case GVE_FLOW_RULE_QUERY_IDS: + rule_info_len = num_queried_rules * sizeof(*flow_rules_cache->rule_ids_cache); + if (sizeof(*descriptor) + rule_info_len != total_memory_len) { + dev_err(&priv->dev->dev, "flow rule ids query is out of memory.\n"); + return -ENOMEM; + } + + memcpy(flow_rules_cache->rule_ids_cache, rule_info, rule_info_len); + flow_rules_cache->rule_ids_cache_num = num_queried_rules; + break; + case GVE_FLOW_RULE_QUERY_STATS: + priv->num_flow_rules = be32_to_cpu(descriptor->num_flow_rules); + priv->max_flow_rules = be32_to_cpu(descriptor->max_flow_rules); + return 0; + default: + return -EINVAL; + } + + return 0; +} + +int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc) +{ + struct gve_query_flow_rules_descriptor *descriptor; + union gve_adminq_command cmd; + dma_addr_t descriptor_bus; + int err = 0; + + memset(&cmd, 0, sizeof(cmd)); + descriptor = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL, &descriptor_bus); + if (!descriptor) + return -ENOMEM; + + cmd.opcode = cpu_to_be32(GVE_ADMINQ_QUERY_FLOW_RULES); + cmd.query_flow_rules = (struct 
gve_adminq_query_flow_rules) { + .opcode = cpu_to_be16(query_opcode), + .starting_rule_id = cpu_to_be32(starting_loc), + .available_length = cpu_to_be64(GVE_ADMINQ_BUFFER_SIZE), + .rule_descriptor_addr = cpu_to_be64(descriptor_bus), + }; + err = gve_adminq_execute_cmd(priv, &cmd); + if (err) + goto out; + + err = gve_adminq_process_flow_rules_query(priv, query_opcode, descriptor); + +out: + dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus); + return err; +} diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h index e64f0dbe744d..ed1370c9b197 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.h +++ b/drivers/net/ethernet/google/gve/gve_adminq.h @@ -25,6 +25,19 @@ enum gve_adminq_opcodes { GVE_ADMINQ_REPORT_LINK_SPEED = 0xD, GVE_ADMINQ_GET_PTYPE_MAP = 0xE, GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY = 0xF, + GVE_ADMINQ_QUERY_FLOW_RULES = 0x10, + + /* For commands that are larger than 56 bytes */ + GVE_ADMINQ_EXTENDED_COMMAND = 0xFF, +}; + +/* The normal adminq command is restricted to be 56 bytes at maximum. For the + * longer adminq command, it is wrapped by GVE_ADMINQ_EXTENDED_COMMAND with + * inner opcode of gve_adminq_extended_cmd_opcodes specified. The inner command + * is written in the dma memory allocated by GVE_ADMINQ_EXTENDED_COMMAND. + */ +enum gve_adminq_extended_cmd_opcodes { + GVE_ADMINQ_CONFIGURE_FLOW_RULE = 0x101, }; /* Admin queue status codes */ @@ -143,6 +156,14 @@ struct gve_device_option_modify_ring { static_assert(sizeof(struct gve_device_option_modify_ring) == 12); +struct gve_device_option_flow_steering { + __be32 supported_features_mask; + __be32 reserved; + __be32 max_flow_rules; +}; + +static_assert(sizeof(struct gve_device_option_flow_steering) == 12); + /* Terminology: * * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA @@ -160,6 +181,7 @@ enum gve_dev_opt_id { GVE_DEV_OPT_ID_DQO_QPL = 0x7, GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8, GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa, + GVE_DEV_OPT_ID_FLOW_STEERING = 0xb, }; enum gve_dev_opt_req_feat_mask { @@ -171,12 +193,14 @@ enum gve_dev_opt_req_feat_mask { GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0, GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0, GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0, + GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING = 0x0, }; enum gve_sup_feature_mask { GVE_SUP_MODIFY_RING_MASK = 1 << 0, GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2, GVE_SUP_BUFFER_SIZES_MASK = 1 << 4, + GVE_SUP_FLOW_STEERING_MASK = 1 << 5, }; #define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0 @@ -208,6 +232,14 @@ enum gve_driver_capbility { #define GVE_DRIVER_CAPABILITY_FLAGS3 0x0 #define GVE_DRIVER_CAPABILITY_FLAGS4 0x0 +struct gve_adminq_extended_command { + __be32 inner_opcode; + __be32 inner_length; + __be64 inner_command_addr; +}; + +static_assert(sizeof(struct gve_adminq_extended_command) == 16); + struct gve_driver_info { u8 os_type; /* 0x01 = Linux */ u8 driver_major; @@ -412,6 +444,71 @@ struct gve_adminq_get_ptype_map { __be64 ptype_map_addr; }; +/* Flow-steering related definitions */ +enum gve_adminq_flow_rule_cfg_opcode { + GVE_FLOW_RULE_CFG_ADD = 0, + GVE_FLOW_RULE_CFG_DEL = 1, + GVE_FLOW_RULE_CFG_RESET = 2, +}; + +enum gve_adminq_flow_rule_query_opcode { + GVE_FLOW_RULE_QUERY_RULES = 0, + GVE_FLOW_RULE_QUERY_IDS = 1, + GVE_FLOW_RULE_QUERY_STATS = 2, +}; + +enum gve_adminq_flow_type { + GVE_FLOW_TYPE_TCPV4, + GVE_FLOW_TYPE_UDPV4, + GVE_FLOW_TYPE_SCTPV4, + GVE_FLOW_TYPE_AHV4, + GVE_FLOW_TYPE_ESPV4, + GVE_FLOW_TYPE_TCPV6, + GVE_FLOW_TYPE_UDPV6, + GVE_FLOW_TYPE_SCTPV6, + 
GVE_FLOW_TYPE_AHV6, + GVE_FLOW_TYPE_ESPV6, +}; + +/* Flow-steering command */ +struct gve_adminq_flow_rule { + __be16 flow_type; + __be16 action; /* RX queue id */ + struct gve_flow_spec key; + struct gve_flow_spec mask; +}; + +struct gve_adminq_configure_flow_rule { + __be16 opcode; + u8 padding[2]; + struct gve_adminq_flow_rule rule; + __be32 location; +}; + +static_assert(sizeof(struct gve_adminq_configure_flow_rule) == 92); + +struct gve_query_flow_rules_descriptor { + __be32 num_flow_rules; + __be32 max_flow_rules; + __be32 num_queried_rules; + __be32 total_length; +}; + +struct gve_adminq_queried_flow_rule { + __be32 location; + struct gve_adminq_flow_rule flow_rule; +}; + +struct gve_adminq_query_flow_rules { + __be16 opcode; + u8 padding[2]; + __be32 starting_rule_id; + __be64 available_length; /* The dma memory length that the driver allocated */ + __be64 rule_descriptor_addr; /* The dma memory address */ +}; + +static_assert(sizeof(struct gve_adminq_query_flow_rules) == 24); + union gve_adminq_command { struct { __be32 opcode; @@ -432,6 +529,8 @@ union gve_adminq_command { struct gve_adminq_get_ptype_map get_ptype_map; struct gve_adminq_verify_driver_compatibility verify_driver_compatibility; + struct gve_adminq_query_flow_rules query_flow_rules; + struct gve_adminq_extended_command extended_command; }; }; u8 reserved[64]; @@ -465,6 +564,10 @@ int gve_adminq_verify_driver_compatibility(struct gve_priv *priv, u64 driver_info_len, dma_addr_t driver_info_addr); int gve_adminq_report_link_speed(struct gve_priv *priv); +int gve_adminq_add_flow_rule(struct gve_priv *priv, struct gve_adminq_flow_rule *rule, u32 loc); +int gve_adminq_del_flow_rule(struct gve_priv *priv, u32 loc); +int gve_adminq_reset_flow_rules(struct gve_priv *priv); +int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc); struct gve_ptype_lut; int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv, diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c index fe1741d482b4..3480ff5c7ed6 100644 --- a/drivers/net/ethernet/google/gve/gve_ethtool.c +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0 OR MIT) /* Google virtual Ethernet (gve) driver * - * Copyright (C) 2015-2021 Google, Inc. 
+ * Copyright (C) 2015-2024 Google LLC */ #include <linux/rtnetlink.h> @@ -74,7 +74,8 @@ static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = { "adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt", "adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt", "adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt", - "adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt" + "adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt", + "adminq_query_flow_rules", "adminq_cfg_flow_rule", }; static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = { @@ -450,6 +451,8 @@ gve_get_ethtool_stats(struct net_device *netdev, data[i++] = priv->adminq_report_stats_cnt; data[i++] = priv->adminq_report_link_speed_cnt; data[i++] = priv->adminq_get_ptype_map_cnt; + data[i++] = priv->adminq_query_flow_rules_cnt; + data[i++] = priv->adminq_cfg_flow_rule_cnt; } static void gve_get_channels(struct net_device *netdev, @@ -772,6 +775,69 @@ static int gve_set_coalesce(struct net_device *netdev, return 0; } +static int gve_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct gve_priv *priv = netdev_priv(netdev); + int err = 0; + + if (!(netdev->features & NETIF_F_NTUPLE)) + return -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + err = gve_add_flow_rule(priv, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + err = gve_del_flow_rule(priv, cmd); + break; + case ETHTOOL_SRXFH: + err = -EOPNOTSUPP; + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int gve_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct gve_priv *priv = netdev_priv(netdev); + int err = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = priv->rx_cfg.num_queues; + break; + case ETHTOOL_GRXCLSRLCNT: + if (!priv->max_flow_rules) + return -EOPNOTSUPP; + + err = gve_adminq_query_flow_rules(priv, GVE_FLOW_RULE_QUERY_STATS, 0); + if (err) + return err; + + cmd->rule_cnt = priv->num_flow_rules; + cmd->data = priv->max_flow_rules; + break; + case ETHTOOL_GRXCLSRULE: + err = gve_get_flow_rule_entry(priv, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + err = gve_get_flow_rule_ids(priv, cmd, (u32 *)rule_locs); + break; + case ETHTOOL_GRXFH: + err = -EOPNOTSUPP; + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + const struct ethtool_ops gve_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS, .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT, @@ -783,6 +849,8 @@ const struct ethtool_ops gve_ethtool_ops = { .get_msglevel = gve_get_msglevel, .set_channels = gve_set_channels, .get_channels = gve_get_channels, + .set_rxnfc = gve_set_rxnfc, + .get_rxnfc = gve_get_rxnfc, .get_link = ethtool_op_get_link, .get_coalesce = gve_get_coalesce, .set_coalesce = gve_set_coalesce, diff --git a/drivers/net/ethernet/google/gve/gve_flow_rule.c b/drivers/net/ethernet/google/gve/gve_flow_rule.c new file mode 100644 index 000000000000..0bb8cd1876a3 --- /dev/null +++ b/drivers/net/ethernet/google/gve/gve_flow_rule.c @@ -0,0 +1,298 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Google virtual Ethernet (gve) driver + * + * Copyright (C) 2015-2024 Google LLC + */ + +#include "gve.h" +#include "gve_adminq.h" + +static +int gve_fill_ethtool_flow_spec(struct ethtool_rx_flow_spec *fsp, + struct gve_adminq_queried_flow_rule *rule) +{ + struct gve_adminq_flow_rule *flow_rule = &rule->flow_rule; + static const u16 flow_type_lut[] = { 
+ [GVE_FLOW_TYPE_TCPV4] = TCP_V4_FLOW, + [GVE_FLOW_TYPE_UDPV4] = UDP_V4_FLOW, + [GVE_FLOW_TYPE_SCTPV4] = SCTP_V4_FLOW, + [GVE_FLOW_TYPE_AHV4] = AH_V4_FLOW, + [GVE_FLOW_TYPE_ESPV4] = ESP_V4_FLOW, + [GVE_FLOW_TYPE_TCPV6] = TCP_V6_FLOW, + [GVE_FLOW_TYPE_UDPV6] = UDP_V6_FLOW, + [GVE_FLOW_TYPE_SCTPV6] = SCTP_V6_FLOW, + [GVE_FLOW_TYPE_AHV6] = AH_V6_FLOW, + [GVE_FLOW_TYPE_ESPV6] = ESP_V6_FLOW, + }; + + if (be16_to_cpu(flow_rule->flow_type) >= ARRAY_SIZE(flow_type_lut)) + return -EINVAL; + + fsp->flow_type = flow_type_lut[be16_to_cpu(flow_rule->flow_type)]; + + memset(&fsp->h_u, 0, sizeof(fsp->h_u)); + memset(&fsp->h_ext, 0, sizeof(fsp->h_ext)); + memset(&fsp->m_u, 0, sizeof(fsp->m_u)); + memset(&fsp->m_ext, 0, sizeof(fsp->m_ext)); + + switch (fsp->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + fsp->h_u.tcp_ip4_spec.ip4src = flow_rule->key.src_ip[0]; + fsp->h_u.tcp_ip4_spec.ip4dst = flow_rule->key.dst_ip[0]; + fsp->h_u.tcp_ip4_spec.psrc = flow_rule->key.src_port; + fsp->h_u.tcp_ip4_spec.pdst = flow_rule->key.dst_port; + fsp->h_u.tcp_ip4_spec.tos = flow_rule->key.tos; + fsp->m_u.tcp_ip4_spec.ip4src = flow_rule->mask.src_ip[0]; + fsp->m_u.tcp_ip4_spec.ip4dst = flow_rule->mask.dst_ip[0]; + fsp->m_u.tcp_ip4_spec.psrc = flow_rule->mask.src_port; + fsp->m_u.tcp_ip4_spec.pdst = flow_rule->mask.dst_port; + fsp->m_u.tcp_ip4_spec.tos = flow_rule->mask.tos; + break; + case AH_V4_FLOW: + case ESP_V4_FLOW: + fsp->h_u.ah_ip4_spec.ip4src = flow_rule->key.src_ip[0]; + fsp->h_u.ah_ip4_spec.ip4dst = flow_rule->key.dst_ip[0]; + fsp->h_u.ah_ip4_spec.spi = flow_rule->key.spi; + fsp->h_u.ah_ip4_spec.tos = flow_rule->key.tos; + fsp->m_u.ah_ip4_spec.ip4src = flow_rule->mask.src_ip[0]; + fsp->m_u.ah_ip4_spec.ip4dst = flow_rule->mask.dst_ip[0]; + fsp->m_u.ah_ip4_spec.spi = flow_rule->mask.spi; + fsp->m_u.ah_ip4_spec.tos = flow_rule->mask.tos; + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + memcpy(fsp->h_u.tcp_ip6_spec.ip6src, &flow_rule->key.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, &flow_rule->key.dst_ip, + sizeof(struct in6_addr)); + fsp->h_u.tcp_ip6_spec.psrc = flow_rule->key.src_port; + fsp->h_u.tcp_ip6_spec.pdst = flow_rule->key.dst_port; + fsp->h_u.tcp_ip6_spec.tclass = flow_rule->key.tclass; + memcpy(fsp->m_u.tcp_ip6_spec.ip6src, &flow_rule->mask.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, &flow_rule->mask.dst_ip, + sizeof(struct in6_addr)); + fsp->m_u.tcp_ip6_spec.psrc = flow_rule->mask.src_port; + fsp->m_u.tcp_ip6_spec.pdst = flow_rule->mask.dst_port; + fsp->m_u.tcp_ip6_spec.tclass = flow_rule->mask.tclass; + break; + case AH_V6_FLOW: + case ESP_V6_FLOW: + memcpy(fsp->h_u.ah_ip6_spec.ip6src, &flow_rule->key.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &flow_rule->key.dst_ip, + sizeof(struct in6_addr)); + fsp->h_u.ah_ip6_spec.spi = flow_rule->key.spi; + fsp->h_u.ah_ip6_spec.tclass = flow_rule->key.tclass; + memcpy(fsp->m_u.ah_ip6_spec.ip6src, &flow_rule->mask.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &flow_rule->mask.dst_ip, + sizeof(struct in6_addr)); + fsp->m_u.ah_ip6_spec.spi = flow_rule->mask.spi; + fsp->m_u.ah_ip6_spec.tclass = flow_rule->mask.tclass; + break; + default: + return -EINVAL; + } + + fsp->ring_cookie = be16_to_cpu(flow_rule->action); + + return 0; +} + +static int gve_generate_flow_rule(struct gve_priv *priv, struct ethtool_rx_flow_spec *fsp, + struct gve_adminq_flow_rule *rule) +{ + static const u16 flow_type_lut[] = { + 
[TCP_V4_FLOW] = GVE_FLOW_TYPE_TCPV4, + [UDP_V4_FLOW] = GVE_FLOW_TYPE_UDPV4, + [SCTP_V4_FLOW] = GVE_FLOW_TYPE_SCTPV4, + [AH_V4_FLOW] = GVE_FLOW_TYPE_AHV4, + [ESP_V4_FLOW] = GVE_FLOW_TYPE_ESPV4, + [TCP_V6_FLOW] = GVE_FLOW_TYPE_TCPV6, + [UDP_V6_FLOW] = GVE_FLOW_TYPE_UDPV6, + [SCTP_V6_FLOW] = GVE_FLOW_TYPE_SCTPV6, + [AH_V6_FLOW] = GVE_FLOW_TYPE_AHV6, + [ESP_V6_FLOW] = GVE_FLOW_TYPE_ESPV6, + }; + u32 flow_type; + + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) + return -EOPNOTSUPP; + + if (fsp->ring_cookie >= priv->rx_cfg.num_queues) + return -EINVAL; + + rule->action = cpu_to_be16(fsp->ring_cookie); + + flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS); + if (!flow_type || flow_type >= ARRAY_SIZE(flow_type_lut)) + return -EINVAL; + + rule->flow_type = cpu_to_be16(flow_type_lut[flow_type]); + + switch (flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + rule->key.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; + rule->key.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; + rule->key.src_port = fsp->h_u.tcp_ip4_spec.psrc; + rule->key.dst_port = fsp->h_u.tcp_ip4_spec.pdst; + rule->mask.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; + rule->mask.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; + rule->mask.src_port = fsp->m_u.tcp_ip4_spec.psrc; + rule->mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst; + break; + case AH_V4_FLOW: + case ESP_V4_FLOW: + rule->key.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; + rule->key.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; + rule->key.spi = fsp->h_u.ah_ip4_spec.spi; + rule->mask.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; + rule->mask.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; + rule->mask.spi = fsp->m_u.ah_ip4_spec.spi; + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + memcpy(&rule->key.src_ip, fsp->h_u.tcp_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&rule->key.dst_ip, fsp->h_u.tcp_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + rule->key.src_port = fsp->h_u.tcp_ip6_spec.psrc; + rule->key.dst_port = fsp->h_u.tcp_ip6_spec.pdst; + memcpy(&rule->mask.src_ip, fsp->m_u.tcp_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&rule->mask.dst_ip, fsp->m_u.tcp_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + rule->mask.src_port = fsp->m_u.tcp_ip6_spec.psrc; + rule->mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst; + break; + case AH_V6_FLOW: + case ESP_V6_FLOW: + memcpy(&rule->key.src_ip, fsp->h_u.usr_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&rule->key.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + rule->key.spi = fsp->h_u.ah_ip6_spec.spi; + memcpy(&rule->mask.src_ip, fsp->m_u.usr_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&rule->mask.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + rule->key.spi = fsp->h_u.ah_ip6_spec.spi; + break; + default: + /* not doing un-parsed flow types */ + return -EINVAL; + } + + return 0; +} + +int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd) +{ + struct gve_adminq_queried_flow_rule *rules_cache = priv->flow_rules_cache.rules_cache; + struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + u32 *cache_num = &priv->flow_rules_cache.rules_cache_num; + struct gve_adminq_queried_flow_rule *rule = NULL; + int err = 0; + u32 i; + + if (!priv->max_flow_rules) + return -EOPNOTSUPP; + + if (!priv->flow_rules_cache.rules_cache_synced || + fsp->location < be32_to_cpu(rules_cache[0].location) || + fsp->location > be32_to_cpu(rules_cache[*cache_num - 1].location)) { + err = gve_adminq_query_flow_rules(priv, 
GVE_FLOW_RULE_QUERY_RULES, fsp->location); + if (err) + return err; + + priv->flow_rules_cache.rules_cache_synced = true; + } + + for (i = 0; i < *cache_num; i++) { + if (fsp->location == be32_to_cpu(rules_cache[i].location)) { + rule = &rules_cache[i]; + break; + } + } + + if (!rule) + return -EINVAL; + + err = gve_fill_ethtool_flow_spec(fsp, rule); + + return err; +} + +int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + __be32 *rule_ids_cache = priv->flow_rules_cache.rule_ids_cache; + u32 *cache_num = &priv->flow_rules_cache.rule_ids_cache_num; + u32 starting_rule_id = 0; + u32 i = 0, j = 0; + int err = 0; + + if (!priv->max_flow_rules) + return -EOPNOTSUPP; + + do { + err = gve_adminq_query_flow_rules(priv, GVE_FLOW_RULE_QUERY_IDS, + starting_rule_id); + if (err) + return err; + + for (i = 0; i < *cache_num; i++) { + if (j >= cmd->rule_cnt) + return -EMSGSIZE; + + rule_locs[j++] = be32_to_cpu(rule_ids_cache[i]); + starting_rule_id = be32_to_cpu(rule_ids_cache[i]) + 1; + } + } while (*cache_num != 0); + cmd->data = priv->max_flow_rules; + + return err; +} + +int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct gve_adminq_flow_rule *rule = NULL; + int err; + + if (!priv->max_flow_rules) + return -EOPNOTSUPP; + + rule = kvzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + err = gve_generate_flow_rule(priv, fsp, rule); + if (err) + goto out; + + err = gve_adminq_add_flow_rule(priv, rule, fsp->location); + +out: + kvfree(rule); + if (err) + dev_err(&priv->pdev->dev, "Failed to add the flow rule: %u", fsp->location); + + return err; +} + +int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + + if (!priv->max_flow_rules) + return -EOPNOTSUPP; + + return gve_adminq_del_flow_rule(priv, fsp->location); +} diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index cabf7d4bcecb..9744b426940e 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0 OR MIT) /* Google virtual Ethernet (gve) driver * - * Copyright (C) 2015-2021 Google, Inc. 
+ * Copyright (C) 2015-2024 Google LLC */ #include <linux/bpf.h> @@ -141,6 +141,49 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s) } } +static int gve_alloc_flow_rule_caches(struct gve_priv *priv) +{ + struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache; + int err = 0; + + if (!priv->max_flow_rules) + return 0; + + flow_rules_cache->rules_cache = + kvcalloc(GVE_FLOW_RULES_CACHE_SIZE, sizeof(*flow_rules_cache->rules_cache), + GFP_KERNEL); + if (!flow_rules_cache->rules_cache) { + dev_err(&priv->pdev->dev, "Cannot alloc flow rules cache\n"); + return -ENOMEM; + } + + flow_rules_cache->rule_ids_cache = + kvcalloc(GVE_FLOW_RULE_IDS_CACHE_SIZE, sizeof(*flow_rules_cache->rule_ids_cache), + GFP_KERNEL); + if (!flow_rules_cache->rule_ids_cache) { + dev_err(&priv->pdev->dev, "Cannot alloc flow rule ids cache\n"); + err = -ENOMEM; + goto free_rules_cache; + } + + return 0; + +free_rules_cache: + kvfree(flow_rules_cache->rules_cache); + flow_rules_cache->rules_cache = NULL; + return err; +} + +static void gve_free_flow_rule_caches(struct gve_priv *priv) +{ + struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache; + + kvfree(flow_rules_cache->rule_ids_cache); + flow_rules_cache->rule_ids_cache = NULL; + kvfree(flow_rules_cache->rules_cache); + flow_rules_cache->rules_cache = NULL; +} + static int gve_alloc_counter_array(struct gve_priv *priv) { priv->counter_array = @@ -521,9 +564,12 @@ static int gve_setup_device_resources(struct gve_priv *priv) { int err; - err = gve_alloc_counter_array(priv); + err = gve_alloc_flow_rule_caches(priv); if (err) return err; + err = gve_alloc_counter_array(priv); + if (err) + goto abort_with_flow_rule_caches; err = gve_alloc_notify_blocks(priv); if (err) goto abort_with_counter; @@ -575,6 +621,8 @@ abort_with_ntfy_blocks: gve_free_notify_blocks(priv); abort_with_counter: gve_free_counter_array(priv); +abort_with_flow_rule_caches: + gve_free_flow_rule_caches(priv); return err; } @@ -587,6 +635,12 @@ static void gve_teardown_device_resources(struct gve_priv *priv) /* Tell device its resources are being freed */ if (gve_get_device_resources_ok(priv)) { + err = gve_flow_rules_reset(priv); + if (err) { + dev_err(&priv->pdev->dev, + "Failed to reset flow rules: err=%d\n", err); + gve_trigger_reset(priv); + } /* detach the stats report */ err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD); if (err) { @@ -606,6 +660,7 @@ static void gve_teardown_device_resources(struct gve_priv *priv) kvfree(priv->ptype_lut_dqo); priv->ptype_lut_dqo = NULL; + gve_free_flow_rule_caches(priv); gve_free_counter_array(priv); gve_free_notify_blocks(priv); gve_free_stats_report(priv); @@ -1730,6 +1785,14 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp) } } +int gve_flow_rules_reset(struct gve_priv *priv) +{ + if (!priv->max_flow_rules) + return 0; + + return gve_adminq_reset_flow_rules(priv); +} + int gve_adjust_config(struct gve_priv *priv, struct gve_tx_alloc_rings_cfg *tx_alloc_cfg, struct gve_rx_alloc_rings_cfg *rx_alloc_cfg) @@ -2003,15 +2066,21 @@ static int gve_set_features(struct net_device *netdev, netdev->features ^= NETIF_F_LRO; if (netif_carrier_ok(netdev)) { err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); - if (err) { - /* Revert the change on error. 
*/ - netdev->features = orig_features; - return err; - } + if (err) + goto revert_features; } } + if ((netdev->features & NETIF_F_NTUPLE) && !(features & NETIF_F_NTUPLE)) { + err = gve_flow_rules_reset(priv); + if (err) + goto revert_features; + } return 0; + +revert_features: + netdev->features = orig_features; + return err; } static const struct net_device_ops gve_netdev_ops = { diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile index 8e9293e57bfd..e8af26da1fc1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/Makefile @@ -15,15 +15,14 @@ hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o -obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o +obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o hclge-common.o -hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o hns3vf/hclgevf_regs.o \ - hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o +hclge-common-objs += hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o -obj-$(CONFIG_HNS3_HCLGE) += hclge.o +hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o hns3vf/hclgevf_regs.o + +obj-$(CONFIG_HNS3_HCLGE) += hclge.o hclge-common.o hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o hns3pf/hclge_regs.o \ hns3pf/hclge_mbx.o hns3pf/hclge_err.o hns3pf/hclge_debugfs.o hns3pf/hclge_ptp.o hns3pf/hclge_devlink.o \ - hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o - hclge-$(CONFIG_HNS3_DCB) += hns3pf/hclge_dcb.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 7cebb08bd320..27dbe367f3d3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -786,7 +786,7 @@ struct hnae3_ae_ops { void (*get_rx_hwts)(struct hnae3_handle *handle, struct sk_buff *skb, u32 nsec, u32 sec); int (*get_ts_info)(struct hnae3_handle *handle, - struct ethtool_ts_info *info); + struct kernel_ethtool_ts_info *info); int (*get_link_diagnosis_info)(struct hnae3_handle *handle, u32 *status_code); void (*clean_vf_config)(struct hnae3_ae_dev *ae_dev, int num_vfs); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c index ea40b594dbac..4ad4e8ab2f1f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c @@ -48,6 +48,7 @@ void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read) else desc->flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_WR); } +EXPORT_SYMBOL_GPL(hclge_comm_cmd_reuse_desc); static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev, bool is_pf) @@ -72,6 +73,7 @@ void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc, if (is_read) desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR); } +EXPORT_SYMBOL_GPL(hclge_comm_cmd_setup_basic_desc); int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw, bool en) @@ -517,6 +519,7 @@ int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc, return ret; } +EXPORT_SYMBOL_GPL(hclge_comm_cmd_send); static void hclge_comm_cmd_uninit_regs(struct hclge_comm_hw *hw) { @@ -553,6 +556,7 @@ void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev, 
hclge_comm_free_cmd_desc(&cmdq->csq); hclge_comm_free_cmd_desc(&cmdq->crq); } +EXPORT_SYMBOL_GPL(hclge_comm_cmd_uninit); int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw) { @@ -591,6 +595,7 @@ err_csq: hclge_comm_free_cmd_desc(&hw->cmq.csq); return ret; } +EXPORT_SYMBOL_GPL(hclge_comm_cmd_queue_init); void hclge_comm_cmd_init_ops(struct hclge_comm_hw *hw, const struct hclge_comm_cmq_ops *ops) @@ -602,6 +607,7 @@ void hclge_comm_cmd_init_ops(struct hclge_comm_hw *hw, cmdq->ops.trace_cmd_get = ops->trace_cmd_get; } } +EXPORT_SYMBOL_GPL(hclge_comm_cmd_init_ops); int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw, u32 *fw_version, bool is_pf, @@ -672,3 +678,8 @@ err_cmd_init: return ret; } +EXPORT_SYMBOL_GPL(hclge_comm_cmd_init); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet PF/VF Common Library"); +MODULE_AUTHOR("Huawei Tech. Co., Ltd."); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c index b4ae2160aff4..4e2bb6556b1c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c @@ -62,6 +62,7 @@ int hclge_comm_rss_init_cfg(struct hnae3_handle *nic, return 0; } +EXPORT_SYMBOL_GPL(hclge_comm_rss_init_cfg); void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset, u16 *tc_valid, u16 *tc_size) @@ -78,6 +79,7 @@ void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset, tc_offset[i] = (hw_tc_map & BIT(i)) ? rss_size * i : 0; } } +EXPORT_SYMBOL_GPL(hclge_comm_get_rss_tc_info); int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset, u16 *tc_valid, u16 *tc_size) @@ -113,6 +115,7 @@ int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset, return ret; } +EXPORT_SYMBOL_GPL(hclge_comm_set_rss_tc_mode); int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg, struct hclge_comm_hw *hw, const u8 *key, @@ -143,6 +146,7 @@ int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg, return 0; } +EXPORT_SYMBOL_GPL(hclge_comm_set_rss_hash_key); int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw, @@ -185,11 +189,13 @@ int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev, rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; return 0; } +EXPORT_SYMBOL_GPL(hclge_comm_set_rss_tuple); u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle) { return HCLGE_COMM_RSS_KEY_SIZE; } +EXPORT_SYMBOL_GPL(hclge_comm_get_rss_key_size); int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg, const u8 hfunc, u8 *hash_algo) @@ -217,6 +223,7 @@ void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev, for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size; } +EXPORT_SYMBOL_GPL(hclge_comm_rss_indir_init_cfg); int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type, u8 *tuple_sets) @@ -250,6 +257,7 @@ int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type, return 0; } +EXPORT_SYMBOL_GPL(hclge_comm_get_rss_tuple); static void hclge_comm_append_rss_msb_info(struct hclge_comm_rss_ind_tbl_cmd *req, @@ -304,6 +312,7 @@ int hclge_comm_set_rss_indir_table(struct hnae3_ae_dev *ae_dev, } return 0; } +EXPORT_SYMBOL_GPL(hclge_comm_set_rss_indir_table); int hclge_comm_set_rss_input_tuple(struct hclge_comm_hw *hw, struct 
hclge_comm_rss_cfg *rss_cfg) @@ -332,6 +341,7 @@ int hclge_comm_set_rss_input_tuple(struct hclge_comm_hw *hw, "failed to configure rss input, ret = %d.\n", ret); return ret; } +EXPORT_SYMBOL_GPL(hclge_comm_set_rss_input_tuple); void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key, u8 *hfunc) @@ -355,6 +365,7 @@ void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key, if (key) memcpy(key, rss_cfg->rss_hash_key, HCLGE_COMM_RSS_KEY_SIZE); } +EXPORT_SYMBOL_GPL(hclge_comm_get_rss_hash_info); void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg, u32 *indir, u16 rss_ind_tbl_size) @@ -367,6 +378,7 @@ void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg, for (i = 0; i < rss_ind_tbl_size; i++) indir[i] = rss_cfg->rss_indirection_tbl[i]; } +EXPORT_SYMBOL_GPL(hclge_comm_get_rss_indir_tbl); int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc, const u8 *key) @@ -408,6 +420,7 @@ int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc, return 0; } +EXPORT_SYMBOL_GPL(hclge_comm_set_rss_algo_key); static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc) { @@ -502,3 +515,4 @@ u64 hclge_comm_convert_rss_tuple(u8 tuple_sets) return tuple_data; } +EXPORT_SYMBOL_GPL(hclge_comm_convert_rss_tuple); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c index 618f66d9586b..2b31188ff555 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c @@ -26,6 +26,7 @@ u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data) return buff; } +EXPORT_SYMBOL_GPL(hclge_comm_tqps_get_stats); int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle) { @@ -33,6 +34,7 @@ int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle) return kinfo->num_tqps * HCLGE_COMM_QUEUE_PAIR_SIZE; } +EXPORT_SYMBOL_GPL(hclge_comm_tqps_get_sset_count); u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data) { @@ -56,6 +58,7 @@ u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data) return buff; } +EXPORT_SYMBOL_GPL(hclge_comm_tqps_get_strings); int hclge_comm_tqps_update_stats(struct hnae3_handle *handle, struct hclge_comm_hw *hw) @@ -99,6 +102,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle, return 0; } +EXPORT_SYMBOL_GPL(hclge_comm_tqps_update_stats); void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle) { @@ -113,3 +117,4 @@ void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle) memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); } } +EXPORT_SYMBOL_GPL(hclge_comm_reset_tqp_stats); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 941cb529d671..b1e988347347 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -2009,7 +2009,7 @@ static int hns3_set_tunable(struct net_device *netdev, ETHTOOL_RING_USE_TX_PUSH) static int hns3_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct hnae3_handle *handle = hns3_get_handle(netdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c index 507d7ce26d83..5fff8ed388f8 100644 --- 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c @@ -378,7 +378,7 @@ int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr) } int hclge_ptp_get_ts_info(struct hnae3_handle *handle, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h index bbee74cd8404..63483636c074 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h @@ -138,6 +138,6 @@ int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr); int hclge_ptp_init(struct hclge_dev *hdev); void hclge_ptp_uninit(struct hclge_dev *hdev); int hclge_ptp_get_ts_info(struct hnae3_handle *handle, - struct ethtool_ts_info *info); + struct kernel_ethtool_ts_info *info); int hclge_ptp_cfg_qry(struct hclge_dev *hdev, u32 *cfg); #endif diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index e0287fbd501d..0375c7448a57 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -384,17 +384,6 @@ config IGC_LEDS Optional support for controlling the NIC LED's with the netdev LED trigger. -config IDPF - tristate "Intel(R) Infrastructure Data Path Function Support" - depends on PCI_MSI - select DIMLIB - select PAGE_POOL - select PAGE_POOL_STATS - help - This driver supports Intel(R) Infrastructure Data Path Function - devices. - - To compile this driver as a module, choose M here. The module - will be called idpf. +source "drivers/net/ethernet/intel/idpf/Kconfig" endif # NET_VENDOR_INTEL diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 9b068d40778d..aa139b67a55b 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -161,7 +161,6 @@ #define FIRMWARE_D102E "e100/d102e_ucode.bin" MODULE_DESCRIPTION(DRV_DESCRIPTION); -MODULE_AUTHOR(DRV_COPYRIGHT); MODULE_LICENSE("GPL v2"); MODULE_FIRMWARE(FIRMWARE_D101M); MODULE_FIRMWARE(FIRMWARE_D101S); diff --git a/drivers/net/ethernet/intel/e1000/Makefile b/drivers/net/ethernet/intel/e1000/Makefile index 314c52d44b7c..79491dec47e1 100644 --- a/drivers/net/ethernet/intel/e1000/Makefile +++ b/drivers/net/ethernet/intel/e1000/Makefile @@ -7,4 +7,4 @@ obj-$(CONFIG_E1000) += e1000.o -e1000-objs := e1000_main.o e1000_hw.o e1000_ethtool.o e1000_param.o +e1000-y := e1000_main.o e1000_hw.o e1000_ethtool.o e1000_param.o diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 60fff9a6c53e..ab7ae418d294 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -187,7 +187,6 @@ static struct pci_driver e1000_driver = { .err_handler = &e1000_err_handler }; -MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile index 0baa15503c38..18f22b6374d5 100644 --- a/drivers/net/ethernet/intel/e1000e/Makefile +++ b/drivers/net/ethernet/intel/e1000e/Makefile @@ -10,7 +10,6 @@ subdir-ccflags-y += -I$(src) obj-$(CONFIG_E1000E) += e1000e.o -e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \ - mac.o manage.o nvm.o phy.o \ - param.o 
ethtool.o netdev.o ptp.o - +e1000e-y := 82571.o ich8lan.o 80003es2lan.o \ + mac.o manage.o nvm.o phy.o \ + param.o ethtool.o netdev.o ptp.o diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 85da20778e0f..9364bc2b4eb1 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -2263,7 +2263,7 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_keee *edata) } static int e1000e_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct e1000_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 3cd161c6672b..360ee26557f7 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -7969,7 +7969,6 @@ static void __exit e1000_exit_module(void) } module_exit(e1000_exit_module); -MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index fc373472e4e1..142f07ca8bc0 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -17,7 +17,6 @@ static const char fm10k_driver_string[] = DRV_SUMMARY; static const char fm10k_copyright[] = "Copyright(c) 2013 - 2019 Intel Corporation."; -MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION(DRV_SUMMARY); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile index cad93f323bd5..9faa4339a76c 100644 --- a/drivers/net/ethernet/intel/i40e/Makefile +++ b/drivers/net/ethernet/intel/i40e/Makefile @@ -10,7 +10,7 @@ subdir-ccflags-y += -I$(src) obj-$(CONFIG_I40E) += i40e.o -i40e-objs := i40e_main.o \ +i40e-y := i40e_main.o \ i40e_ethtool.o \ i40e_adminq.o \ i40e_common.o \ diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index bca2084cc54b..d546567e0286 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -735,7 +735,7 @@ __i40e_pf_next_veb(struct i40e_pf *pf, int *idx) _i++, _veb = __i40e_pf_next_veb(_pf, &_i)) /** - * i40e_mac_to_hkey - Convert a 6-byte MAC Address to a u64 hash key + * i40e_addr_to_hkey - Convert a 6-byte MAC Address to a u64 hash key * @macaddr: the MAC Address as the base key * * Simply copies the address and returns it as a u64 for hashing diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 4e28785c9fb2..1d0d2e526adb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2546,7 +2546,7 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, } static int i40e_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct i40e_pf *pf = i40e_netdev_to_pf(dev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 310513d9321b..cbcfada7b357 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -98,7 +98,6 @@ static int debug = -1; module_param(debug, uint, 0); MODULE_PARM_DESC(debug, "Debug level 
(0=none,...,16=all), Debug mask (0x8XXXXXXX)"); -MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver"); MODULE_IMPORT_NS(LIBIE); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile index 2d154a4e2fd7..356ac9faa5bf 100644 --- a/drivers/net/ethernet/intel/iavf/Makefile +++ b/drivers/net/ethernet/intel/iavf/Makefile @@ -11,6 +11,5 @@ subdir-ccflags-y += -I$(src) obj-$(CONFIG_IAVF) += iavf.o -iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \ - iavf_adv_rss.o \ - iavf_txrx.o iavf_common.o iavf_adminq.o +iavf-y := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \ + iavf_adv_rss.o iavf_txrx.o iavf_common.o iavf_adminq.o diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index c6dff0963053..ff11bafb3b4f 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -45,7 +45,6 @@ static const struct pci_device_id iavf_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, iavf_pci_tbl); MODULE_ALIAS("i40evf"); -MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver"); MODULE_IMPORT_NS(LIBETH); MODULE_IMPORT_NS(LIBIE); diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c index 704e9ad5144e..810a901d7afd 100644 --- a/drivers/net/ethernet/intel/ice/devlink/devlink.c +++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c @@ -794,10 +794,8 @@ int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *v tc_node = pi->root->children[0]; mutex_lock(&pi->sched_lock); - devl_lock(devlink); for (i = 0; i < tc_node->num_children; i++) ice_traverse_tx_tree(devlink, tc_node->children[i], tc_node, pf); - devl_unlock(devlink); mutex_unlock(&pi->sched_lock); return 0; @@ -1383,9 +1381,129 @@ ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id, return 0; } +#define DEVLINK_LOCAL_FWD_DISABLED_STR "disabled" +#define DEVLINK_LOCAL_FWD_ENABLED_STR "enabled" +#define DEVLINK_LOCAL_FWD_PRIORITIZED_STR "prioritized" + +/** + * ice_devlink_local_fwd_mode_to_str - Get string for local_fwd mode. + * @mode: local forwarding for mode used in port_info struct. + * + * Return: Mode respective string or "Invalid". + */ +static const char * +ice_devlink_local_fwd_mode_to_str(enum ice_local_fwd_mode mode) +{ + switch (mode) { + case ICE_LOCAL_FWD_MODE_ENABLED: + return DEVLINK_LOCAL_FWD_ENABLED_STR; + case ICE_LOCAL_FWD_MODE_PRIORITIZED: + return DEVLINK_LOCAL_FWD_PRIORITIZED_STR; + case ICE_LOCAL_FWD_MODE_DISABLED: + return DEVLINK_LOCAL_FWD_DISABLED_STR; + } + + return "Invalid"; +} + +/** + * ice_devlink_local_fwd_str_to_mode - Get local_fwd mode from string name. + * @mode_str: local forwarding mode string. + * + * Return: Mode value or negative number if invalid. + */ +static int ice_devlink_local_fwd_str_to_mode(const char *mode_str) +{ + if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_ENABLED_STR)) + return ICE_LOCAL_FWD_MODE_ENABLED; + else if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_PRIORITIZED_STR)) + return ICE_LOCAL_FWD_MODE_PRIORITIZED; + else if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_DISABLED_STR)) + return ICE_LOCAL_FWD_MODE_DISABLED; + + return -EINVAL; +} + +/** + * ice_devlink_local_fwd_get - Get local_fwd parameter. + * @devlink: Pointer to the devlink instance. 
+ * @id: The parameter ID to set. + * @ctx: Context to store the parameter value. + * + * Return: Zero. + */ +static int ice_devlink_local_fwd_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct ice_port_info *pi; + const char *mode_str; + + pi = pf->hw.port_info; + mode_str = ice_devlink_local_fwd_mode_to_str(pi->local_fwd_mode); + snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s", mode_str); + + return 0; +} + +/** + * ice_devlink_local_fwd_set - Set local_fwd parameter. + * @devlink: Pointer to the devlink instance. + * @id: The parameter ID to set. + * @ctx: Context to get the parameter value. + * @extack: Netlink extended ACK structure. + * + * Return: Zero. + */ +static int ice_devlink_local_fwd_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) +{ + int new_local_fwd_mode = ice_devlink_local_fwd_str_to_mode(ctx->val.vstr); + struct ice_pf *pf = devlink_priv(devlink); + struct device *dev = ice_pf_to_dev(pf); + struct ice_port_info *pi; + + pi = pf->hw.port_info; + if (pi->local_fwd_mode != new_local_fwd_mode) { + pi->local_fwd_mode = new_local_fwd_mode; + dev_info(dev, "Setting local_fwd to %s\n", ctx->val.vstr); + ice_schedule_reset(pf, ICE_RESET_CORER); + } + + return 0; +} + +/** + * ice_devlink_local_fwd_validate - Validate passed local_fwd parameter value. + * @devlink: Unused pointer to devlink instance. + * @id: The parameter ID to validate. + * @val: Value to validate. + * @extack: Netlink extended ACK structure. + * + * Supported values are: + * "enabled" - local_fwd is enabled, "disabled" - local_fwd is disabled + * "prioritized" - local_fwd traffic is prioritized in scheduling. + * + * Return: Zero when passed parameter value is supported. Negative value on + * error. 
+ */ +static int ice_devlink_local_fwd_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + if (ice_devlink_local_fwd_str_to_mode(val.vstr) < 0) { + NL_SET_ERR_MSG_MOD(extack, "Error: Requested value is not supported."); + return -EINVAL; + } + + return 0; +} + enum ice_param_id { ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS, + ICE_DEVLINK_PARAM_ID_LOCAL_FWD, }; static const struct devlink_param ice_dvl_rdma_params[] = { @@ -1407,6 +1525,12 @@ static const struct devlink_param ice_dvl_sched_params[] = { ice_devlink_tx_sched_layers_get, ice_devlink_tx_sched_layers_set, ice_devlink_tx_sched_layers_validate), + DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_LOCAL_FWD, + "local_forwarding", DEVLINK_PARAM_TYPE_STRING, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + ice_devlink_local_fwd_get, + ice_devlink_local_fwd_set, + ice_devlink_local_fwd_validate), }; static void ice_devlink_free(void *devlink_ptr) diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c index 13e6790d3cae..00fed5a61d62 100644 --- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c +++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c @@ -373,6 +373,62 @@ void ice_devlink_destroy_pf_port(struct ice_pf *pf) } /** + * ice_devlink_port_get_vf_fn_mac - .port_fn_hw_addr_get devlink handler + * @port: devlink port structure + * @hw_addr: MAC address of the port + * @hw_addr_len: length of MAC address + * @extack: extended netdev ack structure + * + * Callback for the devlink .port_fn_hw_addr_get operation + * Return: zero on success or an error code on failure. + */ +static int ice_devlink_port_get_vf_fn_mac(struct devlink_port *port, + u8 *hw_addr, int *hw_addr_len, + struct netlink_ext_ack *extack) +{ + struct ice_vf *vf = container_of(port, struct ice_vf, devlink_port); + + ether_addr_copy(hw_addr, vf->dev_lan_addr); + *hw_addr_len = ETH_ALEN; + + return 0; +} + +/** + * ice_devlink_port_set_vf_fn_mac - .port_fn_hw_addr_set devlink handler + * @port: devlink port structure + * @hw_addr: MAC address of the port + * @hw_addr_len: length of MAC address + * @extack: extended netdev ack structure + * + * Callback for the devlink .port_fn_hw_addr_set operation + * Return: zero on success or an error code on failure. 
+ */ +static int ice_devlink_port_set_vf_fn_mac(struct devlink_port *port, + const u8 *hw_addr, + int hw_addr_len, + struct netlink_ext_ack *extack) + +{ + struct devlink_port_attrs *attrs = &port->attrs; + struct devlink_port_pci_vf_attrs *pci_vf; + struct devlink *devlink = port->devlink; + struct ice_pf *pf; + u16 vf_id; + + pf = devlink_priv(devlink); + pci_vf = &attrs->pci_vf; + vf_id = pci_vf->vf; + + return __ice_set_vf_mac(pf, vf_id, hw_addr); +} + +static const struct devlink_port_ops ice_devlink_vf_port_ops = { + .port_fn_hw_addr_get = ice_devlink_port_get_vf_fn_mac, + .port_fn_hw_addr_set = ice_devlink_port_set_vf_fn_mac, +}; + +/** * ice_devlink_create_vf_port - Create a devlink port for this VF * @vf: the VF to create a port for * @@ -407,7 +463,8 @@ int ice_devlink_create_vf_port(struct ice_vf *vf) devlink_port_attrs_set(devlink_port, &attrs); devlink = priv_to_devlink(pf); - err = devlink_port_register(devlink, devlink_port, vsi->idx); + err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx, + &ice_devlink_vf_port_ops); if (err) { dev_err(dev, "Failed to create devlink port for VF %d, error %d\n", vf->vf_id, err); @@ -426,5 +483,5 @@ int ice_devlink_create_vf_port(struct ice_vf *vf) void ice_devlink_destroy_vf_port(struct ice_vf *vf) { devl_rate_leaf_destroy(&vf->devlink_port); - devlink_port_unregister(&vf->devlink_port); + devl_port_unregister(&vf->devlink_port); } diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c index 52d15ef7f4b1..ad84d8ad49a6 100644 --- a/drivers/net/ethernet/intel/ice/ice_adapter.c +++ b/drivers/net/ethernet/intel/ice/ice_adapter.c @@ -11,6 +11,7 @@ #include "ice_adapter.h" static DEFINE_XARRAY(ice_adapters); +static DEFINE_MUTEX(ice_adapters_mutex); /* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. */ #define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13) @@ -47,8 +48,6 @@ static void ice_adapter_free(struct ice_adapter *adapter) kfree(adapter); } -DEFINE_FREE(ice_adapter_free, struct ice_adapter*, if (_T) ice_adapter_free(_T)) - /** * ice_adapter_get - Get a shared ice_adapter structure. * @pdev: Pointer to the pci_dev whose driver is getting the ice_adapter. @@ -64,27 +63,26 @@ DEFINE_FREE(ice_adapter_free, struct ice_adapter*, if (_T) ice_adapter_free(_T)) */ struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev) { - struct ice_adapter *ret, __free(ice_adapter_free) *adapter = NULL; unsigned long index = ice_adapter_index(pdev); - - adapter = ice_adapter_new(); - if (!adapter) - return ERR_PTR(-ENOMEM); - - xa_lock(&ice_adapters); - ret = __xa_cmpxchg(&ice_adapters, index, NULL, adapter, GFP_KERNEL); - if (xa_is_err(ret)) { - ret = ERR_PTR(xa_err(ret)); - goto unlock; - } - if (ret) { - refcount_inc(&ret->refcount); - goto unlock; + struct ice_adapter *adapter; + int err; + + scoped_guard(mutex, &ice_adapters_mutex) { + err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL); + if (err == -EBUSY) { + adapter = xa_load(&ice_adapters, index); + refcount_inc(&adapter->refcount); + return adapter; + } + if (err) + return ERR_PTR(err); + + adapter = ice_adapter_new(); + if (!adapter) + return ERR_PTR(-ENOMEM); + xa_store(&ice_adapters, index, adapter, GFP_KERNEL); } - ret = no_free_ptr(adapter); -unlock: - xa_unlock(&ice_adapters); - return ret; + return adapter; } /** @@ -94,23 +92,21 @@ unlock: * Releases the reference to ice_adapter previously obtained with * ice_adapter_get. * - * Context: Any. + * Context: Process, may sleep. 
*/ void ice_adapter_put(const struct pci_dev *pdev) { unsigned long index = ice_adapter_index(pdev); struct ice_adapter *adapter; - xa_lock(&ice_adapters); - adapter = xa_load(&ice_adapters, index); - if (WARN_ON(!adapter)) - goto unlock; + scoped_guard(mutex, &ice_adapters_mutex) { + adapter = xa_load(&ice_adapters, index); + if (WARN_ON(!adapter)) + return; + if (!refcount_dec_and_test(&adapter->refcount)) + return; - if (!refcount_dec_and_test(&adapter->refcount)) - goto unlock; - - WARN_ON(__xa_erase(&ice_adapters, index) != adapter); + WARN_ON(xa_erase(&ice_adapters, index) != adapter); + } ice_adapter_free(adapter); -unlock: - xa_unlock(&ice_adapters); } diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index e76c388b9905..66f02988d549 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -122,6 +122,7 @@ struct ice_aqc_list_caps_elem { #define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077 #define ICE_AQC_CAPS_NVM_MGMT 0x0080 #define ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085 +#define ICE_AQC_CAPS_NAC_TOPOLOGY 0x0087 #define ICE_AQC_CAPS_FW_LAG_SUPPORT 0x0092 #define ICE_AQC_BIT_ROCEV2_LAG 0x01 #define ICE_AQC_BIT_SRIOV_LAG 0x02 @@ -231,6 +232,13 @@ struct ice_aqc_get_sw_cfg_resp_elem { #define ICE_AQC_GET_SW_CONF_RESP_IS_VF BIT(15) }; +/* Loopback port parameter mode values. */ +enum ice_local_fwd_mode { + ICE_LOCAL_FWD_MODE_ENABLED = 0, + ICE_LOCAL_FWD_MODE_DISABLED = 1, + ICE_LOCAL_FWD_MODE_PRIORITIZED = 2, +}; + /* Set Port parameters, (direct, 0x0203) */ struct ice_aqc_set_port_params { __le16 cmd_flags; @@ -239,7 +247,9 @@ struct ice_aqc_set_port_params { __le16 swid; #define ICE_AQC_PORT_SWID_VALID BIT(15) #define ICE_AQC_PORT_SWID_M 0xFF - u8 reserved[10]; + u8 local_fwd_mode; +#define ICE_AQC_SET_P_PARAMS_LOCAL_FWD_MODE_VALID BIT(2) + u8 reserved[9]; }; /* These resource type defines are used for all switch resource @@ -1460,6 +1470,55 @@ struct ice_aqc_get_sensor_reading_resp { } data; }; +/* DNL call command (indirect 0x0682) + * Struct is used for both command and response + */ +struct ice_aqc_dnl_call_command { + u8 ctx; /* Used in command, reserved in response */ + u8 reserved; + __le16 activity_id; +#define ICE_AQC_ACT_ID_DNL 0x1129 + __le32 reserved1; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_dnl_equa_param { + __le16 data_in; +#define ICE_AQC_RX_EQU_SHIFT 8 +#define ICE_AQC_RX_EQU_PRE2 (0x10 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_PRE1 (0x11 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_POST1 (0x12 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_BFLF (0x13 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_BFHF (0x14 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_DRATE (0x15 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_TX_EQU_PRE1 0x0 +#define ICE_AQC_TX_EQU_PRE3 0x3 +#define ICE_AQC_TX_EQU_ATTEN 0x4 +#define ICE_AQC_TX_EQU_POST1 0x8 +#define ICE_AQC_TX_EQU_PRE2 0xC + __le16 op_code_serdes_sel; +#define ICE_AQC_OP_CODE_SHIFT 4 +#define ICE_AQC_OP_CODE_RX_EQU (0x9 << ICE_AQC_OP_CODE_SHIFT) +#define ICE_AQC_OP_CODE_TX_EQU (0x10 << ICE_AQC_OP_CODE_SHIFT) + __le32 reserved[3]; +}; + +struct ice_aqc_dnl_equa_respon { + /* Equalization value can be negative */ + int val; + __le32 reserved[3]; +}; + +/* DNL call command/response buffer (indirect 0x0682) */ +struct ice_aqc_dnl_call { + union { + struct ice_aqc_dnl_equa_param txrx_equa_reqs; + __le32 stores[4]; + struct ice_aqc_dnl_equa_respon txrx_equa_resp; + } sto; +}; 
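+
+/* A DNL equalization request is built by placing one of the ICE_AQC_*_EQU_*
+ * parameter selectors in data_in and OR-ing the opcode with the serdes lane
+ * number into op_code_serdes_sel, e.g. (illustrative values only):
+ *
+ *   buf.sto.txrx_equa_reqs.data_in = cpu_to_le16(ICE_AQC_RX_EQU_PRE1);
+ *   buf.sto.txrx_equa_reqs.op_code_serdes_sel =
+ *           cpu_to_le16(ICE_AQC_OP_CODE_RX_EQU | (serdes_num & 0xF));
+ *
+ * See ice_aq_get_phy_equalization() for the full descriptor setup.
+ */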
+ struct ice_aqc_link_topo_params { u8 lport_num; u8 lport_num_valid; @@ -2563,6 +2622,7 @@ struct ice_aq_desc { struct ice_aqc_get_link_status get_link_status; struct ice_aqc_event_lan_overflow lan_overflow; struct ice_aqc_get_link_topo get_link_topo; + struct ice_aqc_dnl_call_command dnl_call; struct ice_aqc_i2c read_write_i2c; struct ice_aqc_read_i2c_resp read_i2c_resp; struct ice_aqc_get_set_tx_topo get_set_tx_topo; @@ -2687,6 +2747,7 @@ enum ice_adminq_opc { ice_aqc_opc_set_phy_rec_clk_out = 0x0630, ice_aqc_opc_get_phy_rec_clk_out = 0x0631, ice_aqc_opc_get_sensor_reading = 0x0632, + ice_aqc_opc_dnl_call = 0x0682, ice_aqc_opc_get_link_topo = 0x06E0, ice_aqc_opc_read_i2c = 0x06E2, ice_aqc_opc_write_i2c = 0x06E3, diff --git a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h index 57abd52386d0..10d9d74f3545 100644 --- a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h +++ b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h @@ -23,7 +23,18 @@ union nac_cgu_dword9 { u32 clk_synce0_amp : 2; u32 one_pps_out_amp : 2; u32 misc24 : 12; - } field; + }; + u32 val; +}; + +#define NAC_CGU_DWORD16_E825C 0x40 +union nac_cgu_dword16_e825c { + struct { + u32 synce_remndr : 6; + u32 synce_phlmt_en : 1; + u32 misc13 : 17; + u32 tspll_ck_refclkfreq : 8; + }; u32 val; }; @@ -39,7 +50,7 @@ union nac_cgu_dword19 { u32 japll_ndivratio : 4; u32 japll_iref_ndivratio : 3; u32 misc27 : 1; - } field; + }; u32 val; }; @@ -63,7 +74,23 @@ union nac_cgu_dword22 { u32 fdpllclk_sel_div2 : 1; u32 time1588clk_sel_div2 : 1; u32 misc3 : 1; - } field; + }; + u32 val; +}; + +#define NAC_CGU_DWORD23_E825C 0x5C +union nac_cgu_dword23_e825c { + struct { + u32 cgupll_fbdiv_intgr : 10; + u32 ux56pll_fbdiv_intgr : 10; + u32 misc20 : 4; + u32 ts_pll_enable : 1; + u32 time_sync_tspll_align_sel : 1; + u32 ext_synce_sel : 1; + u32 ref1588_ck_div : 4; + u32 time_ref_sel : 1; + + }; u32 val; }; @@ -77,7 +104,7 @@ union nac_cgu_dword24 { u32 ext_synce_sel : 1; u32 ref1588_ck_div : 4; u32 time_ref_sel : 1; - } field; + }; u32 val; }; @@ -92,7 +119,7 @@ union tspll_cntr_bist_settings { u32 i_plllock_cnt_6_0 : 7; u32 i_plllock_cnt_10_7 : 4; u32 reserved200 : 4; - } field; + }; u32 val; }; @@ -109,7 +136,45 @@ union tspll_ro_bwm_lf { u32 afcdone_cri : 1; u32 feedfwrdgain_cal_cri_7_0 : 8; u32 m2fbdivmod_cri_7_0 : 8; - } field; + }; + u32 val; +}; + +#define TSPLL_RO_LOCK_E825C 0x3f0 +union tspll_ro_lock_e825c { + struct { + u32 bw_freqov_high_cri_7_0 : 8; + u32 bw_freqov_high_cri_9_8 : 2; + u32 reserved455 : 1; + u32 plllock_gain_tran_cri : 1; + u32 plllock_true_lock_cri : 1; + u32 pllunlock_flag_cri : 1; + u32 afcerr_cri : 1; + u32 afcdone_cri : 1; + u32 feedfwrdgain_cal_cri_7_0 : 8; + u32 reserved462 : 8; + }; + u32 val; +}; + +#define TSPLL_BW_TDC_E825C 0x31c +union tspll_bw_tdc_e825c { + struct { + u32 i_tdc_offset_lock_1_0 : 2; + u32 i_bbthresh1_2_0 : 3; + u32 i_bbthresh2_2_0 : 3; + u32 i_tdcsel_1_0 : 2; + u32 i_tdcovccorr_en_h : 1; + u32 i_divretimeren : 1; + u32 i_bw_ampmeas_window : 1; + u32 i_bw_lowerbound_2_0 : 3; + u32 i_bw_upperbound_2_0 : 3; + u32 i_bw_mode_1_0 : 2; + u32 i_ft_mode_sel_2_0 : 3; + u32 i_bwphase_4_0 : 5; + u32 i_plllock_sel_1_0 : 2; + u32 i_afc_divratio : 1; + }; u32 val; }; diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 24716a3b494c..009716a12a26 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -240,6 +240,30 @@ bool ice_is_e810t(struct ice_hw *hw) } /** + * 
ice_is_e822 - Check if a device is E822 family device + * @hw: pointer to the hardware structure + * + * Return: true if the device is E822 based, false if not. + */ +bool ice_is_e822(struct ice_hw *hw) +{ + switch (hw->device_id) { + case ICE_DEV_ID_E822C_BACKPLANE: + case ICE_DEV_ID_E822C_QSFP: + case ICE_DEV_ID_E822C_SFP: + case ICE_DEV_ID_E822C_10G_BASE_T: + case ICE_DEV_ID_E822C_SGMII: + case ICE_DEV_ID_E822L_BACKPLANE: + case ICE_DEV_ID_E822L_SFP: + case ICE_DEV_ID_E822L_10G_BASE_T: + case ICE_DEV_ID_E822L_SGMII: + return true; + default: + return false; + } +} + +/** * ice_is_e823 * @hw: pointer to the hardware structure * @@ -910,6 +934,9 @@ static int ice_init_fltr_mgmt_struct(struct ice_hw *hw) INIT_LIST_HEAD(&sw->vsi_list_map_head); sw->prof_res_bm_init = 0; + /* Initialize recipe count with default recipes read from NVM */ + sw->recp_cnt = ICE_SW_LKUP_LAST; + status = ice_init_def_sw_recp(hw); if (status) { devm_kfree(ice_hw_to_dev(hw), hw->switch_info); @@ -937,14 +964,7 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) } recps = sw->recp_list; for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { - struct ice_recp_grp_entry *rg_entry, *tmprg_entry; - recps[i].root_rid = i; - list_for_each_entry_safe(rg_entry, tmprg_entry, - &recps[i].rg_list, l_entry) { - list_del(&rg_entry->l_entry); - devm_kfree(ice_hw_to_dev(hw), rg_entry); - } if (recps[i].adv_rule) { struct ice_adv_fltr_mgmt_list_entry *tmp_entry; @@ -969,7 +989,6 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) devm_kfree(ice_hw_to_dev(hw), lst_itr); } } - devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf); } ice_rm_all_sw_replay_rule_info(hw); devm_kfree(ice_hw_to_dev(hw), sw->recp_list); @@ -1062,6 +1081,7 @@ int ice_init_hw(struct ice_hw *hw) goto err_unroll_cqinit; } + hw->port_info->local_fwd_mode = ICE_LOCAL_FWD_MODE_ENABLED; /* set the back pointer to HW */ hw->port_info->hw = hw; @@ -1473,8 +1493,9 @@ ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc, * ice_sbq_rw_reg - Fill Sideband Queue command * @hw: pointer to the HW struct * @in: message info to be filled in descriptor + * @flags: control queue descriptor flags */ -int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in) +int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flags) { struct ice_sbq_cmd_desc desc = {0}; struct ice_sbq_msg_req msg = {0}; @@ -1498,7 +1519,7 @@ int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in) */ msg_len -= sizeof(msg.data); - desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD); + desc.flags = cpu_to_le16(flags); desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req); desc.param0.cmd_len = cpu_to_le16(msg_len); status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL); @@ -2290,8 +2311,13 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0); info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0); - info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number); - info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); + if (!ice_is_e825c(hw)) { + info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number); + info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); + } else { + info->clk_freq = ICE_TIME_REF_FREQ_156_250; + info->clk_src = ICE_CLK_SRC_TCXO; + } if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) { info->time_ref = (enum ice_time_ref_freq)info->clk_freq; @@ -2565,6 +2591,34 @@ ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, } /** + * 
ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap + * @hw: pointer to the HW struct + * @dev_p: pointer to device capabilities structure + * @cap: capability element to parse + * + * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities. + */ +static void ice_parse_nac_topo_dev_caps(struct ice_hw *hw, + struct ice_hw_dev_caps *dev_p, + struct ice_aqc_list_caps_elem *cap) +{ + dev_p->nac_topo.mode = le32_to_cpu(cap->number); + dev_p->nac_topo.id = le32_to_cpu(cap->phys_id) & ICE_NAC_TOPO_ID_M; + + dev_info(ice_hw_to_dev(hw), + "PF is configured in %s mode with IP instance ID %d\n", + (dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) ? + "primary" : "secondary", dev_p->nac_topo.id); + + ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n", + !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M)); + ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n", + !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M)); + ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n", + dev_p->nac_topo.id); +} + +/** * ice_parse_dev_caps - Parse device capabilities * @hw: pointer to the HW struct * @dev_p: pointer to device capabilities structure @@ -2615,6 +2669,9 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, case ICE_AQC_CAPS_SENSOR_READING: ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]); break; + case ICE_AQC_CAPS_NAC_TOPOLOGY: + ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]); + break; default: /* Don't list common capabilities as unknown */ if (!found) @@ -3010,6 +3067,9 @@ ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan, cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA; cmd->cmd_flags = cpu_to_le16(cmd_flags); + cmd->local_fwd_mode = pi->local_fwd_mode | + ICE_AQC_SET_P_PARAMS_LOCAL_FWD_MODE_VALID; + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } @@ -3043,11 +3103,13 @@ bool ice_is_100m_speed_supported(struct ice_hw *hw) * Note: In the structure of [phy_type_low, phy_type_high], there should * be one bit set, as this function will convert one PHY type to its * speed. - * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned - * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned + * + * Return: + * * PHY speed for recognized PHY type + * * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned + * * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned */ -static u16 -ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) +u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) { u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; @@ -3309,6 +3371,100 @@ int ice_update_link_info(struct ice_port_info *pi) } /** + * ice_aq_get_phy_equalization - function to read serdes equaliser + * value from firmware using admin queue command. + * @hw: pointer to the HW struct + * @data_in: represents the serdes equalization parameter requested + * @op_code: represents the serdes number and flag to represent tx or rx + * @serdes_num: represents the serdes number + * @output: pointer to the caller-supplied buffer to return serdes equaliser + * + * Return: non-zero status on error and 0 on success. 
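+ *
+ * Illustrative call (assumes a valid struct ice_hw *hw; lane 0 is a
+ * placeholder) reading the RX pre-cursor 1 tap:
+ *
+ *   int val, err;
+ *
+ *   err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_PRE1,
+ *                                     ICE_AQC_OP_CODE_RX_EQU, 0, &val);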
+ */ +int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code, + u8 serdes_num, int *output) +{ + struct ice_aqc_dnl_call_command *cmd; + struct ice_aqc_dnl_call buf = {}; + struct ice_aq_desc desc; + int err; + + buf.sto.txrx_equa_reqs.data_in = cpu_to_le16(data_in); + buf.sto.txrx_equa_reqs.op_code_serdes_sel = + cpu_to_le16(op_code | (serdes_num & 0xF)); + cmd = &desc.params.dnl_call; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dnl_call); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF | + ICE_AQ_FLAG_RD | + ICE_AQ_FLAG_SI); + desc.datalen = cpu_to_le16(sizeof(struct ice_aqc_dnl_call)); + cmd->activity_id = cpu_to_le16(ICE_AQC_ACT_ID_DNL); + + err = ice_aq_send_cmd(hw, &desc, &buf, sizeof(struct ice_aqc_dnl_call), + NULL); + *output = err ? 0 : buf.sto.txrx_equa_resp.val; + + return err; +} + +#define FEC_REG_PORT(port) { \ + FEC_CORR_LOW_REG_PORT##port, \ + FEC_CORR_HIGH_REG_PORT##port, \ + FEC_UNCORR_LOW_REG_PORT##port, \ + FEC_UNCORR_HIGH_REG_PORT##port, \ +} + +static const u32 fec_reg[][ICE_FEC_MAX] = { + FEC_REG_PORT(0), + FEC_REG_PORT(1), + FEC_REG_PORT(2), + FEC_REG_PORT(3) +}; + +/** + * ice_aq_get_fec_stats - reads fec stats from phy + * @hw: pointer to the HW struct + * @pcs_quad: represents pcsquad of user input serdes + * @pcs_port: represents the pcs port number part of above pcs quad + * @fec_type: represents FEC stats type + * @output: pointer to the caller-supplied buffer to return requested fec stats + * + * Return: non-zero status on error and 0 on success. + */ +int ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port, + enum ice_fec_stats_types fec_type, u32 *output) +{ + u16 flag = (ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF | ICE_AQ_FLAG_SI); + struct ice_sbq_msg_input msg = {}; + u32 receiver_id, reg_offset; + int err; + + if (pcs_port > 3) + return -EINVAL; + + reg_offset = fec_reg[pcs_port][fec_type]; + + if (pcs_quad == 0) + receiver_id = FEC_RECEIVER_ID_PCS0; + else if (pcs_quad == 1) + receiver_id = FEC_RECEIVER_ID_PCS1; + else + return -EINVAL; + + msg.msg_addr_low = lower_16_bits(reg_offset); + msg.msg_addr_high = receiver_id; + msg.opcode = ice_sbq_msg_rd; + msg.dest_dev = rmn_0; + + err = ice_sbq_rw_reg(hw, &msg, flag); + if (err) + return err; + + *output = msg.data; + return 0; +} + +/** * ice_cache_phy_user_req * @pi: port information structure * @cache_data: PHY logging data diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index ffb22c7ce28b..66f29bac783a 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -17,13 +17,34 @@ #define ICE_SQ_SEND_DELAY_TIME_MS 10 #define ICE_SQ_SEND_MAX_EXECUTE 3 +#define FEC_REG_SHIFT 2 +#define FEC_RECV_ID_SHIFT 4 +#define FEC_CORR_LOW_REG_PORT0 (0x02 << FEC_REG_SHIFT) +#define FEC_CORR_HIGH_REG_PORT0 (0x03 << FEC_REG_SHIFT) +#define FEC_UNCORR_LOW_REG_PORT0 (0x04 << FEC_REG_SHIFT) +#define FEC_UNCORR_HIGH_REG_PORT0 (0x05 << FEC_REG_SHIFT) +#define FEC_CORR_LOW_REG_PORT1 (0x42 << FEC_REG_SHIFT) +#define FEC_CORR_HIGH_REG_PORT1 (0x43 << FEC_REG_SHIFT) +#define FEC_UNCORR_LOW_REG_PORT1 (0x44 << FEC_REG_SHIFT) +#define FEC_UNCORR_HIGH_REG_PORT1 (0x45 << FEC_REG_SHIFT) +#define FEC_CORR_LOW_REG_PORT2 (0x4A << FEC_REG_SHIFT) +#define FEC_CORR_HIGH_REG_PORT2 (0x4B << FEC_REG_SHIFT) +#define FEC_UNCORR_LOW_REG_PORT2 (0x4C << FEC_REG_SHIFT) +#define FEC_UNCORR_HIGH_REG_PORT2 (0x4D << FEC_REG_SHIFT) +#define FEC_CORR_LOW_REG_PORT3 (0x52 << FEC_REG_SHIFT) +#define FEC_CORR_HIGH_REG_PORT3 
(0x53 << FEC_REG_SHIFT) +#define FEC_UNCORR_LOW_REG_PORT3 (0x54 << FEC_REG_SHIFT) +#define FEC_UNCORR_HIGH_REG_PORT3 (0x55 << FEC_REG_SHIFT) +#define FEC_RECEIVER_ID_PCS0 (0x33 << FEC_RECV_ID_SHIFT) +#define FEC_RECEIVER_ID_PCS1 (0x34 << FEC_RECV_ID_SHIFT) + int ice_init_hw(struct ice_hw *hw); void ice_deinit_hw(struct ice_hw *hw); int ice_check_reset(struct ice_hw *hw); int ice_reset(struct ice_hw *hw, enum ice_reset_req req); int ice_create_all_ctrlq(struct ice_hw *hw); int ice_init_all_ctrlq(struct ice_hw *hw); -void ice_shutdown_all_ctrlq(struct ice_hw *hw); +void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading); void ice_destroy_all_ctrlq(struct ice_hw *hw); int ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, @@ -121,6 +142,11 @@ int ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, struct ice_port_info *pi); bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps); +int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code, + u8 serdes_num, int *output); +int +ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port, + enum ice_fec_stats_types fec_type, u32 *output); enum ice_fc_mode ice_caps_to_fc_mode(u8 caps); enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options); @@ -201,7 +227,7 @@ int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); void ice_replay_post(struct ice_hw *hw); struct ice_q_ctx * ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle); -int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in); +int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flag); int ice_aq_get_cgu_abilities(struct ice_hw *hw, struct ice_aqc_get_cgu_abilities *abilities); @@ -249,6 +275,7 @@ void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); bool ice_is_e810t(struct ice_hw *hw); +bool ice_is_e822(struct ice_hw *hw); bool ice_is_e823(struct ice_hw *hw); bool ice_is_e825c(struct ice_hw *hw); int @@ -261,6 +288,7 @@ int ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool *value, struct ice_sq_cd *cd); bool ice_is_100m_speed_supported(struct ice_hw *hw); +u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high); int ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, struct ice_sq_cd *cd); diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index ffe660f34992..ffaa6511c455 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -510,16 +510,19 @@ shutdown_sq_out: */ static bool ice_aq_ver_check(struct ice_hw *hw) { - if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) { + u8 exp_fw_api_ver_major = EXP_FW_API_VER_MAJOR_BY_MAC(hw); + u8 exp_fw_api_ver_minor = EXP_FW_API_VER_MINOR_BY_MAC(hw); + + if (hw->api_maj_ver > exp_fw_api_ver_major) { /* Major API version is newer than expected, don't load */ dev_warn(ice_hw_to_dev(hw), "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); return false; - } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) { - if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2)) + } else if (hw->api_maj_ver == exp_fw_api_ver_major) { + if (hw->api_min_ver > (exp_fw_api_ver_minor + 2)) dev_info(ice_hw_to_dev(hw), "The driver for the device detected a newer version of the NVM image than expected. 
Please install the most recent version of the network driver.\n"); - else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR) + else if ((hw->api_min_ver + 2) < exp_fw_api_ver_minor) dev_info(ice_hw_to_dev(hw), "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); } else { @@ -684,10 +687,12 @@ struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw) * ice_shutdown_ctrlq - shutdown routine for any control queue * @hw: pointer to the hardware structure * @q_type: specific Control queue type + * @unloading: is the driver unloading itself * * NOTE: this function does not destroy the control queue locks. */ -static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) +static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type, + bool unloading) { struct ice_ctl_q_info *cq; @@ -695,7 +700,7 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) case ICE_CTL_Q_ADMIN: cq = &hw->adminq; if (ice_check_sq_alive(hw, cq)) - ice_aq_q_shutdown(hw, true); + ice_aq_q_shutdown(hw, unloading); break; case ICE_CTL_Q_SB: cq = &hw->sbq; @@ -714,20 +719,21 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) /** * ice_shutdown_all_ctrlq - shutdown routine for all control queues * @hw: pointer to the hardware structure + * @unloading: is the driver unloading itself * * NOTE: this function does not destroy the control queue locks. The driver * may call this at runtime to shutdown and later restart control queues, such * as in response to a reset event. */ -void ice_shutdown_all_ctrlq(struct ice_hw *hw) +void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading) { /* Shutdown FW admin queue */ - ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); + ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading); /* Shutdown PHY Sideband */ if (ice_is_sbq_supported(hw)) - ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB); + ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB, unloading); /* Shutdown PF-VF Mailbox */ - ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX); + ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading); } /** @@ -759,7 +765,7 @@ int ice_init_all_ctrlq(struct ice_hw *hw) break; ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n"); - ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); + ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true); msleep(ICE_CTL_Q_ADMIN_INIT_MSEC); } while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT); @@ -840,7 +846,7 @@ static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq) void ice_destroy_all_ctrlq(struct ice_hw *hw) { /* shut down all the control queues first */ - ice_shutdown_all_ctrlq(hw); + ice_shutdown_all_ctrlq(hw, true); ice_destroy_ctrlq_locks(&hw->adminq); if (ice_is_sbq_supported(hw)) diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h index 8f2fd1613a95..1d54b1cdb1c5 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.h +++ b/drivers/net/ethernet/intel/ice/ice_controlq.h @@ -21,9 +21,18 @@ /* Defines that help manage the driver vs FW API checks. * Take a look at ice_aq_ver_check in ice_controlq.c for actual usage. */ -#define EXP_FW_API_VER_BRANCH 0x00 -#define EXP_FW_API_VER_MAJOR 0x01 -#define EXP_FW_API_VER_MINOR 0x05 +#define EXP_FW_API_VER_MAJOR_E810 0x01 +#define EXP_FW_API_VER_MINOR_E810 0x05 + +#define EXP_FW_API_VER_MAJOR_E830 0x01 +#define EXP_FW_API_VER_MINOR_E830 0x07 + +#define EXP_FW_API_VER_MAJOR_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? 
\ + EXP_FW_API_VER_MAJOR_E830 : \ + EXP_FW_API_VER_MAJOR_E810) +#define EXP_FW_API_VER_MINOR_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \ + EXP_FW_API_VER_MINOR_E830 : \ + EXP_FW_API_VER_MINOR_E810) /* Different control queue types: These are mainly for SW consumption. */ enum ice_ctl_q { diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index b102db8b829a..3cfa071e3718 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -117,17 +117,10 @@ static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr) struct ice_vsi *vsi = repr->src_vsi; struct metadata_dst *dst; - ice_remove_vsi_fltr(&pf->hw, vsi->idx); repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL); if (!repr->dst) - goto err_add_mac_fltr; - - if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) - goto err_dst_free; - - if (ice_vsi_add_vlan_zero(vsi)) - goto err_update_security; + return -ENOMEM; netif_keep_dst(uplink_vsi->netdev); @@ -136,16 +129,48 @@ static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr) dst->u.port_info.lower_dev = uplink_vsi->netdev; return 0; +} -err_update_security: +/** + * ice_eswitch_cfg_vsi - configure VSI to work in slow-path + * @vsi: VSI structure of representee + * @mac: representee MAC + * + * Return: 0 on success, non-zero on error. + */ +int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac) +{ + int err; + + ice_remove_vsi_fltr(&vsi->back->hw, vsi->idx); + + err = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof); + if (err) + goto err_update_security; + + err = ice_vsi_add_vlan_zero(vsi); + if (err) + goto err_vlan_zero; + + return 0; + +err_vlan_zero: ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); -err_dst_free: - metadata_dst_free(repr->dst); - repr->dst = NULL; -err_add_mac_fltr: - ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, ICE_FWD_TO_VSI); +err_update_security: + ice_fltr_add_mac_and_broadcast(vsi, mac, ICE_FWD_TO_VSI); - return -ENODEV; + return err; +} + +/** + * ice_eswitch_decfg_vsi - unroll changes done to VSI for switchdev + * @vsi: VSI structure of representee + * @mac: representee MAC + */ +void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac) +{ + ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); + ice_fltr_add_mac_and_broadcast(vsi, mac, ICE_FWD_TO_VSI); } /** @@ -153,16 +178,16 @@ err_add_mac_fltr: * @repr_id: representor ID * @vsi: VSI for which port representor is configured */ -void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) +void ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; struct ice_repr *repr; - int ret; + int err; if (!ice_is_switchdev_running(pf)) return; - repr = xa_load(&pf->eswitch.reprs, repr_id); + repr = xa_load(&pf->eswitch.reprs, *repr_id); if (!repr) return; @@ -172,12 +197,19 @@ void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) if (repr->br_port) repr->br_port->vsi = vsi; - ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof); - if (ret) { - ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, - ICE_FWD_TO_VSI); + err = ice_eswitch_cfg_vsi(vsi, repr->parent_mac); + if (err) dev_err(ice_pf_to_dev(pf), "Failed to update VSI of port representor %d", repr->id); + + /* The VSI number is different, reload the PR with new id */ + if (repr->id != vsi->vsi_num) { + xa_erase(&pf->eswitch.reprs, repr->id); + repr->id = vsi->vsi_num; + 
if (xa_insert(&pf->eswitch.reprs, repr->id, repr, GFP_KERNEL)) + dev_err(ice_pf_to_dev(pf), "Failed to reload port representor %d", + repr->id); + *repr_id = repr->id; } } @@ -423,6 +455,7 @@ static void ice_eswitch_start_reprs(struct ice_pf *pf) int ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) { + struct devlink *devlink = priv_to_devlink(pf); struct ice_repr *repr; int err; @@ -437,7 +470,9 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) ice_eswitch_stop_reprs(pf); + devl_lock(devlink); repr = ice_repr_add_vf(vf); + devl_unlock(devlink); if (IS_ERR(repr)) { err = PTR_ERR(repr); goto err_create_repr; @@ -460,7 +495,9 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) err_xa_alloc: ice_eswitch_release_repr(pf, repr); err_setup_repr: + devl_lock(devlink); ice_repr_rem_vf(repr); + devl_unlock(devlink); err_create_repr: if (xa_empty(&pf->eswitch.reprs)) ice_eswitch_disable_switchdev(pf); @@ -484,6 +521,7 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) ice_eswitch_disable_switchdev(pf); ice_eswitch_release_repr(pf, repr); + devl_lock(devlink); ice_repr_rem_vf(repr); if (xa_empty(&pf->eswitch.reprs)) { @@ -491,28 +529,11 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) * no point in keeping the nodes */ ice_devlink_rate_clear_tx_topology(ice_get_main_vsi(pf)); - devl_lock(devlink); devl_rate_nodes_destroy(devlink); - devl_unlock(devlink); } else { ice_eswitch_start_reprs(pf); } -} - -/** - * ice_eswitch_rebuild - rebuild eswitch - * @pf: pointer to PF structure - */ -void ice_eswitch_rebuild(struct ice_pf *pf) -{ - struct ice_repr *repr; - unsigned long id; - - if (!ice_is_switchdev_running(pf)) - return; - - xa_for_each(&pf->eswitch.reprs, id, repr) - ice_eswitch_detach(pf, repr->vf); + devl_unlock(devlink); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h index e2e5c0c75e7d..78fd39a6935d 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.h +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h @@ -10,7 +10,6 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf); int ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf); -void ice_eswitch_rebuild(struct ice_pf *pf); int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode); int @@ -18,7 +17,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode, struct netlink_ext_ack *extack); bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf); -void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi); +void ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi); void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf); @@ -28,6 +27,9 @@ netdev_tx_t ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev); struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc); + +int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac); +void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac); #else /* CONFIG_ICE_SWITCHDEV */ static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { } @@ -44,18 +46,13 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb, struct ice_tx_offload_params *off) { } static inline void -ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) { } +ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi) { } static inline int ice_eswitch_configure(struct ice_pf *pf) { return 0; } -static inline int ice_eswitch_rebuild(struct ice_pf *pf) -{ - return 
-EOPNOTSUPP; -} - static inline int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode) { return DEVLINK_ESWITCH_MODE_LEGACY; @@ -85,5 +82,12 @@ ice_eswitch_get_target(struct ice_rx_ring *rx_ring, { return rx_ring->netdev; } + +static inline int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac) +{ + return -EOPNOTSUPP; +} + +static inline void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac) { } #endif /* CONFIG_ICE_SWITCHDEV */ #endif /* _ICE_ESWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c index ac5beecd028b..f5aceb32bf4d 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c @@ -896,7 +896,8 @@ ice_eswitch_br_port_deinit(struct ice_esw_br *bridge, if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) { vsi->back->br_port = NULL; } else { - struct ice_repr *repr = ice_repr_get_by_vsi(vsi); + struct ice_repr *repr = + ice_repr_get(vsi->back, br_port->repr_id); if (repr) repr->br_port = NULL; @@ -937,6 +938,7 @@ ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge, br_port->vsi = repr->src_vsi; br_port->vsi_idx = br_port->vsi->idx; br_port->type = ICE_ESWITCH_BR_VF_REPR_PORT; + br_port->repr_id = repr->id; repr->br_port = br_port; err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL); diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.h b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h index 85a8fadb2928..c15c7344d7f8 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.h +++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h @@ -46,6 +46,7 @@ struct ice_esw_br_port { enum ice_esw_br_port_type type; u16 vsi_idx; u16 pvid; + u32 repr_id; struct xarray vlans; }; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 62c8205fceba..8c990c976132 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -463,7 +463,354 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) static int ice_get_regs_len(struct net_device __always_unused *netdev) { - return sizeof(ice_regs_dump_list); + return (sizeof(ice_regs_dump_list) + + sizeof(struct ice_regdump_to_ethtool)); +} + +/** + * ice_ethtool_get_maxspeed - Get the max speed for given lport + * @hw: pointer to the HW struct + * @lport: logical port for which max speed is requested + * @max_speed: return max speed for input lport + * + * Return: 0 on success, negative on failure. + */ +static int ice_ethtool_get_maxspeed(struct ice_hw *hw, u8 lport, u8 *max_speed) +{ + struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX] = {}; + bool active_valid = false, pending_valid = true; + u8 option_count = ICE_AQC_PORT_OPT_MAX; + u8 active_idx = 0, pending_idx = 0; + int status; + + status = ice_aq_get_port_options(hw, options, &option_count, lport, + true, &active_idx, &active_valid, + &pending_idx, &pending_valid); + if (status) + return -EIO; + if (!active_valid) + return -EINVAL; + + *max_speed = options[active_idx].max_lane_speed & ICE_AQC_PORT_OPT_MAX_LANE_M; + return 0; +} + +/** + * ice_is_serdes_muxed - returns whether serdes is muxed in hardware + * @hw: pointer to the HW struct + * + * Return: true when serdes is muxed, false when serdes is not muxed. 
+ */ +static bool ice_is_serdes_muxed(struct ice_hw *hw) +{ + u32 reg_value = rd32(hw, GLGEN_SWITCH_MODE_CONFIG); + + return FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, reg_value); +} + +static int ice_map_port_topology_for_sfp(struct ice_port_topology *port_topology, + u8 lport, bool is_muxed) +{ + switch (lport) { + case 0: + port_topology->pcs_quad_select = 0; + port_topology->pcs_port = 0; + port_topology->primary_serdes_lane = 0; + break; + case 1: + port_topology->pcs_quad_select = 1; + port_topology->pcs_port = 0; + if (is_muxed) + port_topology->primary_serdes_lane = 2; + else + port_topology->primary_serdes_lane = 4; + break; + case 2: + port_topology->pcs_quad_select = 0; + port_topology->pcs_port = 1; + port_topology->primary_serdes_lane = 1; + break; + case 3: + port_topology->pcs_quad_select = 1; + port_topology->pcs_port = 1; + if (is_muxed) + port_topology->primary_serdes_lane = 3; + else + port_topology->primary_serdes_lane = 5; + break; + case 4: + port_topology->pcs_quad_select = 0; + port_topology->pcs_port = 2; + port_topology->primary_serdes_lane = 2; + break; + case 5: + port_topology->pcs_quad_select = 1; + port_topology->pcs_port = 2; + port_topology->primary_serdes_lane = 6; + break; + case 6: + port_topology->pcs_quad_select = 0; + port_topology->pcs_port = 3; + port_topology->primary_serdes_lane = 3; + break; + case 7: + port_topology->pcs_quad_select = 1; + port_topology->pcs_port = 3; + port_topology->primary_serdes_lane = 7; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ice_map_port_topology_for_qsfp(struct ice_port_topology *port_topology, + u8 lport, bool is_muxed) +{ + switch (lport) { + case 0: + port_topology->pcs_quad_select = 0; + port_topology->pcs_port = 0; + port_topology->primary_serdes_lane = 0; + break; + case 1: + port_topology->pcs_quad_select = 1; + port_topology->pcs_port = 0; + if (is_muxed) + port_topology->primary_serdes_lane = 2; + else + port_topology->primary_serdes_lane = 4; + break; + case 2: + port_topology->pcs_quad_select = 0; + port_topology->pcs_port = 1; + port_topology->primary_serdes_lane = 1; + break; + case 3: + port_topology->pcs_quad_select = 1; + port_topology->pcs_port = 1; + if (is_muxed) + port_topology->primary_serdes_lane = 3; + else + port_topology->primary_serdes_lane = 5; + break; + case 4: + port_topology->pcs_quad_select = 0; + port_topology->pcs_port = 2; + port_topology->primary_serdes_lane = 2; + break; + case 5: + port_topology->pcs_quad_select = 1; + port_topology->pcs_port = 2; + port_topology->primary_serdes_lane = 6; + break; + case 6: + port_topology->pcs_quad_select = 0; + port_topology->pcs_port = 3; + port_topology->primary_serdes_lane = 3; + break; + case 7: + port_topology->pcs_quad_select = 1; + port_topology->pcs_port = 3; + port_topology->primary_serdes_lane = 7; + break; + default: + return -EINVAL; + } + + return 0; +} + +/** + * ice_get_port_topology - returns physical topology like pcsquad, pcsport, + * serdes number + * @hw: pointer to the HW struct + * @lport: logical port for which physical info requested + * @port_topology: buffer to hold port topology + * + * Return: 0 on success, negative on failure. 
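+ *
+ * For example, with an SFP cage and no serdes mux, lport 3 resolves to
+ * pcs_quad_select 1, pcs_port 1 and primary_serdes_lane 5 (see
+ * ice_map_port_topology_for_sfp()).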
+ */ +static int ice_get_port_topology(struct ice_hw *hw, u8 lport, + struct ice_port_topology *port_topology) +{ + struct ice_aqc_get_link_topo cmd = {}; + u16 node_handle = 0; + u8 cage_type = 0; + bool is_muxed; + int err; + u8 ctx; + + ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE << ICE_AQC_LINK_TOPO_NODE_TYPE_S; + ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S; + cmd.addr.topo_params.node_type_ctx = ctx; + + err = ice_aq_get_netlist_node(hw, &cmd, &cage_type, &node_handle); + if (err) + return -EINVAL; + + is_muxed = ice_is_serdes_muxed(hw); + + if (cage_type == 0x11 || /* SFP+ */ + cage_type == 0x12) { /* SFP28 */ + port_topology->serdes_lane_count = 1; + err = ice_map_port_topology_for_sfp(port_topology, lport, is_muxed); + if (err) + return err; + } else if (cage_type == 0x13 || /* QSFP */ + cage_type == 0x14) { /* QSFP28 */ + u8 max_speed = 0; + + err = ice_ethtool_get_maxspeed(hw, lport, &max_speed); + if (err) + return err; + + if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_100G) + port_topology->serdes_lane_count = 4; + else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G) + port_topology->serdes_lane_count = 2; + else + port_topology->serdes_lane_count = 1; + + err = ice_map_port_topology_for_qsfp(port_topology, lport, is_muxed); + if (err) + return err; + } else { + return -EINVAL; + } + + return 0; +} + +/** + * ice_get_tx_rx_equa - read serdes tx rx equaliser param + * @hw: pointer to the HW struct + * @serdes_num: represents the serdes number + * @ptr: structure to read all serdes parameter for given serdes + * + * Return: all serdes equalization parameter supported per serdes number + */ +static int ice_get_tx_rx_equa(struct ice_hw *hw, u8 serdes_num, + struct ice_serdes_equalization_to_ethtool *ptr) +{ + int err; + + err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE1, + ICE_AQC_OP_CODE_TX_EQU, serdes_num, + &ptr->tx_equalization_pre1); + if (err) + return err; + + err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE3, + ICE_AQC_OP_CODE_TX_EQU, serdes_num, + &ptr->tx_equalization_pre3); + if (err) + return err; + + err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_ATTEN, + ICE_AQC_OP_CODE_TX_EQU, serdes_num, + &ptr->tx_equalization_atten); + if (err) + return err; + + err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_POST1, + ICE_AQC_OP_CODE_TX_EQU, serdes_num, + &ptr->tx_equalization_post1); + if (err) + return err; + + err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE2, + ICE_AQC_OP_CODE_TX_EQU, serdes_num, + &ptr->tx_equalization_pre2); + if (err) + return err; + + err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_PRE2, + ICE_AQC_OP_CODE_RX_EQU, serdes_num, + &ptr->rx_equalization_pre2); + if (err) + return err; + + err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_PRE1, + ICE_AQC_OP_CODE_RX_EQU, serdes_num, + &ptr->rx_equalization_pre1); + if (err) + return err; + + err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_POST1, + ICE_AQC_OP_CODE_RX_EQU, serdes_num, + &ptr->rx_equalization_post1); + if (err) + return err; + + err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_BFLF, + ICE_AQC_OP_CODE_RX_EQU, serdes_num, + &ptr->rx_equalization_bflf); + if (err) + return err; + + err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_BFHF, + ICE_AQC_OP_CODE_RX_EQU, serdes_num, + &ptr->rx_equalization_bfhf); + if (err) + return err; + + err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_DRATE, + ICE_AQC_OP_CODE_RX_EQU, serdes_num, + &ptr->rx_equalization_drate); + if (err) + return err; + + return 0; +} + +/** + * 
ice_get_extended_regs - returns FEC correctable, uncorrectable stats per + * pcsquad, pcsport + * @netdev: pointer to net device structure + * @p: output buffer to fill requested register dump + * + * Return: 0 on success, negative on failure. + */ +static int ice_get_extended_regs(struct net_device *netdev, void *p) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_regdump_to_ethtool *ice_prv_regs_buf; + struct ice_port_topology port_topology = {}; + struct ice_port_info *pi; + struct ice_pf *pf; + struct ice_hw *hw; + unsigned int i; + int err; + + pf = np->vsi->back; + hw = &pf->hw; + pi = np->vsi->port_info; + + /* Serdes parameters are not supported if not the PF VSI */ + if (np->vsi->type != ICE_VSI_PF || !pi) + return -EINVAL; + + err = ice_get_port_topology(hw, pi->lport, &port_topology); + if (err) + return -EINVAL; + if (port_topology.serdes_lane_count > 4) + return -EINVAL; + + ice_prv_regs_buf = p; + + /* Get serdes equalization parameter for available serdes */ + for (i = 0; i < port_topology.serdes_lane_count; i++) { + u8 serdes_num = 0; + + serdes_num = port_topology.primary_serdes_lane + i; + err = ice_get_tx_rx_equa(hw, serdes_num, + &ice_prv_regs_buf->equalization[i]); + if (err) + return -EINVAL; + } + + return 0; } static void @@ -475,10 +822,12 @@ ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) u32 *regs_buf = (u32 *)p; unsigned int i; - regs->version = 1; + regs->version = 2; for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i) regs_buf[i] = rd32(hw, ice_regs_dump_list[i]); + + ice_get_extended_regs(netdev, (void *)®s_buf[i]); } static u32 ice_get_msglevel(struct net_device *netdev) @@ -3434,7 +3783,7 @@ ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh, } static int -ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) +ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info) { struct ice_pf *pf = ice_netdev_to_pf(dev); @@ -4282,6 +4631,94 @@ ice_get_module_eeprom(struct net_device *netdev, return 0; } +/** + * ice_get_port_fec_stats - returns FEC correctable, uncorrectable stats per + * pcsquad, pcsport + * @hw: pointer to the HW struct + * @pcs_quad: pcsquad for input port + * @pcs_port: pcsport for input port + * @fec_stats: buffer to hold FEC statistics for given port + * + * Return: 0 on success, negative on failure. 
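+ *
+ * Each counter is exposed by the PHY as a 16-bit low/high register pair read
+ * over the sideband queue; the two halves are combined into one 32-bit total.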
+ */ +static int ice_get_port_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port, + struct ethtool_fec_stats *fec_stats) +{ + u32 fec_uncorr_low_val = 0, fec_uncorr_high_val = 0; + u32 fec_corr_low_val = 0, fec_corr_high_val = 0; + int err; + + if (pcs_quad > 1 || pcs_port > 3) + return -EINVAL; + + err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_LOW, + &fec_corr_low_val); + if (err) + return err; + + err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_HIGH, + &fec_corr_high_val); + if (err) + return err; + + err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, + ICE_FEC_UNCORR_LOW, + &fec_uncorr_low_val); + if (err) + return err; + + err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, + ICE_FEC_UNCORR_HIGH, + &fec_uncorr_high_val); + if (err) + return err; + + fec_stats->uncorrectable_blocks.total = (fec_corr_high_val << 16) + + fec_corr_low_val; + fec_stats->corrected_blocks.total = (fec_uncorr_high_val << 16) + + fec_uncorr_low_val; + return 0; +} + +/** + * ice_get_fec_stats - returns FEC correctable, uncorrectable stats per netdev + * @netdev: network interface device structure + * @fec_stats: buffer to hold FEC statistics for given port + * + */ +static void ice_get_fec_stats(struct net_device *netdev, + struct ethtool_fec_stats *fec_stats) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_port_topology port_topology; + struct ice_port_info *pi; + struct ice_pf *pf; + struct ice_hw *hw; + int err; + + pf = np->vsi->back; + hw = &pf->hw; + pi = np->vsi->port_info; + + /* Serdes parameters are not supported if not the PF VSI */ + if (np->vsi->type != ICE_VSI_PF || !pi) + return; + + err = ice_get_port_topology(hw, pi->lport, &port_topology); + if (err) { + netdev_info(netdev, "Extended register dump failed Lport %d\n", + pi->lport); + return; + } + + /* Get FEC correctable, uncorrectable counter */ + err = ice_get_port_fec_stats(hw, port_topology.pcs_quad_select, + port_topology.pcs_port, fec_stats); + if (err) + netdev_info(netdev, "FEC stats get failed Lport %d Err %d\n", + pi->lport, err); +} + static const struct ethtool_ops ice_ethtool_ops = { .cap_rss_ctx_supported = true, .supported_coalesce_params = ETHTOOL_COALESCE_USECS | @@ -4290,6 +4727,7 @@ static const struct ethtool_ops ice_ethtool_ops = { .cap_rss_sym_xor_supported = true, .get_link_ksettings = ice_get_link_ksettings, .set_link_ksettings = ice_set_link_ksettings, + .get_fec_stats = ice_get_fec_stats, .get_drvinfo = ice_get_drvinfo, .get_regs_len = ice_get_regs_len, .get_regs = ice_get_regs, diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.h b/drivers/net/ethernet/intel/ice/ice_ethtool.h index b88e3da06f13..9acccae38625 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.h +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.h @@ -9,6 +9,35 @@ struct ice_phy_type_to_ethtool { u8 link_mode; }; +struct ice_serdes_equalization_to_ethtool { + int rx_equalization_pre2; + int rx_equalization_pre1; + int rx_equalization_post1; + int rx_equalization_bflf; + int rx_equalization_bfhf; + int rx_equalization_drate; + int tx_equalization_pre1; + int tx_equalization_pre3; + int tx_equalization_atten; + int tx_equalization_post1; + int tx_equalization_pre2; +}; + +struct ice_regdump_to_ethtool { + /* A multilane port can have max 4 serdes */ + struct ice_serdes_equalization_to_ethtool equalization[4]; +}; + +/* Port topology from lport i.e. + * serdes mapping, pcsquad, macport, cage etc... 
+ */ +struct ice_port_topology { + u16 pcs_port; + u16 primary_serdes_lane; + u16 serdes_lane_count; + u16 pcs_quad_select; +}; + /* Macro to make PHY type to Ethtool link mode table entry. * The index is the PHY type. */ diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index cfac1d432c15..91cbae1eec89 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -157,6 +157,8 @@ #define GLGEN_RTRIG_CORER_M BIT(0) #define GLGEN_RTRIG_GLOBR_M BIT(1) #define GLGEN_STAT 0x000B612C +#define GLGEN_SWITCH_MODE_CONFIG 0x000B81E0 +#define GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M BIT(2) #define GLGEN_VFLRSTAT(_i) (0x00093A04 + ((_i) * 4)) #define PFGEN_CTRL 0x00091000 #define PFGEN_CTRL_PFSWR_M BIT(0) @@ -177,6 +179,8 @@ #define GLINT_CTL_ITR_GRAN_50_M ICE_M(0xF, 24) #define GLINT_CTL_ITR_GRAN_25_S 28 #define GLINT_CTL_ITR_GRAN_25_M ICE_M(0xF, 28) +#define GLGEN_MAC_LINK_TOPO 0x000B81DC +#define GLGEN_MAC_LINK_TOPO_LINK_TOPO_M GENMASK(1, 0) #define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4)) #define GLINT_DYN_CTL_INTENA_M BIT(0) #define GLINT_DYN_CTL_CLEARPBA_M BIT(1) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 7629b0190578..f559e60992fa 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2580,8 +2580,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi) if (!IS_ENABLED(CONFIG_RFS_ACCEL)) irq_set_affinity_notifier(irq_num, NULL); - /* clear the affinity_mask in the IRQ descriptor */ - irq_set_affinity_hint(irq_num, NULL); + /* clear the affinity_hint in the IRQ descriptor */ + irq_update_affinity_hint(irq_num, NULL); synchronize_irq(irq_num); devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]); } diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 55a42aad92a5..ec636be4d17d 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -35,7 +35,6 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation."; #define ICE_DDP_PKG_PATH "intel/ice/ddp/" #define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg" -MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION(DRV_SUMMARY); MODULE_IMPORT_NS(LIBIE); MODULE_LICENSE("GPL v2"); @@ -623,7 +622,7 @@ skip: if (hw->port_info) ice_sched_clear_port(hw->port_info); - ice_shutdown_all_ctrlq(hw); + ice_shutdown_all_ctrlq(hw, false); set_bit(ICE_PREPARED_FOR_RESET, pf->state); } @@ -2610,7 +2609,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) } /* assign the mask for this irq */ - irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); + irq_update_affinity_hint(irq_num, &q_vector->affinity_mask); } err = ice_set_cpu_rx_rmap(vsi); @@ -2628,7 +2627,7 @@ free_q_irqs: irq_num = vsi->q_vectors[vector]->irq.virq; if (!IS_ENABLED(CONFIG_RFS_ACCEL)) irq_set_affinity_notifier(irq_num, NULL); - irq_set_affinity_hint(irq_num, NULL); + irq_update_affinity_hint(irq_num, NULL); devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]); } return err; @@ -4158,13 +4157,17 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked) /* set for the next time the netdev is started */ if (!netif_running(vsi->netdev)) { - ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); + err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); + if (err) + goto rebuild_err; dev_dbg(ice_pf_to_dev(pf), "Link 
is down, queue count change happens when link is brought up\n"); goto done; } ice_vsi_close(vsi); - ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); + err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); + if (err) + goto rebuild_err; ice_for_each_traffic_class(i) { if (vsi->tc_cfg.ena_tc & BIT(i)) @@ -4175,6 +4178,11 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked) } ice_pf_dcb_recfg(pf, locked); ice_vsi_open(vsi); + goto done; + +rebuild_err: + dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n", + err); done: clear_bit(ICE_CFG_BUSY, pf->state); return err; @@ -5490,7 +5498,7 @@ static void ice_prepare_for_shutdown(struct ice_pf *pf) if (pf->vsi[v]) pf->vsi[v]->vsi_num = 0; - ice_shutdown_all_ctrlq(hw); + ice_shutdown_all_ctrlq(hw, true); } /** @@ -7694,8 +7702,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) goto err_vsi_rebuild; } - ice_eswitch_rebuild(pf); - if (reset_type == ICE_RESET_PFR) { err = ice_rebuild_channels(pf); if (err) { @@ -7750,7 +7756,7 @@ err_vsi_rebuild: err_sched_init_port: ice_sched_cleanup_all(hw); err_init_ctrlq: - ice_shutdown_all_ctrlq(hw); + ice_shutdown_all_ctrlq(hw, false); set_bit(ICE_RESET_FAILED, pf->state); clear_recovery: /* set this bit in PF state to control service task scheduling */ diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h index 755a9c55267c..7c09ea0f03ba 100644 --- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h +++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h @@ -7,18 +7,24 @@ /* Each recipe can match up to 5 different fields. Fields to match can be meta- * data, values extracted from packet headers, or results from other recipes. - * One of the 5 fields is reserved for matching the switch ID. So, up to 4 - * recipes can provide intermediate results to another one through chaining, - * e.g. recipes 0, 1, 2, and 3 can provide intermediate results to recipe 4. + * Therefore, up to 5 recipes can provide intermediate results to another one + * through chaining, e.g. recipes 0, 1, 2, 3 and 4 can provide intermediate + * results to recipe 5. Note that one of the fields in one of the recipes must + * always be reserved for matching the switch ID. */ -#define ICE_NUM_WORDS_RECIPE 4 +#define ICE_NUM_WORDS_RECIPE 5 -/* Max recipes that can be chained */ +/* Max recipes that can be chained, not including the last one, which combines + * intermediate results. + */ #define ICE_MAX_CHAIN_RECIPE 5 -/* 1 word reserved for switch ID from allowed 5 words. - * So a recipe can have max 4 words. And you can chain 5 such recipes - * together. So maximum words that can be programmed for look up is 5 * 4. +/* Total max recipes in chain recipe (including intermediate results) */ +#define ICE_MAX_CHAIN_RECIPE_RES (ICE_MAX_CHAIN_RECIPE + 1) + +/* A recipe can have max 5 words, and 5 recipes can be chained together (using + * the 6th one, which would contain only result indexes). So maximum words that + * can be programmed for lookup is 5 * 5 (not including intermediate results). 
*/ #define ICE_MAX_CHAIN_WORDS (ICE_NUM_WORDS_RECIPE * ICE_MAX_CHAIN_RECIPE) @@ -449,32 +455,11 @@ struct ice_prot_ext_tbl_entry { /* Extractions to be looked up for a given recipe */ struct ice_prot_lkup_ext { - u16 prot_type; u8 n_val_words; /* create a buffer to hold max words per recipe */ - u16 field_off[ICE_MAX_CHAIN_WORDS]; u16 field_mask[ICE_MAX_CHAIN_WORDS]; struct ice_fv_word fv_words[ICE_MAX_CHAIN_WORDS]; - - /* Indicate field offsets that have field vector indices assigned */ - DECLARE_BITMAP(done, ICE_MAX_CHAIN_WORDS); }; -struct ice_pref_recipe_group { - u8 n_val_pairs; /* Number of valid pairs */ - struct ice_fv_word pairs[ICE_NUM_WORDS_RECIPE]; - u16 mask[ICE_NUM_WORDS_RECIPE]; -}; - -struct ice_recp_grp_entry { - struct list_head l_entry; - -#define ICE_INVAL_CHAIN_IND 0xFF - u16 rid; - u8 chain_idx; - u16 fv_idx[ICE_NUM_WORDS_RECIPE]; - u16 fv_mask[ICE_NUM_WORDS_RECIPE]; - struct ice_pref_recipe_group r_group; -}; #endif /* _ICE_PROTOCOL_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 927b623cedd5..51fac8f18cb0 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -7,8 +7,6 @@ #define E810_OUT_PROP_DELAY_NS 1 -#define UNKNOWN_INCVAL_E82X 0x100000000ULL - static const struct ptp_pin_desc ice_pin_desc_e810t[] = { /* name idx func chan */ { "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } }, @@ -813,7 +811,7 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf) } mutex_unlock(&pf->ptp.ports_owner.lock); - for (i = 0; i < ICE_MAX_QUAD; i++) { + for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) { u64 tstamp_ready; int err; @@ -1014,6 +1012,28 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) } /** + * ice_ptp_init_tx_eth56g - Initialize tracking for Tx timestamps + * @pf: Board private structure + * @tx: the Tx tracking structure to initialize + * @port: the port this structure tracks + * + * Initialize the Tx timestamp tracker for this port. ETH56G PHYs + * have independent memory blocks for all ports. 
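+ * Unlike E82X, where the four ports of a quad share one timestamp block and
+ * each port only gets an offset inside it, every ETH56G port uses its own
+ * block with offset 0.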
+ * + * Return: 0 for success, -ENOMEM when failed to allocate Tx tracker + */ +static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx, + u8 port) +{ + tx->block = port; + tx->offset = 0; + tx->len = INDEX_PER_PORT_ETH56G; + tx->has_ready_bitmap = 1; + + return ice_ptp_alloc_tx_tracker(tx); +} + +/** * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps * @pf: Board private structure * @tx: the Tx tracking structure to initialize @@ -1027,7 +1047,7 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) static int ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) { - tx->block = port / ICE_PORTS_PER_QUAD; + tx->block = ICE_GET_QUAD_NUM(port); tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X; tx->len = INDEX_PER_PORT_E82X; tx->has_ready_bitmap = 1; @@ -1210,12 +1230,7 @@ static u64 ice_base_incval(struct ice_pf *pf) struct ice_hw *hw = &pf->hw; u64 incval; - if (ice_is_e810(hw)) - incval = ICE_PTP_NOMINAL_INCVAL_E810; - else if (ice_e82x_time_ref(hw) < NUM_ICE_TIME_REF_FREQ) - incval = ice_e82x_nominal_incval(ice_e82x_time_ref(hw)); - else - incval = UNKNOWN_INCVAL_E82X; + incval = ice_get_base_incval(hw); dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n", incval); @@ -1229,8 +1244,8 @@ static u64 ice_base_incval(struct ice_pf *pf) */ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port) { - int quad = port->port_num / ICE_PORTS_PER_QUAD; int offs = port->port_num % ICE_PORTS_PER_QUAD; + int quad = ICE_GET_QUAD_NUM(port->port_num); struct ice_pf *pf; struct ice_hw *hw; u32 val, phy_sts; @@ -1348,10 +1363,19 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port) mutex_lock(&ptp_port->ps_lock); - kthread_cancel_delayed_work_sync(&ptp_port->ov_work); + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + err = ice_stop_phy_timer_eth56g(hw, port, true); + break; + case ICE_PHY_E82X: + kthread_cancel_delayed_work_sync(&ptp_port->ov_work); - err = ice_stop_phy_timer_e82x(hw, port, true); - if (err) + err = ice_stop_phy_timer_e82x(hw, port, true); + break; + default: + err = -ENODEV; + } + if (err && err != -EBUSY) dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n", port, err); @@ -1385,27 +1409,39 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port) mutex_lock(&ptp_port->ps_lock); - kthread_cancel_delayed_work_sync(&ptp_port->ov_work); + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + err = ice_start_phy_timer_eth56g(hw, port); + break; + case ICE_PHY_E82X: + /* Start the PHY timer in Vernier mode */ + kthread_cancel_delayed_work_sync(&ptp_port->ov_work); - /* temporarily disable Tx timestamps while calibrating PHY offset */ - spin_lock_irqsave(&ptp_port->tx.lock, flags); - ptp_port->tx.calibrating = true; - spin_unlock_irqrestore(&ptp_port->tx.lock, flags); - ptp_port->tx_fifo_busy_cnt = 0; + /* temporarily disable Tx timestamps while calibrating + * PHY offset + */ + spin_lock_irqsave(&ptp_port->tx.lock, flags); + ptp_port->tx.calibrating = true; + spin_unlock_irqrestore(&ptp_port->tx.lock, flags); + ptp_port->tx_fifo_busy_cnt = 0; - /* Start the PHY timer in Vernier mode */ - err = ice_start_phy_timer_e82x(hw, port); - if (err) - goto out_unlock; + /* Start the PHY timer in Vernier mode */ + err = ice_start_phy_timer_e82x(hw, port); + if (err) + break; - /* Enable Tx timestamps right away */ - spin_lock_irqsave(&ptp_port->tx.lock, flags); - ptp_port->tx.calibrating = false; - spin_unlock_irqrestore(&ptp_port->tx.lock, flags); + /* Enable Tx 
timestamps right away */ + spin_lock_irqsave(&ptp_port->tx.lock, flags); + ptp_port->tx.calibrating = false; + spin_unlock_irqrestore(&ptp_port->tx.lock, flags); - kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0); + kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, + 0); + break; + default: + err = -ENODEV; + } -out_unlock: if (err) dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n", port, err); @@ -1429,20 +1465,23 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) if (pf->ptp.state != ICE_PTP_READY) return; - if (WARN_ON_ONCE(port >= ICE_NUM_EXTERNAL_PORTS)) + if (WARN_ON_ONCE(port >= hw->ptp.num_lports)) return; ptp_port = &pf->ptp.port; + if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo) + port *= 2; if (WARN_ON_ONCE(ptp_port->port_num != port)) return; /* Update cached link status for this port immediately */ ptp_port->link_up = linkup; - switch (hw->phy_model) { + switch (hw->ptp.phy_model) { case ICE_PHY_E810: /* Do not reconfigure E810 PHY */ return; + case ICE_PHY_ETH56G: case ICE_PHY_E82X: ice_ptp_port_phy_restart(ptp_port); return; @@ -1457,42 +1496,62 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) * @ena: bool value to enable or disable interrupt * @threshold: Minimum number of packets at which intr is triggered * - * Utility function to enable or disable Tx timestamp interrupt and threshold + * Utility function to configure all the PHY interrupt settings, including + * whether the PHY interrupt is enabled, and what threshold to use. Also + * configures The E82X timestamp owner to react to interrupts from all PHYs. + * + * Return: 0 on success, -EOPNOTSUPP when PHY model incorrect, other error codes + * when failed to configure PHY interrupt for E82X */ static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold) { + struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - int err = 0; - int quad; - u32 val; ice_ptp_reset_ts_memory(hw); - for (quad = 0; quad < ICE_MAX_QUAD; quad++) { - err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, - &val); - if (err) - break; + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: { + int port; - if (ena) { - val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; - val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M; - val |= FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M, - threshold); - } else { - val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; + for (port = 0; port < hw->ptp.num_lports; port++) { + int err; + + err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold); + if (err) { + dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n", + port, err); + return err; + } } - err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, - val); - if (err) - break; + return 0; } + case ICE_PHY_E82X: { + int quad; - if (err) - dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, err %d\n", - err); - return err; + for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports); + quad++) { + int err; + + err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold); + if (err) { + dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n", + quad, err); + return err; + } + } + + return 0; + } + case ICE_PHY_E810: + return 0; + case ICE_PHY_UNSUP: + default: + dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__, + hw->ptp.phy_model); + return -EOPNOTSUPP; + } } /** @@ -1767,8 +1826,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan, * maintaining phase */ if (start_time < current_time) - start_time = 
div64_u64(current_time + NSEC_PER_SEC - 1, - NSEC_PER_SEC) * NSEC_PER_SEC + phase; + start_time = roundup_u64(current_time, NSEC_PER_SEC) + phase; if (ice_is_e810(hw)) start_time -= E810_OUT_PROP_DELAY_NS; @@ -1994,11 +2052,14 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) struct ice_hw *hw = &pf->hw; int err; - /* For Vernier mode, we need to recalibrate after new settime - * Start with disabling timestamp block + /* For Vernier mode on E82X, we need to recalibrate after new settime. + * Start with marking timestamps as invalid. */ - if (pf->ptp.port.link_up) - ice_ptp_port_phy_stop(&pf->ptp.port); + if (hw->ptp.phy_model == ICE_PHY_E82X) { + err = ice_ptp_clear_phy_offset_ready_e82x(hw); + if (err) + dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n"); + } if (!ice_ptp_lock(hw)) { err = -EBUSY; @@ -2018,7 +2079,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) ice_ptp_enable_all_clkout(pf); /* Recalibrate and re-enable timestamp blocks for E822/E823 */ - if (hw->phy_model == ICE_PHY_E82X) + if (hw->ptp.phy_model == ICE_PHY_E82X) ice_ptp_restart_all_phy(pf); exit: if (err) { @@ -2644,7 +2705,7 @@ static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf) if (!ice_pf_src_tmr_owned(pf)) return; - for (i = 0; i < ICE_MAX_QUAD; i++) { + for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) { u64 tstamp_ready; int err; @@ -3080,12 +3141,10 @@ static int ice_ptp_init_owner(struct ice_pf *pf) /* Release the global hardware lock */ ice_ptp_unlock(hw); - if (!ice_is_e810(hw)) { - /* Enable quad interrupts */ - err = ice_ptp_cfg_phy_interrupt(pf, true, 1); - if (err) - goto err_exit; - } + /* Configure PHY interrupt settings */ + err = ice_ptp_cfg_phy_interrupt(pf, true, 1); + if (err) + goto err_exit; /* Ensure we have a clock device */ err = ice_ptp_create_clock(pf); @@ -3146,7 +3205,10 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) mutex_init(&ptp_port->ps_lock); - switch (hw->phy_model) { + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx, + ptp_port->port_num); case ICE_PHY_E810: return ice_ptp_init_tx_e810(pf, &ptp_port->tx); case ICE_PHY_E82X: @@ -3241,7 +3303,7 @@ static void ice_ptp_remove_auxbus_device(struct ice_pf *pf) */ static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf) { - switch (pf->hw.phy_model) { + switch (pf->hw.ptp.phy_model) { case ICE_PHY_E82X: /* E822 based PHY has the clock owner process the interrupt * for all ports. 
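The ice_ptp_cfg_clkout() hunk near the top of this chunk swaps the open-coded div64_u64() round-up for roundup_u64(). Assuming roundup_u64(x, y) rounds x up to the next multiple of y, the two forms are identical; a small standalone check of that equivalence:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* assumed semantics of the kernel helper: round x up to a multiple of y */
static uint64_t roundup_u64(uint64_t x, uint64_t y)
{
        return (x + y - 1) / y * y;
}

int main(void)
{
        const uint64_t samples[] = {
                0, 1, NSEC_PER_SEC - 1, NSEC_PER_SEC,
                3 * NSEC_PER_SEC + 123456789ULL,
        };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                /* the expression removed from ice_ptp_cfg_clkout() */
                uint64_t old = (samples[i] + NSEC_PER_SEC - 1) /
                               NSEC_PER_SEC * NSEC_PER_SEC;

                assert(old == roundup_u64(samples[i], NSEC_PER_SEC));
        }
        printf("roundup_u64() matches the open-coded round-up\n");
        return 0;
}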
@@ -3277,7 +3339,7 @@ void ice_ptp_init(struct ice_pf *pf) ptp->state = ICE_PTP_INITIALIZING; - ice_ptp_init_phy_model(hw); + ice_ptp_init_hw(hw); ice_ptp_init_tx_interrupt_mode(pf); @@ -3291,6 +3353,9 @@ void ice_ptp_init(struct ice_pf *pf) } ptp->port.port_num = hw->pf_id; + if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo) + ptp->port.port_num = hw->pf_id * 2; + err = ice_ptp_init_port(pf, &ptp->port); if (err) goto err; diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index e2af9749061c..2db2257a0fb2 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -160,6 +160,7 @@ struct ice_ptp_tx { #define INDEX_PER_QUAD 64 #define INDEX_PER_PORT_E82X 16 #define INDEX_PER_PORT_E810 64 +#define INDEX_PER_PORT_ETH56G 64 /** * struct ice_ptp_port - data used to initialize an external port for PTP diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h index 2c4dab0c48ab..e6980b94a6c1 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h @@ -9,6 +9,321 @@ */ /* Constants defined for the PTP 1588 clock hardware. */ +const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES] = { + /* ETH56G_PHY_REG_PTP */ + { + /* base_addr */ + { + 0x092000, + 0x126000, + 0x1BA000, + 0x24E000, + 0x2E2000, + }, + /* step */ + 0x98, + }, + /* ETH56G_PHY_MEM_PTP */ + { + /* base_addr */ + { + 0x093000, + 0x127000, + 0x1BB000, + 0x24F000, + 0x2E3000, + }, + /* step */ + 0x200, + }, + /* ETH56G_PHY_REG_XPCS */ + { + /* base_addr */ + { + 0x000000, + 0x009400, + 0x128000, + 0x1BC000, + 0x250000, + }, + /* step */ + 0x21000, + }, + /* ETH56G_PHY_REG_MAC */ + { + /* base_addr */ + { + 0x085000, + 0x119000, + 0x1AD000, + 0x241000, + 0x2D5000, + }, + /* step */ + 0x1000, + }, + /* ETH56G_PHY_REG_GPCS */ + { + /* base_addr */ + { + 0x084000, + 0x118000, + 0x1AC000, + 0x240000, + 0x2D4000, + }, + /* step */ + 0x400, + }, +}; + +const +struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = { + [ICE_ETH56G_LNK_SPD_1G] = { + .tx_mode = { .def = 6, }, + .rx_mode = { .def = 6, }, + .blks_per_clk = 1, + .blktime = 0x4000, /* 32 */ + .tx_offset = { + .serdes = 0x6666, /* 51.2 */ + .no_fec = 0xd066, /* 104.2 */ + .sfd = 0x3000, /* 24 */ + .onestep = 0x30000 /* 384 */ + }, + .rx_offset = { + .serdes = 0xffffc59a, /* -29.2 */ + .no_fec = 0xffff0a80, /* -122.75 */ + .sfd = 0x2c00, /* 22 */ + .bs_ds = 0x19a /* 0.8 */ + /* Dynamic bitslip 0 equals to 10 */ + } + }, + [ICE_ETH56G_LNK_SPD_2_5G] = { + .tx_mode = { .def = 6, }, + .rx_mode = { .def = 6, }, + .blks_per_clk = 1, + .blktime = 0x199a, /* 12.8 */ + .tx_offset = { + .serdes = 0x28f6, /* 20.48 */ + .no_fec = 0x53b8, /* 41.86 */ + .sfd = 0x1333, /* 9.6 */ + .onestep = 0x13333 /* 153.6 */ + }, + .rx_offset = { + .serdes = 0xffffe8a4, /* -11.68 */ + .no_fec = 0xffff9a76, /* -50.77 */ + .sfd = 0xf33, /* 7.6 */ + .bs_ds = 0xa4 /* 0.32 */ + } + }, + [ICE_ETH56G_LNK_SPD_10G] = { + .tx_mode = { .def = 1, }, + .rx_mode = { .def = 1, }, + .blks_per_clk = 1, + .blktime = 0x666, /* 3.2 */ + .tx_offset = { + .serdes = 0x234c, /* 17.6484848 */ + .no_fec = 0x8e80, /* 71.25 */ + .fc = 0xb4a4, /* 90.32 */ + .sfd = 0x4a4, /* 2.32 */ + .onestep = 0x4ccd /* 38.4 */ + }, + .rx_offset = { + .serdes = 0xffffeb27, /* -10.42424 */ + .no_fec = 0xffffcccd, /* -25.6 */ + .fc = 0xfffe0014, /* -255.96 */ + .sfd = 0x4a4, /* 2.32 */ + .bs_ds = 0x32 /* 0.0969697 */ + } + }, + 
[ICE_ETH56G_LNK_SPD_25G] = { + .tx_mode = { + .def = 1, + .rs = 4 + }, + .tx_mk_dly = 4, + .tx_cw_dly = { + .def = 1, + .onestep = 6 + }, + .rx_mode = { + .def = 1, + .rs = 4 + }, + .rx_mk_dly = { + .def = 1, + .rs = 1 + }, + .rx_cw_dly = { + .def = 1, + .rs = 1 + }, + .blks_per_clk = 1, + .blktime = 0x28f, /* 1.28 */ + .mktime = 0x147b, /* 10.24, only if RS-FEC enabled */ + .tx_offset = { + .serdes = 0xe1e, /* 7.0593939 */ + .no_fec = 0x3857, /* 28.17 */ + .fc = 0x48c3, /* 36.38 */ + .rs = 0x8100, /* 64.5 */ + .sfd = 0x1dc, /* 0.93 */ + .onestep = 0x1eb8 /* 15.36 */ + }, + .rx_offset = { + .serdes = 0xfffff7a9, /* -4.1697 */ + .no_fec = 0xffffe71a, /* -12.45 */ + .fc = 0xfffe894d, /* -187.35 */ + .rs = 0xfffff8cd, /* -3.6 */ + .sfd = 0x1dc, /* 0.93 */ + .bs_ds = 0x14 /* 0.0387879, RS-FEC 0 */ + } + }, + [ICE_ETH56G_LNK_SPD_40G] = { + .tx_mode = { .def = 3 }, + .tx_mk_dly = 4, + .tx_cw_dly = { + .def = 1, + .onestep = 6 + }, + .rx_mode = { .def = 4 }, + .rx_mk_dly = { .def = 1 }, + .rx_cw_dly = { .def = 1 }, + .blktime = 0x333, /* 1.6 */ + .mktime = 0xccd, /* 6.4 */ + .tx_offset = { + .serdes = 0x234c, /* 17.6484848 */ + .no_fec = 0x5a8a, /* 45.27 */ + .fc = 0x81b8, /* 64.86 */ + .sfd = 0x4a4, /* 2.32 */ + .onestep = 0x1333 /* 9.6 */ + }, + .rx_offset = { + .serdes = 0xffffeb27, /* -10.42424 */ + .no_fec = 0xfffff594, /* -5.21 */ + .fc = 0xfffe3080, /* -231.75 */ + .sfd = 0x4a4, /* 2.32 */ + .bs_ds = 0xccd /* 6.4 */ + } + }, + [ICE_ETH56G_LNK_SPD_50G] = { + .tx_mode = { .def = 5 }, + .tx_mk_dly = 4, + .tx_cw_dly = { + .def = 1, + .onestep = 6 + }, + .rx_mode = { .def = 5 }, + .rx_mk_dly = { .def = 1 }, + .rx_cw_dly = { .def = 1 }, + .blktime = 0x28f, /* 1.28 */ + .mktime = 0xa3d, /* 5.12 */ + .tx_offset = { + .serdes = 0x13ba, /* 9.86353 */ + .rs = 0x5400, /* 42 */ + .sfd = 0xe6, /* 0.45 */ + .onestep = 0xf5c /* 7.68 */ + }, + .rx_offset = { + .serdes = 0xfffff7e8, /* -4.04706 */ + .rs = 0xfffff994, /* -3.21 */ + .sfd = 0xe6 /* 0.45 */ + } + }, + [ICE_ETH56G_LNK_SPD_50G2] = { + .tx_mode = { + .def = 3, + .rs = 2 + }, + .tx_mk_dly = 4, + .tx_cw_dly = { + .def = 1, + .onestep = 6 + }, + .rx_mode = { + .def = 4, + .rs = 1 + }, + .rx_mk_dly = { .def = 1 }, + .rx_cw_dly = { .def = 1 }, + .blktime = 0x28f, /* 1.28 */ + .mktime = 0xa3d, /* 5.12 */ + .tx_offset = { + .serdes = 0xe1e, /* 7.0593939 */ + .no_fec = 0x3d33, /* 30.6 */ + .rs = 0x5057, /* 40.17 */ + .sfd = 0x1dc, /* 0.93 */ + .onestep = 0xf5c /* 7.68 */ + }, + .rx_offset = { + .serdes = 0xfffff7a9, /* -4.1697 */ + .no_fec = 0xfffff8cd, /* -3.6 */ + .rs = 0xfffff21a, /* -6.95 */ + .sfd = 0x1dc, /* 0.93 */ + .bs_ds = 0xa3d /* 5.12, RS-FEC 0x633 (3.1) */ + } + }, + [ICE_ETH56G_LNK_SPD_100G] = { + .tx_mode = { + .def = 3, + .rs = 2 + }, + .tx_mk_dly = 10, + .tx_cw_dly = { + .def = 3, + .onestep = 6 + }, + .rx_mode = { + .def = 4, + .rs = 1 + }, + .rx_mk_dly = { .def = 5 }, + .rx_cw_dly = { .def = 5 }, + .blks_per_clk = 1, + .blktime = 0x148, /* 0.64 */ + .mktime = 0x199a, /* 12.8 */ + .tx_offset = { + .serdes = 0xe1e, /* 7.0593939 */ + .no_fec = 0x67ec, /* 51.96 */ + .rs = 0x44fb, /* 34.49 */ + .sfd = 0x1dc, /* 0.93 */ + .onestep = 0xf5c /* 7.68 */ + }, + .rx_offset = { + .serdes = 0xfffff7a9, /* -4.1697 */ + .no_fec = 0xfffff5a9, /* -5.17 */ + .rs = 0xfffff6e6, /* -4.55 */ + .sfd = 0x1dc, /* 0.93 */ + .bs_ds = 0x199a /* 12.8, RS-FEC 0x31b (1.552) */ + } + }, + [ICE_ETH56G_LNK_SPD_100G2] = { + .tx_mode = { .def = 5 }, + .tx_mk_dly = 10, + .tx_cw_dly = { + .def = 3, + .onestep = 6 + }, + .rx_mode = { .def = 5 }, + .rx_mk_dly = { .def = 5 }, + 
.rx_cw_dly = { .def = 5 }, + .blks_per_clk = 1, + .blktime = 0x148, /* 0.64 */ + .mktime = 0x199a, /* 12.8 */ + .tx_offset = { + .serdes = 0x13ba, /* 9.86353 */ + .rs = 0x460a, /* 35.02 */ + .sfd = 0xe6, /* 0.45 */ + .onestep = 0xf5c /* 7.68 */ + }, + .rx_offset = { + .serdes = 0xfffff7e8, /* -4.04706 */ + .rs = 0xfffff548, /* -5.36 */ + .sfd = 0xe6, /* 0.45 */ + .bs_ds = 0x303 /* 1.506 */ + } + } +}; + /* struct ice_time_ref_info_e82x * * E822 hardware can use different sources as the reference for the PTP @@ -155,6 +470,93 @@ const struct ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = { }, }; +const +struct ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ] = { + /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */ + { + /* tspll_ck_refclkfreq */ + 0x19, + /* tspll_ndivratio */ + 1, + /* tspll_fbdiv_intgr */ + 320, + /* tspll_fbdiv_frac */ + 0, + /* ref1588_ck_div */ + 0, + }, + + /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */ + { + /* tspll_ck_refclkfreq */ + 0x29, + /* tspll_ndivratio */ + 3, + /* tspll_fbdiv_intgr */ + 195, + /* tspll_fbdiv_frac */ + 1342177280UL, + /* ref1588_ck_div */ + 0, + }, + + /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */ + { + /* tspll_ck_refclkfreq */ + 0x3E, + /* tspll_ndivratio */ + 2, + /* tspll_fbdiv_intgr */ + 128, + /* tspll_fbdiv_frac */ + 0, + /* ref1588_ck_div */ + 0, + }, + + /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */ + { + /* tspll_ck_refclkfreq */ + 0x33, + /* tspll_ndivratio */ + 3, + /* tspll_fbdiv_intgr */ + 156, + /* tspll_fbdiv_frac */ + 1073741824UL, + /* ref1588_ck_div */ + 0, + }, + + /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */ + { + /* tspll_ck_refclkfreq */ + 0x1F, + /* tspll_ndivratio */ + 5, + /* tspll_fbdiv_intgr */ + 256, + /* tspll_fbdiv_frac */ + 0, + /* ref1588_ck_div */ + 0, + }, + + /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */ + { + /* tspll_ck_refclkfreq */ + 0x52, + /* tspll_ndivratio */ + 3, + /* tspll_fbdiv_intgr */ + 97, + /* tspll_fbdiv_frac */ + 2818572288UL, + /* ref1588_ck_div */ + 0, + }, +}; + /* struct ice_vernier_info_e82x * * E822 hardware calibrates the delay of the timestamp indication from the diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index 2b9423a173bb..3a33e6b9b313 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -2,6 +2,7 @@ /* Copyright (C) 2021, Intel Corporation. */ #include <linux/delay.h> +#include <linux/iopoll.h> #include "ice_common.h" #include "ice_ptp_hw.h" #include "ice_ptp_consts.h" @@ -227,40 +228,632 @@ static u64 ice_ptp_read_src_incval(struct ice_hw *hw) } /** - * ice_ptp_src_cmd - Prepare source timer for a timer command - * @hw: pointer to HW structure + * ice_read_cgu_reg_e82x - Read a CGU register + * @hw: pointer to the HW struct + * @addr: Register address to read + * @val: storage for register value read + * + * Read the contents of a register of the Clock Generation Unit. Only + * applicable to E822 devices. 
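The hex offsets in the eth56g_mac_cfg[] table above are each paired with a decimal nanosecond comment, and the pairs are consistent with a fixed-point encoding carrying 9 fractional bits (1 LSB = 1/512 ns, negative values in two's complement). A sketch checking a few entries from the 1G row under that assumption (enc_ns() is a hypothetical helper; link with -lm):

#include <assert.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical encoder: signed nanoseconds to the 32-bit register format,
 * assuming 9 fractional bits (1 LSB = 1/512 ns) */
static uint32_t enc_ns(double ns)
{
        return (uint32_t)(int32_t)llround(ns * 512.0);
}

int main(void)
{
        /* register values and their decimal comments from the 1G entry */
        assert(enc_ns(32.0) == 0x4000);         /* blktime */
        assert(enc_ns(51.2) == 0x6666);         /* tx_offset.serdes */
        assert(enc_ns(104.2) == 0xd066);        /* tx_offset.no_fec */
        assert(enc_ns(-29.2) == 0xffffc59a);    /* rx_offset.serdes */
        printf("eth56g_mac_cfg offsets consistent with 1/512 ns units\n");
        return 0;
}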
+ * + * Return: 0 on success, other error codes when failed to read from CGU + */ +static int ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val) +{ + struct ice_sbq_msg_input cgu_msg = { + .opcode = ice_sbq_msg_rd, + .dest_dev = cgu, + .msg_addr_low = addr + }; + int err; + + err = ice_sbq_rw_reg(hw, &cgu_msg, ICE_AQ_FLAG_RD); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n", + addr, err); + return err; + } + + *val = cgu_msg.data; + + return 0; +} + +/** + * ice_write_cgu_reg_e82x - Write a CGU register + * @hw: pointer to the HW struct + * @addr: Register address to write + * @val: value to write into the register + * + * Write the specified value to a register of the Clock Generation Unit. Only + * applicable to E822 devices. + * + * Return: 0 on success, other error codes when failed to write to CGU + */ +static int ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val) +{ + struct ice_sbq_msg_input cgu_msg = { + .opcode = ice_sbq_msg_wr, + .dest_dev = cgu, + .msg_addr_low = addr, + .data = val + }; + int err; + + err = ice_sbq_rw_reg(hw, &cgu_msg, ICE_AQ_FLAG_RD); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n", + addr, err); + return err; + } + + return err; +} + +/** + * ice_clk_freq_str - Convert time_ref_freq to string + * @clk_freq: Clock frequency + * + * Return: specified TIME_REF clock frequency converted to a string + */ +static const char *ice_clk_freq_str(enum ice_time_ref_freq clk_freq) +{ + switch (clk_freq) { + case ICE_TIME_REF_FREQ_25_000: + return "25 MHz"; + case ICE_TIME_REF_FREQ_122_880: + return "122.88 MHz"; + case ICE_TIME_REF_FREQ_125_000: + return "125 MHz"; + case ICE_TIME_REF_FREQ_153_600: + return "153.6 MHz"; + case ICE_TIME_REF_FREQ_156_250: + return "156.25 MHz"; + case ICE_TIME_REF_FREQ_245_760: + return "245.76 MHz"; + default: + return "Unknown"; + } +} + +/** + * ice_clk_src_str - Convert time_ref_src to string + * @clk_src: Clock source + * + * Return: specified clock source converted to its string name + */ +static const char *ice_clk_src_str(enum ice_clk_src clk_src) +{ + switch (clk_src) { + case ICE_CLK_SRC_TCXO: + return "TCXO"; + case ICE_CLK_SRC_TIME_REF: + return "TIME_REF"; + default: + return "Unknown"; + } +} + +/** + * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit + * @hw: pointer to the HW struct + * @clk_freq: Clock frequency to program + * @clk_src: Clock source to select (TIME_REF, or TCXO) + * + * Configure the Clock Generation Unit with the desired clock frequency and + * time reference, enabling the PLL which drives the PTP hardware clock. 
+ * + * Return: + * * %0 - success + * * %-EINVAL - input parameters are incorrect + * * %-EBUSY - failed to lock TS PLL + * * %other - CGU read/write failure + */ +static int ice_cfg_cgu_pll_e82x(struct ice_hw *hw, + enum ice_time_ref_freq clk_freq, + enum ice_clk_src clk_src) +{ + union tspll_ro_bwm_lf bwm_lf; + union nac_cgu_dword19 dw19; + union nac_cgu_dword22 dw22; + union nac_cgu_dword24 dw24; + union nac_cgu_dword9 dw9; + int err; + + if (clk_freq >= NUM_ICE_TIME_REF_FREQ) { + dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n", + clk_freq); + return -EINVAL; + } + + if (clk_src >= NUM_ICE_CLK_SRC) { + dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n", + clk_src); + return -EINVAL; + } + + if (clk_src == ICE_CLK_SRC_TCXO && + clk_freq != ICE_TIME_REF_FREQ_25_000) { + dev_warn(ice_hw_to_dev(hw), + "TCXO only supports 25 MHz frequency\n"); + return -EINVAL; + } + + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val); + if (err) + return err; + + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val); + if (err) + return err; + + err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); + if (err) + return err; + + /* Log the current clock configuration */ + ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", + dw24.ts_pll_enable ? "enabled" : "disabled", + ice_clk_src_str(dw24.time_ref_sel), + ice_clk_freq_str(dw9.time_ref_freq_sel), + bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked"); + + /* Disable the PLL before changing the clock source or frequency */ + if (dw24.ts_pll_enable) { + dw24.ts_pll_enable = 0; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); + if (err) + return err; + } + + /* Set the frequency */ + dw9.time_ref_freq_sel = clk_freq; + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val); + if (err) + return err; + + /* Configure the TS PLL feedback divisor */ + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val); + if (err) + return err; + + dw19.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div; + dw19.tspll_ndivratio = 1; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val); + if (err) + return err; + + /* Configure the TS PLL post divisor */ + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val); + if (err) + return err; + + dw22.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div; + dw22.time1588clk_sel_div2 = 0; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val); + if (err) + return err; + + /* Configure the TS PLL pre divisor and clock source */ + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val); + if (err) + return err; + + dw24.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div; + dw24.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div; + dw24.time_ref_sel = clk_src; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); + if (err) + return err; + + /* Finally, enable the PLL */ + dw24.ts_pll_enable = 1; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); + if (err) + return err; + + /* Wait to verify if the PLL locks */ + usleep_range(1000, 5000); + + err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); + if (err) + return err; + + if (!bwm_lf.plllock_true_lock_cri) { + dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n"); + return -EBUSY; + } + + /* Log the current clock configuration */ + ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", + dw24.ts_pll_enable ? 
"enabled" : "disabled", + ice_clk_src_str(dw24.time_ref_sel), + ice_clk_freq_str(dw9.time_ref_freq_sel), + bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked"); + + return 0; +} + +/** + * ice_cfg_cgu_pll_e825c - Configure the Clock Generation Unit for E825-C + * @hw: pointer to the HW struct + * @clk_freq: Clock frequency to program + * @clk_src: Clock source to select (TIME_REF, or TCXO) + * + * Configure the Clock Generation Unit with the desired clock frequency and + * time reference, enabling the PLL which drives the PTP hardware clock. + * + * Return: + * * %0 - success + * * %-EINVAL - input parameters are incorrect + * * %-EBUSY - failed to lock TS PLL + * * %other - CGU read/write failure + */ +static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw, + enum ice_time_ref_freq clk_freq, + enum ice_clk_src clk_src) +{ + union tspll_ro_lock_e825c ro_lock; + union nac_cgu_dword16_e825c dw16; + union nac_cgu_dword23_e825c dw23; + union nac_cgu_dword19 dw19; + union nac_cgu_dword22 dw22; + union nac_cgu_dword24 dw24; + union nac_cgu_dword9 dw9; + int err; + + if (clk_freq >= NUM_ICE_TIME_REF_FREQ) { + dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n", + clk_freq); + return -EINVAL; + } + + if (clk_src >= NUM_ICE_CLK_SRC) { + dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n", + clk_src); + return -EINVAL; + } + + if (clk_src == ICE_CLK_SRC_TCXO && + clk_freq != ICE_TIME_REF_FREQ_156_250) { + dev_warn(ice_hw_to_dev(hw), + "TCXO only supports 156.25 MHz frequency\n"); + return -EINVAL; + } + + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val); + if (err) + return err; + + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val); + if (err) + return err; + + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD16_E825C, &dw16.val); + if (err) + return err; + + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, &dw23.val); + if (err) + return err; + + err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_LOCK_E825C, &ro_lock.val); + if (err) + return err; + + /* Log the current clock configuration */ + ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", + dw24.ts_pll_enable ? "enabled" : "disabled", + ice_clk_src_str(dw23.time_ref_sel), + ice_clk_freq_str(dw9.time_ref_freq_sel), + ro_lock.plllock_true_lock_cri ? 
"locked" : "unlocked"); + + /* Disable the PLL before changing the clock source or frequency */ + if (dw23.ts_pll_enable) { + dw23.ts_pll_enable = 0; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, + dw23.val); + if (err) + return err; + } + + /* Set the frequency */ + dw9.time_ref_freq_sel = clk_freq; + + /* Enable the correct receiver */ + if (clk_src == ICE_CLK_SRC_TCXO) { + dw9.time_ref_en = 0; + dw9.clk_eref0_en = 1; + } else { + dw9.time_ref_en = 1; + dw9.clk_eref0_en = 0; + } + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val); + if (err) + return err; + + /* Choose the referenced frequency */ + dw16.tspll_ck_refclkfreq = + e825c_cgu_params[clk_freq].tspll_ck_refclkfreq; + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD16_E825C, dw16.val); + if (err) + return err; + + /* Configure the TS PLL feedback divisor */ + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val); + if (err) + return err; + + dw19.tspll_fbdiv_intgr = + e825c_cgu_params[clk_freq].tspll_fbdiv_intgr; + dw19.tspll_ndivratio = + e825c_cgu_params[clk_freq].tspll_ndivratio; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val); + if (err) + return err; + + /* Configure the TS PLL post divisor */ + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val); + if (err) + return err; + + /* These two are constant for E825C */ + dw22.time1588clk_div = 5; + dw22.time1588clk_sel_div2 = 0; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val); + if (err) + return err; + + /* Configure the TS PLL pre divisor and clock source */ + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, &dw23.val); + if (err) + return err; + + dw23.ref1588_ck_div = + e825c_cgu_params[clk_freq].ref1588_ck_div; + dw23.time_ref_sel = clk_src; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, dw23.val); + if (err) + return err; + + dw24.tspll_fbdiv_frac = + e825c_cgu_params[clk_freq].tspll_fbdiv_frac; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); + if (err) + return err; + + /* Finally, enable the PLL */ + dw23.ts_pll_enable = 1; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, dw23.val); + if (err) + return err; + + /* Wait to verify if the PLL locks */ + usleep_range(1000, 5000); + + err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_LOCK_E825C, &ro_lock.val); + if (err) + return err; + + if (!ro_lock.plllock_true_lock_cri) { + dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n"); + return -EBUSY; + } + + /* Log the current clock configuration */ + ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", + dw24.ts_pll_enable ? "enabled" : "disabled", + ice_clk_src_str(dw23.time_ref_sel), + ice_clk_freq_str(dw9.time_ref_freq_sel), + ro_lock.plllock_true_lock_cri ? "locked" : "unlocked"); + + return 0; +} + +/** + * ice_cfg_cgu_pll_dis_sticky_bits_e82x - disable TS PLL sticky bits + * @hw: pointer to the HW struct + * + * Configure the Clock Generation Unit TS PLL sticky bits so they don't latch on + * losing TS PLL lock, but always show current state. 
+ * + * Return: 0 on success, other error codes when failed to read/write CGU + */ +static int ice_cfg_cgu_pll_dis_sticky_bits_e82x(struct ice_hw *hw) +{ + union tspll_cntr_bist_settings cntr_bist; + int err; + + err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS, + &cntr_bist.val); + if (err) + return err; + + /* Disable sticky lock detection so lock err reported is accurate */ + cntr_bist.i_plllock_sel_0 = 0; + cntr_bist.i_plllock_sel_1 = 0; + + return ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS, + cntr_bist.val); +} + +/** + * ice_cfg_cgu_pll_dis_sticky_bits_e825c - disable TS PLL sticky bits for E825-C + * @hw: pointer to the HW struct + * + * Configure the Clock Generation Unit TS PLL sticky bits so they don't latch on + * losing TS PLL lock, but always show current state. + * + * Return: 0 on success, other error codes when failed to read/write CGU + */ +static int ice_cfg_cgu_pll_dis_sticky_bits_e825c(struct ice_hw *hw) +{ + union tspll_bw_tdc_e825c bw_tdc; + int err; + + err = ice_read_cgu_reg_e82x(hw, TSPLL_BW_TDC_E825C, &bw_tdc.val); + if (err) + return err; + + bw_tdc.i_plllock_sel_1_0 = 0; + + return ice_write_cgu_reg_e82x(hw, TSPLL_BW_TDC_E825C, bw_tdc.val); +} + +/** + * ice_init_cgu_e82x - Initialize CGU with settings from firmware + * @hw: pointer to the HW structure + * + * Initialize the Clock Generation Unit of the E822 device. + * + * Return: 0 on success, other error codes when failed to read/write/cfg CGU + */ +static int ice_init_cgu_e82x(struct ice_hw *hw) +{ + struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info; + int err; + + /* Disable sticky lock detection so lock err reported is accurate */ + if (ice_is_e825c(hw)) + err = ice_cfg_cgu_pll_dis_sticky_bits_e825c(hw); + else + err = ice_cfg_cgu_pll_dis_sticky_bits_e82x(hw); + if (err) + return err; + + /* Configure the CGU PLL using the parameters from the function + * capabilities. + */ + if (ice_is_e825c(hw)) + err = ice_cfg_cgu_pll_e825c(hw, ts_info->time_ref, + (enum ice_clk_src)ts_info->clk_src); + else + err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref, + (enum ice_clk_src)ts_info->clk_src); + + return err; +} + +/** + * ice_ptp_tmr_cmd_to_src_reg - Convert to source timer command value + * @hw: pointer to HW struct * @cmd: Timer command * - * Prepare the source timer for an upcoming timer sync command. + * Return: the source timer command register value for the given PTP timer + * command. 
*/ -void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) +static u32 ice_ptp_tmr_cmd_to_src_reg(struct ice_hw *hw, + enum ice_ptp_tmr_cmd cmd) { - u32 cmd_val; - u8 tmr_idx; + u32 cmd_val, tmr_idx; + + switch (cmd) { + case ICE_PTP_INIT_TIME: + cmd_val = GLTSYN_CMD_INIT_TIME; + break; + case ICE_PTP_INIT_INCVAL: + cmd_val = GLTSYN_CMD_INIT_INCVAL; + break; + case ICE_PTP_ADJ_TIME: + cmd_val = GLTSYN_CMD_ADJ_TIME; + break; + case ICE_PTP_ADJ_TIME_AT_TIME: + cmd_val = GLTSYN_CMD_ADJ_INIT_TIME; + break; + case ICE_PTP_NOP: + case ICE_PTP_READ_TIME: + cmd_val = GLTSYN_CMD_READ_TIME; + break; + default: + dev_warn(ice_hw_to_dev(hw), + "Ignoring unrecognized timer command %u\n", cmd); + cmd_val = 0; + } tmr_idx = ice_get_ptp_src_clock_index(hw); - cmd_val = tmr_idx << SEL_CPK_SRC; + + return tmr_idx << SEL_CPK_SRC | cmd_val; +} + +/** + * ice_ptp_tmr_cmd_to_port_reg- Convert to port timer command value + * @hw: pointer to HW struct + * @cmd: Timer command + * + * Note that some hardware families use a different command register value for + * the PHY ports, while other hardware families use the same register values + * as the source timer. + * + * Return: the PHY port timer command register value for the given PTP timer + * command. + */ +static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw, + enum ice_ptp_tmr_cmd cmd) +{ + u32 cmd_val, tmr_idx; + + /* Certain hardware families share the same register values for the + * port register and source timer register. + */ + switch (hw->ptp.phy_model) { + case ICE_PHY_E810: + return ice_ptp_tmr_cmd_to_src_reg(hw, cmd) & TS_CMD_MASK_E810; + default: + break; + } switch (cmd) { case ICE_PTP_INIT_TIME: - cmd_val |= GLTSYN_CMD_INIT_TIME; + cmd_val = PHY_CMD_INIT_TIME; break; case ICE_PTP_INIT_INCVAL: - cmd_val |= GLTSYN_CMD_INIT_INCVAL; + cmd_val = PHY_CMD_INIT_INCVAL; break; case ICE_PTP_ADJ_TIME: - cmd_val |= GLTSYN_CMD_ADJ_TIME; + cmd_val = PHY_CMD_ADJ_TIME; break; case ICE_PTP_ADJ_TIME_AT_TIME: - cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME; + cmd_val = PHY_CMD_ADJ_TIME_AT_TIME; break; case ICE_PTP_READ_TIME: - cmd_val |= GLTSYN_CMD_READ_TIME; + cmd_val = PHY_CMD_READ_TIME; break; case ICE_PTP_NOP: + cmd_val = 0; break; + default: + dev_warn(ice_hw_to_dev(hw), + "Ignoring unrecognized timer command %u\n", cmd); + cmd_val = 0; } + tmr_idx = ice_get_ptp_src_clock_index(hw); + + return tmr_idx << SEL_PHY_SRC | cmd_val; +} + +/** + * ice_ptp_src_cmd - Prepare source timer for a timer command + * @hw: pointer to HW structure + * @cmd: Timer command + * + * Prepare the source timer for an upcoming timer sync command. + */ +void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) +{ + u32 cmd_val = ice_ptp_tmr_cmd_to_src_reg(hw, cmd); + wr32(hw, GLTSYN_CMD, cmd_val); } @@ -281,6 +874,1832 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw) ice_flush(hw); } +/* 56G PHY device functions + * + * The following functions operate on devices with the ETH 56G PHY. 
+ */ + +/** + * ice_write_phy_eth56g - Write a PHY port register + * @hw: pointer to the HW struct + * @phy_idx: PHY index + * @addr: PHY register address + * @val: Value to write + * + * Return: 0 on success, other error codes when failed to write to PHY + */ +static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr, + u32 val) +{ + struct ice_sbq_msg_input phy_msg; + int err; + + phy_msg.opcode = ice_sbq_msg_wr; + + phy_msg.msg_addr_low = lower_16_bits(addr); + phy_msg.msg_addr_high = upper_16_bits(addr); + + phy_msg.data = val; + phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx]; + + err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD); + + if (err) + ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n", + err); + + return err; +} + +/** + * ice_read_phy_eth56g - Read a PHY port register + * @hw: pointer to the HW struct + * @phy_idx: PHY index + * @addr: PHY register address + * @val: Value to write + * + * Return: 0 on success, other error codes when failed to read from PHY + */ +static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr, + u32 *val) +{ + struct ice_sbq_msg_input phy_msg; + int err; + + phy_msg.opcode = ice_sbq_msg_rd; + + phy_msg.msg_addr_low = lower_16_bits(addr); + phy_msg.msg_addr_high = upper_16_bits(addr); + + phy_msg.data = 0; + phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx]; + + err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n", + err); + return err; + } + + *val = phy_msg.data; + + return 0; +} + +/** + * ice_phy_res_address_eth56g - Calculate a PHY port register address + * @port: Port number to be written + * @res_type: resource type (register/memory) + * @offset: Offset from PHY port register base + * @addr: The result address + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + */ +static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type, + u32 offset, u32 *addr) +{ + u8 lane = port % ICE_PORTS_PER_QUAD; + u8 phy = ICE_GET_QUAD_NUM(port); + + if (res_type >= NUM_ETH56G_PHY_RES) + return -EINVAL; + + *addr = eth56g_phy_res[res_type].base[phy] + + lane * eth56g_phy_res[res_type].step + offset; + return 0; +} + +/** + * ice_write_port_eth56g - Write a PHY port register + * @hw: pointer to the HW struct + * @offset: PHY register offset + * @port: Port number + * @val: Value to write + * @res_type: resource type (register/memory) + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to write to PHY + */ +static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset, + u32 val, enum eth56g_res_type res_type) +{ + u8 phy_port = port % hw->ptp.ports_per_phy; + u8 phy_idx = port / hw->ptp.ports_per_phy; + u32 addr; + int err; + + if (port >= hw->ptp.num_lports) + return -EINVAL; + + err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr); + if (err) + return err; + + return ice_write_phy_eth56g(hw, phy_idx, addr, val); +} + +/** + * ice_read_port_eth56g - Read a PHY port register + * @hw: pointer to the HW struct + * @offset: PHY register offset + * @port: Port number + * @val: Value to write + * @res_type: resource type (register/memory) + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to read from PHY + */ +static int ice_read_port_eth56g(struct ice_hw *hw, u8 port, u32 offset, + u32 *val, enum eth56g_res_type res_type) +{ + u8 phy_port = 
port % hw->ptp.ports_per_phy; + u8 phy_idx = port / hw->ptp.ports_per_phy; + u32 addr; + int err; + + if (port >= hw->ptp.num_lports) + return -EINVAL; + + err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr); + if (err) + return err; + + return ice_read_phy_eth56g(hw, phy_idx, addr, val); +} + +/** + * ice_write_ptp_reg_eth56g - Write a PHY port register + * @hw: pointer to the HW struct + * @port: Port number to be written + * @offset: Offset from PHY port register base + * @val: Value to write + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to write to PHY + */ +static int ice_write_ptp_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset, + u32 val) +{ + return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_PTP); +} + +/** + * ice_write_mac_reg_eth56g - Write a MAC PHY port register + * parameter + * @hw: pointer to the HW struct + * @port: Port number to be written + * @offset: Offset from PHY port register base + * @val: Value to write + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to write to PHY + */ +static int ice_write_mac_reg_eth56g(struct ice_hw *hw, u8 port, u32 offset, + u32 val) +{ + return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_MAC); +} + +/** + * ice_write_xpcs_reg_eth56g - Write a PHY port register + * @hw: pointer to the HW struct + * @port: Port number to be written + * @offset: Offset from PHY port register base + * @val: Value to write + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to write to PHY + */ +static int ice_write_xpcs_reg_eth56g(struct ice_hw *hw, u8 port, u32 offset, + u32 val) +{ + return ice_write_port_eth56g(hw, port, offset, val, + ETH56G_PHY_REG_XPCS); +} + +/** + * ice_read_ptp_reg_eth56g - Read a PHY port register + * @hw: pointer to the HW struct + * @port: Port number to be read + * @offset: Offset from PHY port register base + * @val: Pointer to the value to read (out param) + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to read from PHY + */ +static int ice_read_ptp_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset, + u32 *val) +{ + return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_PTP); +} + +/** + * ice_read_mac_reg_eth56g - Read a PHY port register + * @hw: pointer to the HW struct + * @port: Port number to be read + * @offset: Offset from PHY port register base + * @val: Pointer to the value to read (out param) + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to read from PHY + */ +static int ice_read_mac_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset, + u32 *val) +{ + return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_MAC); +} + +/** + * ice_read_gpcs_reg_eth56g - Read a PHY port register + * @hw: pointer to the HW struct + * @port: Port number to be read + * @offset: Offset from PHY port register base + * @val: Pointer to the value to read (out param) + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to read from PHY + */ +static int ice_read_gpcs_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset, + u32 *val) +{ + return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_GPCS); +} + +/** + * ice_read_port_mem_eth56g - Read a PHY port memory location + * @hw: pointer to the HW struct + * 
@port: Port number to be read + * @offset: Offset from PHY port register base + * @val: Pointer to the value to read (out param) + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to read from PHY + */ +static int ice_read_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset, + u32 *val) +{ + return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_MEM_PTP); +} + +/** + * ice_write_port_mem_eth56g - Write a PHY port memory location + * @hw: pointer to the HW struct + * @port: Port number to be read + * @offset: Offset from PHY port register base + * @val: Pointer to the value to read (out param) + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to write to PHY + */ +static int ice_write_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset, + u32 val) +{ + return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_MEM_PTP); +} + +/** + * ice_is_64b_phy_reg_eth56g - Check if this is a 64bit PHY register + * @low_addr: the low address to check + * @high_addr: on return, contains the high address of the 64bit register + * + * Write the appropriate high register offset to use. + * + * Return: true if the provided low address is one of the known 64bit PHY values + * represented as two 32bit registers, false otherwise. + */ +static bool ice_is_64b_phy_reg_eth56g(u16 low_addr, u16 *high_addr) +{ + switch (low_addr) { + case PHY_REG_TX_TIMER_INC_PRE_L: + *high_addr = PHY_REG_TX_TIMER_INC_PRE_U; + return true; + case PHY_REG_RX_TIMER_INC_PRE_L: + *high_addr = PHY_REG_RX_TIMER_INC_PRE_U; + return true; + case PHY_REG_TX_CAPTURE_L: + *high_addr = PHY_REG_TX_CAPTURE_U; + return true; + case PHY_REG_RX_CAPTURE_L: + *high_addr = PHY_REG_RX_CAPTURE_U; + return true; + case PHY_REG_TOTAL_TX_OFFSET_L: + *high_addr = PHY_REG_TOTAL_TX_OFFSET_U; + return true; + case PHY_REG_TOTAL_RX_OFFSET_L: + *high_addr = PHY_REG_TOTAL_RX_OFFSET_U; + return true; + case PHY_REG_TX_MEMORY_STATUS_L: + *high_addr = PHY_REG_TX_MEMORY_STATUS_U; + return true; + default: + return false; + } +} + +/** + * ice_is_40b_phy_reg_eth56g - Check if this is a 40bit PHY register + * @low_addr: the low address to check + * @high_addr: on return, contains the high address of the 40bit value + * + * Write the appropriate high register offset to use. + * + * Return: true if the provided low address is one of the known 40bit PHY + * values split into two registers with the lower 8 bits in the low register and + * the upper 32 bits in the high register, false otherwise. + */ +static bool ice_is_40b_phy_reg_eth56g(u16 low_addr, u16 *high_addr) +{ + switch (low_addr) { + case PHY_REG_TIMETUS_L: + *high_addr = PHY_REG_TIMETUS_U; + return true; + case PHY_PCS_REF_TUS_L: + *high_addr = PHY_PCS_REF_TUS_U; + return true; + case PHY_PCS_REF_INC_L: + *high_addr = PHY_PCS_REF_INC_U; + return true; + default: + return false; + } +} + +/** + * ice_read_64b_phy_reg_eth56g - Read a 64bit value from PHY registers + * @hw: pointer to the HW struct + * @port: PHY port to read from + * @low_addr: offset of the lower register to read from + * @val: on return, the contents of the 64bit value from the PHY registers + * @res_type: resource type + * + * Check if the caller has specified a known 40 bit register offset and read + * the two registers associated with a 40bit value and return it in the val + * pointer. 
+ * + * Return: + * * %0 - success + * * %EINVAL - not a 64 bit register + * * %other - failed to read from PHY + */ +static int ice_read_64b_phy_reg_eth56g(struct ice_hw *hw, u8 port, u16 low_addr, + u64 *val, enum eth56g_res_type res_type) +{ + u16 high_addr; + u32 lo, hi; + int err; + + if (!ice_is_64b_phy_reg_eth56g(low_addr, &high_addr)) + return -EINVAL; + + err = ice_read_port_eth56g(hw, port, low_addr, &lo, res_type); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register %#08x\n, err %d", + low_addr, err); + return err; + } + + err = ice_read_port_eth56g(hw, port, high_addr, &hi, res_type); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register %#08x\n, err %d", + high_addr, err); + return err; + } + + *val = ((u64)hi << 32) | lo; + + return 0; +} + +/** + * ice_read_64b_ptp_reg_eth56g - Read a 64bit value from PHY registers + * @hw: pointer to the HW struct + * @port: PHY port to read from + * @low_addr: offset of the lower register to read from + * @val: on return, the contents of the 64bit value from the PHY registers + * + * Check if the caller has specified a known 40 bit register offset and read + * the two registers associated with a 40bit value and return it in the val + * pointer. + * + * Return: + * * %0 - success + * * %EINVAL - not a 64 bit register + * * %other - failed to read from PHY + */ +static int ice_read_64b_ptp_reg_eth56g(struct ice_hw *hw, u8 port, u16 low_addr, + u64 *val) +{ + return ice_read_64b_phy_reg_eth56g(hw, port, low_addr, val, + ETH56G_PHY_REG_PTP); +} + +/** + * ice_write_40b_phy_reg_eth56g - Write a 40b value to the PHY + * @hw: pointer to the HW struct + * @port: port to write to + * @low_addr: offset of the low register + * @val: 40b value to write + * @res_type: resource type + * + * Check if the caller has specified a known 40 bit register offset and write + * provided 40b value to the two associated registers by splitting it up into + * two chunks, the lower 8 bits and the upper 32 bits. + * + * Return: + * * %0 - success + * * %EINVAL - not a 40 bit register + * * %other - failed to write to PHY + */ +static int ice_write_40b_phy_reg_eth56g(struct ice_hw *hw, u8 port, + u16 low_addr, u64 val, + enum eth56g_res_type res_type) +{ + u16 high_addr; + u32 lo, hi; + int err; + + if (!ice_is_40b_phy_reg_eth56g(low_addr, &high_addr)) + return -EINVAL; + + lo = FIELD_GET(P_REG_40B_LOW_M, val); + hi = (u32)(val >> P_REG_40B_HIGH_S); + + err = ice_write_port_eth56g(hw, port, low_addr, lo, res_type); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d", + low_addr, err); + return err; + } + + err = ice_write_port_eth56g(hw, port, high_addr, hi, res_type); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d", + high_addr, err); + return err; + } + + return 0; +} + +/** + * ice_write_40b_ptp_reg_eth56g - Write a 40b value to the PHY + * @hw: pointer to the HW struct + * @port: port to write to + * @low_addr: offset of the low register + * @val: 40b value to write + * + * Check if the caller has specified a known 40 bit register offset and write + * provided 40b value to the two associated registers by splitting it up into + * two chunks, the lower 8 bits and the upper 32 bits. 
+ * + * Return: + * * %0 - success + * * %EINVAL - not a 40 bit register + * * %other - failed to write to PHY + */ +static int ice_write_40b_ptp_reg_eth56g(struct ice_hw *hw, u8 port, + u16 low_addr, u64 val) +{ + return ice_write_40b_phy_reg_eth56g(hw, port, low_addr, val, + ETH56G_PHY_REG_PTP); +} + +/** + * ice_write_64b_phy_reg_eth56g - Write a 64bit value to PHY registers + * @hw: pointer to the HW struct + * @port: PHY port to read from + * @low_addr: offset of the lower register to read from + * @val: the contents of the 64bit value to write to PHY + * @res_type: resource type + * + * Check if the caller has specified a known 64 bit register offset and write + * the 64bit value to the two associated 32bit PHY registers. + * + * Return: + * * %0 - success + * * %EINVAL - not a 64 bit register + * * %other - failed to write to PHY + */ +static int ice_write_64b_phy_reg_eth56g(struct ice_hw *hw, u8 port, + u16 low_addr, u64 val, + enum eth56g_res_type res_type) +{ + u16 high_addr; + u32 lo, hi; + int err; + + if (!ice_is_64b_phy_reg_eth56g(low_addr, &high_addr)) + return -EINVAL; + + lo = lower_32_bits(val); + hi = upper_32_bits(val); + + err = ice_write_port_eth56g(hw, port, low_addr, lo, res_type); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d", + low_addr, err); + return err; + } + + err = ice_write_port_eth56g(hw, port, high_addr, hi, res_type); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d", + high_addr, err); + return err; + } + + return 0; +} + +/** + * ice_write_64b_ptp_reg_eth56g - Write a 64bit value to PHY registers + * @hw: pointer to the HW struct + * @port: PHY port to read from + * @low_addr: offset of the lower register to read from + * @val: the contents of the 64bit value to write to PHY + * + * Check if the caller has specified a known 64 bit register offset and write + * the 64bit value to the two associated 32bit PHY registers. + * + * Return: + * * %0 - success + * * %EINVAL - not a 64 bit register + * * %other - failed to write to PHY + */ +static int ice_write_64b_ptp_reg_eth56g(struct ice_hw *hw, u8 port, + u16 low_addr, u64 val) +{ + return ice_write_64b_phy_reg_eth56g(hw, port, low_addr, val, + ETH56G_PHY_REG_PTP); +} + +/** + * ice_read_ptp_tstamp_eth56g - Read a PHY timestamp out of the port memory + * @hw: pointer to the HW struct + * @port: the port to read from + * @idx: the timestamp index to read + * @tstamp: on return, the 40bit timestamp value + * + * Read a 40bit timestamp value out of the two associated entries in the + * port memory block of the internal PHYs of the 56G devices. + * + * Return: + * * %0 - success + * * %other - failed to read from PHY + */ +static int ice_read_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx, + u64 *tstamp) +{ + u16 lo_addr, hi_addr; + u32 lo, hi; + int err; + + lo_addr = (u16)PHY_TSTAMP_L(idx); + hi_addr = (u16)PHY_TSTAMP_U(idx); + + err = ice_read_port_mem_eth56g(hw, port, lo_addr, &lo); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n", + err); + return err; + } + + err = ice_read_port_mem_eth56g(hw, port, hi_addr, &hi); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n", + err); + return err; + } + + /* For 56G based internal PHYs, the timestamp is reported with the + * lower 8 bits in the low register, and the upper 32 bits in the high + * register. 
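The comment in ice_read_ptp_tstamp_eth56g() below fully specifies the ETH56G timestamp layout: 8 bits in the low register, 32 bits in the high register, 40 bits total. A self-contained round trip of that split, with the shift and mask written out literally in place of TS_PHY_HIGH_S and TS_PHY_LOW_M:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint64_t tstamp = 0x12345678abULL;  /* sample 40-bit timestamp */
        /* what reads of PHY_TSTAMP_L(idx) / PHY_TSTAMP_U(idx) would return */
        uint32_t lo = tstamp & 0xff;
        uint32_t hi = tstamp >> 8;
        /* reassembly mirroring ice_read_ptp_tstamp_eth56g() */
        uint64_t rebuilt = (uint64_t)hi << 8 | (lo & 0xff);

        assert(rebuilt == tstamp);
        printf("hi %#010x lo %#04x -> %#012llx\n",
               hi, lo, (unsigned long long)rebuilt);
        return 0;
}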
+ */ + *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M); + + return 0; +} + +/** + * ice_clear_ptp_tstamp_eth56g - Clear a timestamp from the quad block + * @hw: pointer to the HW struct + * @port: the quad to read from + * @idx: the timestamp index to reset + * + * Read and then forcibly clear the timestamp index to ensure the valid bit is + * cleared and the timestamp status bit is reset in the PHY port memory of + * internal PHYs of the 56G devices. + * + * To directly clear the contents of the timestamp block entirely, discarding + * all timestamp data at once, software should instead use + * ice_ptp_reset_ts_memory_quad_eth56g(). + * + * This function should only be called on an idx whose bit is set according to + * ice_get_phy_tx_tstamp_ready(). + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_clear_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx) +{ + u64 unused_tstamp; + u16 lo_addr; + int err; + + /* Read the timestamp register to ensure the timestamp status bit is + * cleared. + */ + err = ice_read_ptp_tstamp_eth56g(hw, port, idx, &unused_tstamp); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read the PHY timestamp register for port %u, idx %u, err %d\n", + port, idx, err); + } + + lo_addr = (u16)PHY_TSTAMP_L(idx); + + err = ice_write_port_mem_eth56g(hw, port, lo_addr, 0); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register for port %u, idx %u, err %d\n", + port, idx, err); + return err; + } + + return 0; +} + +/** + * ice_ptp_reset_ts_memory_eth56g - Clear all timestamps from the port block + * @hw: pointer to the HW struct + */ +static void ice_ptp_reset_ts_memory_eth56g(struct ice_hw *hw) +{ + unsigned int port; + + for (port = 0; port < hw->ptp.num_lports; port++) { + ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_MEMORY_STATUS_L, + 0); + ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_MEMORY_STATUS_U, + 0); + } +} + +/** + * ice_ptp_prep_port_time_eth56g - Prepare one PHY port with initial time + * @hw: pointer to the HW struct + * @port: port number + * @time: time to initialize the PHY port clocks to + * + * Write a new initial time value into registers of a specific PHY port. + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_ptp_prep_port_time_eth56g(struct ice_hw *hw, u8 port, + u64 time) +{ + int err; + + /* Tx case */ + err = ice_write_64b_ptp_reg_eth56g(hw, port, PHY_REG_TX_TIMER_INC_PRE_L, + time); + if (err) + return err; + + /* Rx case */ + return ice_write_64b_ptp_reg_eth56g(hw, port, + PHY_REG_RX_TIMER_INC_PRE_L, time); +} + +/** + * ice_ptp_prep_phy_time_eth56g - Prepare PHY port with initial time + * @hw: pointer to the HW struct + * @time: Time to initialize the PHY port clocks to + * + * Program the PHY port registers with a new initial time value. The port + * clock will be initialized once the driver issues an ICE_PTP_INIT_TIME sync + * command. The time value is the upper 32 bits of the PHY timer, usually in + * units of nominal nanoseconds. + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_ptp_prep_phy_time_eth56g(struct ice_hw *hw, u32 time) +{ + u64 phy_time; + u8 port; + + /* The time represents the upper 32 bits of the PHY timer, so we need + * to shift to account for this when programming. 
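ice_ptp_prep_phy_time_eth56g() shifts the initial time left by 32 because the PHY timer is a 64-bit word whose upper half is (nominal) nanoseconds and whose lower half is a sub-nanosecond fraction; the same convention is behind the (s64)adj << 32 in ice_ptp_prep_phy_adj_eth56g() further down. A tiny illustration of that 32.32 layout:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* upper 32 bits: nominal nanoseconds, lower 32 bits: fraction */
        uint32_t init_time = 1000;              /* value passed to the PHY */
        uint64_t phy_time = (uint64_t)init_time << 32;

        printf("timer word %#018llx = %u ns + %u/2^32 ns\n",
               (unsigned long long)phy_time,
               (uint32_t)(phy_time >> 32), (uint32_t)phy_time);
        return 0;
}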
+ */ + phy_time = (u64)time << 32; + + for (port = 0; port < hw->ptp.num_lports; port++) { + int err; + + err = ice_ptp_prep_port_time_eth56g(hw, port, phy_time); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n", + port, err); + return err; + } + } + + return 0; +} + +/** + * ice_ptp_prep_port_adj_eth56g - Prepare a single port for time adjust + * @hw: pointer to HW struct + * @port: Port number to be programmed + * @time: time in cycles to adjust the port clocks + * + * Program the port for an atomic adjustment by writing the Tx and Rx timer + * registers. The atomic adjustment won't be completed until the driver issues + * an ICE_PTP_ADJ_TIME command. + * + * Note that time is not in units of nanoseconds. It is in clock time + * including the lower sub-nanosecond portion of the port timer. + * + * Negative adjustments are supported using 2s complement arithmetic. + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_ptp_prep_port_adj_eth56g(struct ice_hw *hw, u8 port, s64 time) +{ + u32 l_time, u_time; + int err; + + l_time = lower_32_bits(time); + u_time = upper_32_bits(time); + + /* Tx case */ + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_TIMER_INC_PRE_L, + l_time); + if (err) + goto exit_err; + + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_TIMER_INC_PRE_U, + u_time); + if (err) + goto exit_err; + + /* Rx case */ + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_TIMER_INC_PRE_L, + l_time); + if (err) + goto exit_err; + + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_TIMER_INC_PRE_U, + u_time); + if (err) + goto exit_err; + + return 0; + +exit_err: + ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n", + port, err); + return err; +} + +/** + * ice_ptp_prep_phy_adj_eth56g - Prep PHY ports for a time adjustment + * @hw: pointer to HW struct + * @adj: adjustment in nanoseconds + * + * Prepare the PHY ports for an atomic time adjustment by programming the PHY + * Tx and Rx port registers. The actual adjustment is completed by issuing an + * ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command. + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_ptp_prep_phy_adj_eth56g(struct ice_hw *hw, s32 adj) +{ + s64 cycles; + u8 port; + + /* The port clock supports adjustment of the sub-nanosecond portion of + * the clock (lowest 32 bits). We shift the provided adjustment in + * nanoseconds by 32 to calculate the appropriate adjustment to program + * into the PHY ports. + */ + cycles = (s64)adj << 32; + + for (port = 0; port < hw->ptp.num_lports; port++) { + int err; + + err = ice_ptp_prep_port_adj_eth56g(hw, port, cycles); + if (err) + return err; + } + + return 0; +} + +/** + * ice_ptp_prep_phy_incval_eth56g - Prepare PHY ports for time adjustment + * @hw: pointer to HW struct + * @incval: new increment value to prepare + * + * Prepare each of the PHY ports for a new increment value by programming the + * port's TIMETUS registers. The new increment value will be updated after + * issuing an ICE_PTP_INIT_INCVAL command. 
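For context on the TIMETUS value programmed by ice_ptp_prep_phy_incval_eth56g(): the increment value is understood here as the PHY clock period in the same 32.32 fixed-point nanosecond format, added to the port timer each clock cycle. A sketch computing such a value for a purely hypothetical 800 MHz clock (the real rate depends on the configured TIME_REF):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* hypothetical 800 MHz PHY clock, purely for illustration */
        double period_ns = 1e9 / 800e6;                         /* 1.25 ns */
        uint64_t incval = (uint64_t)(period_ns * 4294967296.0); /* x 2^32 */

        printf("period %.4f ns -> incval %#llx (fits the 40-bit TIMETUS)\n",
               period_ns, (unsigned long long)incval);
        return 0;
}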
+ * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_ptp_prep_phy_incval_eth56g(struct ice_hw *hw, u64 incval) +{ + u8 port; + + for (port = 0; port < hw->ptp.num_lports; port++) { + int err; + + err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_REG_TIMETUS_L, + incval); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n", + port, err); + return err; + } + } + + return 0; +} + +/** + * ice_ptp_read_port_capture_eth56g - Read a port's local time capture + * @hw: pointer to HW struct + * @port: Port number to read + * @tx_ts: on return, the Tx port time capture + * @rx_ts: on return, the Rx port time capture + * + * Read the port's Tx and Rx local time capture values. + * + * Return: + * * %0 - success + * * %other - failed to read from PHY + */ +static int ice_ptp_read_port_capture_eth56g(struct ice_hw *hw, u8 port, + u64 *tx_ts, u64 *rx_ts) +{ + int err; + + /* Tx case */ + err = ice_read_64b_ptp_reg_eth56g(hw, port, PHY_REG_TX_CAPTURE_L, + tx_ts); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n", + err); + return err; + } + + ice_debug(hw, ICE_DBG_PTP, "tx_init = %#016llx\n", *tx_ts); + + /* Rx case */ + err = ice_read_64b_ptp_reg_eth56g(hw, port, PHY_REG_RX_CAPTURE_L, + rx_ts); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n", + err); + return err; + } + + ice_debug(hw, ICE_DBG_PTP, "rx_init = %#016llx\n", *rx_ts); + + return 0; +} + +/** + * ice_ptp_write_port_cmd_eth56g - Prepare a single PHY port for a timer command + * @hw: pointer to HW struct + * @port: Port to which cmd has to be sent + * @cmd: Command to be sent to the port + * + * Prepare the requested port for an upcoming timer sync command. + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_ptp_write_port_cmd_eth56g(struct ice_hw *hw, u8 port, + enum ice_ptp_tmr_cmd cmd) +{ + u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd); + int err; + + /* Tx case */ + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_TMR_CMD, val); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n", + err); + return err; + } + + /* Rx case */ + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_TMR_CMD, val); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n", + err); + return err; + } + + return 0; +} + +/** + * ice_phy_get_speed_eth56g - Get link speed based on PHY link type + * @li: pointer to link information struct + * + * Return: simplified ETH56G PHY speed + */ +static enum ice_eth56g_link_spd +ice_phy_get_speed_eth56g(struct ice_link_status *li) +{ + u16 speed = ice_get_link_speed_based_on_phy_type(li->phy_type_low, + li->phy_type_high); + + switch (speed) { + case ICE_AQ_LINK_SPEED_1000MB: + return ICE_ETH56G_LNK_SPD_1G; + case ICE_AQ_LINK_SPEED_2500MB: + return ICE_ETH56G_LNK_SPD_2_5G; + case ICE_AQ_LINK_SPEED_10GB: + return ICE_ETH56G_LNK_SPD_10G; + case ICE_AQ_LINK_SPEED_25GB: + return ICE_ETH56G_LNK_SPD_25G; + case ICE_AQ_LINK_SPEED_40GB: + return ICE_ETH56G_LNK_SPD_40G; + case ICE_AQ_LINK_SPEED_50GB: + switch (li->phy_type_low) { + case ICE_PHY_TYPE_LOW_50GBASE_SR: + case ICE_PHY_TYPE_LOW_50GBASE_FR: + case ICE_PHY_TYPE_LOW_50GBASE_LR: + case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: + case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: + case ICE_PHY_TYPE_LOW_50G_AUI1: + return ICE_ETH56G_LNK_SPD_50G; + default: + return ICE_ETH56G_LNK_SPD_50G2; + } + case ICE_AQ_LINK_SPEED_100GB: + if (li->phy_type_high || + 
li->phy_type_low == ICE_PHY_TYPE_LOW_100GBASE_SR2) + return ICE_ETH56G_LNK_SPD_100G2; + else + return ICE_ETH56G_LNK_SPD_100G; + default: + return ICE_ETH56G_LNK_SPD_1G; + } +} + +/** + * ice_phy_cfg_parpcs_eth56g - Configure TUs per PAR/PCS clock cycle + * @hw: pointer to the HW struct + * @port: port to configure + * + * Configure the number of TUs for the PAR and PCS clocks used as part of the + * timestamp calibration process. + * + * Return: + * * %0 - success + * * %other - PHY read/write failed + */ +static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port) +{ + u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1); + u32 val; + int err; + + err = ice_write_xpcs_reg_eth56g(hw, port, PHY_VENDOR_TXLANE_THRESH, + ICE_ETH56G_NOMINAL_THRESH4); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read VENDOR_TXLANE_THRESH, status: %d", + err); + return err; + } + + switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) { + case ICE_ETH56G_LNK_SPD_1G: + case ICE_ETH56G_LNK_SPD_2_5G: + err = ice_read_ptp_reg_eth56g(hw, port_blk, + PHY_GPCS_CONFIG_REG0, &val); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read PHY_GPCS_CONFIG_REG0, status: %d", + err); + return err; + } + + val &= ~PHY_GPCS_CONFIG_REG0_TX_THR_M; + val |= FIELD_PREP(PHY_GPCS_CONFIG_REG0_TX_THR_M, + ICE_ETH56G_NOMINAL_TX_THRESH); + + err = ice_write_ptp_reg_eth56g(hw, port_blk, + PHY_GPCS_CONFIG_REG0, val); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_GPCS_CONFIG_REG0, status: %d", + err); + return err; + } + break; + default: + break; + } + + err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_PCS_REF_TUS_L, + ICE_ETH56G_NOMINAL_PCS_REF_TUS); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_PCS_REF_TUS, status: %d", + err); + return err; + } + + err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_PCS_REF_INC_L, + ICE_ETH56G_NOMINAL_PCS_REF_INC); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_PCS_REF_INC, status: %d", + err); + return err; + } + + return 0; +} + +/** + * ice_phy_cfg_ptp_1step_eth56g - Configure 1-step PTP settings + * @hw: Pointer to the HW struct + * @port: Port to configure + * + * Return: + * * %0 - success + * * %other - PHY read/write failed + */ +int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port) +{ + u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1); + u8 blk_port = port & (ICE_PORTS_PER_QUAD - 1); + bool enable, sfd_ena; + u32 val, peer_delay; + int err; + + enable = hw->ptp.phy.eth56g.onestep_ena; + peer_delay = hw->ptp.phy.eth56g.peer_delay; + sfd_ena = hw->ptp.phy.eth56g.sfd_ena; + + /* PHY_PTP_1STEP_CONFIG */ + err = ice_read_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, &val); + if (err) + return err; + + if (enable) + val |= blk_port; + else + val &= ~blk_port; + + val &= ~(PHY_PTP_1STEP_T1S_UP64_M | PHY_PTP_1STEP_T1S_DELTA_M); + + err = ice_write_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, val); + if (err) + return err; + + /* PHY_PTP_1STEP_PEER_DELAY */ + val = FIELD_PREP(PHY_PTP_1STEP_PD_DELAY_M, peer_delay); + if (peer_delay) + val |= PHY_PTP_1STEP_PD_ADD_PD_M; + val |= PHY_PTP_1STEP_PD_DLY_V_M; + err = ice_write_ptp_reg_eth56g(hw, port_blk, + PHY_PTP_1STEP_PEER_DELAY(blk_port), val); + if (err) + return err; + + val &= ~PHY_PTP_1STEP_PD_DLY_V_M; + err = ice_write_ptp_reg_eth56g(hw, port_blk, + PHY_PTP_1STEP_PEER_DELAY(blk_port), val); + if (err) + return err; + + /* PHY_MAC_XIF_MODE */ + err = ice_read_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, &val); + if (err) + return err; + + val &= ~(PHY_MAC_XIF_1STEP_ENA_M | 
PHY_MAC_XIF_TS_BIN_MODE_M | + PHY_MAC_XIF_TS_SFD_ENA_M | PHY_MAC_XIF_GMII_TS_SEL_M); + + switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) { + case ICE_ETH56G_LNK_SPD_1G: + case ICE_ETH56G_LNK_SPD_2_5G: + val |= PHY_MAC_XIF_GMII_TS_SEL_M; + break; + default: + break; + } + + val |= FIELD_PREP(PHY_MAC_XIF_1STEP_ENA_M, enable) | + FIELD_PREP(PHY_MAC_XIF_TS_BIN_MODE_M, enable) | + FIELD_PREP(PHY_MAC_XIF_TS_SFD_ENA_M, sfd_ena); + + return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, val); +} + +/** + * mul_u32_u32_fx_q9 - Multiply two u32 fixed point Q9 values + * @a: multiplier value + * @b: multiplicand value + * + * Return: result of multiplication + */ +static u32 mul_u32_u32_fx_q9(u32 a, u32 b) +{ + return (u32)(((u64)a * b) >> ICE_ETH56G_MAC_CFG_FRAC_W); +} + +/** + * add_u32_u32_fx - Add two u32 fixed point values and discard overflow + * @a: first value + * @b: second value + * + * Return: result of addition + */ +static u32 add_u32_u32_fx(u32 a, u32 b) +{ + return lower_32_bits(((u64)a + b)); +} + +/** + * ice_ptp_calc_bitslip_eth56g - Calculate bitslip value + * @hw: pointer to the HW struct + * @port: port to configure + * @bs: bitslip multiplier + * @fc: FC-FEC enabled + * @rs: RS-FEC enabled + * @spd: link speed + * + * Return: calculated bitslip value + */ +static u32 ice_ptp_calc_bitslip_eth56g(struct ice_hw *hw, u8 port, u32 bs, + bool fc, bool rs, + enum ice_eth56g_link_spd spd) +{ + u8 port_offset = port & (ICE_PORTS_PER_QUAD - 1); + u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1); + u32 bitslip; + int err; + + if (!bs || rs) + return 0; + + if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G) + err = ice_read_gpcs_reg_eth56g(hw, port, PHY_GPCS_BITSLIP, + &bitslip); + else + err = ice_read_ptp_reg_eth56g(hw, port_blk, + PHY_REG_SD_BIT_SLIP(port_offset), + &bitslip); + if (err) + return 0; + + if (spd == ICE_ETH56G_LNK_SPD_1G && !bitslip) { + /* Bitslip register value of 0 corresponds to 10 so substitute + * it for calculations + */ + bitslip = 10; + } else if (spd == ICE_ETH56G_LNK_SPD_10G || + spd == ICE_ETH56G_LNK_SPD_25G) { + if (fc) + bitslip = bitslip * 2 + 32; + else + bitslip = (u32)((s32)bitslip * -1 + 20); + } + + bitslip <<= ICE_ETH56G_MAC_CFG_FRAC_W; + return mul_u32_u32_fx_q9(bitslip, bs); +} + +/** + * ice_ptp_calc_deskew_eth56g - Calculate deskew value + * @hw: pointer to the HW struct + * @port: port to configure + * @ds: deskew multiplier + * @rs: RS-FEC enabled + * @spd: link speed + * + * Return: calculated deskew value + */ +static u32 ice_ptp_calc_deskew_eth56g(struct ice_hw *hw, u8 port, u32 ds, + bool rs, enum ice_eth56g_link_spd spd) +{ + u32 deskew_i, deskew_f; + int err; + + if (!ds) + return 0; + + read_poll_timeout(ice_read_ptp_reg_eth56g, err, + FIELD_GET(PHY_REG_DESKEW_0_VALID, deskew_i), 500, + 50 * USEC_PER_MSEC, false, hw, port, PHY_REG_DESKEW_0, + &deskew_i); + if (err) + return err; + + deskew_f = FIELD_GET(PHY_REG_DESKEW_0_RLEVEL_FRAC, deskew_i); + deskew_i = FIELD_GET(PHY_REG_DESKEW_0_RLEVEL, deskew_i); + + if (rs && spd == ICE_ETH56G_LNK_SPD_50G2) + ds = 0x633; /* 3.1 */ + else if (rs && spd == ICE_ETH56G_LNK_SPD_100G) + ds = 0x31b; /* 1.552 */ + + deskew_i = FIELD_PREP(ICE_ETH56G_MAC_CFG_RX_OFFSET_INT, deskew_i); + /* Shift 3 fractional bits to the end of the integer part */ + deskew_f <<= ICE_ETH56G_MAC_CFG_FRAC_W - PHY_REG_DESKEW_0_RLEVEL_FRAC_W; + return mul_u32_u32_fx_q9(deskew_i | deskew_f, ds); +} + +/** + * ice_phy_set_offsets_eth56g - Set Tx/Rx offset values + * @hw: pointer to the HW struct + 
* @port: port to configure + * @spd: link speed + * @cfg: structure to store output values + * @fc: FC-FEC enabled + * @rs: RS-FEC enabled + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_phy_set_offsets_eth56g(struct ice_hw *hw, u8 port, + enum ice_eth56g_link_spd spd, + const struct ice_eth56g_mac_reg_cfg *cfg, + bool fc, bool rs) +{ + u32 rx_offset, tx_offset, bs_ds; + bool onestep, sfd; + + onestep = hw->ptp.phy.eth56g.onestep_ena; + sfd = hw->ptp.phy.eth56g.sfd_ena; + bs_ds = cfg->rx_offset.bs_ds; + + if (fc) + rx_offset = cfg->rx_offset.fc; + else if (rs) + rx_offset = cfg->rx_offset.rs; + else + rx_offset = cfg->rx_offset.no_fec; + + rx_offset = add_u32_u32_fx(rx_offset, cfg->rx_offset.serdes); + if (sfd) + rx_offset = add_u32_u32_fx(rx_offset, cfg->rx_offset.sfd); + + if (spd < ICE_ETH56G_LNK_SPD_40G) + bs_ds = ice_ptp_calc_bitslip_eth56g(hw, port, bs_ds, fc, rs, + spd); + else + bs_ds = ice_ptp_calc_deskew_eth56g(hw, port, bs_ds, rs, spd); + rx_offset = add_u32_u32_fx(rx_offset, bs_ds); + rx_offset &= ICE_ETH56G_MAC_CFG_RX_OFFSET_INT | + ICE_ETH56G_MAC_CFG_RX_OFFSET_FRAC; + + if (fc) + tx_offset = cfg->tx_offset.fc; + else if (rs) + tx_offset = cfg->tx_offset.rs; + else + tx_offset = cfg->tx_offset.no_fec; + tx_offset += cfg->tx_offset.serdes + cfg->tx_offset.sfd * sfd + + cfg->tx_offset.onestep * onestep; + + ice_write_mac_reg_eth56g(hw, port, PHY_MAC_RX_OFFSET, rx_offset); + return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_TX_OFFSET, tx_offset); +} + +/** + * ice_phy_cfg_mac_eth56g - Configure MAC for PTP + * @hw: Pointer to the HW struct + * @port: Port to configure + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_phy_cfg_mac_eth56g(struct ice_hw *hw, u8 port) +{ + const struct ice_eth56g_mac_reg_cfg *cfg; + enum ice_eth56g_link_spd spd; + struct ice_link_status *li; + bool fc = false; + bool rs = false; + bool onestep; + u32 val; + int err; + + onestep = hw->ptp.phy.eth56g.onestep_ena; + li = &hw->port_info->phy.link_info; + spd = ice_phy_get_speed_eth56g(li); + if (!!(li->an_info & ICE_AQ_FEC_EN)) { + if (spd == ICE_ETH56G_LNK_SPD_10G) { + fc = true; + } else { + fc = !!(li->fec_info & ICE_AQ_LINK_25G_KR_FEC_EN); + rs = !!(li->fec_info & ~ICE_AQ_LINK_25G_KR_FEC_EN); + } + } + cfg = &eth56g_mac_cfg[spd]; + + err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_RX_MODULO, 0); + if (err) + return err; + + err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_TX_MODULO, 0); + if (err) + return err; + + val = FIELD_PREP(PHY_MAC_TSU_CFG_TX_MODE_M, + cfg->tx_mode.def + rs * cfg->tx_mode.rs) | + FIELD_PREP(PHY_MAC_TSU_CFG_TX_MII_MK_DLY_M, cfg->tx_mk_dly) | + FIELD_PREP(PHY_MAC_TSU_CFG_TX_MII_CW_DLY_M, + cfg->tx_cw_dly.def + + onestep * cfg->tx_cw_dly.onestep) | + FIELD_PREP(PHY_MAC_TSU_CFG_RX_MODE_M, + cfg->rx_mode.def + rs * cfg->rx_mode.rs) | + FIELD_PREP(PHY_MAC_TSU_CFG_RX_MII_MK_DLY_M, + cfg->rx_mk_dly.def + rs * cfg->rx_mk_dly.rs) | + FIELD_PREP(PHY_MAC_TSU_CFG_RX_MII_CW_DLY_M, + cfg->rx_cw_dly.def + rs * cfg->rx_cw_dly.rs) | + FIELD_PREP(PHY_MAC_TSU_CFG_BLKS_PER_CLK_M, cfg->blks_per_clk); + err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_TSU_CONFIG, val); + if (err) + return err; + + err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_BLOCKTIME, + cfg->blktime); + if (err) + return err; + + err = ice_phy_set_offsets_eth56g(hw, port, spd, cfg, fc, rs); + if (err) + return err; + + if (spd == ICE_ETH56G_LNK_SPD_25G && !rs) + val = 0; + else + val = cfg->mktime; + + return ice_write_mac_reg_eth56g(hw,
port, PHY_MAC_MARKERTIME, val); +} + +/** + * ice_phy_cfg_intr_eth56g - Configure TX timestamp interrupt + * @hw: pointer to the HW struct + * @port: the timestamp port + * @ena: enable or disable interrupt + * @threshold: interrupt threshold + * + * Configure TX timestamp interrupt for the specified port + * + * Return: + * * %0 - success + * * %other - PHY read/write failed + */ +int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold) +{ + int err; + u32 val; + + err = ice_read_ptp_reg_eth56g(hw, port, PHY_REG_TS_INT_CONFIG, &val); + if (err) + return err; + + if (ena) { + val |= PHY_TS_INT_CONFIG_ENA_M; + val &= ~PHY_TS_INT_CONFIG_THRESHOLD_M; + val |= FIELD_PREP(PHY_TS_INT_CONFIG_THRESHOLD_M, threshold); + } else { + val &= ~PHY_TS_INT_CONFIG_ENA_M; + } + + return ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TS_INT_CONFIG, val); +} + +/** + * ice_read_phy_and_phc_time_eth56g - Simultaneously capture PHC and PHY time + * @hw: pointer to the HW struct + * @port: the PHY port to read + * @phy_time: on return, the 64bit PHY timer value + * @phc_time: on return, the lower 64bits of PHC time + * + * Issue a ICE_PTP_READ_TIME timer command to simultaneously capture the PHY + * and PHC timer values. + * + * Return: + * * %0 - success + * * %other - PHY read/write failed + */ +static int ice_read_phy_and_phc_time_eth56g(struct ice_hw *hw, u8 port, + u64 *phy_time, u64 *phc_time) +{ + u64 tx_time, rx_time; + u32 zo, lo; + u8 tmr_idx; + int err; + + tmr_idx = ice_get_ptp_src_clock_index(hw); + + /* Prepare the PHC timer for a ICE_PTP_READ_TIME capture command */ + ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME); + + /* Prepare the PHY timer for a ICE_PTP_READ_TIME capture command */ + err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_READ_TIME); + if (err) + return err; + + /* Issue the sync to start the ICE_PTP_READ_TIME capture */ + ice_ptp_exec_tmr_cmd(hw); + + /* Read the captured PHC time from the shadow time registers */ + zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx)); + lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx)); + *phc_time = (u64)lo << 32 | zo; + + /* Read the captured PHY time from the PHY shadow registers */ + err = ice_ptp_read_port_capture_eth56g(hw, port, &tx_time, &rx_time); + if (err) + return err; + + /* If the PHY Tx and Rx timers don't match, log a warning message. + * Note that this should not happen in normal circumstances since the + * driver always programs them together. + */ + if (tx_time != rx_time) + dev_warn(ice_hw_to_dev(hw), "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n", + port, tx_time, rx_time); + + *phy_time = tx_time; + + return 0; +} + +/** + * ice_sync_phy_timer_eth56g - Synchronize the PHY timer with PHC timer + * @hw: pointer to the HW struct + * @port: the PHY port to synchronize + * + * Perform an adjustment to ensure that the PHY and PHC timers are in sync. + * This is done by issuing a ICE_PTP_READ_TIME command which triggers a + * simultaneous read of the PHY timer and PHC timer. Then we use the + * difference to calculate an appropriate 2s complement addition to add + * to the PHY timer in order to ensure it reads the same value as the + * primary PHC timer. 
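The 2s complement trick described above is ordinary unsigned wraparound: the difference is computed in u64 and handed to the port as a signed adjustment, so a PHY timer that is ahead of the PHC produces a value the hardware treats as a negative offset. A minimal illustration of the arithmetic (standalone sketch assuming the kernel's u64/s64 types; not taken from the driver):

    /* PHY timer 100 ns ahead of the PHC */
    u64 phc_time = 1000;
    u64 phy_time = 1100;
    u64 difference = phc_time - phy_time; /* wraps to 0xFFFFFFFFFFFFFF9C */
    s64 adj = (s64)difference;            /* reinterpreted as -100 */
    /* phy_time + adj == phc_time, which is what the port applies on the
     * next ICE_PTP_ADJ_TIME command.
     */
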
+ * + * Return: + * * %0 - success + * * %-EBUSY- failed to acquire PTP semaphore + * * %other - PHY read/write failed + */ +static int ice_sync_phy_timer_eth56g(struct ice_hw *hw, u8 port) +{ + u64 phc_time, phy_time, difference; + int err; + + if (!ice_ptp_lock(hw)) { + ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n"); + return -EBUSY; + } + + err = ice_read_phy_and_phc_time_eth56g(hw, port, &phy_time, &phc_time); + if (err) + goto err_unlock; + + /* Calculate the amount required to add to the port time in order for + * it to match the PHC time. + * + * Note that the port adjustment is done using 2s complement + * arithmetic. This is convenient since it means that we can simply + * calculate the difference between the PHC time and the port time, + * and it will be interpreted correctly. + */ + + ice_ptp_src_cmd(hw, ICE_PTP_NOP); + difference = phc_time - phy_time; + + err = ice_ptp_prep_port_adj_eth56g(hw, port, (s64)difference); + if (err) + goto err_unlock; + + err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_ADJ_TIME); + if (err) + goto err_unlock; + + /* Issue the sync to activate the time adjustment */ + ice_ptp_exec_tmr_cmd(hw); + + /* Re-capture the timer values to flush the command registers and + * verify that the time was properly adjusted. + */ + err = ice_read_phy_and_phc_time_eth56g(hw, port, &phy_time, &phc_time); + if (err) + goto err_unlock; + + dev_info(ice_hw_to_dev(hw), + "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n", + port, phy_time, phc_time); + +err_unlock: + ice_ptp_unlock(hw); + return err; +} + +/** + * ice_stop_phy_timer_eth56g - Stop the PHY clock timer + * @hw: pointer to the HW struct + * @port: the PHY port to stop + * @soft_reset: if true, hold the SOFT_RESET bit of PHY_REG_PS + * + * Stop the clock of a PHY port. This must be done as part of the flow to + * re-calibrate Tx and Rx timestamping offsets whenever the clock time is + * initialized or when link speed changes. + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset) +{ + int err; + + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_OFFSET_READY, 0); + if (err) + return err; + + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_OFFSET_READY, 0); + if (err) + return err; + + ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port); + + return 0; +} + +/** + * ice_start_phy_timer_eth56g - Start the PHY clock timer + * @hw: pointer to the HW struct + * @port: the PHY port to start + * + * Start the clock of a PHY port. This must be done as part of the flow to + * re-calibrate Tx and Rx timestamping offsets whenever the clock time is + * initialized or when link speed changes. 
+ * + * Return: + * * %0 - success + * * %other - PHY read/write failed + */ +int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port) +{ + u32 lo, hi; + u64 incval; + u8 tmr_idx; + int err; + + tmr_idx = ice_get_ptp_src_clock_index(hw); + + err = ice_stop_phy_timer_eth56g(hw, port, false); + if (err) + return err; + + ice_ptp_src_cmd(hw, ICE_PTP_NOP); + + err = ice_phy_cfg_parpcs_eth56g(hw, port); + if (err) + return err; + + err = ice_phy_cfg_ptp_1step_eth56g(hw, port); + if (err) + return err; + + err = ice_phy_cfg_mac_eth56g(hw, port); + if (err) + return err; + + lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx)); + hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx)); + incval = (u64)hi << 32 | lo; + + err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_REG_TIMETUS_L, incval); + if (err) + return err; + + err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL); + if (err) + return err; + + ice_ptp_exec_tmr_cmd(hw); + + err = ice_sync_phy_timer_eth56g(hw, port); + if (err) + return err; + + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_OFFSET_READY, 1); + if (err) + return err; + + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_OFFSET_READY, 1); + if (err) + return err; + + ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port); + + return 0; +} + +/** + * ice_sb_access_ena_eth56g - Enable SB devices (PHY and others) access + * @hw: pointer to HW struct + * @enable: Enable or disable access + * + * Enable sideband devices (PHY and others) access. + */ +static void ice_sb_access_ena_eth56g(struct ice_hw *hw, bool enable) +{ + u32 val = rd32(hw, PF_SB_REM_DEV_CTL); + + if (enable) + val |= BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1); + else + val &= ~(BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1)); + + wr32(hw, PF_SB_REM_DEV_CTL, val); +} + +/** + * ice_ptp_init_phc_eth56g - Perform E82X specific PHC initialization + * @hw: pointer to HW struct + * + * Perform PHC initialization steps specific to E82X devices. + * + * Return: + * * %0 - success + * * %other - failed to initialize CGU + */ +static int ice_ptp_init_phc_eth56g(struct ice_hw *hw) +{ + ice_sb_access_ena_eth56g(hw, true); + /* Initialize the Clock Generation Unit */ + return ice_init_cgu_e82x(hw); +} + +/** + * ice_ptp_read_tx_hwtstamp_status_eth56g - Get TX timestamp status + * @hw: pointer to the HW struct + * @ts_status: the timestamp mask pointer + * + * Read the PHY Tx timestamp status mask indicating which ports have Tx + * timestamps available. + * + * Return: + * * %0 - success + * * %other - failed to read from PHY + */ +int ice_ptp_read_tx_hwtstamp_status_eth56g(struct ice_hw *hw, u32 *ts_status) +{ + const struct ice_eth56g_params *params = &hw->ptp.phy.eth56g; + u8 phy, mask; + u32 status; + + mask = (1 << hw->ptp.ports_per_phy) - 1; + *ts_status = 0; + + for (phy = 0; phy < params->num_phys; phy++) { + int err; + + err = ice_read_phy_eth56g(hw, phy, PHY_PTP_INT_STATUS, &status); + if (err) + return err; + + *ts_status |= (status & mask) << (phy * hw->ptp.ports_per_phy); + } + + ice_debug(hw, ICE_DBG_PTP, "PHY interrupt err: %x\n", *ts_status); + + return 0; +} + +/** + * ice_get_phy_tx_tstamp_ready_eth56g - Read the Tx memory status register + * @hw: pointer to the HW struct + * @port: the PHY port to read from + * @tstamp_ready: contents of the Tx memory status register + * + * Read the PHY_REG_TX_MEMORY_STATUS register indicating which timestamps in + * the PHY are ready. A set bit means the corresponding timestamp is valid and + * ready to be captured from the PHY timestamp block. 
+ * + * Return: + * * %0 - success + * * %other - failed to read from PHY + */ +static int ice_get_phy_tx_tstamp_ready_eth56g(struct ice_hw *hw, u8 port, + u64 *tstamp_ready) +{ + int err; + + err = ice_read_64b_ptp_reg_eth56g(hw, port, PHY_REG_TX_MEMORY_STATUS_L, + tstamp_ready); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS for port %u, err %d\n", + port, err); + return err; + } + + return 0; +} + +/** + * ice_is_muxed_topo - detect breakout 2x50G topology for E825C + * @hw: pointer to the HW struct + * + * Return: true if it's 2x50 breakout topology, false otherwise + */ +static bool ice_is_muxed_topo(struct ice_hw *hw) +{ + u8 link_topo; + bool mux; + u32 val; + + val = rd32(hw, GLGEN_SWITCH_MODE_CONFIG); + mux = FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, val); + val = rd32(hw, GLGEN_MAC_LINK_TOPO); + link_topo = FIELD_GET(GLGEN_MAC_LINK_TOPO_LINK_TOPO_M, val); + + return (mux && link_topo == ICE_LINK_TOPO_UP_TO_2_LINKS); +} + +/** + * ice_ptp_init_phy_e825c - initialize PHY parameters + * @hw: pointer to the HW struct + */ +static void ice_ptp_init_phy_e825c(struct ice_hw *hw) +{ + struct ice_ptp_hw *ptp = &hw->ptp; + struct ice_eth56g_params *params; + u8 phy; + + ptp->phy_model = ICE_PHY_ETH56G; + params = &ptp->phy.eth56g; + params->onestep_ena = false; + params->peer_delay = 0; + params->sfd_ena = false; + params->phy_addr[0] = eth56g_phy_0; + params->phy_addr[1] = eth56g_phy_1; + params->num_phys = 2; + ptp->ports_per_phy = 4; + ptp->num_lports = params->num_phys * ptp->ports_per_phy; + + ice_sb_access_ena_eth56g(hw, true); + for (phy = 0; phy < params->num_phys; phy++) { + u32 phy_rev; + int err; + + err = ice_read_phy_eth56g(hw, phy, PHY_REG_REVISION, &phy_rev); + if (err || phy_rev != PHY_REVISION_ETH56G) { + ptp->phy_model = ICE_PHY_UNSUP; + return; + } + } + + ptp->is_2x50g_muxed_topo = ice_is_muxed_topo(hw); +} + /* E822 family functions * * The following functions operate on the E822 family of devices. 
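Before the E822 code below, note how the eth56g timestamp helpers above are meant to be consumed: check which Tx timestamp indices are valid, read each one, then clear it so its status bit is rearmed, as the ice_clear_ptp_tstamp_eth56g() comment requires. A rough caller-side sketch using the device-agnostic wrappers declared in ice_ptp_hw.h (hypothetical helper name and simplified error handling, for illustration only):

    /* Drain every ready Tx timestamp from one timestamp block. */
    static int drain_tx_timestamps(struct ice_hw *hw, u8 block)
    {
            u64 ready, tstamp;
            u8 idx;
            int err;

            err = ice_get_phy_tx_tstamp_ready(hw, block, &ready);
            if (err)
                    return err;

            for (idx = 0; idx < 64; idx++) {
                    if (!(ready & BIT_ULL(idx)))
                            continue;

                    err = ice_read_phy_tstamp(hw, block, idx, &tstamp);
                    if (err)
                            return err;

                    /* hand tstamp to the stack, then invalidate the slot */
                    err = ice_clear_phy_tstamp(hw, block, idx);
                    if (err)
                            return err;
            }

            return 0;
    }
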
@@ -288,18 +2707,21 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw) /** * ice_fill_phy_msg_e82x - Fill message data for a PHY register access + * @hw: pointer to the HW struct * @msg: the PHY message buffer to fill in * @port: the port to access * @offset: the register offset */ -static void -ice_fill_phy_msg_e82x(struct ice_sbq_msg_input *msg, u8 port, u16 offset) +static void ice_fill_phy_msg_e82x(struct ice_hw *hw, + struct ice_sbq_msg_input *msg, u8 port, + u16 offset) { int phy_port, phy, quadtype; - phy_port = port % ICE_PORTS_PER_PHY_E82X; - phy = port / ICE_PORTS_PER_PHY_E82X; - quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E82X; + phy_port = port % hw->ptp.ports_per_phy; + phy = port / hw->ptp.ports_per_phy; + quadtype = ICE_GET_QUAD_NUM(port) % + ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy); if (quadtype == 0) { msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port); @@ -430,10 +2852,10 @@ ice_read_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 *val) struct ice_sbq_msg_input msg = {0}; int err; - ice_fill_phy_msg_e82x(&msg, port, offset); + ice_fill_phy_msg_e82x(hw, &msg, port, offset); msg.opcode = ice_sbq_msg_rd; - err = ice_sbq_rw_reg(hw, &msg); + err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", err); @@ -507,11 +2929,11 @@ ice_write_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 val) struct ice_sbq_msg_input msg = {0}; int err; - ice_fill_phy_msg_e82x(&msg, port, offset); + ice_fill_phy_msg_e82x(hw, &msg, port, offset); msg.opcode = ice_sbq_msg_wr; msg.data = val; - err = ice_sbq_rw_reg(hw, &msg); + err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", err); @@ -546,8 +2968,7 @@ ice_write_40b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) low_addr); return -EINVAL; } - - low = (u32)(val & P_REG_40B_LOW_M); + low = FIELD_GET(P_REG_40B_LOW_M, val); high = (u32)(val >> P_REG_40B_HIGH_S); err = ice_write_phy_reg_e82x(hw, port, low_addr, low); @@ -617,24 +3038,30 @@ ice_write_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) /** * ice_fill_quad_msg_e82x - Fill message data for quad register access + * @hw: pointer to the HW struct * @msg: the PHY message buffer to fill in * @quad: the quad to access * @offset: the register offset * * Fill a message buffer for accessing a register in a quad shared between * multiple PHYs. 
+ * + * Return: + * * %0 - OK + * * %-EINVAL - invalid quad number */ -static int -ice_fill_quad_msg_e82x(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) +static int ice_fill_quad_msg_e82x(struct ice_hw *hw, + struct ice_sbq_msg_input *msg, u8 quad, + u16 offset) { u32 addr; - if (quad >= ICE_MAX_QUAD) + if (quad >= ICE_GET_QUAD_NUM(hw->ptp.num_lports)) return -EINVAL; msg->dest_dev = rmn_0; - if ((quad % ICE_QUADS_PER_PHY_E82X) == 0) + if (!(quad % ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy))) addr = Q_0_BASE + offset; else addr = Q_1_BASE + offset; @@ -661,13 +3088,13 @@ ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val) struct ice_sbq_msg_input msg = {0}; int err; - err = ice_fill_quad_msg_e82x(&msg, quad, offset); + err = ice_fill_quad_msg_e82x(hw, &msg, quad, offset); if (err) return err; msg.opcode = ice_sbq_msg_rd; - err = ice_sbq_rw_reg(hw, &msg); + err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", err); @@ -695,14 +3122,14 @@ ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val) struct ice_sbq_msg_input msg = {0}; int err; - err = ice_fill_quad_msg_e82x(&msg, quad, offset); + err = ice_fill_quad_msg_e82x(hw, &msg, quad, offset); if (err) return err; msg.opcode = ice_sbq_msg_wr; msg.data = val; - err = ice_sbq_rw_reg(hw, &msg); + err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", err); @@ -751,7 +3178,7 @@ ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) * lower 8 bits in the low register, and the upper 32 bits in the high * register. */ - *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M); + *tstamp = FIELD_PREP(TS_PHY_HIGH_M, hi) | FIELD_PREP(TS_PHY_LOW_M, lo); return 0; } @@ -816,294 +3243,11 @@ static void ice_ptp_reset_ts_memory_e82x(struct ice_hw *hw) { unsigned int quad; - for (quad = 0; quad < ICE_MAX_QUAD; quad++) + for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports); quad++) ice_ptp_reset_ts_memory_quad_e82x(hw, quad); } /** - * ice_read_cgu_reg_e82x - Read a CGU register - * @hw: pointer to the HW struct - * @addr: Register address to read - * @val: storage for register value read - * - * Read the contents of a register of the Clock Generation Unit. Only - * applicable to E822 devices. - */ -static int -ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val) -{ - struct ice_sbq_msg_input cgu_msg; - int err; - - cgu_msg.opcode = ice_sbq_msg_rd; - cgu_msg.dest_dev = cgu; - cgu_msg.msg_addr_low = addr; - cgu_msg.msg_addr_high = 0x0; - - err = ice_sbq_rw_reg(hw, &cgu_msg); - if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n", - addr, err); - return err; - } - - *val = cgu_msg.data; - - return err; -} - -/** - * ice_write_cgu_reg_e82x - Write a CGU register - * @hw: pointer to the HW struct - * @addr: Register address to write - * @val: value to write into the register - * - * Write the specified value to a register of the Clock Generation Unit. Only - * applicable to E822 devices. 
- */ -static int -ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val) -{ - struct ice_sbq_msg_input cgu_msg; - int err; - - cgu_msg.opcode = ice_sbq_msg_wr; - cgu_msg.dest_dev = cgu; - cgu_msg.msg_addr_low = addr; - cgu_msg.msg_addr_high = 0x0; - cgu_msg.data = val; - - err = ice_sbq_rw_reg(hw, &cgu_msg); - if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n", - addr, err); - return err; - } - - return err; -} - -/** - * ice_clk_freq_str - Convert time_ref_freq to string - * @clk_freq: Clock frequency - * - * Convert the specified TIME_REF clock frequency to a string. - */ -static const char *ice_clk_freq_str(u8 clk_freq) -{ - switch ((enum ice_time_ref_freq)clk_freq) { - case ICE_TIME_REF_FREQ_25_000: - return "25 MHz"; - case ICE_TIME_REF_FREQ_122_880: - return "122.88 MHz"; - case ICE_TIME_REF_FREQ_125_000: - return "125 MHz"; - case ICE_TIME_REF_FREQ_153_600: - return "153.6 MHz"; - case ICE_TIME_REF_FREQ_156_250: - return "156.25 MHz"; - case ICE_TIME_REF_FREQ_245_760: - return "245.76 MHz"; - default: - return "Unknown"; - } -} - -/** - * ice_clk_src_str - Convert time_ref_src to string - * @clk_src: Clock source - * - * Convert the specified clock source to its string name. - */ -static const char *ice_clk_src_str(u8 clk_src) -{ - switch ((enum ice_clk_src)clk_src) { - case ICE_CLK_SRC_TCX0: - return "TCX0"; - case ICE_CLK_SRC_TIME_REF: - return "TIME_REF"; - default: - return "Unknown"; - } -} - -/** - * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit - * @hw: pointer to the HW struct - * @clk_freq: Clock frequency to program - * @clk_src: Clock source to select (TIME_REF, or TCX0) - * - * Configure the Clock Generation Unit with the desired clock frequency and - * time reference, enabling the PLL which drives the PTP hardware clock. - */ -static int -ice_cfg_cgu_pll_e82x(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, - enum ice_clk_src clk_src) -{ - union tspll_ro_bwm_lf bwm_lf; - union nac_cgu_dword19 dw19; - union nac_cgu_dword22 dw22; - union nac_cgu_dword24 dw24; - union nac_cgu_dword9 dw9; - int err; - - if (clk_freq >= NUM_ICE_TIME_REF_FREQ) { - dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n", - clk_freq); - return -EINVAL; - } - - if (clk_src >= NUM_ICE_CLK_SRC) { - dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n", - clk_src); - return -EINVAL; - } - - if (clk_src == ICE_CLK_SRC_TCX0 && - clk_freq != ICE_TIME_REF_FREQ_25_000) { - dev_warn(ice_hw_to_dev(hw), - "TCX0 only supports 25 MHz frequency\n"); - return -EINVAL; - } - - err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val); - if (err) - return err; - - err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val); - if (err) - return err; - - err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); - if (err) - return err; - - /* Log the current clock configuration */ - ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", - dw24.field.ts_pll_enable ? "enabled" : "disabled", - ice_clk_src_str(dw24.field.time_ref_sel), - ice_clk_freq_str(dw9.field.time_ref_freq_sel), - bwm_lf.field.plllock_true_lock_cri ? 
"locked" : "unlocked"); - - /* Disable the PLL before changing the clock source or frequency */ - if (dw24.field.ts_pll_enable) { - dw24.field.ts_pll_enable = 0; - - err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); - if (err) - return err; - } - - /* Set the frequency */ - dw9.field.time_ref_freq_sel = clk_freq; - err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val); - if (err) - return err; - - /* Configure the TS PLL feedback divisor */ - err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val); - if (err) - return err; - - dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div; - dw19.field.tspll_ndivratio = 1; - - err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val); - if (err) - return err; - - /* Configure the TS PLL post divisor */ - err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val); - if (err) - return err; - - dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div; - dw22.field.time1588clk_sel_div2 = 0; - - err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val); - if (err) - return err; - - /* Configure the TS PLL pre divisor and clock source */ - err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val); - if (err) - return err; - - dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div; - dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div; - dw24.field.time_ref_sel = clk_src; - - err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); - if (err) - return err; - - /* Finally, enable the PLL */ - dw24.field.ts_pll_enable = 1; - - err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); - if (err) - return err; - - /* Wait to verify if the PLL locks */ - usleep_range(1000, 5000); - - err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); - if (err) - return err; - - if (!bwm_lf.field.plllock_true_lock_cri) { - dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n"); - return -EBUSY; - } - - /* Log the current clock configuration */ - ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", - dw24.field.ts_pll_enable ? "enabled" : "disabled", - ice_clk_src_str(dw24.field.time_ref_sel), - ice_clk_freq_str(dw9.field.time_ref_freq_sel), - bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked"); - - return 0; -} - -/** - * ice_init_cgu_e82x - Initialize CGU with settings from firmware - * @hw: pointer to the HW structure - * - * Initialize the Clock Generation Unit of the E822 device. - */ -static int ice_init_cgu_e82x(struct ice_hw *hw) -{ - struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info; - union tspll_cntr_bist_settings cntr_bist; - int err; - - err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS, - &cntr_bist.val); - if (err) - return err; - - /* Disable sticky lock detection so lock err reported is accurate */ - cntr_bist.field.i_plllock_sel_0 = 0; - cntr_bist.field.i_plllock_sel_1 = 0; - - err = ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS, - cntr_bist.val); - if (err) - return err; - - /* Configure the CGU PLL using the parameters from the function - * capabilities. 
- */ - err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref, - (enum ice_clk_src)ts_info->clk_src); - if (err) - return err; - - return 0; -} - -/** * ice_ptp_set_vernier_wl - Set the window length for vernier calibration * @hw: pointer to the HW struct * @@ -1113,7 +3257,7 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw) { u8 port; - for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { + for (port = 0; port < hw->ptp.num_lports; port++) { int err; err = ice_write_phy_reg_e82x(hw, port, P_REG_WL, @@ -1137,15 +3281,14 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw) static int ice_ptp_init_phc_e82x(struct ice_hw *hw) { int err; - u32 regval; + u32 val; /* Enable reading switch and PHY registers over the sideband queue */ #define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1) #define PF_SB_REM_DEV_CTL_PHY0 BIT(2) - regval = rd32(hw, PF_SB_REM_DEV_CTL); - regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ | - PF_SB_REM_DEV_CTL_PHY0); - wr32(hw, PF_SB_REM_DEV_CTL, regval); + val = rd32(hw, PF_SB_REM_DEV_CTL); + val |= (PF_SB_REM_DEV_CTL_SWITCH_READ | PF_SB_REM_DEV_CTL_PHY0); + wr32(hw, PF_SB_REM_DEV_CTL, val); /* Initialize the Clock Generation Unit */ err = ice_init_cgu_e82x(hw); @@ -1178,7 +3321,7 @@ ice_ptp_prep_phy_time_e82x(struct ice_hw *hw, u32 time) */ phy_time = (u64)time << 32; - for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { + for (port = 0; port < hw->ptp.num_lports; port++) { /* Tx case */ err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_L, @@ -1281,7 +3424,7 @@ ice_ptp_prep_phy_adj_e82x(struct ice_hw *hw, s32 adj) else cycles = -(((s64)-adj) << 32); - for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { + for (port = 0; port < hw->ptp.num_lports; port++) { int err; err = ice_ptp_prep_port_adj_e82x(hw, port, cycles); @@ -1307,7 +3450,7 @@ ice_ptp_prep_phy_incval_e82x(struct ice_hw *hw, u64 incval) int err; u8 port; - for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { + for (port = 0; port < hw->ptp.num_lports; port++) { err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L, incval); if (err) @@ -1372,51 +3515,20 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts) * * Prepare the requested port for an upcoming timer sync command. * - * Do not use this function directly. If you want to configure exactly one - * port, use ice_ptp_one_port_cmd() instead. + * Note there is no equivalent of this operation on E810, as that device + * always handles all external PHYs internally. 
+ * + * Return: + * * %0 - success + * * %other - failed to write to PHY */ static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd) { - u32 cmd_val, val; - u8 tmr_idx; + u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd); int err; - tmr_idx = ice_get_ptp_src_clock_index(hw); - cmd_val = tmr_idx << SEL_PHY_SRC; - switch (cmd) { - case ICE_PTP_INIT_TIME: - cmd_val |= PHY_CMD_INIT_TIME; - break; - case ICE_PTP_INIT_INCVAL: - cmd_val |= PHY_CMD_INIT_INCVAL; - break; - case ICE_PTP_ADJ_TIME: - cmd_val |= PHY_CMD_ADJ_TIME; - break; - case ICE_PTP_READ_TIME: - cmd_val |= PHY_CMD_READ_TIME; - break; - case ICE_PTP_ADJ_TIME_AT_TIME: - cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME; - break; - case ICE_PTP_NOP: - break; - } - /* Tx case */ - /* Read, modify, write */ - err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, &val); - if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n", - err); - return err; - } - - /* Modify necessary bits only and perform write */ - val &= ~TS_CMD_MASK; - val |= cmd_val; - err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n", @@ -1425,19 +3537,8 @@ static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port, } /* Rx case */ - /* Read, modify, write */ - err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, &val); - if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n", - err); - return err; - } - - /* Modify necessary bits only and perform write */ - val &= ~TS_CMD_MASK; - val |= cmd_val; - - err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, + val | TS_CMD_RX_TYPE); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n", err); @@ -1447,63 +3548,6 @@ static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port, return 0; } -/** - * ice_ptp_one_port_cmd - Prepare one port for a timer command - * @hw: pointer to the HW struct - * @configured_port: the port to configure with configured_cmd - * @configured_cmd: timer command to prepare on the configured_port - * - * Prepare the configured_port for the configured_cmd, and prepare all other - * ports for ICE_PTP_NOP. This causes the configured_port to execute the - * desired command while all other ports perform no operation. - */ -static int -ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port, - enum ice_ptp_tmr_cmd configured_cmd) -{ - u8 port; - - for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { - enum ice_ptp_tmr_cmd cmd; - int err; - - if (port == configured_port) - cmd = configured_cmd; - else - cmd = ICE_PTP_NOP; - - err = ice_ptp_write_port_cmd_e82x(hw, port, cmd); - if (err) - return err; - } - - return 0; -} - -/** - * ice_ptp_port_cmd_e82x - Prepare all ports for a timer command - * @hw: pointer to the HW struct - * @cmd: timer command to prepare - * - * Prepare all ports connected to this device for an upcoming timer sync - * command. 
- */ -static int -ice_ptp_port_cmd_e82x(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) -{ - u8 port; - - for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { - int err; - - err = ice_ptp_write_port_cmd_e82x(hw, port, cmd); - if (err) - return err; - } - - return 0; -} - /* E822 Vernier calibration functions * * The following functions are used as part of the vernier calibration of @@ -1606,7 +3650,7 @@ static void ice_phy_cfg_lane_e82x(struct ice_hw *hw, u8 port) return; } - quad = port / ICE_PORTS_PER_QUAD; + quad = ICE_GET_QUAD_NUM(port); err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val); if (err) { @@ -2327,6 +4371,40 @@ int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port) } /** + * ice_ptp_clear_phy_offset_ready_e82x - Clear PHY TX_/RX_OFFSET_READY registers + * @hw: pointer to the HW struct + * + * Clear PHY TX_/RX_OFFSET_READY registers, effectively marking all transmitted + * and received timestamps as invalid. + * + * Return: 0 on success, other error codes when failed to write to PHY + */ +int ice_ptp_clear_phy_offset_ready_e82x(struct ice_hw *hw) +{ + u8 port; + + for (port = 0; port < hw->ptp.num_lports; port++) { + int err; + + err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 0); + if (err) { + dev_warn(ice_hw_to_dev(hw), + "Failed to clear PHY TX_OFFSET_READY register\n"); + return err; + } + + err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 0); + if (err) { + dev_warn(ice_hw_to_dev(hw), + "Failed to clear PHY RX_OFFSET_READY register\n"); + return err; + } + } + + return 0; +} + +/** * ice_read_phy_and_phc_time_e82x - Simultaneously capture PHC and PHY time * @hw: pointer to the HW struct * @port: the PHY port to read @@ -2636,6 +4714,48 @@ ice_get_phy_tx_tstamp_ready_e82x(struct ice_hw *hw, u8 quad, u64 *tstamp_ready) return 0; } +/** + * ice_phy_cfg_intr_e82x - Configure TX timestamp interrupt + * @hw: pointer to the HW struct + * @quad: the timestamp quad + * @ena: enable or disable interrupt + * @threshold: interrupt threshold + * + * Configure TX timestamp interrupt for the specified quad + * + * Return: 0 on success, other error codes when failed to read/write quad + */ + +int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold) +{ + int err; + u32 val; + + err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val); + if (err) + return err; + + val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; + if (ena) { + val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; + val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M; + val |= FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M, threshold); + } + + return ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, val); +} + +/** + * ice_ptp_init_phy_e82x - initialize PHY parameters + * @ptp: pointer to the PTP HW struct + */ +static void ice_ptp_init_phy_e82x(struct ice_ptp_hw *ptp) +{ + ptp->phy_model = ICE_PHY_E82X; + ptp->num_lports = 8; + ptp->ports_per_phy = 8; +} + /* E810 functions * * The following functions operate on the E810 series devices which use @@ -2660,7 +4780,7 @@ static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val) msg.opcode = ice_sbq_msg_rd; msg.dest_dev = rmn_0; - err = ice_sbq_rw_reg(hw, &msg); + err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", err); @@ -2691,7 +4811,7 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val) msg.dest_dev = rmn_0; msg.data = val; - err = ice_sbq_rw_reg(hw, &msg); + err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); if (err) { ice_debug(hw, 
ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", err); @@ -2863,17 +4983,21 @@ static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx) } /** - * ice_ptp_init_phy_e810 - Enable PTP function on the external PHY + * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization * @hw: pointer to HW struct * - * Enable the timesync PTP functionality for the external PHY connected to - * this function. + * Perform E810-specific PTP hardware clock initialization steps. + * + * Return: 0 on success, other error codes when failed to initialize TimeSync */ -int ice_ptp_init_phy_e810(struct ice_hw *hw) +static int ice_ptp_init_phc_e810(struct ice_hw *hw) { u8 tmr_idx; int err; + /* Ensure synchronization delay is zero */ + wr32(hw, GLTSYN_SYNC_DLAY, 0); + tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx), GLTSYN_ENA_TSYN_ENA_M); @@ -2885,21 +5009,6 @@ int ice_ptp_init_phy_e810(struct ice_hw *hw) } /** - * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization - * @hw: pointer to HW struct - * - * Perform E810-specific PTP hardware clock initialization steps. - */ -static int ice_ptp_init_phc_e810(struct ice_hw *hw) -{ - /* Ensure synchronization delay is zero */ - wr32(hw, GLTSYN_SYNC_DLAY, 0); - - /* Initialize the PHY */ - return ice_ptp_init_phy_e810(hw); -} - -/** * ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time * @hw: Board private structure * @time: Time to initialize the PHY port clock to @@ -3020,47 +5129,9 @@ static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval) */ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) { - u32 cmd_val, val; - int err; - - switch (cmd) { - case ICE_PTP_INIT_TIME: - cmd_val = GLTSYN_CMD_INIT_TIME; - break; - case ICE_PTP_INIT_INCVAL: - cmd_val = GLTSYN_CMD_INIT_INCVAL; - break; - case ICE_PTP_ADJ_TIME: - cmd_val = GLTSYN_CMD_ADJ_TIME; - break; - case ICE_PTP_READ_TIME: - cmd_val = GLTSYN_CMD_READ_TIME; - break; - case ICE_PTP_ADJ_TIME_AT_TIME: - cmd_val = GLTSYN_CMD_ADJ_INIT_TIME; - break; - case ICE_PTP_NOP: - return 0; - } + u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd); - /* Read, modify, write */ - err = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val); - if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, err %d\n", err); - return err; - } - - /* Modify necessary bits only and perform write */ - val &= ~TS_CMD_MASK_E810; - val |= cmd_val; - - err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val); - if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, err %d\n", err); - return err; - } - - return 0; + return ice_write_phy_reg_e810(hw, E810_ETH_GLTSYN_CMD, val); } /** @@ -3242,6 +5313,17 @@ int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data) return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL); } +/** + * ice_ptp_init_phy_e810 - initialize PHY parameters + * @ptp: pointer to the PTP HW struct + */ +static void ice_ptp_init_phy_e810(struct ice_ptp_hw *ptp) +{ + ptp->phy_model = ICE_PHY_E810; + ptp->num_lports = 8; + ptp->ports_per_phy = 4; +} + /* Device agnostic functions * * The following functions implement shared behavior common to both E822 and @@ -3299,18 +5381,126 @@ void ice_ptp_unlock(struct ice_hw *hw) } /** - * ice_ptp_init_phy_model - Initialize hw->phy_model based on device type + * ice_ptp_init_hw - Initialize hw based on device type * @hw: pointer to the HW structure * - * Determine the PHY model for the device, and initialize 
hw->phy_model + * Determine the PHY model for the device, and initialize hw * for use by other functions. */ -void ice_ptp_init_phy_model(struct ice_hw *hw) +void ice_ptp_init_hw(struct ice_hw *hw) { - if (ice_is_e810(hw)) - hw->phy_model = ICE_PHY_E810; + struct ice_ptp_hw *ptp = &hw->ptp; + + if (ice_is_e822(hw) || ice_is_e823(hw)) + ice_ptp_init_phy_e82x(ptp); + else if (ice_is_e810(hw)) + ice_ptp_init_phy_e810(ptp); + else if (ice_is_e825c(hw)) + ice_ptp_init_phy_e825c(hw); else - hw->phy_model = ICE_PHY_E82X; + ptp->phy_model = ICE_PHY_UNSUP; +} + +/** + * ice_ptp_write_port_cmd - Prepare a single PHY port for a timer command + * @hw: pointer to HW struct + * @port: Port to which cmd has to be sent + * @cmd: Command to be sent to the port + * + * Prepare one port for the upcoming timer sync command. Do not use this for + * programming only a single port, instead use ice_ptp_one_port_cmd() to + * ensure non-modified ports get properly initialized to ICE_PTP_NOP. + * + * Return: + * * %0 - success + * %-EBUSY - PHY type not supported + * * %other - failed to write port command + */ +static int ice_ptp_write_port_cmd(struct ice_hw *hw, u8 port, + enum ice_ptp_tmr_cmd cmd) +{ + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + return ice_ptp_write_port_cmd_eth56g(hw, port, cmd); + case ICE_PHY_E82X: + return ice_ptp_write_port_cmd_e82x(hw, port, cmd); + default: + return -EOPNOTSUPP; + } +} + +/** + * ice_ptp_one_port_cmd - Program one PHY port for a timer command + * @hw: pointer to HW struct + * @configured_port: the port that should execute the command + * @configured_cmd: the command to be executed on the configured port + * + * Prepare one port for executing a timer command, while preparing all other + * ports to ICE_PTP_NOP. This allows executing a command on a single port + * while ensuring all other ports do not execute stale commands. + * + * Return: + * * %0 - success + * * %other - failed to write port command + */ +int ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port, + enum ice_ptp_tmr_cmd configured_cmd) +{ + u32 port; + + for (port = 0; port < hw->ptp.num_lports; port++) { + int err; + + /* Program the configured port with the configured command, + * program all other ports with ICE_PTP_NOP. + */ + if (port == configured_port) + err = ice_ptp_write_port_cmd(hw, port, configured_cmd); + else + err = ice_ptp_write_port_cmd(hw, port, ICE_PTP_NOP); + + if (err) + return err; + } + + return 0; +} + +/** + * ice_ptp_port_cmd - Prepare PHY ports for a timer sync command + * @hw: pointer to HW struct + * @cmd: the timer command to setup + * + * Prepare all PHY ports on this device for the requested timer command. For + * some families this can be done in one shot, but for other families each + * port must be configured individually. 
+ * + * Return: + * * %0 - success + * * %other - failed to write port command + */ +static int ice_ptp_port_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) +{ + u32 port; + + /* PHY models which can program all ports simultaneously */ + switch (hw->ptp.phy_model) { + case ICE_PHY_E810: + return ice_ptp_port_cmd_e810(hw, cmd); + default: + break; + } + + /* PHY models which require programming each port separately */ + for (port = 0; port < hw->ptp.num_lports; port++) { + int err; + + err = ice_ptp_write_port_cmd(hw, port, cmd); + if (err) + return err; + } + + return 0; } /** @@ -3331,17 +5521,7 @@ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) ice_ptp_src_cmd(hw, cmd); /* Next, prepare the ports */ - switch (hw->phy_model) { - case ICE_PHY_E810: - err = ice_ptp_port_cmd_e810(hw, cmd); - break; - case ICE_PHY_E82X: - err = ice_ptp_port_cmd_e82x(hw, cmd); - break; - default: - err = -EOPNOTSUPP; - } - + err = ice_ptp_port_cmd(hw, cmd); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n", cmd, err); @@ -3383,7 +5563,11 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time) /* PHY timers */ /* Fill Rx and Tx ports and send msg to PHY */ - switch (hw->phy_model) { + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + err = ice_ptp_prep_phy_time_eth56g(hw, + (u32)(time & 0xFFFFFFFF)); + break; case ICE_PHY_E810: err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF); break; @@ -3425,7 +5609,10 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval) wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval)); wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval)); - switch (hw->phy_model) { + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + err = ice_ptp_prep_phy_incval_eth56g(hw, incval); + break; case ICE_PHY_E810: err = ice_ptp_prep_phy_incval_e810(hw, incval); break; @@ -3491,7 +5678,10 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0); wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj); - switch (hw->phy_model) { + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + err = ice_ptp_prep_phy_adj_eth56g(hw, adj); + break; case ICE_PHY_E810: err = ice_ptp_prep_phy_adj_e810(hw, adj); break; @@ -3521,7 +5711,9 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) */ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp) { - switch (hw->phy_model) { + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp); case ICE_PHY_E810: return ice_read_phy_tstamp_e810(hw, block, idx, tstamp); case ICE_PHY_E82X: @@ -3549,7 +5741,9 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp) */ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx) { - switch (hw->phy_model) { + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + return ice_clear_ptp_tstamp_eth56g(hw, block, idx); case ICE_PHY_E810: return ice_clear_phy_tstamp_e810(hw, block, idx); case ICE_PHY_E82X: @@ -3610,7 +5804,10 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx) */ void ice_ptp_reset_ts_memory(struct ice_hw *hw) { - switch (hw->phy_model) { + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + ice_ptp_reset_ts_memory_eth56g(hw); + break; case ICE_PHY_E82X: ice_ptp_reset_ts_memory_e82x(hw); break; @@ -3636,7 +5833,9 @@ int ice_ptp_init_phc(struct ice_hw *hw) /* Clear event err indications for auxiliary pins */ (void)rd32(hw, GLTSYN_STAT(src_idx)); - switch (hw->phy_model) { + switch (hw->ptp.phy_model) { + case 
ICE_PHY_ETH56G: + return ice_ptp_init_phc_eth56g(hw); case ICE_PHY_E810: return ice_ptp_init_phc_e810(hw); case ICE_PHY_E82X: @@ -3659,7 +5858,10 @@ int ice_ptp_init_phc(struct ice_hw *hw) */ int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready) { - switch (hw->phy_model) { + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + return ice_get_phy_tx_tstamp_ready_eth56g(hw, block, + tstamp_ready); case ICE_PHY_E810: return ice_get_phy_tx_tstamp_ready_e810(hw, block, tstamp_ready); diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h index 1f3e03124430..0852a34ade91 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h @@ -41,6 +41,41 @@ enum ice_ptp_fec_mode { ICE_PTP_FEC_MODE_RS_FEC }; +enum eth56g_res_type { + ETH56G_PHY_REG_PTP, + ETH56G_PHY_MEM_PTP, + ETH56G_PHY_REG_XPCS, + ETH56G_PHY_REG_MAC, + ETH56G_PHY_REG_GPCS, + NUM_ETH56G_PHY_RES +}; + +enum ice_eth56g_link_spd { + ICE_ETH56G_LNK_SPD_1G, + ICE_ETH56G_LNK_SPD_2_5G, + ICE_ETH56G_LNK_SPD_10G, + ICE_ETH56G_LNK_SPD_25G, + ICE_ETH56G_LNK_SPD_40G, + ICE_ETH56G_LNK_SPD_50G, + ICE_ETH56G_LNK_SPD_50G2, + ICE_ETH56G_LNK_SPD_100G, + ICE_ETH56G_LNK_SPD_100G2, + NUM_ICE_ETH56G_LNK_SPD /* Must be last */ +}; + +/** + * struct ice_phy_reg_info_eth56g - ETH56G PHY register parameters + * @base: base address for each PHY block + * @step: step between PHY lanes + * + * Characteristic information for the various PHY register parameters in the + * ETH56G devices + */ +struct ice_phy_reg_info_eth56g { + u32 base[NUM_ETH56G_PHY_RES]; + u32 step; +}; + /** * struct ice_time_ref_info_e82x * @pll_freq: Frequency of PLL that drives timer ticks in Hz @@ -94,8 +129,75 @@ struct ice_vernier_info_e82x { u32 rx_fixed_delay; }; +#define ICE_ETH56G_MAC_CFG_RX_OFFSET_INT GENMASK(19, 9) +#define ICE_ETH56G_MAC_CFG_RX_OFFSET_FRAC GENMASK(8, 0) +#define ICE_ETH56G_MAC_CFG_FRAC_W 9 /** - * struct ice_cgu_pll_params_e82x + * struct ice_eth56g_mac_reg_cfg - MAC config values for specific PTP registers + * @tx_mode: Tx timestamp compensation mode + * @tx_mk_dly: Tx timestamp marker start strobe delay + * @tx_cw_dly: Tx timestamp codeword start strobe delay + * @rx_mode: Rx timestamp compensation mode + * @rx_mk_dly: Rx timestamp marker start strobe delay + * @rx_cw_dly: Rx timestamp codeword start strobe delay + * @blks_per_clk: number of blocks transferred per clock cycle + * @blktime: block time, fixed point + * @mktime: marker time, fixed point + * @tx_offset: total Tx offset, fixed point + * @rx_offset: total Rx offset, contains value for bitslip/deskew, fixed point + * + * All fixed point registers except Rx offset are 23 bit unsigned ints with + * a 9 bit fractional. + * Rx offset is 11 bit unsigned int with a 9 bit fractional. 
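In other words these are Q9 fixed-point quantities: the low ICE_ETH56G_MAC_CFG_FRAC_W (9) bits carry the fraction and the remaining bits the integer part, which is what mul_u32_u32_fx_q9() earlier in this patch assumes. A small worked example of the encoding (values picked for illustration):

    /* 3.1 in Q9 is round(3.1 * 512) = 1587 = 0x633, the RS-FEC deskew
     * constant used for 50G2 links earlier in this patch.
     */
    u32 a = 0x633;                        /* ~3.1 */
    u32 b = 2 << 9;                       /* 2.0 (1024) */
    u32 p = (u32)(((u64)a * b) >> 9);     /* 3174 = 0xC66, ~6.2 */
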
+ */ +struct ice_eth56g_mac_reg_cfg { + struct { + u8 def; + u8 rs; + } tx_mode; + u8 tx_mk_dly; + struct { + u8 def; + u8 onestep; + } tx_cw_dly; + struct { + u8 def; + u8 rs; + } rx_mode; + struct { + u8 def; + u8 rs; + } rx_mk_dly; + struct { + u8 def; + u8 rs; + } rx_cw_dly; + u8 blks_per_clk; + u16 blktime; + u16 mktime; + struct { + u32 serdes; + u32 no_fec; + u32 fc; + u32 rs; + u32 sfd; + u32 onestep; + } tx_offset; + struct { + u32 serdes; + u32 no_fec; + u32 fc; + u32 rs; + u32 sfd; + u32 bs_ds; + } rx_offset; +}; + +extern +const struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD]; + +/** + * struct ice_cgu_pll_params_e82x - E82X CGU parameters * @refclk_pre_div: Reference clock pre-divisor * @feedback_div: Feedback divisor * @frac_n_div: Fractional divisor @@ -185,9 +287,34 @@ struct ice_cgu_pin_desc { extern const struct ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ]; +/** + * struct ice_cgu_pll_params_e825c - E825C CGU parameters + * @tspll_ck_refclkfreq: tspll_ck_refclkfreq selection + * @tspll_ndivratio: ndiv ratio that goes directly to the pll + * @tspll_fbdiv_intgr: TS PLL integer feedback divide + * @tspll_fbdiv_frac: TS PLL fractional feedback divide + * @ref1588_ck_div: clock divider for tspll ref + * + * Clock Generation Unit parameters used to program the PLL based on the + * selected TIME_REF/TCXO frequency. + */ +struct ice_cgu_pll_params_e825c { + u32 tspll_ck_refclkfreq; + u32 tspll_ndivratio; + u32 tspll_fbdiv_intgr; + u32 tspll_fbdiv_frac; + u32 ref1588_ck_div; +}; + +extern const struct +ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ]; + #define E810C_QSFP_C827_0_HANDLE 2 #define E810C_QSFP_C827_1_HANDLE 3 +/* Table of constants related to possible ETH56G PHY resources */ +extern const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES]; + /* Table of constants related to possible TIME_REF sources */ extern const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ]; @@ -197,7 +324,9 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD]; /* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for * the E810 devices. Based off of a PLL with an 812.5 MHz frequency. 
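 *
 * As an illustrative cross-check (not part of the driver), the nominal
 * increment is just the 812.5 MHz clock period expressed as a 32.32 fixed
 * point nanosecond value:
 *
 *	div_u64((u64)NSEC_PER_SEC << 32, ICE_E810_PLL_FREQ) == 0x13b13b13bULL
 *
 * which matches ICE_PTP_NOMINAL_INCVAL_E810 defined below.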
*/ -#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL +#define ICE_E810_PLL_FREQ 812500000 +#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL +#define E810_OUT_PROP_DELAY_NS 1 /* Device agnostic functions */ u8 ice_get_ptp_src_clock_index(struct ice_hw *hw); @@ -208,11 +337,15 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time); int ice_ptp_write_incval(struct ice_hw *hw, u64 incval); int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval); int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj); +int ice_ptp_clear_phy_offset_ready_e82x(struct ice_hw *hw); int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp); int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx); void ice_ptp_reset_ts_memory(struct ice_hw *hw); int ice_ptp_init_phc(struct ice_hw *hw); +void ice_ptp_init_hw(struct ice_hw *hw); int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready); +int ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port, + enum ice_ptp_tmr_cmd configured_cmd); /* E822 family functions */ int ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val); @@ -264,9 +397,9 @@ int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset); int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port); int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port); int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port); +int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold); /* E810 family functions */ -int ice_ptp_init_phy_e810(struct ice_hw *hw); int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data); int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data); int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data); @@ -280,11 +413,44 @@ int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx, u8 *ref_state, u8 *eec_mode, s64 *phase_offset, enum dpll_lock_status *dpll_state); int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num); - -void ice_ptp_init_phy_model(struct ice_hw *hw); int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, unsigned long *caps); +/* ETH56G family functions */ +int ice_ptp_read_tx_hwtstamp_status_eth56g(struct ice_hw *hw, u32 *ts_status); +int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset); +int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port); +int ice_phy_cfg_tx_offset_eth56g(struct ice_hw *hw, u8 port); +int ice_phy_cfg_rx_offset_eth56g(struct ice_hw *hw, u8 port); +int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold); +int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port); + +#define ICE_ETH56G_NOMINAL_INCVAL 0x140000000ULL +#define ICE_ETH56G_NOMINAL_PCS_REF_TUS 0x100000000ULL +#define ICE_ETH56G_NOMINAL_PCS_REF_INC 0x300000000ULL +#define ICE_ETH56G_NOMINAL_THRESH4 0x7777 +#define ICE_ETH56G_NOMINAL_TX_THRESH 0x6 + +/** + * ice_get_base_incval - Get base clock increment value + * @hw: pointer to the HW struct + * + * Return: base clock increment value for supported PHYs, 0 otherwise + */ +static inline u64 ice_get_base_incval(struct ice_hw *hw) +{ + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + return ICE_ETH56G_NOMINAL_INCVAL; + case ICE_PHY_E810: + return ICE_PTP_NOMINAL_INCVAL_E810; + case ICE_PHY_E82X: + return ice_e82x_nominal_incval(ice_e82x_time_ref(hw)); + default: + return 0; + } +} + #define PFTSYN_SEM_BYTES 4 #define ICE_PTP_CLOCK_INDEX_0 0x00 @@ -312,6 +478,7 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, #define 
TS_CMD_MASK_E810 0xFF #define TS_CMD_MASK 0xF #define SYNC_EXEC_CMD 0x3 +#define TS_CMD_RX_TYPE ICE_M(0x18, 0x4) /* Macros to derive port low and high addresses on both quads */ #define P_Q0_L(a, p) ((((a) + (0x2000 * (p)))) & 0xFFFF) @@ -344,11 +511,8 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, #define Q_REG_TX_MEM_GBL_CFG 0xC08 #define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_S 0 #define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M BIT(0) -#define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_S 1 #define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_M ICE_M(0xFF, 1) -#define Q_REG_TX_MEM_GBL_CFG_INTR_THR_S 9 #define Q_REG_TX_MEM_GBL_CFG_INTR_THR_M ICE_M(0x3F, 9) -#define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_S 15 #define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M BIT(15) /* Tx Timestamp data registers */ @@ -380,7 +544,7 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, #define P_REG_TIMETUS_L 0x410 #define P_REG_TIMETUS_U 0x414 -#define P_REG_40B_LOW_M 0xFF +#define P_REG_40B_LOW_M GENMASK(7, 0) #define P_REG_40B_HIGH_S 8 /* PHY window length registers */ @@ -487,7 +651,7 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, #define ETH_GLTSYN_SHADJ_H(_i) (0x0300037C + ((_i) * 32)) /* E810 timer command register */ -#define ETH_GLTSYN_CMD 0x03000344 +#define E810_ETH_GLTSYN_CMD 0x03000344 /* Source timer incval macros */ #define INCVAL_HIGH_M 0xFF @@ -549,4 +713,115 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, /* E810T PCA9575 IO controller pin control */ #define ICE_E810T_P0_GNSS_PRSNT_N BIT(4) +/* ETH56G PHY register addresses */ +/* Timestamp PHY incval registers */ +#define PHY_REG_TIMETUS_L 0x8 +#define PHY_REG_TIMETUS_U 0xC + +/* Timestamp PCS registers */ +#define PHY_PCS_REF_TUS_L 0x18 +#define PHY_PCS_REF_TUS_U 0x1C + +/* Timestamp PCS ref incval registers */ +#define PHY_PCS_REF_INC_L 0x20 +#define PHY_PCS_REF_INC_U 0x24 + +/* Timestamp init registers */ +#define PHY_REG_RX_TIMER_INC_PRE_L 0x64 +#define PHY_REG_RX_TIMER_INC_PRE_U 0x68 +#define PHY_REG_TX_TIMER_INC_PRE_L 0x44 +#define PHY_REG_TX_TIMER_INC_PRE_U 0x48 + +/* Timestamp match and adjust target registers */ +#define PHY_REG_RX_TIMER_CNT_ADJ_L 0x6C +#define PHY_REG_RX_TIMER_CNT_ADJ_U 0x70 +#define PHY_REG_TX_TIMER_CNT_ADJ_L 0x4C +#define PHY_REG_TX_TIMER_CNT_ADJ_U 0x50 + +/* Timestamp command registers */ +#define PHY_REG_TX_TMR_CMD 0x40 +#define PHY_REG_RX_TMR_CMD 0x60 + +/* Phy offset ready registers */ +#define PHY_REG_TX_OFFSET_READY 0x54 +#define PHY_REG_RX_OFFSET_READY 0x74 + +/* Phy total offset registers */ +#define PHY_REG_TOTAL_TX_OFFSET_L 0x38 +#define PHY_REG_TOTAL_TX_OFFSET_U 0x3C +#define PHY_REG_TOTAL_RX_OFFSET_L 0x58 +#define PHY_REG_TOTAL_RX_OFFSET_U 0x5C + +/* Timestamp capture registers */ +#define PHY_REG_TX_CAPTURE_L 0x78 +#define PHY_REG_TX_CAPTURE_U 0x7C +#define PHY_REG_RX_CAPTURE_L 0x8C +#define PHY_REG_RX_CAPTURE_U 0x90 + +/* Memory status registers */ +#define PHY_REG_TX_MEMORY_STATUS_L 0x80 +#define PHY_REG_TX_MEMORY_STATUS_U 0x84 + +/* Interrupt config register */ +#define PHY_REG_TS_INT_CONFIG 0x88 + +/* XIF mode config register */ +#define PHY_MAC_XIF_MODE 0x24 +#define PHY_MAC_XIF_1STEP_ENA_M ICE_M(0x1, 5) +#define PHY_MAC_XIF_TS_BIN_MODE_M ICE_M(0x1, 11) +#define PHY_MAC_XIF_TS_SFD_ENA_M ICE_M(0x1, 20) +#define PHY_MAC_XIF_GMII_TS_SEL_M ICE_M(0x1, 21) + +/* GPCS config register */ +#define PHY_GPCS_CONFIG_REG0 0x268 +#define PHY_GPCS_CONFIG_REG0_TX_THR_M ICE_M(0xF, 24) +#define PHY_GPCS_BITSLIP 0x5C + +#define PHY_TS_INT_CONFIG_THRESHOLD_M ICE_M(0x3F, 0) +#define 
PHY_TS_INT_CONFIG_ENA_M BIT(6) + +/* 1-step PTP config */ +#define PHY_PTP_1STEP_CONFIG 0x270 +#define PHY_PTP_1STEP_T1S_UP64_M ICE_M(0xF, 4) +#define PHY_PTP_1STEP_T1S_DELTA_M ICE_M(0xF, 8) +#define PHY_PTP_1STEP_PEER_DELAY(_port) (0x274 + 4 * (_port)) +#define PHY_PTP_1STEP_PD_ADD_PD_M ICE_M(0x1, 0) +#define PHY_PTP_1STEP_PD_DELAY_M ICE_M(0x3fffffff, 1) +#define PHY_PTP_1STEP_PD_DLY_V_M ICE_M(0x1, 31) + +/* Macros to derive offsets for TimeStampLow and TimeStampHigh */ +#define PHY_TSTAMP_L(x) (((x) * 8) + 0) +#define PHY_TSTAMP_U(x) (((x) * 8) + 4) + +#define PHY_REG_REVISION 0x85000 + +#define PHY_REG_DESKEW_0 0x94 +#define PHY_REG_DESKEW_0_RLEVEL GENMASK(6, 0) +#define PHY_REG_DESKEW_0_RLEVEL_FRAC GENMASK(9, 7) +#define PHY_REG_DESKEW_0_RLEVEL_FRAC_W 3 +#define PHY_REG_DESKEW_0_VALID GENMASK(10, 10) + +#define PHY_REG_GPCS_BITSLIP 0x5C +#define PHY_REG_SD_BIT_SLIP(_port_offset) (0x29C + 4 * (_port_offset)) +#define PHY_REVISION_ETH56G 0x10200 +#define PHY_VENDOR_TXLANE_THRESH 0x2000C + +#define PHY_MAC_TSU_CONFIG 0x40 +#define PHY_MAC_TSU_CFG_RX_MODE_M ICE_M(0x7, 0) +#define PHY_MAC_TSU_CFG_RX_MII_CW_DLY_M ICE_M(0x7, 4) +#define PHY_MAC_TSU_CFG_RX_MII_MK_DLY_M ICE_M(0x7, 8) +#define PHY_MAC_TSU_CFG_TX_MODE_M ICE_M(0x7, 12) +#define PHY_MAC_TSU_CFG_TX_MII_CW_DLY_M ICE_M(0x1F, 16) +#define PHY_MAC_TSU_CFG_TX_MII_MK_DLY_M ICE_M(0x1F, 21) +#define PHY_MAC_TSU_CFG_BLKS_PER_CLK_M ICE_M(0x1, 28) +#define PHY_MAC_RX_MODULO 0x44 +#define PHY_MAC_RX_OFFSET 0x48 +#define PHY_MAC_RX_OFFSET_M ICE_M(0xFFFFFF, 0) +#define PHY_MAC_TX_MODULO 0x4C +#define PHY_MAC_BLOCKTIME 0x50 +#define PHY_MAC_MARKERTIME 0x54 +#define PHY_MAC_TX_OFFSET 0x58 + +#define PHY_PTP_INT_STATUS 0x7FD140 + #endif /* _ICE_PTP_HW_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c index d367f4c66dcd..bdda3401e343 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.c +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -285,9 +285,7 @@ ice_repr_reg_netdev(struct net_device *netdev) static void ice_repr_remove_node(struct devlink_port *devlink_port) { - devl_lock(devlink_port->devlink); devl_rate_leaf_destroy(devlink_port); - devl_unlock(devlink_port->devlink); } /** @@ -308,6 +306,7 @@ static void ice_repr_rem(struct ice_repr *repr) void ice_repr_rem_vf(struct ice_repr *repr) { ice_repr_remove_node(&repr->vf->devlink_port); + ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac); unregister_netdev(repr->netdev); ice_devlink_destroy_vf_port(repr->vf); ice_virtchnl_set_dflt_ops(repr->vf); @@ -403,11 +402,17 @@ struct ice_repr *ice_repr_add_vf(struct ice_vf *vf) if (err) goto err_netdev; + err = ice_eswitch_cfg_vsi(repr->src_vsi, repr->parent_mac); + if (err) + goto err_cfg_vsi; + ice_virtchnl_set_repr_ops(vf); ice_repr_set_tx_topology(vf->pf); return repr; +err_cfg_vsi: + unregister_netdev(repr->netdev); err_netdev: ice_repr_rem(repr); err_repr_add: @@ -415,12 +420,9 @@ err_repr_add: return ERR_PTR(err); } -struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi) +struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id) { - if (!vsi->vf) - return NULL; - - return xa_load(&vsi->back->eswitch.reprs, vsi->vf->repr_id); + return xa_load(&pf->eswitch.reprs, id); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h index cff730b15ca0..488661b2900b 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.h +++ b/drivers/net/ethernet/intel/ice/ice_repr.h @@ -35,9 +35,8 @@ void ice_repr_stop_tx_queues(struct ice_repr *repr); struct 
ice_repr *ice_netdev_to_repr(const struct net_device *netdev); bool ice_is_port_repr_netdev(const struct net_device *netdev); -struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi); - void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len, int xmit_status); void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len); +struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id); #endif diff --git a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h index ead75fe2bcda..3b0054faf70c 100644 --- a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h @@ -47,10 +47,12 @@ struct ice_sbq_evt_desc { }; enum ice_sbq_msg_dev { - rmn_0 = 0x02, - rmn_1 = 0x03, - rmn_2 = 0x04, - cgu = 0x06 + eth56g_phy_0 = 0x02, + rmn_0 = 0x02, + rmn_1 = 0x03, + rmn_2 = 0x04, + cgu = 0x06, + eth56g_phy_1 = 0x0D, }; enum ice_sbq_msg_opcode { diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index 067712f4923f..55ef33208456 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -1416,21 +1416,23 @@ out_put_vf: } /** - * ice_set_vf_mac - * @netdev: network interface device structure + * __ice_set_vf_mac - program VF MAC address + * @pf: PF to be configure * @vf_id: VF identifier * @mac: MAC address * * program VF MAC address + * Return: zero on success or an error code on failure */ -int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +int __ice_set_vf_mac(struct ice_pf *pf, u16 vf_id, const u8 *mac) { - struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct device *dev; struct ice_vf *vf; int ret; + dev = ice_pf_to_dev(pf); if (is_multicast_ether_addr(mac)) { - netdev_err(netdev, "%pM not a valid unicast address\n", mac); + dev_err(dev, "%pM not a valid unicast address\n", mac); return -EINVAL; } @@ -1459,13 +1461,13 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) if (is_zero_ether_addr(mac)) { /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */ vf->pf_set_mac = false; - netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n", - vf->vf_id); + dev_info(dev, "Removing MAC on VF %d. VF driver will be reinitialized\n", + vf->vf_id); } else { /* PF will add MAC rule for the VF */ vf->pf_set_mac = true; - netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n", - mac, vf_id); + dev_info(dev, "Setting MAC %pM on VF %d. 
VF driver will be reinitialized\n", + mac, vf_id); } ice_reset_vf(vf, ICE_VF_RESET_NOTIFY); @@ -1477,6 +1479,20 @@ out_put_vf: } /** + * ice_set_vf_mac - .ndo_set_vf_mac handler + * @netdev: network interface device structure + * @vf_id: VF identifier + * @mac: MAC address + * + * program VF MAC address + * Return: zero on success or an error code on failure + */ +int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + return __ice_set_vf_mac(ice_netdev_to_pf(netdev), vf_id, mac); +} + +/** * ice_set_vf_trust * @netdev: network interface device structure * @vf_id: VF identifier diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h index 8f22313474d6..96549ca5c52c 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.h +++ b/drivers/net/ethernet/intel/ice/ice_sriov.h @@ -28,6 +28,7 @@ #ifdef CONFIG_PCI_IOV void ice_process_vflr_event(struct ice_pf *pf); int ice_sriov_configure(struct pci_dev *pdev, int num_vfs); +int __ice_set_vf_mac(struct ice_pf *pf, u16 vf_id, const u8 *mac); int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); int ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi); @@ -81,6 +82,13 @@ ice_sriov_configure(struct pci_dev __always_unused *pdev, } static inline int +__ice_set_vf_mac(struct ice_pf __always_unused *pf, + u16 __always_unused vf_id, const u8 __always_unused *mac) +{ + return -EOPNOTSUPP; +} + +static inline int ice_set_vf_mac(struct net_device __always_unused *netdev, int __always_unused vf_id, u8 __always_unused *mac) { diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 1191031b2a43..3caafcdc301f 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -3,6 +3,7 @@ #include "ice_lib.h" #include "ice_switch.h" +#include "ice_trace.h" #define ICE_ETH_DA_OFFSET 0 #define ICE_ETH_ETHTYPE_OFFSET 12 @@ -1471,7 +1472,6 @@ int ice_init_def_sw_recp(struct ice_hw *hw) recps[i].root_rid = i; INIT_LIST_HEAD(&recps[i].filt_rules); INIT_LIST_HEAD(&recps[i].filt_replay_rules); - INIT_LIST_HEAD(&recps[i].rg_list); mutex_init(&recps[i].filt_rule_lock); } @@ -1962,6 +1962,15 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) status = -ENOENT; + if (!status) { + if (opc == ice_aqc_opc_add_sw_rules) + hw->switch_info->rule_cnt += num_rules; + else if (opc == ice_aqc_opc_remove_sw_rules) + hw->switch_info->rule_cnt -= num_rules; + } + + trace_ice_aq_sw_rules(hw->switch_info); + return status; } @@ -2182,8 +2191,10 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid) sw_buf->res_type = cpu_to_le16(res_type); status = ice_aq_alloc_free_res(hw, sw_buf, buf_len, ice_aqc_opc_alloc_res); - if (!status) + if (!status) { *rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp); + hw->switch_info->recp_cnt++; + } return status; } @@ -2197,7 +2208,13 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid) */ static int ice_free_recipe_res(struct ice_hw *hw, u16 rid) { - return ice_free_hw_res(hw, ICE_AQC_RES_TYPE_RECIPE, 1, &rid); + int status; + + status = ice_free_hw_res(hw, ICE_AQC_RES_TYPE_RECIPE, 1, &rid); + if (!status) + hw->switch_info->recp_cnt--; + + return status; } /** @@ -2282,20 +2299,6 @@ static void ice_get_recp_to_prof_map(struct ice_hw *hw) } /** - * ice_collect_result_idx - copy result index values - * @buf: buffer that contains the result index - * @recp: the recipe struct to copy data into - */ -static 
void -ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf, - struct ice_sw_recipe *recp) -{ - if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN) - set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN, - recp->res_idxs); -} - -/** * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries * @hw: pointer to hardware structure * @recps: struct that we need to populate @@ -2353,18 +2356,10 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, for (sub_recps = 0; sub_recps < num_recps; sub_recps++) { struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps]; - struct ice_recp_grp_entry *rg_entry; u8 i, prof, idx, prot = 0; bool is_root; u16 off = 0; - rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry), - GFP_KERNEL); - if (!rg_entry) { - status = -ENOMEM; - goto err_unroll; - } - idx = root_bufs.recipe_indx; is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT; @@ -2377,11 +2372,8 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, prof = find_first_bit(recipe_to_profile[idx], ICE_MAX_NUM_PROFILES); for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) { - u8 lkup_indx = root_bufs.content.lkup_indx[i + 1]; - - rg_entry->fv_idx[i] = lkup_indx; - rg_entry->fv_mask[i] = - le16_to_cpu(root_bufs.content.mask[i + 1]); + u8 lkup_indx = root_bufs.content.lkup_indx[i]; + u16 lkup_mask = le16_to_cpu(root_bufs.content.mask[i]); /* If the recipe is a chained recipe then all its * child recipe's result will have a result index. @@ -2392,26 +2384,21 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a * valid offset value. */ - if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) || - rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE || - rg_entry->fv_idx[i] == 0) + if (!lkup_indx || + (lkup_indx & ICE_AQ_RECIPE_LKUP_IGNORE) || + test_bit(lkup_indx, + hw->switch_info->prof_res_bm[prof])) continue; - ice_find_prot_off(hw, ICE_BLK_SW, prof, - rg_entry->fv_idx[i], &prot, &off); + ice_find_prot_off(hw, ICE_BLK_SW, prof, lkup_indx, + &prot, &off); lkup_exts->fv_words[fv_word_idx].prot_id = prot; lkup_exts->fv_words[fv_word_idx].off = off; - lkup_exts->field_mask[fv_word_idx] = - rg_entry->fv_mask[i]; + lkup_exts->field_mask[fv_word_idx] = lkup_mask; fv_word_idx++; } - /* populate rg_list with the data from the child entry of this - * recipe - */ - list_add(&rg_entry->l_entry, &recps[rid].rg_list); /* Propagate some data to the recipe database */ - recps[idx].is_root = !!is_root; recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority; recps[idx].need_pass_l2 = root_bufs.content.act_ctrl & ICE_AQ_RECIPE_ACT_NEED_PASS_L2; @@ -2419,11 +2406,8 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2; bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS); if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) { - recps[idx].chain_idx = root_bufs.content.result_indx & - ~ICE_AQ_RECIPE_RESULT_EN; - set_bit(recps[idx].chain_idx, recps[idx].res_idxs); - } else { - recps[idx].chain_idx = ICE_INVAL_CHAIN_IND; + set_bit(root_bufs.content.result_indx & + ~ICE_AQ_RECIPE_RESULT_EN, recps[idx].res_idxs); } if (!is_root) { @@ -2443,15 +2427,6 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, /* Complete initialization of the root recipe entry */ lkup_exts->n_val_words = fv_word_idx; - recps[rid].big_recp = (num_recps > 1); - recps[rid].n_grp_count = 
(u8)num_recps; - recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp, - recps[rid].n_grp_count * sizeof(*recps[rid].root_buf), - GFP_KERNEL); - if (!recps[rid].root_buf) { - status = -ENOMEM; - goto err_unroll; - } /* Copy result indexes */ bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS); @@ -4768,11 +4743,6 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, continue; } - /* Skip inverse action recipes */ - if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl & - ICE_AQ_RECIPE_ACT_INV_ACT) - continue; - /* if number of words we are looking for match */ if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) { struct ice_fv_word *ar = recp[i].lkup_exts.fv_words; @@ -4897,110 +4867,55 @@ ice_fill_valid_words(struct ice_adv_lkup_elem *rule, } /** - * ice_create_first_fit_recp_def - Create a recipe grouping - * @hw: pointer to the hardware structure - * @lkup_exts: an array of protocol header extractions - * @rg_list: pointer to a list that stores new recipe groups - * @recp_cnt: pointer to a variable that stores returned number of recipe groups - * - * Using first fit algorithm, take all the words that are still not done - * and start grouping them in 4-word groups. Each group makes up one - * recipe. - */ -static int -ice_create_first_fit_recp_def(struct ice_hw *hw, - struct ice_prot_lkup_ext *lkup_exts, - struct list_head *rg_list, - u8 *recp_cnt) -{ - struct ice_pref_recipe_group *grp = NULL; - u8 j; - - *recp_cnt = 0; - - /* Walk through every word in the rule to check if it is not done. If so - * then this word needs to be part of a new recipe. - */ - for (j = 0; j < lkup_exts->n_val_words; j++) - if (!test_bit(j, lkup_exts->done)) { - if (!grp || - grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) { - struct ice_recp_grp_entry *entry; - - entry = devm_kzalloc(ice_hw_to_dev(hw), - sizeof(*entry), - GFP_KERNEL); - if (!entry) - return -ENOMEM; - list_add(&entry->l_entry, rg_list); - grp = &entry->r_group; - (*recp_cnt)++; - } - - grp->pairs[grp->n_val_pairs].prot_id = - lkup_exts->fv_words[j].prot_id; - grp->pairs[grp->n_val_pairs].off = - lkup_exts->fv_words[j].off; - grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j]; - grp->n_val_pairs++; - } - - return 0; -} - -/** * ice_fill_fv_word_index - fill in the field vector indices for a recipe group * @hw: pointer to the hardware structure - * @fv_list: field vector with the extraction sequence information - * @rg_list: recipe groupings with protocol-offset pairs + * @rm: recipe management list entry * * Helper function to fill in the field vector indices for protocol-offset * pairs. These indexes are then ultimately programmed into a recipe. */ static int -ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list, - struct list_head *rg_list) +ice_fill_fv_word_index(struct ice_hw *hw, struct ice_sw_recipe *rm) { struct ice_sw_fv_list_entry *fv; - struct ice_recp_grp_entry *rg; struct ice_fv_word *fv_ext; + u8 i; - if (list_empty(fv_list)) - return 0; + if (list_empty(&rm->fv_list)) + return -EINVAL; - fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry, + fv = list_first_entry(&rm->fv_list, struct ice_sw_fv_list_entry, list_entry); fv_ext = fv->fv_ptr->ew; - list_for_each_entry(rg, rg_list, l_entry) { - u8 i; - - for (i = 0; i < rg->r_group.n_val_pairs; i++) { - struct ice_fv_word *pr; - bool found = false; - u16 mask; - u8 j; + /* Add switch id as the first word. 
*/ + rm->fv_idx[0] = ICE_AQ_SW_ID_LKUP_IDX; + rm->fv_mask[0] = ICE_AQ_SW_ID_LKUP_MASK; + rm->n_ext_words++; - pr = &rg->r_group.pairs[i]; - mask = rg->r_group.mask[i]; - - for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) - if (fv_ext[j].prot_id == pr->prot_id && - fv_ext[j].off == pr->off) { - found = true; + for (i = 1; i < rm->n_ext_words; i++) { + struct ice_fv_word *fv_word = &rm->ext_words[i - 1]; + u16 fv_mask = rm->word_masks[i - 1]; + bool found = false; + u8 j; - /* Store index of field vector */ - rg->fv_idx[i] = j; - rg->fv_mask[i] = mask; - break; - } + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) { + if (fv_ext[j].prot_id == fv_word->prot_id && + fv_ext[j].off == fv_word->off) { + found = true; - /* Protocol/offset could not be found, caller gave an - * invalid pair - */ - if (!found) - return -EINVAL; + /* Store index of field vector */ + rm->fv_idx[i] = j; + rm->fv_mask[i] = fv_mask; + break; + } } + + /* Protocol/offset could not be found, caller gave an invalid + * pair. + */ + if (!found) + return -EINVAL; } return 0; @@ -5074,335 +4989,223 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles, } /** - * ice_add_sw_recipe - function to call AQ calls to create switch recipe - * @hw: pointer to hardware structure - * @rm: recipe management list entry - * @profiles: bitmap of profiles that will be associated. + * ice_calc_recp_cnt - calculate number of recipes based on word count + * @word_cnt: number of lookup words + * + * Word count should include switch ID word and regular lookup words. + * Returns: number of recipes required to fit @word_cnt, including extra recipes + * needed for recipe chaining (if needed). */ -static int -ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, - unsigned long *profiles) +static int ice_calc_recp_cnt(u8 word_cnt) { - DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS); - struct ice_aqc_recipe_content *content; - struct ice_aqc_recipe_data_elem *tmp; - struct ice_aqc_recipe_data_elem *buf; - struct ice_recp_grp_entry *entry; - u16 free_res_idx; - u16 recipe_count; - u8 chain_idx; - u8 recps = 0; - int status; + /* All words fit in a single recipe, no need for chaining. */ + if (word_cnt <= ICE_NUM_WORDS_RECIPE) + return 1; - /* When more than one recipe are required, another recipe is needed to - * chain them together. Matching a tunnel metadata ID takes up one of - * the match fields in the chaining recipe reducing the number of - * chained recipes by one. + /* Recipe chaining required. Result indexes are fitted right after + * regular lookup words. In some cases a new recipe must be added in + * order to fit result indexes. + * + * While the word count increases, every 5 words an extra recipe needs + * to be added. However, by adding a recipe, one word for its result + * index must also be added, therefore every 4 words recipe count + * increases by 1. This calculation does not apply to word count == 1, + * which is handled above. 
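 *
 * Illustrative example (assuming five lookup word slots per recipe, as the
 * arithmetic above implies): nine words fit in two recipes: the sub-recipe
 * carries five words and the root carries the remaining four plus the
 * sub-recipe's result index. A tenth word forces a third recipe.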
*/ - /* check number of free result indices */ - bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS); - free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm); + return (word_cnt + 2) / (ICE_NUM_WORDS_RECIPE - 1); +} - ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n", - free_res_idx, rm->n_grp_count); +static void fill_recipe_template(struct ice_aqc_recipe_data_elem *recp, u16 rid, + const struct ice_sw_recipe *rm) +{ + int i; - if (rm->n_grp_count > 1) { - if (rm->n_grp_count > free_res_idx) - return -ENOSPC; + recp->recipe_indx = rid; + recp->content.act_ctrl |= ICE_AQ_RECIPE_ACT_PRUNE_INDX_M; - rm->n_grp_count++; + for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) { + recp->content.lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE; + recp->content.mask[i] = cpu_to_le16(0); } - if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE) - return -ENOSPC; + set_bit(rid, (unsigned long *)recp->recipe_bitmap); + recp->content.act_ctrl_fwd_priority = rm->priority; - tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL); - if (!tmp) - return -ENOMEM; - - buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf), - GFP_KERNEL); - if (!buf) { - status = -ENOMEM; - goto err_mem; - } + if (rm->need_pass_l2) + recp->content.act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2; - bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES); - recipe_count = ICE_MAX_NUM_RECIPES; - status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC, - NULL); - if (status || recipe_count == 0) - goto err_unroll; + if (rm->allow_pass_l2) + recp->content.act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2; +} - /* Allocate the recipe resources, and configure them according to the - * match fields from protocol headers and extracted field vectors. - */ - chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS); - list_for_each_entry(entry, &rm->rg_list, l_entry) { - u8 i; +static void bookkeep_recipe(struct ice_sw_recipe *recipe, + struct ice_aqc_recipe_data_elem *r, + const struct ice_sw_recipe *rm) +{ + memcpy(recipe->r_bitmap, r->recipe_bitmap, sizeof(recipe->r_bitmap)); - status = ice_alloc_recipe(hw, &entry->rid); - if (status) - goto err_unroll; + recipe->priority = r->content.act_ctrl_fwd_priority; + recipe->tun_type = rm->tun_type; + recipe->need_pass_l2 = rm->need_pass_l2; + recipe->allow_pass_l2 = rm->allow_pass_l2; + recipe->recp_created = true; +} - content = &buf[recps].content; +/* For memcpy in ice_add_sw_recipe. */ +static_assert(sizeof_field(struct ice_aqc_recipe_data_elem, recipe_bitmap) == + sizeof_field(struct ice_sw_recipe, r_bitmap)); - /* Clear the result index of the located recipe, as this will be - * updated, if needed, later in the recipe creation process. - */ - tmp[0].content.result_indx = 0; +/** + * ice_add_sw_recipe - function to call AQ calls to create switch recipe + * @hw: pointer to hardware structure + * @rm: recipe management list entry + * @profiles: bitmap of profiles that will be associated. + */ +static int +ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, + unsigned long *profiles) +{ + struct ice_aqc_recipe_data_elem *buf __free(kfree) = NULL; + DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS); + struct ice_aqc_recipe_data_elem *root; + struct ice_sw_recipe *recipe; + u16 free_res_idx, rid; + int lookup = 0; + int recp_cnt; + int status; + int word; + int i; - buf[recps] = tmp[0]; - buf[recps].recipe_indx = (u8)entry->rid; - /* if the recipe is a non-root recipe RID should be programmed - * as 0 for the rules to be applied correctly. 
- */ - content->rid = 0; - memset(&content->lkup_indx, 0, - sizeof(content->lkup_indx)); - - /* All recipes use look-up index 0 to match switch ID. */ - content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; - content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); - /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask - * to be 0 - */ - for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { - content->lkup_indx[i] = 0x80; - content->mask[i] = 0; - } + recp_cnt = ice_calc_recp_cnt(rm->n_ext_words); - for (i = 0; i < entry->r_group.n_val_pairs; i++) { - content->lkup_indx[i + 1] = entry->fv_idx[i]; - content->mask[i + 1] = cpu_to_le16(entry->fv_mask[i]); - } + bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS); + bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES); - if (rm->n_grp_count > 1) { - /* Checks to see if there really is a valid result index - * that can be used. - */ - if (chain_idx >= ICE_MAX_FV_WORDS) { - ice_debug(hw, ICE_DBG_SW, "No chain index available\n"); - status = -ENOSPC; - goto err_unroll; - } + /* Check number of free result indices */ + free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm); - entry->chain_idx = chain_idx; - content->result_indx = - ICE_AQ_RECIPE_RESULT_EN | - FIELD_PREP(ICE_AQ_RECIPE_RESULT_DATA_M, - chain_idx); - clear_bit(chain_idx, result_idx_bm); - chain_idx = find_first_bit(result_idx_bm, - ICE_MAX_FV_WORDS); - } + ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n", + free_res_idx, recp_cnt); - /* fill recipe dependencies */ - bitmap_zero((unsigned long *)buf[recps].recipe_bitmap, - ICE_MAX_NUM_RECIPES); - set_bit(buf[recps].recipe_indx, - (unsigned long *)buf[recps].recipe_bitmap); - content->act_ctrl_fwd_priority = rm->priority; + /* Last recipe doesn't need result index */ + if (recp_cnt - 1 > free_res_idx) + return -ENOSPC; - if (rm->need_pass_l2) - content->act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2; + if (recp_cnt > ICE_MAX_CHAIN_RECIPE_RES) + return -E2BIG; - if (rm->allow_pass_l2) - content->act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2; - recps++; - } + buf = kcalloc(recp_cnt, sizeof(*buf), GFP_KERNEL); + if (!buf) + return -ENOMEM; - if (rm->n_grp_count == 1) { - rm->root_rid = buf[0].recipe_indx; - set_bit(buf[0].recipe_indx, rm->r_bitmap); - buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT; - if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) { - memcpy(buf[0].recipe_bitmap, rm->r_bitmap, - sizeof(buf[0].recipe_bitmap)); - } else { - status = -EINVAL; - goto err_unroll; - } - /* Applicable only for ROOT_RECIPE, set the fwd_priority for - * the recipe which is getting created if specified - * by user. Usually any advanced switch filter, which results - * into new extraction sequence, ended up creating a new recipe - * of type ROOT and usually recipes are associated with profiles - * Switch rule referreing newly created recipe, needs to have - * either/or 'fwd' or 'join' priority, otherwise switch rule - * evaluation will not happen correctly. In other words, if - * switch rule to be evaluated on priority basis, then recipe - * needs to have priority, otherwise it will be evaluated last. - */ - buf[0].content.act_ctrl_fwd_priority = rm->priority; - } else { - struct ice_recp_grp_entry *last_chain_entry; - u16 rid, i; + /* Setup the non-root subrecipes. These do not contain lookups for other + * subrecipes results. Set associated recipe only to own recipe index. + * Each non-root subrecipe needs a free result index from FV. + * + * Note: only done if there is more than one recipe. 
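 *
 * Illustrative layout (editor's sketch, assuming five word slots per
 * recipe): for seven lookup words and two recipes, the sub-recipe matches
 * words 0-4 and writes its match result to a free FV result slot; the root
 * recipe then matches words 5-6 plus that result slot (mask 0xffff), so a
 * packet hits the rule only when both recipes match.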
+ */ + for (i = 0; i < recp_cnt - 1; i++) { + struct ice_aqc_recipe_content *content; + u8 result_idx; - /* Allocate the last recipe that will chain the outcomes of the - * other recipes together - */ status = ice_alloc_recipe(hw, &rid); if (status) - goto err_unroll; + return status; - content = &buf[recps].content; + fill_recipe_template(&buf[i], rid, rm); - buf[recps].recipe_indx = (u8)rid; - content->rid = (u8)rid; - content->rid |= ICE_AQ_RECIPE_ID_IS_ROOT; - /* the new entry created should also be part of rg_list to - * make sure we have complete recipe + result_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS); + /* Check if there really is a valid result index that can be + * used. */ - last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw), - sizeof(*last_chain_entry), - GFP_KERNEL); - if (!last_chain_entry) { - status = -ENOMEM; - goto err_unroll; - } - last_chain_entry->rid = rid; - memset(&content->lkup_indx, 0, sizeof(content->lkup_indx)); - /* All recipes use look-up index 0 to match switch ID. */ - content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; - content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); - for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { - content->lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE; - content->mask[i] = 0; + if (result_idx >= ICE_MAX_FV_WORDS) { + ice_debug(hw, ICE_DBG_SW, "No chain index available\n"); + return -ENOSPC; } + clear_bit(result_idx, result_idx_bm); - i = 1; - /* update r_bitmap with the recp that is used for chaining */ + content = &buf[i].content; + content->result_indx = ICE_AQ_RECIPE_RESULT_EN | + FIELD_PREP(ICE_AQ_RECIPE_RESULT_DATA_M, + result_idx); + + /* Set recipe association to be used for root recipe */ set_bit(rid, rm->r_bitmap); - /* this is the recipe that chains all the other recipes so it - * should not have a chaining ID to indicate the same - */ - last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND; - list_for_each_entry(entry, &rm->rg_list, l_entry) { - last_chain_entry->fv_idx[i] = entry->chain_idx; - content->lkup_indx[i] = entry->chain_idx; - content->mask[i++] = cpu_to_le16(0xFFFF); - set_bit(entry->rid, rm->r_bitmap); - } - list_add(&last_chain_entry->l_entry, &rm->rg_list); - if (sizeof(buf[recps].recipe_bitmap) >= - sizeof(rm->r_bitmap)) { - memcpy(buf[recps].recipe_bitmap, rm->r_bitmap, - sizeof(buf[recps].recipe_bitmap)); - } else { - status = -EINVAL; - goto err_unroll; + + word = 0; + while (lookup < rm->n_ext_words && + word < ICE_NUM_WORDS_RECIPE) { + content->lkup_indx[word] = rm->fv_idx[lookup]; + content->mask[word] = cpu_to_le16(rm->fv_mask[lookup]); + + lookup++; + word++; } - content->act_ctrl_fwd_priority = rm->priority; - recps++; - rm->root_rid = (u8)rid; + recipe = &hw->switch_info->recp_list[rid]; + set_bit(result_idx, recipe->res_idxs); + bookkeep_recipe(recipe, &buf[i], rm); } - status = ice_acquire_change_lock(hw, ICE_RES_WRITE); - if (status) - goto err_unroll; - status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL); - ice_release_change_lock(hw); + /* Setup the root recipe */ + status = ice_alloc_recipe(hw, &rid); if (status) - goto err_unroll; - - /* Every recipe that just got created add it to the recipe - * book keeping list - */ - list_for_each_entry(entry, &rm->rg_list, l_entry) { - struct ice_switch_info *sw = hw->switch_info; - bool is_root, idx_found = false; - struct ice_sw_recipe *recp; - u16 idx, buf_idx = 0; - - /* find buffer index for copying some data */ - for (idx = 0; idx < rm->n_grp_count; idx++) - if (buf[idx].recipe_indx == entry->rid) { - buf_idx = idx; - idx_found = true; - 
} + return status; - if (!idx_found) { - status = -EIO; - goto err_unroll; - } + recipe = &hw->switch_info->recp_list[rid]; + root = &buf[recp_cnt - 1]; + fill_recipe_template(root, rid, rm); - recp = &sw->recp_list[entry->rid]; - is_root = (rm->root_rid == entry->rid); - recp->is_root = is_root; + /* Set recipe association, use previously set bitmap and own rid */ + set_bit(rid, rm->r_bitmap); + memcpy(root->recipe_bitmap, rm->r_bitmap, sizeof(root->recipe_bitmap)); - recp->root_rid = entry->rid; - recp->big_recp = (is_root && rm->n_grp_count > 1); + /* For non-root recipes rid should be 0, for root it should be correct + * rid value ored with 0x80 (is root bit). + */ + root->content.rid = rid | ICE_AQ_RECIPE_ID_IS_ROOT; - memcpy(&recp->ext_words, entry->r_group.pairs, - entry->r_group.n_val_pairs * sizeof(struct ice_fv_word)); + /* Fill remaining lookups in root recipe */ + word = 0; + while (lookup < rm->n_ext_words && + word < ICE_NUM_WORDS_RECIPE /* should always be true */) { + root->content.lkup_indx[word] = rm->fv_idx[lookup]; + root->content.mask[word] = cpu_to_le16(rm->fv_mask[lookup]); - memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap, - sizeof(recp->r_bitmap)); + lookup++; + word++; + } - /* Copy non-result fv index values and masks to recipe. This - * call will also update the result recipe bitmask. + /* Fill result indexes as lookups */ + i = 0; + while (i < recp_cnt - 1 && + word < ICE_NUM_WORDS_RECIPE /* should always be true */) { + root->content.lkup_indx[word] = buf[i].content.result_indx & + ~ICE_AQ_RECIPE_RESULT_EN; + root->content.mask[word] = cpu_to_le16(0xffff); + /* For bookkeeping, it is needed to mark FV index as used for + * intermediate result. */ - ice_collect_result_idx(&buf[buf_idx], recp); + set_bit(root->content.lkup_indx[word], recipe->res_idxs); - /* for non-root recipes, also copy to the root, this allows - * easier matching of a complete chained recipe - */ - if (!is_root) - ice_collect_result_idx(&buf[buf_idx], - &sw->recp_list[rm->root_rid]); - - recp->n_ext_words = entry->r_group.n_val_pairs; - recp->chain_idx = entry->chain_idx; - recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority; - recp->n_grp_count = rm->n_grp_count; - recp->tun_type = rm->tun_type; - recp->need_pass_l2 = rm->need_pass_l2; - recp->allow_pass_l2 = rm->allow_pass_l2; - recp->recp_created = true; + i++; + word++; } - rm->root_buf = buf; - kfree(tmp); - return status; -err_unroll: -err_mem: - kfree(tmp); - devm_kfree(ice_hw_to_dev(hw), buf); - return status; -} + rm->root_rid = rid; + bookkeep_recipe(&hw->switch_info->recp_list[rid], root, rm); -/** - * ice_create_recipe_group - creates recipe group - * @hw: pointer to hardware structure - * @rm: recipe management list entry - * @lkup_exts: lookup elements - */ -static int -ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm, - struct ice_prot_lkup_ext *lkup_exts) -{ - u8 recp_count = 0; - int status; - - rm->n_grp_count = 0; + /* Program the recipe */ + status = ice_acquire_change_lock(hw, ICE_RES_WRITE); + if (status) + return status; - /* Create recipes for words that are marked not done by packing them - * as best fit. 
- */ - status = ice_create_first_fit_recp_def(hw, lkup_exts, - &rm->rg_list, &recp_count); - if (!status) { - rm->n_grp_count += recp_count; - rm->n_ext_words = lkup_exts->n_val_words; - memcpy(&rm->ext_words, lkup_exts->fv_words, - sizeof(rm->ext_words)); - memcpy(rm->word_masks, lkup_exts->field_mask, - sizeof(rm->word_masks)); - } + status = ice_aq_add_recipe(hw, buf, recp_cnt, NULL); + ice_release_change_lock(hw); + if (status) + return status; - return status; + return 0; } /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule @@ -5509,9 +5312,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES); DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES); struct ice_prot_lkup_ext *lkup_exts; - struct ice_recp_grp_entry *r_entry; struct ice_sw_fv_list_entry *fvit; - struct ice_recp_grp_entry *r_tmp; struct ice_sw_fv_list_entry *tmp; struct ice_sw_recipe *rm; int status = 0; @@ -5553,7 +5354,6 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, * headers being programmed. */ INIT_LIST_HEAD(&rm->fv_list); - INIT_LIST_HEAD(&rm->rg_list); /* Get bitmap of field vectors (profiles) that are compatible with the * rule request; only these will be searched in the subsequent call to @@ -5565,12 +5365,10 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, if (status) goto err_unroll; - /* Group match words into recipes using preferred recipe grouping - * criteria. - */ - status = ice_create_recipe_group(hw, rm, lkup_exts); - if (status) - goto err_unroll; + /* Copy FV words and masks from lkup_exts to recipe struct. */ + rm->n_ext_words = lkup_exts->n_val_words; + memcpy(rm->ext_words, lkup_exts->fv_words, sizeof(rm->ext_words)); + memcpy(rm->word_masks, lkup_exts->field_mask, sizeof(rm->word_masks)); /* set the recipe priority if specified */ rm->priority = (u8)rinfo->priority; @@ -5581,7 +5379,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, /* Find offsets from the field vector. Pick the first one for all the * recipes. 
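 *
 * Illustrative note: if rm->ext_words[0] is a {prot_id, off} pair that sits at
 * extraction word 7 of that first field vector, ice_fill_fv_word_index()
 * records rm->fv_idx[1] = 7 (index 0 is reserved for the switch ID lookup)
 * along with the corresponding word mask in rm->fv_mask[1].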
*/ - status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list); + status = ice_fill_fv_word_index(hw, rm); if (status) goto err_unroll; @@ -5659,17 +5457,11 @@ err_free_recipe: } err_unroll: - list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) { - list_del(&r_entry->l_entry); - devm_kfree(ice_hw_to_dev(hw), r_entry); - } - list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) { list_del(&fvit->list_entry); devm_kfree(ice_hw_to_dev(hw), fvit); } - devm_kfree(ice_hw_to_dev(hw), rm->root_buf); kfree(rm); err_free_lkup_exts: diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index ad98e98c812d..671d7a5f359f 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -216,7 +216,6 @@ struct ice_sw_recipe { /* For a chained recipe the root recipe is what should be used for * programming rules */ - u8 is_root; u8 root_rid; u8 recp_created; @@ -227,19 +226,8 @@ struct ice_sw_recipe { */ struct ice_fv_word ext_words[ICE_MAX_CHAIN_WORDS]; u16 word_masks[ICE_MAX_CHAIN_WORDS]; - - /* if this recipe is a collection of other recipe */ - u8 big_recp; - - /* if this recipe is part of another bigger recipe then chain index - * corresponding to this recipe - */ - u8 chain_idx; - - /* if this recipe is a collection of other recipe then count of other - * recipes and recipe IDs of those recipes - */ - u8 n_grp_count; + u8 fv_idx[ICE_MAX_CHAIN_WORDS]; + u16 fv_mask[ICE_MAX_CHAIN_WORDS]; /* Bit map specifying the IDs associated with this group of recipe */ DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); @@ -272,10 +260,6 @@ struct ice_sw_recipe { u8 need_pass_l2:1; u8 allow_pass_l2:1; - struct list_head rg_list; - - /* AQ buffer associated with this recipe */ - struct ice_aqc_recipe_data_elem *root_buf; /* This struct saves the fv_words for a given lookup */ struct ice_prot_lkup_ext lkup_exts; }; diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index 8bd24b33f3a6..e6923f8121a9 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -1353,6 +1353,7 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule, struct ice_tc_flower_fltr *fltr) { struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers; + struct netlink_ext_ack *extack = fltr->extack; struct flow_match_control enc_control; fltr->tunnel_type = ice_tc_tun_get_type(dev); @@ -1373,6 +1374,9 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule, flow_rule_match_enc_control(rule, &enc_control); + if (flow_rule_has_enc_control_flags(enc_control.mask->flags, extack)) + return -EOPNOTSUPP; + if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { struct flow_match_ipv4_addrs match; diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h index 244cddd2a9ea..07aab6e130cd 100644 --- a/drivers/net/ethernet/intel/ice/ice_trace.h +++ b/drivers/net/ethernet/intel/ice/ice_trace.h @@ -330,6 +330,24 @@ DEFINE_EVENT(ice_esw_br_port_template, TP_ARGS(port) ); +DECLARE_EVENT_CLASS(ice_switch_stats_template, + TP_PROTO(struct ice_switch_info *sw_info), + TP_ARGS(sw_info), + TP_STRUCT__entry(__field(u16, rule_cnt) + __field(u8, recp_cnt)), + TP_fast_assign(__entry->rule_cnt = sw_info->rule_cnt; + __entry->recp_cnt = sw_info->recp_cnt;), + TP_printk("rules=%u recipes=%u", + __entry->rule_cnt, + __entry->recp_cnt) +); + 
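/* A minimal sketch (hypothetical, not part of this change) of how the same
 * template could back further switch-statistics tracepoints; only the
 * ice_aq_sw_rules event below is actually added here.
 */
DEFINE_EVENT(ice_switch_stats_template,
	     ice_switch_stats_snapshot,		/* hypothetical event name */
	     TP_PROTO(struct ice_switch_info *sw_info),
	     TP_ARGS(sw_info)
);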
+DEFINE_EVENT(ice_switch_stats_template, + ice_aq_sw_rules, + TP_PROTO(struct ice_switch_info *sw_info), + TP_ARGS(sw_info) +); + /* End tracepoints */ #endif /* _ICE_TRACE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index eef397e5baa0..96037bef3e78 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -71,6 +71,14 @@ enum ice_aq_res_ids { ICE_GLOBAL_CFG_LOCK_RES_ID }; +enum ice_fec_stats_types { + ICE_FEC_CORR_LOW, + ICE_FEC_CORR_HIGH, + ICE_FEC_UNCORR_LOW, + ICE_FEC_UNCORR_HIGH, + ICE_FEC_MAX +}; + /* FW update timeout definitions are in milliseconds */ #define ICE_NVM_TIMEOUT 180000 #define ICE_CHANGE_LOCK_TIMEOUT 1000 @@ -322,12 +330,14 @@ enum ice_time_ref_freq { ICE_TIME_REF_FREQ_156_250 = 4, ICE_TIME_REF_FREQ_245_760 = 5, - NUM_ICE_TIME_REF_FREQ + NUM_ICE_TIME_REF_FREQ, + + ICE_TIME_REF_FREQ_INVALID = -1, }; /* Clock source specification */ enum ice_clk_src { - ICE_CLK_SRC_TCX0 = 0, /* Temperature compensated oscillator */ + ICE_CLK_SRC_TCXO = 0, /* Temperature compensated oscillator */ ICE_CLK_SRC_TIME_REF = 1, /* Use TIME_REF reference clock */ NUM_ICE_CLK_SRC @@ -372,6 +382,15 @@ struct ice_ts_dev_info { u8 ts_ll_int_read; }; +#define ICE_NAC_TOPO_PRIMARY_M BIT(0) +#define ICE_NAC_TOPO_DUAL_M BIT(1) +#define ICE_NAC_TOPO_ID_M GENMASK(0xF, 0) + +struct ice_nac_topology { + u32 mode; + u8 id; +}; + /* Function specific capabilities */ struct ice_hw_func_caps { struct ice_hw_common_caps common_cap; @@ -393,6 +412,7 @@ struct ice_hw_dev_caps { u32 num_flow_director_fltr; /* Number of FD filters available */ struct ice_ts_dev_info ts_dev_info; u32 num_funcs; + struct ice_nac_topology nac_topo; /* bitmap of supported sensors * bit 0 - internal temperature sensor * bit 31:1 - Reserved @@ -718,6 +738,7 @@ struct ice_port_info { u16 sw_id; /* Initial switch ID belongs to port */ u16 pf_vf_num; u8 port_state; + u8 local_fwd_mode; #define ICE_SCHED_PORT_STATE_INIT 0x0 #define ICE_SCHED_PORT_STATE_READY 0x1 u8 lport; @@ -741,6 +762,8 @@ struct ice_switch_info { struct ice_sw_recipe *recp_list; u16 prof_res_bm_init; u16 max_used_prof_index; + u16 rule_cnt; + u8 recp_cnt; DECLARE_BITMAP(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS); }; @@ -820,11 +843,43 @@ struct ice_mbx_data { u16 async_watermark_val; }; +#define ICE_PORTS_PER_QUAD 4 +#define ICE_GET_QUAD_NUM(port) ((port) / ICE_PORTS_PER_QUAD) + +struct ice_eth56g_params { + u8 num_phys; + u8 phy_addr[2]; + bool onestep_ena; + bool sfd_ena; + u32 peer_delay; +}; + +union ice_phy_params { + struct ice_eth56g_params eth56g; +}; + /* PHY model */ enum ice_phy_model { ICE_PHY_UNSUP = -1, - ICE_PHY_E810 = 1, + ICE_PHY_E810 = 1, ICE_PHY_E82X, + ICE_PHY_ETH56G, +}; + +/* Global Link Topology */ +enum ice_global_link_topo { + ICE_LINK_TOPO_UP_TO_2_LINKS, + ICE_LINK_TOPO_UP_TO_4_LINKS, + ICE_LINK_TOPO_UP_TO_8_LINKS, + ICE_LINK_TOPO_RESERVED, +}; + +struct ice_ptp_hw { + enum ice_phy_model phy_model; + union ice_phy_params phy; + u8 num_lports; + u8 ports_per_phy; + bool is_2x50g_muxed_topo; }; /* Port hardware description */ @@ -848,7 +903,6 @@ struct ice_hw { u8 revision_id; u8 pf_id; /* device profile info */ - enum ice_phy_model phy_model; u16 max_burst_size; /* driver sets this value */ @@ -911,12 +965,7 @@ struct ice_hw { /* INTRL granularity in 1 us */ u8 intrl_gran; -#define ICE_MAX_QUAD 2 -#define ICE_QUADS_PER_PHY_E82X 2 -#define ICE_PORTS_PER_PHY_E82X 8 -#define ICE_PORTS_PER_QUAD 4 -#define ICE_PORTS_PER_PHY_E810 4 
-#define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD) + struct ice_ptp_hw ptp; /* Active package version (currently active) */ struct ice_pkg_ver active_pkg_ver; diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index 48a8d462d76a..5635e9da2212 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -948,7 +948,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) goto out_unlock; } - ice_eswitch_update_repr(vf->repr_id, vsi); + ice_eswitch_update_repr(&vf->repr_id, vsi); /* if the VF has been reset allow it to come up again */ ice_mbx_clear_malvf(&vf->mbx_info); diff --git a/drivers/net/ethernet/intel/idpf/Kconfig b/drivers/net/ethernet/intel/idpf/Kconfig new file mode 100644 index 000000000000..1addd663acad --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/Kconfig @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 2024 Intel Corporation + +config IDPF + tristate "Intel(R) Infrastructure Data Path Function Support" + depends on PCI_MSI + select DIMLIB + select LIBETH + help + This driver supports Intel(R) Infrastructure Data Path Function + devices. + + To compile this driver as a module, choose M here. The module + will be called idpf. + +if IDPF + +config IDPF_SINGLEQ + bool "idpf singleq support" + help + This option enables support for legacy single Rx/Tx queues w/no + completion and fill queues. Only enable if you have hardware which + wants to work in this mode as it increases the driver size and adds + runtme checks on hotpath. + +endif # IDPF diff --git a/drivers/net/ethernet/intel/idpf/Makefile b/drivers/net/ethernet/intel/idpf/Makefile index 6844ead2f3ac..2ce01a0b5898 100644 --- a/drivers/net/ethernet/intel/idpf/Makefile +++ b/drivers/net/ethernet/intel/idpf/Makefile @@ -12,7 +12,8 @@ idpf-y := \ idpf_ethtool.o \ idpf_lib.o \ idpf_main.o \ - idpf_singleq_txrx.o \ idpf_txrx.o \ idpf_virtchnl.o \ idpf_vf_dev.o + +idpf-$(CONFIG_IDPF_SINGLEQ) += idpf_singleq_txrx.o diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index e7a036538246..2c31ad87587a 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -17,10 +17,8 @@ struct idpf_vport_max_q; #include <linux/sctp.h> #include <linux/ethtool_netlink.h> #include <net/gro.h> -#include <linux/dim.h> #include "virtchnl2.h" -#include "idpf_lan_txrx.h" #include "idpf_txrx.h" #include "idpf_controlq.h" @@ -266,7 +264,6 @@ struct idpf_port_stats { * the worst case. * @num_bufqs_per_qgrp: Buffer queues per RX queue in a given grouping * @bufq_desc_count: Buffer queue descriptor count - * @bufq_size: Size of buffers in ring (e.g. 2K, 4K, etc) * @num_rxq_grp: Number of RX queues in a group * @rxq_grps: Total number of RX groups. Number of groups * number of RX per * group will yield total number of RX queues. 
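/* Editor's sketch (not part of the patch) of the compile-time gating the new
 * IDPF_SINGLEQ symbol provides; the real helper with this shape,
 * idpf_is_queue_model_split(), is updated in the idpf.h hunk below.
 */
static inline bool example_is_queue_model_split(u16 q_model)
{
	/* With CONFIG_IDPF_SINGLEQ=n, IS_ENABLED() is the constant 0, so this
	 * folds to "return true" and every single-queue branch guarded by it
	 * becomes dead code, which is why the Kconfig help warns about extra
	 * driver size and hot-path checks when the option is enabled.
	 */
	if (!IS_ENABLED(CONFIG_IDPF_SINGLEQ))
		return true;

	return q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT;
}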
@@ -302,7 +299,7 @@ struct idpf_vport { u16 num_txq_grp; struct idpf_txq_group *txq_grps; u32 txq_model; - struct idpf_queue **txqs; + struct idpf_tx_queue **txqs; bool crc_enable; u16 num_rxq; @@ -310,11 +307,10 @@ struct idpf_vport { u32 rxq_desc_count; u8 num_bufqs_per_qgrp; u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP]; - u32 bufq_size[IDPF_MAX_BUFQS_PER_RXQ_GRP]; u16 num_rxq_grp; struct idpf_rxq_group *rxq_grps; u32 rxq_model; - struct idpf_rx_ptype_decoded rx_ptype_lkup[IDPF_RX_MAX_PTYPE]; + struct libeth_rx_pt *rx_ptype_lkup; struct idpf_adapter *adapter; struct net_device *netdev; @@ -601,7 +597,8 @@ struct idpf_adapter { */ static inline int idpf_is_queue_model_split(u16 q_model) { - return q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT; + return !IS_ENABLED(CONFIG_IDPF_SINGLEQ) || + q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT; } #define idpf_is_cap_ena(adapter, field, flag) \ diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c index 1885ba618981..3806ddd3ce4a 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c +++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c @@ -437,22 +437,24 @@ struct idpf_stats { .stat_offset = offsetof(_type, _stat) \ } -/* Helper macro for defining some statistics related to queues */ -#define IDPF_QUEUE_STAT(_name, _stat) \ - IDPF_STAT(struct idpf_queue, _name, _stat) +/* Helper macros for defining some statistics related to queues */ +#define IDPF_RX_QUEUE_STAT(_name, _stat) \ + IDPF_STAT(struct idpf_rx_queue, _name, _stat) +#define IDPF_TX_QUEUE_STAT(_name, _stat) \ + IDPF_STAT(struct idpf_tx_queue, _name, _stat) /* Stats associated with a Tx queue */ static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = { - IDPF_QUEUE_STAT("pkts", q_stats.tx.packets), - IDPF_QUEUE_STAT("bytes", q_stats.tx.bytes), - IDPF_QUEUE_STAT("lso_pkts", q_stats.tx.lso_pkts), + IDPF_TX_QUEUE_STAT("pkts", q_stats.packets), + IDPF_TX_QUEUE_STAT("bytes", q_stats.bytes), + IDPF_TX_QUEUE_STAT("lso_pkts", q_stats.lso_pkts), }; /* Stats associated with an Rx queue */ static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = { - IDPF_QUEUE_STAT("pkts", q_stats.rx.packets), - IDPF_QUEUE_STAT("bytes", q_stats.rx.bytes), - IDPF_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rx.rsc_pkts), + IDPF_RX_QUEUE_STAT("pkts", q_stats.packets), + IDPF_RX_QUEUE_STAT("bytes", q_stats.bytes), + IDPF_RX_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rsc_pkts), }; #define IDPF_TX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_tx_queue_stats) @@ -563,8 +565,6 @@ static void idpf_get_stat_strings(struct net_device *netdev, u8 *data) for (i = 0; i < vport_config->max_q.max_rxq; i++) idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats, "rx", i); - - page_pool_ethtool_stats_get_strings(data); } /** @@ -598,7 +598,6 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset) struct idpf_netdev_priv *np = netdev_priv(netdev); struct idpf_vport_config *vport_config; u16 max_txq, max_rxq; - unsigned int size; if (sset != ETH_SS_STATS) return -EINVAL; @@ -617,11 +616,8 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset) max_txq = vport_config->max_q.max_txq; max_rxq = vport_config->max_q.max_rxq; - size = IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) + + return IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) + (IDPF_RX_QUEUE_STATS_LEN * max_rxq); - size += page_pool_ethtool_stats_get_count(); - - return size; } /** @@ -633,7 +629,7 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset) * Copies the 
stat data defined by the pointer and stat structure pair into * the memory supplied as data. If the pointer is null, data will be zero'd. */ -static void idpf_add_one_ethtool_stat(u64 *data, void *pstat, +static void idpf_add_one_ethtool_stat(u64 *data, const void *pstat, const struct idpf_stats *stat) { char *p; @@ -671,6 +667,7 @@ static void idpf_add_one_ethtool_stat(u64 *data, void *pstat, * idpf_add_queue_stats - copy queue statistics into supplied buffer * @data: ethtool stats buffer * @q: the queue to copy + * @type: type of the queue * * Queue statistics must be copied while protected by u64_stats_fetch_begin, * so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats @@ -681,19 +678,23 @@ static void idpf_add_one_ethtool_stat(u64 *data, void *pstat, * * This function expects to be called while under rcu_read_lock(). */ -static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q) +static void idpf_add_queue_stats(u64 **data, const void *q, + enum virtchnl2_queue_type type) { + const struct u64_stats_sync *stats_sync; const struct idpf_stats *stats; unsigned int start; unsigned int size; unsigned int i; - if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) { + if (type == VIRTCHNL2_QUEUE_TYPE_RX) { size = IDPF_RX_QUEUE_STATS_LEN; stats = idpf_gstrings_rx_queue_stats; + stats_sync = &((const struct idpf_rx_queue *)q)->stats_sync; } else { size = IDPF_TX_QUEUE_STATS_LEN; stats = idpf_gstrings_tx_queue_stats; + stats_sync = &((const struct idpf_tx_queue *)q)->stats_sync; } /* To avoid invalid statistics values, ensure that we keep retrying @@ -701,10 +702,10 @@ static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q) * u64_stats_fetch_retry. */ do { - start = u64_stats_fetch_begin(&q->stats_sync); + start = u64_stats_fetch_begin(stats_sync); for (i = 0; i < size; i++) idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]); - } while (u64_stats_fetch_retry(&q->stats_sync, start)); + } while (u64_stats_fetch_retry(stats_sync, start)); /* Once we successfully copy the stats in, update the data pointer */ *data += size; @@ -793,7 +794,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport) for (j = 0; j < num_rxq; j++) { u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs; struct idpf_rx_queue_stats *stats; - struct idpf_queue *rxq; + struct idpf_rx_queue *rxq; unsigned int start; if (idpf_is_queue_model_split(vport->rxq_model)) @@ -807,7 +808,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport) do { start = u64_stats_fetch_begin(&rxq->stats_sync); - stats = &rxq->q_stats.rx; + stats = &rxq->q_stats; hw_csum_err = u64_stats_read(&stats->hw_csum_err); hsplit = u64_stats_read(&stats->hsplit_pkts); hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf); @@ -828,7 +829,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport) for (j = 0; j < txq_grp->num_txq; j++) { u64 linearize, qbusy, skb_drops, dma_map_errs; - struct idpf_queue *txq = txq_grp->txqs[j]; + struct idpf_tx_queue *txq = txq_grp->txqs[j]; struct idpf_tx_queue_stats *stats; unsigned int start; @@ -838,7 +839,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport) do { start = u64_stats_fetch_begin(&txq->stats_sync); - stats = &txq->q_stats.tx; + stats = &txq->q_stats; linearize = u64_stats_read(&stats->linearize); qbusy = u64_stats_read(&stats->q_busy); skb_drops = u64_stats_read(&stats->skb_drops); @@ -869,7 +870,6 @@ static void idpf_get_ethtool_stats(struct net_device *netdev, { struct idpf_netdev_priv *np = netdev_priv(netdev); struct idpf_vport_config 
*vport_config; - struct page_pool_stats pp_stats = { }; struct idpf_vport *vport; unsigned int total = 0; unsigned int i, j; @@ -896,12 +896,12 @@ static void idpf_get_ethtool_stats(struct net_device *netdev, qtype = VIRTCHNL2_QUEUE_TYPE_TX; for (j = 0; j < txq_grp->num_txq; j++, total++) { - struct idpf_queue *txq = txq_grp->txqs[j]; + struct idpf_tx_queue *txq = txq_grp->txqs[j]; if (!txq) idpf_add_empty_queue_stats(&data, qtype); else - idpf_add_queue_stats(&data, txq); + idpf_add_queue_stats(&data, txq, qtype); } } @@ -929,7 +929,7 @@ static void idpf_get_ethtool_stats(struct net_device *netdev, num_rxq = rxq_grp->singleq.num_rxq; for (j = 0; j < num_rxq; j++, total++) { - struct idpf_queue *rxq; + struct idpf_rx_queue *rxq; if (is_splitq) rxq = &rxq_grp->splitq.rxq_sets[j]->rxq; @@ -938,93 +938,77 @@ static void idpf_get_ethtool_stats(struct net_device *netdev, if (!rxq) idpf_add_empty_queue_stats(&data, qtype); else - idpf_add_queue_stats(&data, rxq); - - /* In splitq mode, don't get page pool stats here since - * the pools are attached to the buffer queues - */ - if (is_splitq) - continue; - - if (rxq) - page_pool_get_stats(rxq->pp, &pp_stats); - } - } - - for (i = 0; i < vport->num_rxq_grp; i++) { - for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { - struct idpf_queue *rxbufq = - &vport->rxq_grps[i].splitq.bufq_sets[j].bufq; - - page_pool_get_stats(rxbufq->pp, &pp_stats); + idpf_add_queue_stats(&data, rxq, qtype); } } for (; total < vport_config->max_q.max_rxq; total++) idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX); - page_pool_ethtool_stats_get(data, &pp_stats); - rcu_read_unlock(); idpf_vport_ctrl_unlock(netdev); } /** - * idpf_find_rxq - find rxq from q index + * idpf_find_rxq_vec - find rxq vector from q index * @vport: virtual port associated to queue * @q_num: q index used to find queue * - * returns pointer to rx queue + * returns pointer to rx vector */ -static struct idpf_queue *idpf_find_rxq(struct idpf_vport *vport, int q_num) +static struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport, + int q_num) { int q_grp, q_idx; if (!idpf_is_queue_model_split(vport->rxq_model)) - return vport->rxq_grps->singleq.rxqs[q_num]; + return vport->rxq_grps->singleq.rxqs[q_num]->q_vector; q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP; q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP; - return &vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq; + return vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector; } /** - * idpf_find_txq - find txq from q index + * idpf_find_txq_vec - find txq vector from q index * @vport: virtual port associated to queue * @q_num: q index used to find queue * - * returns pointer to tx queue + * returns pointer to tx vector */ -static struct idpf_queue *idpf_find_txq(struct idpf_vport *vport, int q_num) +static struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport, + int q_num) { int q_grp; if (!idpf_is_queue_model_split(vport->txq_model)) - return vport->txqs[q_num]; + return vport->txqs[q_num]->q_vector; q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP; - return vport->txq_grps[q_grp].complq; + return vport->txq_grps[q_grp].complq->q_vector; } /** * __idpf_get_q_coalesce - get ITR values for specific queue * @ec: ethtool structure to fill with driver's coalesce settings - * @q: quuee of Rx or Tx + * @q_vector: queue vector corresponding to this queue + * @type: queue type */ static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec, - struct idpf_queue *q) + const struct idpf_q_vector *q_vector, + enum 
virtchnl2_queue_type type) { - if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) { + if (type == VIRTCHNL2_QUEUE_TYPE_RX) { ec->use_adaptive_rx_coalesce = - IDPF_ITR_IS_DYNAMIC(q->q_vector->rx_intr_mode); - ec->rx_coalesce_usecs = q->q_vector->rx_itr_value; + IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode); + ec->rx_coalesce_usecs = q_vector->rx_itr_value; } else { ec->use_adaptive_tx_coalesce = - IDPF_ITR_IS_DYNAMIC(q->q_vector->tx_intr_mode); - ec->tx_coalesce_usecs = q->q_vector->tx_itr_value; + IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode); + ec->tx_coalesce_usecs = q_vector->tx_itr_value; } } @@ -1040,8 +1024,8 @@ static int idpf_get_q_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, u32 q_num) { - struct idpf_netdev_priv *np = netdev_priv(netdev); - struct idpf_vport *vport; + const struct idpf_netdev_priv *np = netdev_priv(netdev); + const struct idpf_vport *vport; int err = 0; idpf_vport_ctrl_lock(netdev); @@ -1056,10 +1040,12 @@ static int idpf_get_q_coalesce(struct net_device *netdev, } if (q_num < vport->num_rxq) - __idpf_get_q_coalesce(ec, idpf_find_rxq(vport, q_num)); + __idpf_get_q_coalesce(ec, idpf_find_rxq_vec(vport, q_num), + VIRTCHNL2_QUEUE_TYPE_RX); if (q_num < vport->num_txq) - __idpf_get_q_coalesce(ec, idpf_find_txq(vport, q_num)); + __idpf_get_q_coalesce(ec, idpf_find_txq_vec(vport, q_num), + VIRTCHNL2_QUEUE_TYPE_TX); unlock_mutex: idpf_vport_ctrl_unlock(netdev); @@ -1103,16 +1089,15 @@ static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num, /** * __idpf_set_q_coalesce - set ITR values for specific queue * @ec: ethtool structure from user to update ITR settings - * @q: queue for which itr values has to be set + * @qv: queue vector for which itr values has to be set * @is_rxq: is queue type rx * * Returns 0 on success, negative otherwise. */ -static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec, - struct idpf_queue *q, bool is_rxq) +static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec, + struct idpf_q_vector *qv, bool is_rxq) { u32 use_adaptive_coalesce, coalesce_usecs; - struct idpf_q_vector *qv = q->q_vector; bool is_dim_ena = false; u16 itr_val; @@ -1128,7 +1113,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec, itr_val = qv->tx_itr_value; } if (coalesce_usecs != itr_val && use_adaptive_coalesce) { - netdev_err(q->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n"); + netdev_err(qv->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n"); return -EINVAL; } @@ -1137,7 +1122,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec, return 0; if (coalesce_usecs > IDPF_ITR_MAX) { - netdev_err(q->vport->netdev, + netdev_err(qv->vport->netdev, "Invalid value, %d-usecs range is 0-%d\n", coalesce_usecs, IDPF_ITR_MAX); @@ -1146,7 +1131,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec, if (coalesce_usecs % 2) { coalesce_usecs--; - netdev_info(q->vport->netdev, + netdev_info(qv->vport->netdev, "HW only supports even ITR values, ITR rounded to %d\n", coalesce_usecs); } @@ -1185,15 +1170,16 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec, * * Return 0 on success, and negative on failure */ -static int idpf_set_q_coalesce(struct idpf_vport *vport, - struct ethtool_coalesce *ec, +static int idpf_set_q_coalesce(const struct idpf_vport *vport, + const struct ethtool_coalesce *ec, int q_num, bool is_rxq) { - struct idpf_queue *q; + struct idpf_q_vector *qv; - q = is_rxq ? idpf_find_rxq(vport, q_num) : idpf_find_txq(vport, q_num); + qv = is_rxq ? 
idpf_find_rxq_vec(vport, q_num) : + idpf_find_txq_vec(vport, q_num); - if (q && __idpf_set_q_coalesce(ec, q, is_rxq)) + if (qv && __idpf_set_q_coalesce(ec, qv, is_rxq)) return -EINVAL; return 0; diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h index a5752dcab888..8c7f8ef8f1a1 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h @@ -4,6 +4,8 @@ #ifndef _IDPF_LAN_TXRX_H_ #define _IDPF_LAN_TXRX_H_ +#include <linux/bits.h> + enum idpf_rss_hash { IDPF_HASH_INVALID = 0, /* Values 1 - 28 are reserved for future use */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index f1ee5584e8fa..5dbf2b4ba1b0 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -4,8 +4,7 @@ #include "idpf.h" #include "idpf_virtchnl.h" -static const struct net_device_ops idpf_netdev_ops_splitq; -static const struct net_device_ops idpf_netdev_ops_singleq; +static const struct net_device_ops idpf_netdev_ops; /** * idpf_init_vector_stack - Fill the MSIX vector stack with vector index @@ -69,7 +68,7 @@ static void idpf_deinit_vector_stack(struct idpf_adapter *adapter) static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter) { clear_bit(IDPF_MB_INTR_MODE, adapter->flags); - free_irq(adapter->msix_entries[0].vector, adapter); + kfree(free_irq(adapter->msix_entries[0].vector, adapter)); queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); } @@ -124,15 +123,14 @@ static void idpf_mb_irq_enable(struct idpf_adapter *adapter) */ static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter) { - struct idpf_q_vector *mb_vector = &adapter->mb_vector; int irq_num, mb_vidx = 0, err; + char *name; irq_num = adapter->msix_entries[mb_vidx].vector; - mb_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d", - dev_driver_string(&adapter->pdev->dev), - "Mailbox", mb_vidx); - err = request_irq(irq_num, adapter->irq_mb_handler, 0, - mb_vector->name, adapter); + name = kasprintf(GFP_KERNEL, "%s-%s-%d", + dev_driver_string(&adapter->pdev->dev), + "Mailbox", mb_vidx); + err = request_irq(irq_num, adapter->irq_mb_handler, 0, name, adapter); if (err) { dev_err(&adapter->pdev->dev, "IRQ request for mailbox failed, error: %d\n", err); @@ -765,10 +763,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) } /* assign netdev_ops */ - if (idpf_is_queue_model_split(vport->txq_model)) - netdev->netdev_ops = &idpf_netdev_ops_splitq; - else - netdev->netdev_ops = &idpf_netdev_ops_singleq; + netdev->netdev_ops = &idpf_netdev_ops; /* setup watchdog timeout value to be 5 second */ netdev->watchdog_timeo = 5 * HZ; @@ -946,6 +941,9 @@ static void idpf_decfg_netdev(struct idpf_vport *vport) { struct idpf_adapter *adapter = vport->adapter; + kfree(vport->rx_ptype_lkup); + vport->rx_ptype_lkup = NULL; + unregister_netdev(vport->netdev); free_netdev(vport->netdev); vport->netdev = NULL; @@ -1318,14 +1316,14 @@ static void idpf_rx_init_buf_tail(struct idpf_vport *vport) if (idpf_is_queue_model_split(vport->rxq_model)) { for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { - struct idpf_queue *q = + const struct idpf_buf_queue *q = &grp->splitq.bufq_sets[j].bufq; writel(q->next_to_alloc, q->tail); } } else { for (j = 0; j < grp->singleq.num_rxq; j++) { - struct idpf_queue *q = + const struct idpf_rx_queue *q = grp->singleq.rxqs[j]; writel(q->next_to_alloc, q->tail); @@ -1855,7 +1853,7 @@ int idpf_initiate_soft_reset(struct idpf_vport 
*vport, enum idpf_vport_state current_state = np->state; struct idpf_adapter *adapter = vport->adapter; struct idpf_vport *new_vport; - int err, i; + int err; /* If the system is low on memory, we can end up in bad state if we * free all the memory for queue resources and try to allocate them @@ -1929,46 +1927,6 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, */ memcpy(vport, new_vport, offsetof(struct idpf_vport, link_speed_mbps)); - /* Since idpf_vport_queues_alloc was called with new_port, the queue - * back pointers are currently pointing to the local new_vport. Reset - * the backpointers to the original vport here - */ - for (i = 0; i < vport->num_txq_grp; i++) { - struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; - int j; - - tx_qgrp->vport = vport; - for (j = 0; j < tx_qgrp->num_txq; j++) - tx_qgrp->txqs[j]->vport = vport; - - if (idpf_is_queue_model_split(vport->txq_model)) - tx_qgrp->complq->vport = vport; - } - - for (i = 0; i < vport->num_rxq_grp; i++) { - struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; - struct idpf_queue *q; - u16 num_rxq; - int j; - - rx_qgrp->vport = vport; - for (j = 0; j < vport->num_bufqs_per_qgrp; j++) - rx_qgrp->splitq.bufq_sets[j].bufq.vport = vport; - - if (idpf_is_queue_model_split(vport->rxq_model)) - num_rxq = rx_qgrp->splitq.num_rxq_sets; - else - num_rxq = rx_qgrp->singleq.num_rxq; - - for (j = 0; j < num_rxq; j++) { - if (idpf_is_queue_model_split(vport->rxq_model)) - q = &rx_qgrp->splitq.rxq_sets[j]->rxq; - else - q = rx_qgrp->singleq.rxqs[j]; - q->vport = vport; - } - } - if (reset_cause == IDPF_SR_Q_CHANGE) idpf_vport_alloc_vec_indexes(vport); @@ -2393,24 +2351,10 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem) mem->pa = 0; } -static const struct net_device_ops idpf_netdev_ops_splitq = { - .ndo_open = idpf_open, - .ndo_stop = idpf_stop, - .ndo_start_xmit = idpf_tx_splitq_start, - .ndo_features_check = idpf_features_check, - .ndo_set_rx_mode = idpf_set_rx_mode, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = idpf_set_mac, - .ndo_change_mtu = idpf_change_mtu, - .ndo_get_stats64 = idpf_get_stats64, - .ndo_set_features = idpf_set_features, - .ndo_tx_timeout = idpf_tx_timeout, -}; - -static const struct net_device_ops idpf_netdev_ops_singleq = { +static const struct net_device_ops idpf_netdev_ops = { .ndo_open = idpf_open, .ndo_stop = idpf_stop, - .ndo_start_xmit = idpf_tx_singleq_start, + .ndo_start_xmit = idpf_tx_start, .ndo_features_check = idpf_features_check, .ndo_set_rx_mode = idpf_set_rx_mode, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c index f784eea044bd..db476b3314c8 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_main.c +++ b/drivers/net/ethernet/intel/idpf/idpf_main.c @@ -8,6 +8,7 @@ #define DRV_SUMMARY "Intel(R) Infrastructure Data Path Function Linux Driver" MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_IMPORT_NS(LIBETH); MODULE_LICENSE("GPL"); /** diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c index 27b93592c4ba..fe64febf7436 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2023 Intel Corporation */ +#include <net/libeth/rx.h> + #include "idpf.h" /** @@ -186,7 +188,7 @@ static int idpf_tx_singleq_csum(struct sk_buff *skb, * and gets a physical address for each 
memory location and programs * it and the length into the transmit base mode descriptor. */ -static void idpf_tx_singleq_map(struct idpf_queue *tx_q, +static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q, struct idpf_tx_buf *first, struct idpf_tx_offload_params *offloads) { @@ -205,12 +207,12 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q, data_len = skb->data_len; size = skb_headlen(skb); - tx_desc = IDPF_BASE_TX_DESC(tx_q, i); + tx_desc = &tx_q->base_tx[i]; dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE); /* write each descriptor with CRC bit */ - if (tx_q->vport->crc_enable) + if (idpf_queue_has(CRC_EN, tx_q)) td_cmd |= IDPF_TX_DESC_CMD_ICRC; for (frag = &skb_shinfo(skb)->frags[0];; frag++) { @@ -239,7 +241,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q, i++; if (i == tx_q->desc_count) { - tx_desc = IDPF_BASE_TX_DESC(tx_q, 0); + tx_desc = &tx_q->base_tx[0]; i = 0; } @@ -259,7 +261,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q, i++; if (i == tx_q->desc_count) { - tx_desc = IDPF_BASE_TX_DESC(tx_q, 0); + tx_desc = &tx_q->base_tx[0]; i = 0; } @@ -285,7 +287,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q, /* set next_to_watch value indicating a packet is present */ first->next_to_watch = tx_desc; - nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); netdev_tx_sent_queue(nq, first->bytecount); idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more()); @@ -299,7 +301,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q, * ring entry to reflect that this index is a context descriptor */ static struct idpf_base_tx_ctx_desc * -idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq) +idpf_tx_singleq_get_ctx_desc(struct idpf_tx_queue *txq) { struct idpf_base_tx_ctx_desc *ctx_desc; int ntu = txq->next_to_use; @@ -307,7 +309,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq) memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf)); txq->tx_buf[ntu].ctx_entry = true; - ctx_desc = IDPF_BASE_TX_CTX_DESC(txq, ntu); + ctx_desc = &txq->base_ctx[ntu]; IDPF_SINGLEQ_BUMP_RING_IDX(txq, ntu); txq->next_to_use = ntu; @@ -320,7 +322,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq) * @txq: queue to send buffer on * @offload: offload parameter structure **/ -static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq, +static void idpf_tx_singleq_build_ctx_desc(struct idpf_tx_queue *txq, struct idpf_tx_offload_params *offload) { struct idpf_base_tx_ctx_desc *desc = idpf_tx_singleq_get_ctx_desc(txq); @@ -333,7 +335,7 @@ static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq, qw1 |= FIELD_PREP(IDPF_TXD_CTX_QW1_MSS_M, offload->mss); u64_stats_update_begin(&txq->stats_sync); - u64_stats_inc(&txq->q_stats.tx.lso_pkts); + u64_stats_inc(&txq->q_stats.lso_pkts); u64_stats_update_end(&txq->stats_sync); } @@ -351,8 +353,8 @@ static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq, * * Returns NETDEV_TX_OK if sent, else an error code */ -static netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb, - struct idpf_queue *tx_q) +netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb, + struct idpf_tx_queue *tx_q) { struct idpf_tx_offload_params offload = { }; struct idpf_tx_buf *first; @@ -409,53 +411,25 @@ out_drop: } /** - * idpf_tx_singleq_start - Selects the right Tx queue to send buffer - * @skb: send buffer - * @netdev: network interface device structure - * - * Returns NETDEV_TX_OK if sent, else an error code - */ -netdev_tx_t 
idpf_tx_singleq_start(struct sk_buff *skb, - struct net_device *netdev) -{ - struct idpf_vport *vport = idpf_netdev_to_vport(netdev); - struct idpf_queue *tx_q; - - tx_q = vport->txqs[skb_get_queue_mapping(skb)]; - - /* hardware can't handle really short frames, hardware padding works - * beyond this point - */ - if (skb_put_padto(skb, IDPF_TX_MIN_PKT_LEN)) { - idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); - - return NETDEV_TX_OK; - } - - return idpf_tx_singleq_frame(skb, tx_q); -} - -/** * idpf_tx_singleq_clean - Reclaim resources from queue * @tx_q: Tx queue to clean * @napi_budget: Used to determine if we are in netpoll * @cleaned: returns number of packets cleaned * */ -static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget, +static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget, int *cleaned) { - unsigned int budget = tx_q->vport->compln_clean_budget; unsigned int total_bytes = 0, total_pkts = 0; struct idpf_base_tx_desc *tx_desc; + u32 budget = tx_q->clean_budget; s16 ntc = tx_q->next_to_clean; struct idpf_netdev_priv *np; struct idpf_tx_buf *tx_buf; - struct idpf_vport *vport; struct netdev_queue *nq; bool dont_wake; - tx_desc = IDPF_BASE_TX_DESC(tx_q, ntc); + tx_desc = &tx_q->base_tx[ntc]; tx_buf = &tx_q->tx_buf[ntc]; ntc -= tx_q->desc_count; @@ -517,7 +491,7 @@ static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget, if (unlikely(!ntc)) { ntc -= tx_q->desc_count; tx_buf = tx_q->tx_buf; - tx_desc = IDPF_BASE_TX_DESC(tx_q, 0); + tx_desc = &tx_q->base_tx[0]; } /* unmap any remaining paged data */ @@ -540,7 +514,7 @@ fetch_next_txq_desc: if (unlikely(!ntc)) { ntc -= tx_q->desc_count; tx_buf = tx_q->tx_buf; - tx_desc = IDPF_BASE_TX_DESC(tx_q, 0); + tx_desc = &tx_q->base_tx[0]; } } while (likely(budget)); @@ -550,16 +524,15 @@ fetch_next_txq_desc: *cleaned += total_pkts; u64_stats_update_begin(&tx_q->stats_sync); - u64_stats_add(&tx_q->q_stats.tx.packets, total_pkts); - u64_stats_add(&tx_q->q_stats.tx.bytes, total_bytes); + u64_stats_add(&tx_q->q_stats.packets, total_pkts); + u64_stats_add(&tx_q->q_stats.bytes, total_bytes); u64_stats_update_end(&tx_q->stats_sync); - vport = tx_q->vport; - np = netdev_priv(vport->netdev); - nq = netdev_get_tx_queue(vport->netdev, tx_q->idx); + np = netdev_priv(tx_q->netdev); + nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); dont_wake = np->state != __IDPF_VPORT_UP || - !netif_carrier_ok(vport->netdev); + !netif_carrier_ok(tx_q->netdev); __netif_txq_completed_wake(nq, total_pkts, total_bytes, IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH, dont_wake); @@ -584,7 +557,7 @@ static bool idpf_tx_singleq_clean_all(struct idpf_q_vector *q_vec, int budget, budget_per_q = num_txq ? 
max(budget / num_txq, 1) : 0; for (i = 0; i < num_txq; i++) { - struct idpf_queue *q; + struct idpf_tx_queue *q; q = q_vec->tx[i]; clean_complete &= idpf_tx_singleq_clean(q, budget_per_q, @@ -614,14 +587,9 @@ static bool idpf_rx_singleq_test_staterr(const union virtchnl2_rx_desc *rx_desc, /** * idpf_rx_singleq_is_non_eop - process handling of non-EOP buffers - * @rxq: Rx ring being processed * @rx_desc: Rx descriptor for current buffer - * @skb: Current socket buffer containing buffer in progress - * @ntc: next to clean */ -static bool idpf_rx_singleq_is_non_eop(struct idpf_queue *rxq, - union virtchnl2_rx_desc *rx_desc, - struct sk_buff *skb, u16 ntc) +static bool idpf_rx_singleq_is_non_eop(const union virtchnl2_rx_desc *rx_desc) { /* if we are the last buffer then there is nothing else to do */ if (likely(idpf_rx_singleq_test_staterr(rx_desc, IDPF_RXD_EOF_SINGLEQ))) @@ -635,98 +603,82 @@ static bool idpf_rx_singleq_is_non_eop(struct idpf_queue *rxq, * @rxq: Rx ring being processed * @skb: skb currently being received and modified * @csum_bits: checksum bits from descriptor - * @ptype: the packet type decoded by hardware + * @decoded: the packet type decoded by hardware * * skb->protocol must be set before this function is called */ -static void idpf_rx_singleq_csum(struct idpf_queue *rxq, struct sk_buff *skb, - struct idpf_rx_csum_decoded *csum_bits, - u16 ptype) +static void idpf_rx_singleq_csum(struct idpf_rx_queue *rxq, + struct sk_buff *skb, + struct idpf_rx_csum_decoded csum_bits, + struct libeth_rx_pt decoded) { - struct idpf_rx_ptype_decoded decoded; bool ipv4, ipv6; /* check if Rx checksum is enabled */ - if (unlikely(!(rxq->vport->netdev->features & NETIF_F_RXCSUM))) + if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded)) return; /* check if HW has decoded the packet and checksum */ - if (unlikely(!(csum_bits->l3l4p))) - return; - - decoded = rxq->vport->rx_ptype_lkup[ptype]; - if (unlikely(!(decoded.known && decoded.outer_ip))) + if (unlikely(!csum_bits.l3l4p)) return; - ipv4 = IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV4); - ipv6 = IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV6); + ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4; + ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6; /* Check if there were any checksum errors */ - if (unlikely(ipv4 && (csum_bits->ipe || csum_bits->eipe))) + if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe))) goto checksum_fail; /* Device could not do any checksum offload for certain extension * headers as indicated by setting IPV6EXADD bit */ - if (unlikely(ipv6 && csum_bits->ipv6exadd)) + if (unlikely(ipv6 && csum_bits.ipv6exadd)) return; /* check for L4 errors and handle packets that were not able to be * checksummed due to arrival speed */ - if (unlikely(csum_bits->l4e)) + if (unlikely(csum_bits.l4e)) goto checksum_fail; - if (unlikely(csum_bits->nat && csum_bits->eudpe)) + if (unlikely(csum_bits.nat && csum_bits.eudpe)) goto checksum_fail; /* Handle packets that were not able to be checksummed due to arrival * speed, in this case the stack can compute the csum. */ - if (unlikely(csum_bits->pprs)) + if (unlikely(csum_bits.pprs)) return; /* If there is an outer header present that might contain a checksum * we need to bump the checksum level by 1 to reflect the fact that * we are indicating we validated the inner checksum. 
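A minimal sketch of the skb state this branch leaves behind for a tunnelled packet whose parsed ptype is at or above the IP/GRE tunnel level and whose descriptor carries no checksum error bits (illustrative fragment only, not additional driver code):

	skb->csum_level = 1;			/* bump for the outer header */
	skb->ip_summed = CHECKSUM_UNNECESSARY;	/* inner L4 checksum verified */

Per the CHECKSUM_UNNECESSARY convention, csum_level + 1 checksums are reported as verified, so the stack skips recomputing both the outer and the inner checksum.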
*/ - if (decoded.tunnel_type >= IDPF_RX_PTYPE_TUNNEL_IP_GRENAT) + if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT) skb->csum_level = 1; - /* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */ - switch (decoded.inner_prot) { - case IDPF_RX_PTYPE_INNER_PROT_ICMP: - case IDPF_RX_PTYPE_INNER_PROT_TCP: - case IDPF_RX_PTYPE_INNER_PROT_UDP: - case IDPF_RX_PTYPE_INNER_PROT_SCTP: - skb->ip_summed = CHECKSUM_UNNECESSARY; - return; - default: - return; - } + skb->ip_summed = CHECKSUM_UNNECESSARY; + return; checksum_fail: u64_stats_update_begin(&rxq->stats_sync); - u64_stats_inc(&rxq->q_stats.rx.hw_csum_err); + u64_stats_inc(&rxq->q_stats.hw_csum_err); u64_stats_update_end(&rxq->stats_sync); } /** * idpf_rx_singleq_base_csum - Indicate in skb if hw indicated a good cksum - * @rx_q: Rx completion queue - * @skb: skb currently being received and modified * @rx_desc: the receive descriptor - * @ptype: Rx packet type * * This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte * descriptor writeback format. + * + * Return: parsed checksum status. **/ -static void idpf_rx_singleq_base_csum(struct idpf_queue *rx_q, - struct sk_buff *skb, - union virtchnl2_rx_desc *rx_desc, - u16 ptype) +static struct idpf_rx_csum_decoded +idpf_rx_singleq_base_csum(const union virtchnl2_rx_desc *rx_desc) { - struct idpf_rx_csum_decoded csum_bits; + struct idpf_rx_csum_decoded csum_bits = { }; u32 rx_error, rx_status; u64 qword; @@ -745,28 +697,23 @@ static void idpf_rx_singleq_base_csum(struct idpf_queue *rx_q, rx_status); csum_bits.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_STATUS_IPV6EXADD_M, rx_status); - csum_bits.nat = 0; - csum_bits.eudpe = 0; - idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype); + return csum_bits; } /** * idpf_rx_singleq_flex_csum - Indicate in skb if hw indicated a good cksum - * @rx_q: Rx completion queue - * @skb: skb currently being received and modified * @rx_desc: the receive descriptor - * @ptype: Rx packet type * * This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible * descriptor writeback format. + * + * Return: parsed checksum status. **/ -static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q, - struct sk_buff *skb, - union virtchnl2_rx_desc *rx_desc, - u16 ptype) +static struct idpf_rx_csum_decoded +idpf_rx_singleq_flex_csum(const union virtchnl2_rx_desc *rx_desc) { - struct idpf_rx_csum_decoded csum_bits; + struct idpf_rx_csum_decoded csum_bits = { }; u16 rx_status0, rx_status1; rx_status0 = le16_to_cpu(rx_desc->flex_nic_wb.status_error0); @@ -786,9 +733,8 @@ static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q, rx_status0); csum_bits.nat = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS1_NAT_M, rx_status1); - csum_bits.pprs = 0; - idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype); + return csum_bits; } /** @@ -801,14 +747,14 @@ static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q, * This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte * descriptor writeback format. 
**/ -static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q, +static void idpf_rx_singleq_base_hash(struct idpf_rx_queue *rx_q, struct sk_buff *skb, - union virtchnl2_rx_desc *rx_desc, - struct idpf_rx_ptype_decoded *decoded) + const union virtchnl2_rx_desc *rx_desc, + struct libeth_rx_pt decoded) { u64 mask, qw1; - if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH))) + if (!libeth_rx_pt_has_hash(rx_q->netdev, decoded)) return; mask = VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH_M; @@ -817,7 +763,7 @@ static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q, if (FIELD_GET(mask, qw1) == mask) { u32 hash = le32_to_cpu(rx_desc->base_wb.qword0.hi_dword.rss); - skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded)); + libeth_rx_pt_set_hash(skb, hash, decoded); } } @@ -831,18 +777,20 @@ static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q, * This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible * descriptor writeback format. **/ -static void idpf_rx_singleq_flex_hash(struct idpf_queue *rx_q, +static void idpf_rx_singleq_flex_hash(struct idpf_rx_queue *rx_q, struct sk_buff *skb, - union virtchnl2_rx_desc *rx_desc, - struct idpf_rx_ptype_decoded *decoded) + const union virtchnl2_rx_desc *rx_desc, + struct libeth_rx_pt decoded) { - if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH))) + if (!libeth_rx_pt_has_hash(rx_q->netdev, decoded)) return; if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_M, - le16_to_cpu(rx_desc->flex_nic_wb.status_error0))) - skb_set_hash(skb, le32_to_cpu(rx_desc->flex_nic_wb.rss_hash), - idpf_ptype_to_htype(decoded)); + le16_to_cpu(rx_desc->flex_nic_wb.status_error0))) { + u32 hash = le32_to_cpu(rx_desc->flex_nic_wb.rss_hash); + + libeth_rx_pt_set_hash(skb, hash, decoded); + } } /** @@ -857,25 +805,45 @@ static void idpf_rx_singleq_flex_hash(struct idpf_queue *rx_q, * order to populate the hash, checksum, VLAN, protocol, and * other fields within the skb. 
*/ -static void idpf_rx_singleq_process_skb_fields(struct idpf_queue *rx_q, - struct sk_buff *skb, - union virtchnl2_rx_desc *rx_desc, - u16 ptype) +static void +idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q, + struct sk_buff *skb, + const union virtchnl2_rx_desc *rx_desc, + u16 ptype) { - struct idpf_rx_ptype_decoded decoded = - rx_q->vport->rx_ptype_lkup[ptype]; + struct libeth_rx_pt decoded = rx_q->rx_ptype_lkup[ptype]; + struct idpf_rx_csum_decoded csum_bits; /* modifies the skb - consumes the enet header */ - skb->protocol = eth_type_trans(skb, rx_q->vport->netdev); + skb->protocol = eth_type_trans(skb, rx_q->netdev); /* Check if we're using base mode descriptor IDs */ if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) { - idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, &decoded); - idpf_rx_singleq_base_csum(rx_q, skb, rx_desc, ptype); + idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, decoded); + csum_bits = idpf_rx_singleq_base_csum(rx_desc); } else { - idpf_rx_singleq_flex_hash(rx_q, skb, rx_desc, &decoded); - idpf_rx_singleq_flex_csum(rx_q, skb, rx_desc, ptype); + idpf_rx_singleq_flex_hash(rx_q, skb, rx_desc, decoded); + csum_bits = idpf_rx_singleq_flex_csum(rx_desc); } + + idpf_rx_singleq_csum(rx_q, skb, csum_bits, decoded); + skb_record_rx_queue(skb, rx_q->idx); +} + +/** + * idpf_rx_buf_hw_update - Store the new tail and head values + * @rxq: queue to bump + * @val: new head index + */ +static void idpf_rx_buf_hw_update(struct idpf_rx_queue *rxq, u32 val) +{ + rxq->next_to_use = val; + + if (unlikely(!rxq->tail)) + return; + + /* writel has an implicit memory barrier */ + writel(val, rxq->tail); } /** @@ -885,24 +853,28 @@ static void idpf_rx_singleq_process_skb_fields(struct idpf_queue *rx_q, * * Returns false if all allocations were successful, true if any fail */ -bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q, +bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q, u16 cleaned_count) { struct virtchnl2_singleq_rx_buf_desc *desc; + const struct libeth_fq_fp fq = { + .pp = rx_q->pp, + .fqes = rx_q->rx_buf, + .truesize = rx_q->truesize, + .count = rx_q->desc_count, + }; u16 nta = rx_q->next_to_alloc; - struct idpf_rx_buf *buf; if (!cleaned_count) return false; - desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, nta); - buf = &rx_q->rx_buf.buf[nta]; + desc = &rx_q->single_buf[nta]; do { dma_addr_t addr; - addr = idpf_alloc_page(rx_q->pp, buf, rx_q->rx_buf_size); - if (unlikely(addr == DMA_MAPPING_ERROR)) + addr = libeth_rx_alloc(&fq, nta); + if (addr == DMA_MAPPING_ERROR) break; /* Refresh the desc even if buffer_addrs didn't change @@ -912,11 +884,9 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q, desc->hdr_addr = 0; desc++; - buf++; nta++; if (unlikely(nta == rx_q->desc_count)) { - desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, 0); - buf = rx_q->rx_buf.buf; + desc = &rx_q->single_buf[0]; nta = 0; } @@ -933,7 +903,6 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q, /** * idpf_rx_singleq_extract_base_fields - Extract fields from the Rx descriptor - * @rx_q: Rx descriptor queue * @rx_desc: the descriptor to process * @fields: storage for extracted values * @@ -943,9 +912,9 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q, * This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte * descriptor writeback format. 
*/ -static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q, - union virtchnl2_rx_desc *rx_desc, - struct idpf_rx_extracted *fields) +static void +idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc, + struct idpf_rx_extracted *fields) { u64 qword; @@ -957,7 +926,6 @@ static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q, /** * idpf_rx_singleq_extract_flex_fields - Extract fields from the Rx descriptor - * @rx_q: Rx descriptor queue * @rx_desc: the descriptor to process * @fields: storage for extracted values * @@ -967,9 +935,9 @@ static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q, * This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible * descriptor writeback format. */ -static void idpf_rx_singleq_extract_flex_fields(struct idpf_queue *rx_q, - union virtchnl2_rx_desc *rx_desc, - struct idpf_rx_extracted *fields) +static void +idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc, + struct idpf_rx_extracted *fields) { fields->size = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M, le16_to_cpu(rx_desc->flex_nic_wb.pkt_len)); @@ -984,14 +952,15 @@ static void idpf_rx_singleq_extract_flex_fields(struct idpf_queue *rx_q, * @fields: storage for extracted values * */ -static void idpf_rx_singleq_extract_fields(struct idpf_queue *rx_q, - union virtchnl2_rx_desc *rx_desc, - struct idpf_rx_extracted *fields) +static void +idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q, + const union virtchnl2_rx_desc *rx_desc, + struct idpf_rx_extracted *fields) { if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) - idpf_rx_singleq_extract_base_fields(rx_q, rx_desc, fields); + idpf_rx_singleq_extract_base_fields(rx_desc, fields); else - idpf_rx_singleq_extract_flex_fields(rx_q, rx_desc, fields); + idpf_rx_singleq_extract_flex_fields(rx_desc, fields); } /** @@ -1001,7 +970,7 @@ static void idpf_rx_singleq_extract_fields(struct idpf_queue *rx_q, * * Returns true if there's any budget left (e.g. 
the clean is finished) */ -static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget) +static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget) { unsigned int total_rx_bytes = 0, total_rx_pkts = 0; struct sk_buff *skb = rx_q->skb; @@ -1016,7 +985,7 @@ static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget) struct idpf_rx_buf *rx_buf; /* get the Rx desc from Rx queue based on 'next_to_clean' */ - rx_desc = IDPF_RX_DESC(rx_q, ntc); + rx_desc = &rx_q->rx[ntc]; /* status_error_ptype_len will always be zero for unused * descriptors because it's cleared in cleanup, and overlaps @@ -1036,29 +1005,27 @@ static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget) idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields); - rx_buf = &rx_q->rx_buf.buf[ntc]; - if (!fields.size) { - idpf_rx_put_page(rx_buf); + rx_buf = &rx_q->rx_buf[ntc]; + if (!libeth_rx_sync_for_cpu(rx_buf, fields.size)) goto skip_data; - } - idpf_rx_sync_for_cpu(rx_buf, fields.size); if (skb) idpf_rx_add_frag(rx_buf, skb, fields.size); else - skb = idpf_rx_construct_skb(rx_q, rx_buf, fields.size); + skb = idpf_rx_build_skb(rx_buf, fields.size); /* exit if we failed to retrieve a buffer */ if (!skb) break; skip_data: - IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc); + rx_buf->page = NULL; + IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc); cleaned_count++; /* skip if it is non EOP desc */ - if (idpf_rx_singleq_is_non_eop(rx_q, rx_desc, skb, ntc)) + if (idpf_rx_singleq_is_non_eop(rx_desc) || unlikely(!skb)) continue; #define IDPF_RXD_ERR_S FIELD_PREP(VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M, \ @@ -1084,7 +1051,7 @@ skip_data: rx_desc, fields.rx_ptype); /* send completed skb up the stack */ - napi_gro_receive(&rx_q->q_vector->napi, skb); + napi_gro_receive(rx_q->pp->p.napi, skb); skb = NULL; /* update budget accounting */ @@ -1095,12 +1062,13 @@ skip_data: rx_q->next_to_clean = ntc; + page_pool_nid_changed(rx_q->pp, numa_mem_id()); if (cleaned_count) failure = idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count); u64_stats_update_begin(&rx_q->stats_sync); - u64_stats_add(&rx_q->q_stats.rx.packets, total_rx_pkts); - u64_stats_add(&rx_q->q_stats.rx.bytes, total_rx_bytes); + u64_stats_add(&rx_q->q_stats.packets, total_rx_pkts); + u64_stats_add(&rx_q->q_stats.bytes, total_rx_bytes); u64_stats_update_end(&rx_q->stats_sync); /* guarantee a trip back through this routine if there was a failure */ @@ -1127,7 +1095,7 @@ static bool idpf_rx_singleq_clean_all(struct idpf_q_vector *q_vec, int budget, */ budget_per_q = num_rxq ? 
max(budget / num_rxq, 1) : 0; for (i = 0; i < num_rxq; i++) { - struct idpf_queue *rxq = q_vec->rx[i]; + struct idpf_rx_queue *rxq = q_vec->rx[i]; int pkts_cleaned_per_q; pkts_cleaned_per_q = idpf_rx_singleq_clean(rxq, budget_per_q); diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index b023704bbbda..af2879f03b8d 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -1,9 +1,14 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2023 Intel Corporation */ +#include <net/libeth/rx.h> + #include "idpf.h" #include "idpf_virtchnl.h" +static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, + unsigned int count); + /** * idpf_buf_lifo_push - push a buffer pointer onto stack * @stack: pointer to stack struct @@ -60,7 +65,8 @@ void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue) * @tx_q: the queue that owns the buffer * @tx_buf: the buffer to free */ -static void idpf_tx_buf_rel(struct idpf_queue *tx_q, struct idpf_tx_buf *tx_buf) +static void idpf_tx_buf_rel(struct idpf_tx_queue *tx_q, + struct idpf_tx_buf *tx_buf) { if (tx_buf->skb) { if (dma_unmap_len(tx_buf, len)) @@ -86,8 +92,9 @@ static void idpf_tx_buf_rel(struct idpf_queue *tx_q, struct idpf_tx_buf *tx_buf) * idpf_tx_buf_rel_all - Free any empty Tx buffers * @txq: queue to be cleaned */ -static void idpf_tx_buf_rel_all(struct idpf_queue *txq) +static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq) { + struct idpf_buf_lifo *buf_stack; u16 i; /* Buffers already cleared, nothing to do */ @@ -101,39 +108,58 @@ static void idpf_tx_buf_rel_all(struct idpf_queue *txq) kfree(txq->tx_buf); txq->tx_buf = NULL; - if (!txq->buf_stack.bufs) + if (!idpf_queue_has(FLOW_SCH_EN, txq)) return; - for (i = 0; i < txq->buf_stack.size; i++) - kfree(txq->buf_stack.bufs[i]); + buf_stack = &txq->stash->buf_stack; + if (!buf_stack->bufs) + return; - kfree(txq->buf_stack.bufs); - txq->buf_stack.bufs = NULL; + for (i = 0; i < buf_stack->size; i++) + kfree(buf_stack->bufs[i]); + + kfree(buf_stack->bufs); + buf_stack->bufs = NULL; } /** * idpf_tx_desc_rel - Free Tx resources per queue * @txq: Tx descriptor ring for a specific queue - * @bufq: buffer q or completion q * * Free all transmit software resources */ -static void idpf_tx_desc_rel(struct idpf_queue *txq, bool bufq) +static void idpf_tx_desc_rel(struct idpf_tx_queue *txq) { - if (bufq) - idpf_tx_buf_rel_all(txq); + idpf_tx_buf_rel_all(txq); if (!txq->desc_ring) return; dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma); txq->desc_ring = NULL; - txq->next_to_alloc = 0; txq->next_to_use = 0; txq->next_to_clean = 0; } /** + * idpf_compl_desc_rel - Free completion resources per queue + * @complq: completion queue + * + * Free all completion software resources. 
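With the completion ring split out into its own alloc/free pair, a hypothetical caller-side error path could look like the sketch below; calling the release helper after a failed allocation is harmless because it returns early when complq->comp is NULL (the caller shown is an assumption, the two helpers are the ones this patch adds):

	err = idpf_compl_desc_alloc(vport, txq_grp->complq);
	if (err) {
		/* no-op on this path: ->comp was never set */
		idpf_compl_desc_rel(txq_grp->complq);
		return err;
	}

Note the completion ring now uses plain dma_alloc_coherent()/dma_free_coherent() against complq->netdev->dev.parent, while the Tx descriptor ring itself stays on the devres-managed dmam_* helpers.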
+ */ +static void idpf_compl_desc_rel(struct idpf_compl_queue *complq) +{ + if (!complq->comp) + return; + + dma_free_coherent(complq->netdev->dev.parent, complq->size, + complq->comp, complq->dma); + complq->comp = NULL; + complq->next_to_use = 0; + complq->next_to_clean = 0; +} + +/** * idpf_tx_desc_rel_all - Free Tx Resources for All Queues * @vport: virtual port structure * @@ -150,10 +176,10 @@ static void idpf_tx_desc_rel_all(struct idpf_vport *vport) struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; for (j = 0; j < txq_grp->num_txq; j++) - idpf_tx_desc_rel(txq_grp->txqs[j], true); + idpf_tx_desc_rel(txq_grp->txqs[j]); if (idpf_is_queue_model_split(vport->txq_model)) - idpf_tx_desc_rel(txq_grp->complq, false); + idpf_compl_desc_rel(txq_grp->complq); } } @@ -163,8 +189,9 @@ static void idpf_tx_desc_rel_all(struct idpf_vport *vport) * * Returns 0 on success, negative on failure */ -static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q) +static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q) { + struct idpf_buf_lifo *buf_stack; int buf_size; int i; @@ -180,22 +207,26 @@ static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q) for (i = 0; i < tx_q->desc_count; i++) tx_q->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; + if (!idpf_queue_has(FLOW_SCH_EN, tx_q)) + return 0; + + buf_stack = &tx_q->stash->buf_stack; + /* Initialize tx buf stack for out-of-order completions if * flow scheduling offload is enabled */ - tx_q->buf_stack.bufs = - kcalloc(tx_q->desc_count, sizeof(struct idpf_tx_stash *), - GFP_KERNEL); - if (!tx_q->buf_stack.bufs) + buf_stack->bufs = kcalloc(tx_q->desc_count, sizeof(*buf_stack->bufs), + GFP_KERNEL); + if (!buf_stack->bufs) return -ENOMEM; - tx_q->buf_stack.size = tx_q->desc_count; - tx_q->buf_stack.top = tx_q->desc_count; + buf_stack->size = tx_q->desc_count; + buf_stack->top = tx_q->desc_count; for (i = 0; i < tx_q->desc_count; i++) { - tx_q->buf_stack.bufs[i] = kzalloc(sizeof(*tx_q->buf_stack.bufs[i]), - GFP_KERNEL); - if (!tx_q->buf_stack.bufs[i]) + buf_stack->bufs[i] = kzalloc(sizeof(*buf_stack->bufs[i]), + GFP_KERNEL); + if (!buf_stack->bufs[i]) return -ENOMEM; } @@ -204,28 +235,22 @@ static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q) /** * idpf_tx_desc_alloc - Allocate the Tx descriptors + * @vport: vport to allocate resources for * @tx_q: the tx ring to set up - * @bufq: buffer or completion queue * * Returns 0 on success, negative on failure */ -static int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq) +static int idpf_tx_desc_alloc(const struct idpf_vport *vport, + struct idpf_tx_queue *tx_q) { struct device *dev = tx_q->dev; - u32 desc_sz; int err; - if (bufq) { - err = idpf_tx_buf_alloc_all(tx_q); - if (err) - goto err_alloc; - - desc_sz = sizeof(struct idpf_base_tx_desc); - } else { - desc_sz = sizeof(struct idpf_splitq_tx_compl_desc); - } + err = idpf_tx_buf_alloc_all(tx_q); + if (err) + goto err_alloc; - tx_q->size = tx_q->desc_count * desc_sz; + tx_q->size = tx_q->desc_count * sizeof(*tx_q->base_tx); /* Allocate descriptors also round up to nearest 4K */ tx_q->size = ALIGN(tx_q->size, 4096); @@ -238,20 +263,44 @@ static int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq) goto err_alloc; } - tx_q->next_to_alloc = 0; tx_q->next_to_use = 0; tx_q->next_to_clean = 0; - set_bit(__IDPF_Q_GEN_CHK, tx_q->flags); + idpf_queue_set(GEN_CHK, tx_q); return 0; err_alloc: - idpf_tx_desc_rel(tx_q, bufq); + idpf_tx_desc_rel(tx_q); return err; } /** + * idpf_compl_desc_alloc - allocate completion descriptors + * @vport: vport 
to allocate resources for + * @complq: completion queue to set up + * + * Return: 0 on success, -errno on failure. + */ +static int idpf_compl_desc_alloc(const struct idpf_vport *vport, + struct idpf_compl_queue *complq) +{ + complq->size = array_size(complq->desc_count, sizeof(*complq->comp)); + + complq->comp = dma_alloc_coherent(complq->netdev->dev.parent, + complq->size, &complq->dma, + GFP_KERNEL); + if (!complq->comp) + return -ENOMEM; + + complq->next_to_use = 0; + complq->next_to_clean = 0; + idpf_queue_set(GEN_CHK, complq); + + return 0; +} + +/** * idpf_tx_desc_alloc_all - allocate all queues Tx resources * @vport: virtual port private structure * @@ -259,7 +308,6 @@ err_alloc: */ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport) { - struct device *dev = &vport->adapter->pdev->dev; int err = 0; int i, j; @@ -268,13 +316,14 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport) */ for (i = 0; i < vport->num_txq_grp; i++) { for (j = 0; j < vport->txq_grps[i].num_txq; j++) { - struct idpf_queue *txq = vport->txq_grps[i].txqs[j]; + struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j]; u8 gen_bits = 0; u16 bufidx_mask; - err = idpf_tx_desc_alloc(txq, true); + err = idpf_tx_desc_alloc(vport, txq); if (err) { - dev_err(dev, "Allocation for Tx Queue %u failed\n", + pci_err(vport->adapter->pdev, + "Allocation for Tx Queue %u failed\n", i); goto err_out; } @@ -312,9 +361,10 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport) continue; /* Setup completion queues */ - err = idpf_tx_desc_alloc(vport->txq_grps[i].complq, false); + err = idpf_compl_desc_alloc(vport, vport->txq_grps[i].complq); if (err) { - dev_err(dev, "Allocation for Tx Completion Queue %u failed\n", + pci_err(vport->adapter->pdev, + "Allocation for Tx Completion Queue %u failed\n", i); goto err_out; } @@ -329,70 +379,97 @@ err_out: /** * idpf_rx_page_rel - Release an rx buffer page - * @rxq: the queue that owns the buffer * @rx_buf: the buffer to free */ -static void idpf_rx_page_rel(struct idpf_queue *rxq, struct idpf_rx_buf *rx_buf) +static void idpf_rx_page_rel(struct libeth_fqe *rx_buf) { if (unlikely(!rx_buf->page)) return; - page_pool_put_full_page(rxq->pp, rx_buf->page, false); + page_pool_put_full_page(rx_buf->page->pp, rx_buf->page, false); rx_buf->page = NULL; - rx_buf->page_offset = 0; + rx_buf->offset = 0; } /** * idpf_rx_hdr_buf_rel_all - Release header buffer memory - * @rxq: queue to use + * @bufq: queue to use */ -static void idpf_rx_hdr_buf_rel_all(struct idpf_queue *rxq) +static void idpf_rx_hdr_buf_rel_all(struct idpf_buf_queue *bufq) { - struct idpf_adapter *adapter = rxq->vport->adapter; + struct libeth_fq fq = { + .fqes = bufq->hdr_buf, + .pp = bufq->hdr_pp, + }; - dma_free_coherent(&adapter->pdev->dev, - rxq->desc_count * IDPF_HDR_BUF_SIZE, - rxq->rx_buf.hdr_buf_va, - rxq->rx_buf.hdr_buf_pa); - rxq->rx_buf.hdr_buf_va = NULL; + for (u32 i = 0; i < bufq->desc_count; i++) + idpf_rx_page_rel(&bufq->hdr_buf[i]); + + libeth_rx_fq_destroy(&fq); + bufq->hdr_buf = NULL; + bufq->hdr_pp = NULL; } /** - * idpf_rx_buf_rel_all - Free all Rx buffer resources for a queue - * @rxq: queue to be cleaned + * idpf_rx_buf_rel_bufq - Free all Rx buffer resources for a buffer queue + * @bufq: queue to be cleaned */ -static void idpf_rx_buf_rel_all(struct idpf_queue *rxq) +static void idpf_rx_buf_rel_bufq(struct idpf_buf_queue *bufq) { - u16 i; + struct libeth_fq fq = { + .fqes = bufq->buf, + .pp = bufq->pp, + }; /* queue already cleared, nothing to do */ - if (!rxq->rx_buf.buf) + if (!bufq->buf) 
return; /* Free all the bufs allocated and given to hw on Rx queue */ - for (i = 0; i < rxq->desc_count; i++) - idpf_rx_page_rel(rxq, &rxq->rx_buf.buf[i]); + for (u32 i = 0; i < bufq->desc_count; i++) + idpf_rx_page_rel(&bufq->buf[i]); - if (rxq->rx_hsplit_en) - idpf_rx_hdr_buf_rel_all(rxq); + if (idpf_queue_has(HSPLIT_EN, bufq)) + idpf_rx_hdr_buf_rel_all(bufq); - page_pool_destroy(rxq->pp); - rxq->pp = NULL; + libeth_rx_fq_destroy(&fq); + bufq->buf = NULL; + bufq->pp = NULL; +} + +/** + * idpf_rx_buf_rel_all - Free all Rx buffer resources for a receive queue + * @rxq: queue to be cleaned + */ +static void idpf_rx_buf_rel_all(struct idpf_rx_queue *rxq) +{ + struct libeth_fq fq = { + .fqes = rxq->rx_buf, + .pp = rxq->pp, + }; + + if (!rxq->rx_buf) + return; - kfree(rxq->rx_buf.buf); - rxq->rx_buf.buf = NULL; + for (u32 i = 0; i < rxq->desc_count; i++) + idpf_rx_page_rel(&rxq->rx_buf[i]); + + libeth_rx_fq_destroy(&fq); + rxq->rx_buf = NULL; + rxq->pp = NULL; } /** * idpf_rx_desc_rel - Free a specific Rx q resources * @rxq: queue to clean the resources from - * @bufq: buffer q or completion q - * @q_model: single or split q model + * @dev: device to free DMA memory + * @model: single or split queue model * * Free a specific rx queue resources */ -static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model) +static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev, + u32 model) { if (!rxq) return; @@ -402,7 +479,7 @@ static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model) rxq->skb = NULL; } - if (bufq || !idpf_is_queue_model_split(q_model)) + if (!idpf_is_queue_model_split(model)) idpf_rx_buf_rel_all(rxq); rxq->next_to_alloc = 0; @@ -411,11 +488,35 @@ static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model) if (!rxq->desc_ring) return; - dmam_free_coherent(rxq->dev, rxq->size, rxq->desc_ring, rxq->dma); + dmam_free_coherent(dev, rxq->size, rxq->desc_ring, rxq->dma); rxq->desc_ring = NULL; } /** + * idpf_rx_desc_rel_bufq - free buffer queue resources + * @bufq: buffer queue to clean the resources from + * @dev: device to free DMA memory + */ +static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq, + struct device *dev) +{ + if (!bufq) + return; + + idpf_rx_buf_rel_bufq(bufq); + + bufq->next_to_alloc = 0; + bufq->next_to_clean = 0; + bufq->next_to_use = 0; + + if (!bufq->split_buf) + return; + + dma_free_coherent(dev, bufq->size, bufq->split_buf, bufq->dma); + bufq->split_buf = NULL; +} + +/** * idpf_rx_desc_rel_all - Free Rx Resources for All Queues * @vport: virtual port structure * @@ -423,6 +524,7 @@ static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model) */ static void idpf_rx_desc_rel_all(struct idpf_vport *vport) { + struct device *dev = &vport->adapter->pdev->dev; struct idpf_rxq_group *rx_qgrp; u16 num_rxq; int i, j; @@ -435,15 +537,15 @@ static void idpf_rx_desc_rel_all(struct idpf_vport *vport) if (!idpf_is_queue_model_split(vport->rxq_model)) { for (j = 0; j < rx_qgrp->singleq.num_rxq; j++) - idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], - false, vport->rxq_model); + idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], dev, + VIRTCHNL2_QUEUE_MODEL_SINGLE); continue; } num_rxq = rx_qgrp->splitq.num_rxq_sets; for (j = 0; j < num_rxq; j++) idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq, - false, vport->rxq_model); + dev, VIRTCHNL2_QUEUE_MODEL_SPLIT); if (!rx_qgrp->splitq.bufq_sets) continue; @@ -452,45 +554,50 @@ static void idpf_rx_desc_rel_all(struct idpf_vport *vport) struct idpf_bufq_set 
*bufq_set = &rx_qgrp->splitq.bufq_sets[j]; - idpf_rx_desc_rel(&bufq_set->bufq, true, - vport->rxq_model); + idpf_rx_desc_rel_bufq(&bufq_set->bufq, dev); } } } /** * idpf_rx_buf_hw_update - Store the new tail and head values - * @rxq: queue to bump + * @bufq: queue to bump * @val: new head index */ -void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val) +static void idpf_rx_buf_hw_update(struct idpf_buf_queue *bufq, u32 val) { - rxq->next_to_use = val; + bufq->next_to_use = val; - if (unlikely(!rxq->tail)) + if (unlikely(!bufq->tail)) return; /* writel has an implicit memory barrier */ - writel(val, rxq->tail); + writel(val, bufq->tail); } /** * idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers - * @rxq: ring to use + * @bufq: ring to use * * Returns 0 on success, negative on failure. */ -static int idpf_rx_hdr_buf_alloc_all(struct idpf_queue *rxq) +static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq) { - struct idpf_adapter *adapter = rxq->vport->adapter; - - rxq->rx_buf.hdr_buf_va = - dma_alloc_coherent(&adapter->pdev->dev, - IDPF_HDR_BUF_SIZE * rxq->desc_count, - &rxq->rx_buf.hdr_buf_pa, - GFP_KERNEL); - if (!rxq->rx_buf.hdr_buf_va) - return -ENOMEM; + struct libeth_fq fq = { + .count = bufq->desc_count, + .type = LIBETH_FQE_HDR, + .nid = idpf_q_vector_to_mem(bufq->q_vector), + }; + int ret; + + ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi); + if (ret) + return ret; + + bufq->hdr_pp = fq.pp; + bufq->hdr_buf = fq.fqes; + bufq->hdr_truesize = fq.truesize; + bufq->rx_hbuf_size = fq.buf_len; return 0; } @@ -502,19 +609,20 @@ static int idpf_rx_hdr_buf_alloc_all(struct idpf_queue *rxq) */ static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id) { - u16 nta = refillq->next_to_alloc; + u32 nta = refillq->next_to_use; /* store the buffer ID and the SW maintained GEN bit to the refillq */ refillq->ring[nta] = FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) | FIELD_PREP(IDPF_RX_BI_GEN_M, - test_bit(__IDPF_Q_GEN_CHK, refillq->flags)); + idpf_queue_has(GEN_CHK, refillq)); if (unlikely(++nta == refillq->desc_count)) { nta = 0; - change_bit(__IDPF_Q_GEN_CHK, refillq->flags); + idpf_queue_change(GEN_CHK, refillq); } - refillq->next_to_alloc = nta; + + refillq->next_to_use = nta; } /** @@ -524,24 +632,35 @@ static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id) * * Returns false if buffer could not be allocated, true otherwise. 
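The refill entries written by idpf_rx_post_buf_refill() above pack a buffer ID and a software generation bit into a single ring word; the consumer side (not part of this hunk) unpacks them with the matching FIELD_GET() and compares the GEN bit against its own generation flag to decide whether the slot has been rewritten since the last wrap. A decoding sketch using the same masks (the local names are assumptions):

	u32 entry = refillq->ring[ntc];		/* ntc: consumer's clean index */
	u16 buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, entry);
	bool gen = FIELD_GET(IDPF_RX_BI_GEN_M, entry);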
*/ -static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id) +static bool idpf_rx_post_buf_desc(struct idpf_buf_queue *bufq, u16 buf_id) { struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc = NULL; + struct libeth_fq_fp fq = { + .count = bufq->desc_count, + }; u16 nta = bufq->next_to_alloc; - struct idpf_rx_buf *buf; dma_addr_t addr; - splitq_rx_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, nta); - buf = &bufq->rx_buf.buf[buf_id]; + splitq_rx_desc = &bufq->split_buf[nta]; - if (bufq->rx_hsplit_en) { - splitq_rx_desc->hdr_addr = - cpu_to_le64(bufq->rx_buf.hdr_buf_pa + - (u32)buf_id * IDPF_HDR_BUF_SIZE); + if (idpf_queue_has(HSPLIT_EN, bufq)) { + fq.pp = bufq->hdr_pp; + fq.fqes = bufq->hdr_buf; + fq.truesize = bufq->hdr_truesize; + + addr = libeth_rx_alloc(&fq, buf_id); + if (addr == DMA_MAPPING_ERROR) + return false; + + splitq_rx_desc->hdr_addr = cpu_to_le64(addr); } - addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size); - if (unlikely(addr == DMA_MAPPING_ERROR)) + fq.pp = bufq->pp; + fq.fqes = bufq->buf; + fq.truesize = bufq->truesize; + + addr = libeth_rx_alloc(&fq, buf_id); + if (addr == DMA_MAPPING_ERROR) return false; splitq_rx_desc->pkt_addr = cpu_to_le64(addr); @@ -562,7 +681,8 @@ static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id) * * Returns true if @working_set bufs were posted successfully, false otherwise. */ -static bool idpf_rx_post_init_bufs(struct idpf_queue *bufq, u16 working_set) +static bool idpf_rx_post_init_bufs(struct idpf_buf_queue *bufq, + u16 working_set) { int i; @@ -571,95 +691,114 @@ static bool idpf_rx_post_init_bufs(struct idpf_queue *bufq, u16 working_set) return false; } - idpf_rx_buf_hw_update(bufq, - bufq->next_to_alloc & ~(bufq->rx_buf_stride - 1)); + idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq->next_to_alloc, + IDPF_RX_BUF_STRIDE)); return true; } /** - * idpf_rx_create_page_pool - Create a page pool - * @rxbufq: RX queue to create page pool for + * idpf_rx_buf_alloc_singleq - Allocate memory for all buffer resources + * @rxq: queue for which the buffers are allocated + * + * Return: 0 on success, -ENOMEM on failure. + */ +static int idpf_rx_buf_alloc_singleq(struct idpf_rx_queue *rxq) +{ + if (idpf_rx_singleq_buf_hw_alloc_all(rxq, rxq->desc_count - 1)) + goto err; + + return 0; + +err: + idpf_rx_buf_rel_all(rxq); + + return -ENOMEM; +} + +/** + * idpf_rx_bufs_init_singleq - Initialize page pool and allocate Rx bufs + * @rxq: buffer queue to create page pool for * - * Returns &page_pool on success, casted -errno on failure + * Return: 0 on success, -errno on failure. 
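The libeth conversion used here and in the buffer-queue paths elsewhere in this patch follows one fill-in/fill-out contract: the driver supplies the ring length, fill-element type and NUMA hint, and libeth_rx_fq_create() returns the page pool, the fqe array and the buffer sizing it settled on, which the queue caches for the hot path. A condensed sketch of that contract, with ring_len, q_vector, napi and the output locals as placeholder names:

	struct libeth_fq fq = {
		/* driver-supplied inputs */
		.count	= ring_len,
		.type	= LIBETH_FQE_MTU,	/* LIBETH_FQE_HDR for header buffers */
		.nid	= idpf_q_vector_to_mem(q_vector),
	};
	int err;

	err = libeth_rx_fq_create(&fq, napi);
	if (err)
		return err;

	/* libeth-supplied outputs */
	pp = fq.pp;
	bufs = fq.fqes;
	truesize = fq.truesize;
	buf_len = fq.buf_len;

Teardown mirrors it: put the cached pp and fqes back into a struct libeth_fq and call libeth_rx_fq_destroy(), as idpf_rx_buf_rel_all() above already does.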
*/ -static struct page_pool *idpf_rx_create_page_pool(struct idpf_queue *rxbufq) +static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq) { - struct page_pool_params pp = { - .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, - .order = 0, - .pool_size = rxbufq->desc_count, - .nid = NUMA_NO_NODE, - .dev = rxbufq->vport->netdev->dev.parent, - .max_len = PAGE_SIZE, - .dma_dir = DMA_FROM_DEVICE, - .offset = 0, + struct libeth_fq fq = { + .count = rxq->desc_count, + .type = LIBETH_FQE_MTU, + .nid = idpf_q_vector_to_mem(rxq->q_vector), }; + int ret; + + ret = libeth_rx_fq_create(&fq, &rxq->q_vector->napi); + if (ret) + return ret; + + rxq->pp = fq.pp; + rxq->rx_buf = fq.fqes; + rxq->truesize = fq.truesize; + rxq->rx_buf_size = fq.buf_len; - return page_pool_create(&pp); + return idpf_rx_buf_alloc_singleq(rxq); } /** * idpf_rx_buf_alloc_all - Allocate memory for all buffer resources - * @rxbufq: queue for which the buffers are allocated; equivalent to - * rxq when operating in singleq mode + * @rxbufq: queue for which the buffers are allocated * * Returns 0 on success, negative on failure */ -static int idpf_rx_buf_alloc_all(struct idpf_queue *rxbufq) +static int idpf_rx_buf_alloc_all(struct idpf_buf_queue *rxbufq) { int err = 0; - /* Allocate book keeping buffers */ - rxbufq->rx_buf.buf = kcalloc(rxbufq->desc_count, - sizeof(struct idpf_rx_buf), GFP_KERNEL); - if (!rxbufq->rx_buf.buf) { - err = -ENOMEM; - goto rx_buf_alloc_all_out; - } - - if (rxbufq->rx_hsplit_en) { + if (idpf_queue_has(HSPLIT_EN, rxbufq)) { err = idpf_rx_hdr_buf_alloc_all(rxbufq); if (err) goto rx_buf_alloc_all_out; } /* Allocate buffers to be given to HW. */ - if (idpf_is_queue_model_split(rxbufq->vport->rxq_model)) { - int working_set = IDPF_RX_BUFQ_WORKING_SET(rxbufq); - - if (!idpf_rx_post_init_bufs(rxbufq, working_set)) - err = -ENOMEM; - } else { - if (idpf_rx_singleq_buf_hw_alloc_all(rxbufq, - rxbufq->desc_count - 1)) - err = -ENOMEM; - } + if (!idpf_rx_post_init_bufs(rxbufq, IDPF_RX_BUFQ_WORKING_SET(rxbufq))) + err = -ENOMEM; rx_buf_alloc_all_out: if (err) - idpf_rx_buf_rel_all(rxbufq); + idpf_rx_buf_rel_bufq(rxbufq); return err; } /** * idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW - * @rxbufq: RX queue to create page pool for + * @bufq: buffer queue to create page pool for + * @type: type of Rx buffers to allocate * * Returns 0 on success, negative on failure */ -static int idpf_rx_bufs_init(struct idpf_queue *rxbufq) +static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq, + enum libeth_fqe_type type) { - struct page_pool *pool; + struct libeth_fq fq = { + .truesize = bufq->truesize, + .count = bufq->desc_count, + .type = type, + .hsplit = idpf_queue_has(HSPLIT_EN, bufq), + .nid = idpf_q_vector_to_mem(bufq->q_vector), + }; + int ret; - pool = idpf_rx_create_page_pool(rxbufq); - if (IS_ERR(pool)) - return PTR_ERR(pool); + ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi); + if (ret) + return ret; - rxbufq->pp = pool; + bufq->pp = fq.pp; + bufq->buf = fq.fqes; + bufq->truesize = fq.truesize; + bufq->rx_buf_size = fq.buf_len; - return idpf_rx_buf_alloc_all(rxbufq); + return idpf_rx_buf_alloc_all(bufq); } /** @@ -670,20 +809,22 @@ static int idpf_rx_bufs_init(struct idpf_queue *rxbufq) */ int idpf_rx_bufs_init_all(struct idpf_vport *vport) { - struct idpf_rxq_group *rx_qgrp; - struct idpf_queue *q; + bool split = idpf_is_queue_model_split(vport->rxq_model); int i, j, err; for (i = 0; i < vport->num_rxq_grp; i++) { - rx_qgrp = &vport->rxq_grps[i]; + struct idpf_rxq_group *rx_qgrp 
= &vport->rxq_grps[i]; + u32 truesize = 0; /* Allocate bufs for the rxq itself in singleq */ - if (!idpf_is_queue_model_split(vport->rxq_model)) { + if (!split) { int num_rxq = rx_qgrp->singleq.num_rxq; for (j = 0; j < num_rxq; j++) { + struct idpf_rx_queue *q; + q = rx_qgrp->singleq.rxqs[j]; - err = idpf_rx_bufs_init(q); + err = idpf_rx_bufs_init_singleq(q); if (err) return err; } @@ -693,10 +834,19 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport) /* Otherwise, allocate bufs for the buffer queues */ for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + enum libeth_fqe_type type; + struct idpf_buf_queue *q; + q = &rx_qgrp->splitq.bufq_sets[j].bufq; - err = idpf_rx_bufs_init(q); + q->truesize = truesize; + + type = truesize ? LIBETH_FQE_SHORT : LIBETH_FQE_MTU; + + err = idpf_rx_bufs_init(q, type); if (err) return err; + + truesize = q->truesize >> 1; } } @@ -705,22 +855,17 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport) /** * idpf_rx_desc_alloc - Allocate queue Rx resources + * @vport: vport to allocate resources for * @rxq: Rx queue for which the resources are setup - * @bufq: buffer or completion queue - * @q_model: single or split queue model * * Returns 0 on success, negative on failure */ -static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model) +static int idpf_rx_desc_alloc(const struct idpf_vport *vport, + struct idpf_rx_queue *rxq) { - struct device *dev = rxq->dev; + struct device *dev = &vport->adapter->pdev->dev; - if (bufq) - rxq->size = rxq->desc_count * - sizeof(struct virtchnl2_splitq_rx_buf_desc); - else - rxq->size = rxq->desc_count * - sizeof(union virtchnl2_rx_desc); + rxq->size = rxq->desc_count * sizeof(union virtchnl2_rx_desc); /* Allocate descriptors and also round up to nearest 4K */ rxq->size = ALIGN(rxq->size, 4096); @@ -735,7 +880,35 @@ static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model) rxq->next_to_alloc = 0; rxq->next_to_clean = 0; rxq->next_to_use = 0; - set_bit(__IDPF_Q_GEN_CHK, rxq->flags); + idpf_queue_set(GEN_CHK, rxq); + + return 0; +} + +/** + * idpf_bufq_desc_alloc - Allocate buffer queue descriptor ring + * @vport: vport to allocate resources for + * @bufq: buffer queue for which the resources are set up + * + * Return: 0 on success, -ENOMEM on failure. 
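In the buffer-queue loop above, the first queue of each group gets an MTU-sized fill and every following queue a "short" fill whose size hint is half of the previous queue's reported truesize. A standalone sketch of that cascade; the 4 KB truesize and the FILL_* names are assumptions, standing in for LIBETH_FQE_MTU/LIBETH_FQE_SHORT:

#include <stdio.h>

enum fill_type { FILL_MTU, FILL_SHORT };	/* stand-ins for the libeth types */

int main(void)
{
	unsigned int truesize = 0;	/* 0 means no previous queue in this group yet */

	for (int j = 0; j < 2; j++) {
		enum fill_type type = truesize ? FILL_SHORT : FILL_MTU;
		/* assume the pool reports a 4 KB truesize for the MTU fill */
		unsigned int reported = truesize ? truesize : 4096;

		printf("bufq%d: %s fill, truesize hint %u\n", j,
		       type == FILL_MTU ? "MTU" : "short", truesize);

		truesize = reported >> 1;	/* next queue gets half-sized buffers */
	}

	return 0;
}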
+ */ +static int idpf_bufq_desc_alloc(const struct idpf_vport *vport, + struct idpf_buf_queue *bufq) +{ + struct device *dev = &vport->adapter->pdev->dev; + + bufq->size = array_size(bufq->desc_count, sizeof(*bufq->split_buf)); + + bufq->split_buf = dma_alloc_coherent(dev, bufq->size, &bufq->dma, + GFP_KERNEL); + if (!bufq->split_buf) + return -ENOMEM; + + bufq->next_to_alloc = 0; + bufq->next_to_clean = 0; + bufq->next_to_use = 0; + + idpf_queue_set(GEN_CHK, bufq); return 0; } @@ -748,9 +921,7 @@ static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model) */ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport) { - struct device *dev = &vport->adapter->pdev->dev; struct idpf_rxq_group *rx_qgrp; - struct idpf_queue *q; int i, j, err; u16 num_rxq; @@ -762,13 +933,17 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport) num_rxq = rx_qgrp->singleq.num_rxq; for (j = 0; j < num_rxq; j++) { + struct idpf_rx_queue *q; + if (idpf_is_queue_model_split(vport->rxq_model)) q = &rx_qgrp->splitq.rxq_sets[j]->rxq; else q = rx_qgrp->singleq.rxqs[j]; - err = idpf_rx_desc_alloc(q, false, vport->rxq_model); + + err = idpf_rx_desc_alloc(vport, q); if (err) { - dev_err(dev, "Memory allocation for Rx Queue %u failed\n", + pci_err(vport->adapter->pdev, + "Memory allocation for Rx Queue %u failed\n", i); goto err_out; } @@ -778,10 +953,14 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport) continue; for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + struct idpf_buf_queue *q; + q = &rx_qgrp->splitq.bufq_sets[j].bufq; - err = idpf_rx_desc_alloc(q, true, vport->rxq_model); + + err = idpf_bufq_desc_alloc(vport, q); if (err) { - dev_err(dev, "Memory allocation for Rx Buffer Queue %u failed\n", + pci_err(vport->adapter->pdev, + "Memory allocation for Rx Buffer Queue %u failed\n", i); goto err_out; } @@ -802,11 +981,16 @@ err_out: */ static void idpf_txq_group_rel(struct idpf_vport *vport) { + bool split, flow_sch_en; int i, j; if (!vport->txq_grps) return; + split = idpf_is_queue_model_split(vport->txq_model); + flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, + VIRTCHNL2_CAP_SPLITQ_QSCHED); + for (i = 0; i < vport->num_txq_grp; i++) { struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; @@ -814,8 +998,15 @@ static void idpf_txq_group_rel(struct idpf_vport *vport) kfree(txq_grp->txqs[j]); txq_grp->txqs[j] = NULL; } + + if (!split) + continue; + kfree(txq_grp->complq); txq_grp->complq = NULL; + + if (flow_sch_en) + kfree(txq_grp->stashes); } kfree(vport->txq_grps); vport->txq_grps = NULL; @@ -919,7 +1110,7 @@ static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport) { int i, j, k = 0; - vport->txqs = kcalloc(vport->num_txq, sizeof(struct idpf_queue *), + vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs), GFP_KERNEL); if (!vport->txqs) @@ -967,17 +1158,11 @@ void idpf_vport_init_num_qs(struct idpf_vport *vport, /* Adjust number of buffer queues per Rx queue group. 
*/ if (!idpf_is_queue_model_split(vport->rxq_model)) { vport->num_bufqs_per_qgrp = 0; - vport->bufq_size[0] = IDPF_RX_BUF_2048; return; } vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP; - /* Bufq[0] default buffer size is 4K - * Bufq[1] default buffer size is 2K - */ - vport->bufq_size[0] = IDPF_RX_BUF_4096; - vport->bufq_size[1] = IDPF_RX_BUF_2048; } /** @@ -1137,9 +1322,10 @@ static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport, * @q: rx queue for which descids are set * */ -static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q) +static void idpf_rxq_set_descids(const struct idpf_vport *vport, + struct idpf_rx_queue *q) { - if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { + if (idpf_is_queue_model_split(vport->rxq_model)) { q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M; } else { if (vport->base_rxd) @@ -1158,20 +1344,22 @@ static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q) */ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) { - bool flow_sch_en; - int err, i; + bool split, flow_sch_en; + int i; vport->txq_grps = kcalloc(vport->num_txq_grp, sizeof(*vport->txq_grps), GFP_KERNEL); if (!vport->txq_grps) return -ENOMEM; + split = idpf_is_queue_model_split(vport->txq_model); flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_SPLITQ_QSCHED); for (i = 0; i < vport->num_txq_grp; i++) { struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; struct idpf_adapter *adapter = vport->adapter; + struct idpf_txq_stash *stashes; int j; tx_qgrp->vport = vport; @@ -1180,45 +1368,62 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) for (j = 0; j < tx_qgrp->num_txq; j++) { tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]), GFP_KERNEL); - if (!tx_qgrp->txqs[j]) { - err = -ENOMEM; + if (!tx_qgrp->txqs[j]) goto err_alloc; - } + } + + if (split && flow_sch_en) { + stashes = kcalloc(num_txq, sizeof(*stashes), + GFP_KERNEL); + if (!stashes) + goto err_alloc; + + tx_qgrp->stashes = stashes; } for (j = 0; j < tx_qgrp->num_txq; j++) { - struct idpf_queue *q = tx_qgrp->txqs[j]; + struct idpf_tx_queue *q = tx_qgrp->txqs[j]; q->dev = &adapter->pdev->dev; q->desc_count = vport->txq_desc_count; q->tx_max_bufs = idpf_get_max_tx_bufs(adapter); q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter); - q->vport = vport; + q->netdev = vport->netdev; q->txq_grp = tx_qgrp; - hash_init(q->sched_buf_hash); - if (flow_sch_en) - set_bit(__IDPF_Q_FLOW_SCH_EN, q->flags); + if (!split) { + q->clean_budget = vport->compln_clean_budget; + idpf_queue_assign(CRC_EN, q, + vport->crc_enable); + } + + if (!flow_sch_en) + continue; + + if (split) { + q->stash = &stashes[j]; + hash_init(q->stash->sched_buf_hash); + } + + idpf_queue_set(FLOW_SCH_EN, q); } - if (!idpf_is_queue_model_split(vport->txq_model)) + if (!split) continue; tx_qgrp->complq = kcalloc(IDPF_COMPLQ_PER_GROUP, sizeof(*tx_qgrp->complq), GFP_KERNEL); - if (!tx_qgrp->complq) { - err = -ENOMEM; + if (!tx_qgrp->complq) goto err_alloc; - } - tx_qgrp->complq->dev = &adapter->pdev->dev; tx_qgrp->complq->desc_count = vport->complq_desc_count; - tx_qgrp->complq->vport = vport; tx_qgrp->complq->txq_grp = tx_qgrp; + tx_qgrp->complq->netdev = vport->netdev; + tx_qgrp->complq->clean_budget = vport->compln_clean_budget; if (flow_sch_en) - __set_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags); + idpf_queue_set(FLOW_SCH_EN, tx_qgrp->complq); } return 0; @@ -1226,7 +1431,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 
num_txq) err_alloc: idpf_txq_group_rel(vport); - return err; + return -ENOMEM; } /** @@ -1238,8 +1443,6 @@ err_alloc: */ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) { - struct idpf_adapter *adapter = vport->adapter; - struct idpf_queue *q; int i, k, err = 0; bool hs; @@ -1292,21 +1495,13 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[j]; int swq_size = sizeof(struct idpf_sw_queue); + struct idpf_buf_queue *q; q = &rx_qgrp->splitq.bufq_sets[j].bufq; - q->dev = &adapter->pdev->dev; q->desc_count = vport->bufq_desc_count[j]; - q->vport = vport; - q->rxq_grp = rx_qgrp; - q->idx = j; - q->rx_buf_size = vport->bufq_size[j]; q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; - q->rx_buf_stride = IDPF_RX_BUF_STRIDE; - if (hs) { - q->rx_hsplit_en = true; - q->rx_hbuf_size = IDPF_HDR_BUF_SIZE; - } + idpf_queue_assign(HSPLIT_EN, q, hs); bufq_set->num_refillqs = num_rxq; bufq_set->refillqs = kcalloc(num_rxq, swq_size, @@ -1319,13 +1514,12 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) struct idpf_sw_queue *refillq = &bufq_set->refillqs[k]; - refillq->dev = &vport->adapter->pdev->dev; refillq->desc_count = vport->bufq_desc_count[j]; - set_bit(__IDPF_Q_GEN_CHK, refillq->flags); - set_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags); + idpf_queue_set(GEN_CHK, refillq); + idpf_queue_set(RFL_GEN_CHK, refillq); refillq->ring = kcalloc(refillq->desc_count, - sizeof(u16), + sizeof(*refillq->ring), GFP_KERNEL); if (!refillq->ring) { err = -ENOMEM; @@ -1336,36 +1530,30 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) skip_splitq_rx_init: for (j = 0; j < num_rxq; j++) { + struct idpf_rx_queue *q; + if (!idpf_is_queue_model_split(vport->rxq_model)) { q = rx_qgrp->singleq.rxqs[j]; goto setup_rxq; } q = &rx_qgrp->splitq.rxq_sets[j]->rxq; - rx_qgrp->splitq.rxq_sets[j]->refillq0 = + rx_qgrp->splitq.rxq_sets[j]->refillq[0] = &rx_qgrp->splitq.bufq_sets[0].refillqs[j]; if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) - rx_qgrp->splitq.rxq_sets[j]->refillq1 = + rx_qgrp->splitq.rxq_sets[j]->refillq[1] = &rx_qgrp->splitq.bufq_sets[1].refillqs[j]; - if (hs) { - q->rx_hsplit_en = true; - q->rx_hbuf_size = IDPF_HDR_BUF_SIZE; - } + idpf_queue_assign(HSPLIT_EN, q, hs); setup_rxq: - q->dev = &adapter->pdev->dev; q->desc_count = vport->rxq_desc_count; - q->vport = vport; - q->rxq_grp = rx_qgrp; + q->rx_ptype_lkup = vport->rx_ptype_lkup; + q->netdev = vport->netdev; + q->bufq_sets = rx_qgrp->splitq.bufq_sets; q->idx = (i * num_rxq) + j; - /* In splitq mode, RXQ buffer size should be - * set to that of the first buffer queue - * associated with this RXQ - */ - q->rx_buf_size = vport->bufq_size[0]; q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; q->rx_max_pkt_size = vport->netdev->mtu + - IDPF_PACKET_HDR_PAD; + LIBETH_RX_LL_LEN; idpf_rxq_set_descids(vport, q); } } @@ -1445,12 +1633,13 @@ err_out: * idpf_tx_handle_sw_marker - Handle queue marker packet * @tx_q: tx queue to handle software marker */ -static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q) +static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q) { - struct idpf_vport *vport = tx_q->vport; + struct idpf_netdev_priv *priv = netdev_priv(tx_q->netdev); + struct idpf_vport *vport = priv->vport; int i; - clear_bit(__IDPF_Q_SW_MARKER, tx_q->flags); + idpf_queue_clear(SW_MARKER, tx_q); /* Hardware must write marker packets to all queues associated with * completion queues. 
So check if all queues received marker packets */ @@ -1458,7 +1647,7 @@ static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q) /* If we're still waiting on any other TXQ marker completions, * just return now since we cannot wake up the marker_wq yet. */ - if (test_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags)) + if (idpf_queue_has(SW_MARKER, vport->txqs[i])) return; /* Drain complete */ @@ -1474,7 +1663,7 @@ static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q) * @cleaned: pointer to stats struct to track cleaned packets/bytes * @napi_budget: Used to determine if we are in netpoll */ -static void idpf_tx_splitq_clean_hdr(struct idpf_queue *tx_q, +static void idpf_tx_splitq_clean_hdr(struct idpf_tx_queue *tx_q, struct idpf_tx_buf *tx_buf, struct idpf_cleaned_stats *cleaned, int napi_budget) @@ -1505,7 +1694,8 @@ static void idpf_tx_splitq_clean_hdr(struct idpf_queue *tx_q, * @cleaned: pointer to stats struct to track cleaned packets/bytes * @budget: Used to determine if we are in netpoll */ -static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag, +static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq, + u16 compl_tag, struct idpf_cleaned_stats *cleaned, int budget) { @@ -1513,7 +1703,7 @@ static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag, struct hlist_node *tmp_buf; /* Buffer completion */ - hash_for_each_possible_safe(txq->sched_buf_hash, stash, tmp_buf, + hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf, hlist, compl_tag) { if (unlikely(stash->buf.compl_tag != (int)compl_tag)) continue; @@ -1530,7 +1720,7 @@ static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag, } /* Push shadow buf back onto stack */ - idpf_buf_lifo_push(&txq->buf_stack, stash); + idpf_buf_lifo_push(&txq->stash->buf_stack, stash); hash_del(&stash->hlist); } @@ -1542,7 +1732,7 @@ static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag, * @txq: Tx queue to clean * @tx_buf: buffer to store */ -static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq, +static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq, struct idpf_tx_buf *tx_buf) { struct idpf_tx_stash *stash; @@ -1551,10 +1741,10 @@ static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq, !dma_unmap_len(tx_buf, len))) return 0; - stash = idpf_buf_lifo_pop(&txq->buf_stack); + stash = idpf_buf_lifo_pop(&txq->stash->buf_stack); if (unlikely(!stash)) { net_err_ratelimited("%s: No out-of-order TX buffers left!\n", - txq->vport->netdev->name); + netdev_name(txq->netdev)); return -ENOMEM; } @@ -1568,7 +1758,8 @@ static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq, stash->buf.compl_tag = tx_buf->compl_tag; /* Add buffer to buf_hash table to be freed later */ - hash_add(txq->sched_buf_hash, &stash->hlist, stash->buf.compl_tag); + hash_add(txq->stash->sched_buf_hash, &stash->hlist, + stash->buf.compl_tag); memset(tx_buf, 0, sizeof(struct idpf_tx_buf)); @@ -1584,7 +1775,7 @@ do { \ if (unlikely(!(ntc))) { \ ntc -= (txq)->desc_count; \ buf = (txq)->tx_buf; \ - desc = IDPF_FLEX_TX_DESC(txq, 0); \ + desc = &(txq)->flex_tx[0]; \ } else { \ (buf)++; \ (desc)++; \ @@ -1607,7 +1798,7 @@ do { \ * and the buffers will be cleaned separately. The stats are not updated from * this function when using flow-based scheduling. 
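The flow-scheduling clean path above parks out-of-order TX buffers keyed by their completion tag and reclaims them once that tag is reported on the completion queue. A simplified standalone sketch of that keying idea; the open-coded bucket list here is only an illustration, the driver itself uses a kernel hashtable plus a buffer LIFO.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical stash entry: an out-of-order TX buffer parked by completion tag
 * until the HW reports that tag on the completion queue. */
struct tx_stash {
	struct tx_stash *next;
	uint16_t compl_tag;
	/* DMA address, length, skb pointer, ... */
};

#define STASH_BUCKETS	64u

static struct tx_stash *stash_buckets[STASH_BUCKETS];

static void stash_add(struct tx_stash *s)
{
	unsigned int b = s->compl_tag % STASH_BUCKETS;

	s->next = stash_buckets[b];
	stash_buckets[b] = s;
}

/* Called for an RS completion carrying @tag: unlink and return one match. */
static struct tx_stash *stash_pop(uint16_t tag)
{
	struct tx_stash **pp = &stash_buckets[tag % STASH_BUCKETS];

	for (; *pp; pp = &(*pp)->next) {
		if ((*pp)->compl_tag == tag) {
			struct tx_stash *found = *pp;

			*pp = found->next;
			return found;
		}
	}

	return NULL;
}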
*/ -static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end, +static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end, int napi_budget, struct idpf_cleaned_stats *cleaned, bool descs_only) @@ -1617,8 +1808,8 @@ static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end, s16 ntc = tx_q->next_to_clean; struct idpf_tx_buf *tx_buf; - tx_desc = IDPF_FLEX_TX_DESC(tx_q, ntc); - next_pending_desc = IDPF_FLEX_TX_DESC(tx_q, end); + tx_desc = &tx_q->flex_tx[ntc]; + next_pending_desc = &tx_q->flex_tx[end]; tx_buf = &tx_q->tx_buf[ntc]; ntc -= tx_q->desc_count; @@ -1703,7 +1894,7 @@ do { \ * stashed. Returns the byte/segment count for the cleaned packet associated * this completion tag. */ -static bool idpf_tx_clean_buf_ring(struct idpf_queue *txq, u16 compl_tag, +static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag, struct idpf_cleaned_stats *cleaned, int budget) { @@ -1772,14 +1963,14 @@ static bool idpf_tx_clean_buf_ring(struct idpf_queue *txq, u16 compl_tag, * * Returns bytes/packets cleaned */ -static void idpf_tx_handle_rs_completion(struct idpf_queue *txq, +static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq, struct idpf_splitq_tx_compl_desc *desc, struct idpf_cleaned_stats *cleaned, int budget) { u16 compl_tag; - if (!test_bit(__IDPF_Q_FLOW_SCH_EN, txq->flags)) { + if (!idpf_queue_has(FLOW_SCH_EN, txq)) { u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head); return idpf_tx_splitq_clean(txq, head, budget, cleaned, false); @@ -1802,24 +1993,23 @@ static void idpf_tx_handle_rs_completion(struct idpf_queue *txq, * * Returns true if there's any budget left (e.g. the clean is finished) */ -static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget, +static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget, int *cleaned) { struct idpf_splitq_tx_compl_desc *tx_desc; - struct idpf_vport *vport = complq->vport; s16 ntc = complq->next_to_clean; struct idpf_netdev_priv *np; unsigned int complq_budget; bool complq_ok = true; int i; - complq_budget = vport->compln_clean_budget; - tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, ntc); + complq_budget = complq->clean_budget; + tx_desc = &complq->comp[ntc]; ntc -= complq->desc_count; do { struct idpf_cleaned_stats cleaned_stats = { }; - struct idpf_queue *tx_q; + struct idpf_tx_queue *tx_q; int rel_tx_qid; u16 hw_head; u8 ctype; /* completion type */ @@ -1828,7 +2018,7 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget, /* if the descriptor isn't done, no work yet to do */ gen = le16_get_bits(tx_desc->qid_comptype_gen, IDPF_TXD_COMPLQ_GEN_M); - if (test_bit(__IDPF_Q_GEN_CHK, complq->flags) != gen) + if (idpf_queue_has(GEN_CHK, complq) != gen) break; /* Find necessary info of TX queue to clean buffers */ @@ -1836,8 +2026,7 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget, IDPF_TXD_COMPLQ_QID_M); if (rel_tx_qid >= complq->txq_grp->num_txq || !complq->txq_grp->txqs[rel_tx_qid]) { - dev_err(&complq->vport->adapter->pdev->dev, - "TxQ not found\n"); + netdev_err(complq->netdev, "TxQ not found\n"); goto fetch_next_desc; } tx_q = complq->txq_grp->txqs[rel_tx_qid]; @@ -1860,15 +2049,14 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget, idpf_tx_handle_sw_marker(tx_q); break; default: - dev_err(&tx_q->vport->adapter->pdev->dev, - "Unknown TX completion type: %d\n", - ctype); + netdev_err(tx_q->netdev, + "Unknown TX completion type: %d\n", ctype); goto fetch_next_desc; } 
u64_stats_update_begin(&tx_q->stats_sync); - u64_stats_add(&tx_q->q_stats.tx.packets, cleaned_stats.packets); - u64_stats_add(&tx_q->q_stats.tx.bytes, cleaned_stats.bytes); + u64_stats_add(&tx_q->q_stats.packets, cleaned_stats.packets); + u64_stats_add(&tx_q->q_stats.bytes, cleaned_stats.bytes); tx_q->cleaned_pkts += cleaned_stats.packets; tx_q->cleaned_bytes += cleaned_stats.bytes; complq->num_completions++; @@ -1879,8 +2067,8 @@ fetch_next_desc: ntc++; if (unlikely(!ntc)) { ntc -= complq->desc_count; - tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, 0); - change_bit(__IDPF_Q_GEN_CHK, complq->flags); + tx_desc = &complq->comp[0]; + idpf_queue_change(GEN_CHK, complq); } prefetch(tx_desc); @@ -1896,9 +2084,9 @@ fetch_next_desc: IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq))) complq_ok = false; - np = netdev_priv(complq->vport->netdev); + np = netdev_priv(complq->netdev); for (i = 0; i < complq->txq_grp->num_txq; ++i) { - struct idpf_queue *tx_q = complq->txq_grp->txqs[i]; + struct idpf_tx_queue *tx_q = complq->txq_grp->txqs[i]; struct netdev_queue *nq; bool dont_wake; @@ -1909,11 +2097,11 @@ fetch_next_desc: *cleaned += tx_q->cleaned_pkts; /* Update BQL */ - nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) || np->state != __IDPF_VPORT_UP || - !netif_carrier_ok(tx_q->vport->netdev); + !netif_carrier_ok(tx_q->netdev); /* Check if the TXQ needs to and can be restarted */ __netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes, IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH, @@ -1976,7 +2164,7 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc, * * Returns 0 if stop is not needed */ -int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size) +int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size) { struct netdev_queue *nq; @@ -1984,10 +2172,10 @@ int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size) return 0; u64_stats_update_begin(&tx_q->stats_sync); - u64_stats_inc(&tx_q->q_stats.tx.q_busy); + u64_stats_inc(&tx_q->q_stats.q_busy); u64_stats_update_end(&tx_q->stats_sync); - nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size); } @@ -1999,7 +2187,7 @@ int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size) * * Returns 0 if stop is not needed */ -static int idpf_tx_maybe_stop_splitq(struct idpf_queue *tx_q, +static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q, unsigned int descs_needed) { if (idpf_tx_maybe_stop_common(tx_q, descs_needed)) @@ -2023,9 +2211,9 @@ static int idpf_tx_maybe_stop_splitq(struct idpf_queue *tx_q, splitq_stop: u64_stats_update_begin(&tx_q->stats_sync); - u64_stats_inc(&tx_q->q_stats.tx.q_busy); + u64_stats_inc(&tx_q->q_stats.q_busy); u64_stats_update_end(&tx_q->stats_sync); - netif_stop_subqueue(tx_q->vport->netdev, tx_q->idx); + netif_stop_subqueue(tx_q->netdev, tx_q->idx); return -EBUSY; } @@ -2040,12 +2228,12 @@ splitq_stop: * to do a register write to update our queue status. We know this can only * mean tail here as HW should be owning head for TX. 
*/ -void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val, +void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val, bool xmit_more) { struct netdev_queue *nq; - nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); tx_q->next_to_use = val; idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED); @@ -2069,7 +2257,7 @@ void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val, * * Returns number of data descriptors needed for this skb. */ -unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq, +unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq, struct sk_buff *skb) { const struct skb_shared_info *shinfo; @@ -2102,7 +2290,7 @@ unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq, count = idpf_size_to_txd_count(skb->len); u64_stats_update_begin(&txq->stats_sync); - u64_stats_inc(&txq->q_stats.tx.linearize); + u64_stats_inc(&txq->q_stats.linearize); u64_stats_update_end(&txq->stats_sync); } @@ -2116,11 +2304,11 @@ unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq, * @first: original first buffer info buffer for packet * @idx: starting point on ring to unwind */ -void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb, +void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb, struct idpf_tx_buf *first, u16 idx) { u64_stats_update_begin(&txq->stats_sync); - u64_stats_inc(&txq->q_stats.tx.dma_map_errs); + u64_stats_inc(&txq->q_stats.dma_map_errs); u64_stats_update_end(&txq->stats_sync); /* clear dma mappings for failed tx_buf map */ @@ -2143,7 +2331,7 @@ void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb, * used one additional descriptor for a context * descriptor. Reset that here. */ - tx_desc = IDPF_FLEX_TX_DESC(txq, idx); + tx_desc = &txq->flex_tx[idx]; memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc)); if (idx == 0) idx = txq->desc_count; @@ -2159,7 +2347,7 @@ void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb, * @txq: the tx ring to wrap * @ntu: ring index to bump */ -static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_queue *txq, u16 ntu) +static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu) { ntu++; @@ -2181,7 +2369,7 @@ static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_queue *txq, u16 ntu) * and gets a physical address for each memory location and programs * it and the length into the transmit flex descriptor. 
*/ -static void idpf_tx_splitq_map(struct idpf_queue *tx_q, +static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q, struct idpf_tx_splitq_params *params, struct idpf_tx_buf *first) { @@ -2202,7 +2390,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q, data_len = skb->data_len; size = skb_headlen(skb); - tx_desc = IDPF_FLEX_TX_DESC(tx_q, i); + tx_desc = &tx_q->flex_tx[i]; dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE); @@ -2275,7 +2463,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q, i++; if (i == tx_q->desc_count) { - tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0); + tx_desc = &tx_q->flex_tx[0]; i = 0; tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q); @@ -2320,7 +2508,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q, i++; if (i == tx_q->desc_count) { - tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0); + tx_desc = &tx_q->flex_tx[0]; i = 0; tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q); } @@ -2348,7 +2536,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q, tx_q->txq_grp->num_completions_pending++; /* record bytecount for BQL */ - nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); netdev_tx_sent_queue(nq, first->bytecount); idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more()); @@ -2525,8 +2713,8 @@ static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs) * E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO * header, 1 for segment payload, and then 7 for the fragments. */ -bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, - unsigned int count) +static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, + unsigned int count) { if (likely(count < max_bufs)) return false; @@ -2544,7 +2732,7 @@ bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, * ring entry to reflect that this index is a context descriptor */ static struct idpf_flex_tx_ctx_desc * -idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq) +idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq) { struct idpf_flex_tx_ctx_desc *desc; int i = txq->next_to_use; @@ -2553,7 +2741,7 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq) txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; /* grab the next descriptor */ - desc = IDPF_FLEX_TX_CTX_DESC(txq, i); + desc = &txq->flex_ctx[i]; txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i); return desc; @@ -2564,10 +2752,10 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq) * @tx_q: queue to send buffer on * @skb: pointer to skb */ -netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb) +netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb) { u64_stats_update_begin(&tx_q->stats_sync); - u64_stats_inc(&tx_q->q_stats.tx.skb_drops); + u64_stats_inc(&tx_q->q_stats.skb_drops); u64_stats_update_end(&tx_q->stats_sync); idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); @@ -2585,7 +2773,7 @@ netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb) * Returns NETDEV_TX_OK if sent, else an error code */ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, - struct idpf_queue *tx_q) + struct idpf_tx_queue *tx_q) { struct idpf_tx_splitq_params tx_params = { }; struct idpf_tx_buf *first; @@ -2625,7 +2813,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len; u64_stats_update_begin(&tx_q->stats_sync); - u64_stats_inc(&tx_q->q_stats.tx.lso_pkts); + 
u64_stats_inc(&tx_q->q_stats.lso_pkts); u64_stats_update_end(&tx_q->stats_sync); } @@ -2642,7 +2830,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); } - if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_q->flags)) { + if (idpf_queue_has(FLOW_SCH_EN, tx_q)) { tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE; tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP; /* Set the RE bit to catch any packets that may have not been @@ -2672,17 +2860,16 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, } /** - * idpf_tx_splitq_start - Selects the right Tx queue to send buffer + * idpf_tx_start - Selects the right Tx queue to send buffer * @skb: send buffer * @netdev: network interface device structure * * Returns NETDEV_TX_OK if sent, else an error code */ -netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb, - struct net_device *netdev) +netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev) { struct idpf_vport *vport = idpf_netdev_to_vport(netdev); - struct idpf_queue *tx_q; + struct idpf_tx_queue *tx_q; if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) { dev_kfree_skb_any(skb); @@ -2701,31 +2888,10 @@ netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb, return NETDEV_TX_OK; } - return idpf_tx_splitq_frame(skb, tx_q); -} - -/** - * idpf_ptype_to_htype - get a hash type - * @decoded: Decoded Rx packet type related fields - * - * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by - * skb_set_hash based on PTYPE as parsed by HW Rx pipeline and is part of - * Rx desc. - */ -enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded) -{ - if (!decoded->known) - return PKT_HASH_TYPE_NONE; - if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 && - decoded->inner_prot) - return PKT_HASH_TYPE_L4; - if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 && - decoded->outer_ip) - return PKT_HASH_TYPE_L3; - if (decoded->outer_ip == IDPF_RX_PTYPE_OUTER_L2) - return PKT_HASH_TYPE_L2; - - return PKT_HASH_TYPE_NONE; + if (idpf_is_queue_model_split(vport->txq_model)) + return idpf_tx_splitq_frame(skb, tx_q); + else + return idpf_tx_singleq_frame(skb, tx_q); } /** @@ -2735,20 +2901,21 @@ enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *deco * @rx_desc: Receive descriptor * @decoded: Decoded Rx packet type related fields */ -static void idpf_rx_hash(struct idpf_queue *rxq, struct sk_buff *skb, - struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, - struct idpf_rx_ptype_decoded *decoded) +static void +idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb, + const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, + struct libeth_rx_pt decoded) { u32 hash; - if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXHASH))) + if (!libeth_rx_pt_has_hash(rxq->netdev, decoded)) return; hash = le16_to_cpu(rx_desc->hash1) | (rx_desc->ff2_mirrid_hash2.hash2 << 16) | (rx_desc->hash3 << 24); - skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded)); + libeth_rx_pt_set_hash(skb, hash, decoded); } /** @@ -2760,92 +2927,83 @@ static void idpf_rx_hash(struct idpf_queue *rxq, struct sk_buff *skb, * * skb->protocol must be set before this function is called */ -static void idpf_rx_csum(struct idpf_queue *rxq, struct sk_buff *skb, - struct idpf_rx_csum_decoded *csum_bits, - struct idpf_rx_ptype_decoded *decoded) +static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb, + struct idpf_rx_csum_decoded csum_bits, + 
struct libeth_rx_pt decoded) { bool ipv4, ipv6; /* check if Rx checksum is enabled */ - if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXCSUM))) + if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded)) return; /* check if HW has decoded the packet and checksum */ - if (!(csum_bits->l3l4p)) + if (unlikely(!csum_bits.l3l4p)) return; - ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4); - ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6); + ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4; + ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6; - if (ipv4 && (csum_bits->ipe || csum_bits->eipe)) + if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe))) goto checksum_fail; - if (ipv6 && csum_bits->ipv6exadd) + if (unlikely(ipv6 && csum_bits.ipv6exadd)) return; /* check for L4 errors and handle packets that were not able to be * checksummed */ - if (csum_bits->l4e) + if (unlikely(csum_bits.l4e)) goto checksum_fail; - /* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */ - switch (decoded->inner_prot) { - case IDPF_RX_PTYPE_INNER_PROT_ICMP: - case IDPF_RX_PTYPE_INNER_PROT_TCP: - case IDPF_RX_PTYPE_INNER_PROT_UDP: - if (!csum_bits->raw_csum_inv) { - u16 csum = csum_bits->raw_csum; - - skb->csum = csum_unfold((__force __sum16)~swab16(csum)); - skb->ip_summed = CHECKSUM_COMPLETE; - } else { - skb->ip_summed = CHECKSUM_UNNECESSARY; - } - break; - case IDPF_RX_PTYPE_INNER_PROT_SCTP: + if (csum_bits.raw_csum_inv || + decoded.inner_prot == LIBETH_RX_PT_INNER_SCTP) { skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - default: - break; + return; } + skb->csum = csum_unfold((__force __sum16)~swab16(csum_bits.raw_csum)); + skb->ip_summed = CHECKSUM_COMPLETE; + return; checksum_fail: u64_stats_update_begin(&rxq->stats_sync); - u64_stats_inc(&rxq->q_stats.rx.hw_csum_err); + u64_stats_inc(&rxq->q_stats.hw_csum_err); u64_stats_update_end(&rxq->stats_sync); } /** * idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor * @rx_desc: receive descriptor - * @csum: structure to extract checksum fields * + * Return: parsed checksum status. 
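The rewritten checksum path above either marks the skb CHECKSUM_UNNECESSARY (raw sum not valid, or SCTP) or feeds ~swab16(raw_csum) through csum_unfold() to report CHECKSUM_COMPLETE. A standalone sketch of that byte-swap-and-complement step; fold_raw_csum() is a hypothetical name, not a driver helper.

#include <stdint.h>

/* Mirror of the ~swab16() step: byte-swap the raw 16-bit sum reported by HW,
 * then complement it before handing it to the stack. */
static uint16_t fold_raw_csum(uint16_t raw)
{
	uint16_t swapped = (uint16_t)((raw << 8) | (raw >> 8));

	return (uint16_t)~swapped;
}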
**/ -static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, - struct idpf_rx_csum_decoded *csum) +static struct idpf_rx_csum_decoded +idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc) { + struct idpf_rx_csum_decoded csum = { }; u8 qword0, qword1; qword0 = rx_desc->status_err0_qw0; qword1 = rx_desc->status_err0_qw1; - csum->ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M, + csum.ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M, + qword1); + csum.eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M, qword1); - csum->eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M, + csum.l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M, + qword1); + csum.l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M, qword1); - csum->l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M, - qword1); - csum->l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M, - qword1); - csum->ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M, - qword0); - csum->raw_csum_inv = + csum.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M, + qword0); + csum.raw_csum_inv = le16_get_bits(rx_desc->ptype_err_fflags0, VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M); - csum->raw_csum = le16_to_cpu(rx_desc->misc.raw_cs); + csum.raw_csum = le16_to_cpu(rx_desc->misc.raw_cs); + + return csum; } /** @@ -2860,23 +3018,24 @@ static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_n * Populate the skb fields with the total number of RSC segments, RSC payload * length and packet type. */ -static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb, - struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, - struct idpf_rx_ptype_decoded *decoded) +static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb, + const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, + struct libeth_rx_pt decoded) { u16 rsc_segments, rsc_seg_len; bool ipv4, ipv6; int len; - if (unlikely(!decoded->outer_ip)) + if (unlikely(libeth_rx_pt_get_ip_ver(decoded) == + LIBETH_RX_PT_OUTER_L2)) return -EINVAL; rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen); if (unlikely(!rsc_seg_len)) return -EINVAL; - ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4); - ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6); + ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4; + ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6; if (unlikely(!(ipv4 ^ ipv6))) return -EINVAL; @@ -2914,7 +3073,7 @@ static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb, tcp_gro_complete(skb); u64_stats_update_begin(&rxq->stats_sync); - u64_stats_inc(&rxq->q_stats.rx.rsc_pkts); + u64_stats_inc(&rxq->q_stats.rsc_pkts); u64_stats_update_end(&rxq->stats_sync); return 0; @@ -2930,35 +3089,31 @@ static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb, * order to populate the hash, checksum, protocol, and * other fields within the skb. 
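idpf_rx_splitq_extract_csum_bits() above relies entirely on FIELD_GET() with the descriptor masks. A generic standalone sketch of mask-based field extraction; the mask value is illustrative and not the real descriptor layout.

#include <stdint.h>

#define EXAMPLE_L3L4P_MASK	0x08u	/* hypothetical single-bit mask */

/* FIELD_GET()-style extraction for any contiguous mask: mask the word, then
 * divide by the mask's lowest set bit to shift the field down to bit 0. */
static unsigned int field_get(unsigned int mask, unsigned int word)
{
	return (word & mask) / (mask & -mask);
}

/* field_get(EXAMPLE_L3L4P_MASK, qword1) yields 0 or 1 for a single-bit flag. */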
*/ -static int idpf_rx_process_skb_fields(struct idpf_queue *rxq, - struct sk_buff *skb, - struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc) +static int +idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb, + const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc) { - struct idpf_rx_csum_decoded csum_bits = { }; - struct idpf_rx_ptype_decoded decoded; + struct idpf_rx_csum_decoded csum_bits; + struct libeth_rx_pt decoded; u16 rx_ptype; rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0, VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M); - - skb->protocol = eth_type_trans(skb, rxq->vport->netdev); - - decoded = rxq->vport->rx_ptype_lkup[rx_ptype]; - /* If we don't know the ptype we can't do anything else with it. Just - * pass it up the stack as-is. - */ - if (!decoded.known) - return 0; + decoded = rxq->rx_ptype_lkup[rx_ptype]; /* process RSS/hash */ - idpf_rx_hash(rxq, skb, rx_desc, &decoded); + idpf_rx_hash(rxq, skb, rx_desc, decoded); + + skb->protocol = eth_type_trans(skb, rxq->netdev); if (le16_get_bits(rx_desc->hdrlen_flags, VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M)) - return idpf_rx_rsc(rxq, skb, rx_desc, &decoded); + return idpf_rx_rsc(rxq, skb, rx_desc, decoded); + + csum_bits = idpf_rx_splitq_extract_csum_bits(rx_desc); + idpf_rx_csum(rxq, skb, csum_bits, decoded); - idpf_rx_splitq_extract_csum_bits(rx_desc, &csum_bits); - idpf_rx_csum(rxq, skb, &csum_bits, &decoded); + skb_record_rx_queue(skb, rxq->idx); return 0; } @@ -2976,103 +3131,73 @@ static int idpf_rx_process_skb_fields(struct idpf_queue *rxq, void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb, unsigned int size) { - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, - rx_buf->page_offset, size, rx_buf->truesize); + u32 hr = rx_buf->page->pp->p.offset; - rx_buf->page = NULL; + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, + rx_buf->offset + hr, size, rx_buf->truesize); } /** - * idpf_rx_construct_skb - Allocate skb and populate it - * @rxq: Rx descriptor queue - * @rx_buf: Rx buffer to pull data from - * @size: the length of the packet + * idpf_rx_hsplit_wa - handle header buffer overflows and split errors + * @hdr: Rx buffer for the headers + * @buf: Rx buffer for the payload + * @data_len: number of bytes received into the payload buffer * - * This function allocates an skb. It then populates it with the page - * data from the current receive descriptor, taking care to set up the - * skb correctly. + * When a header buffer overflow occurs or the HW was unable to parse the + * packet type to perform header split, the whole frame gets placed into the + * payload buffer. We can't build a valid skb around a payload buffer when + * the header split is active since it doesn't reserve any head- or tailroom. + * In that case, copy either the whole frame when it's short or just the + * Ethernet header to the header buffer to be able to build an skb and adjust + * the data offset in the payload buffer, IOW emulate the header split. + * + * Return: number of bytes copied to the header buffer. 
*/ -struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq, - struct idpf_rx_buf *rx_buf, - unsigned int size) +static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr, + struct libeth_fqe *buf, u32 data_len) { - unsigned int headlen; - struct sk_buff *skb; - void *va; - - va = page_address(rx_buf->page) + rx_buf->page_offset; - - /* prefetch first cache line of first page */ - net_prefetch(va); - /* allocate a skb to store the frags */ - skb = napi_alloc_skb(&rxq->q_vector->napi, IDPF_RX_HDR_SIZE); - if (unlikely(!skb)) { - idpf_rx_put_page(rx_buf); - - return NULL; - } - - skb_record_rx_queue(skb, rxq->idx); - skb_mark_for_recycle(skb); + u32 copy = data_len <= L1_CACHE_BYTES ? data_len : ETH_HLEN; + const void *src; + void *dst; - /* Determine available headroom for copy */ - headlen = size; - if (headlen > IDPF_RX_HDR_SIZE) - headlen = eth_get_headlen(skb->dev, va, IDPF_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); - - /* if we exhaust the linear part then add what is left as a frag */ - size -= headlen; - if (!size) { - idpf_rx_put_page(rx_buf); - - return skb; - } + if (!libeth_rx_sync_for_cpu(buf, copy)) + return 0; - skb_add_rx_frag(skb, 0, rx_buf->page, rx_buf->page_offset + headlen, - size, rx_buf->truesize); + dst = page_address(hdr->page) + hdr->offset + hdr->page->pp->p.offset; + src = page_address(buf->page) + buf->offset + buf->page->pp->p.offset; + memcpy(dst, src, LARGEST_ALIGN(copy)); - /* Since we're giving the page to the stack, clear our reference to it. - * We'll get a new one during buffer posting. - */ - rx_buf->page = NULL; + buf->offset += copy; - return skb; + return copy; } /** - * idpf_rx_hdr_construct_skb - Allocate skb and populate it from header buffer - * @rxq: Rx descriptor queue - * @va: Rx buffer to pull data from + * idpf_rx_build_skb - Allocate skb and populate it from header buffer + * @buf: Rx buffer to pull data from * @size: the length of the packet * * This function allocates an skb. It then populates it with the page data from * the current receive descriptor, taking care to set up the skb correctly. - * This specifically uses a header buffer to start building the skb. */ -static struct sk_buff *idpf_rx_hdr_construct_skb(struct idpf_queue *rxq, - const void *va, - unsigned int size) +struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size) { + u32 hr = buf->page->pp->p.offset; struct sk_buff *skb; + void *va; - /* allocate a skb to store the frags */ - skb = napi_alloc_skb(&rxq->q_vector->napi, size); + va = page_address(buf->page) + buf->offset; + prefetch(va + hr); + + skb = napi_build_skb(va, buf->truesize); if (unlikely(!skb)) return NULL; - skb_record_rx_queue(skb, rxq->idx); - - memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - - /* More than likely, a payload fragment, which will use a page from - * page_pool will be added to the SKB so mark it for recycle - * preemptively. And if not, it's inconsequential. 
- */ skb_mark_for_recycle(skb); + skb_reserve(skb, hr); + __skb_put(skb, size); + return skb; } @@ -3115,31 +3240,27 @@ static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_de * * Returns amount of work completed */ -static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget) +static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget) { int total_rx_bytes = 0, total_rx_pkts = 0; - struct idpf_queue *rx_bufq = NULL; + struct idpf_buf_queue *rx_bufq = NULL; struct sk_buff *skb = rxq->skb; u16 ntc = rxq->next_to_clean; /* Process Rx packets bounded by budget */ while (likely(total_rx_pkts < budget)) { struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc; + struct libeth_fqe *hdr, *rx_buf = NULL; struct idpf_sw_queue *refillq = NULL; struct idpf_rxq_set *rxq_set = NULL; - struct idpf_rx_buf *rx_buf = NULL; - union virtchnl2_rx_desc *desc; unsigned int pkt_len = 0; unsigned int hdr_len = 0; u16 gen_id, buf_id = 0; - /* Header buffer overflow only valid for header split */ - bool hbo = false; int bufq_id; u8 rxdid; /* get the Rx desc from Rx queue based on 'next_to_clean' */ - desc = IDPF_RX_DESC(rxq, ntc); - rx_desc = (struct virtchnl2_rx_flex_desc_adv_nic_3 *)desc; + rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb; /* This memory barrier is needed to keep us from reading * any other fields out of the rx_desc @@ -3150,7 +3271,7 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget) gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id, VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M); - if (test_bit(__IDPF_Q_GEN_CHK, rxq->flags) != gen_id) + if (idpf_queue_has(GEN_CHK, rxq) != gen_id) break; rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M, @@ -3158,7 +3279,7 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget) if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) { IDPF_RX_BUMP_NTC(rxq, ntc); u64_stats_update_begin(&rxq->stats_sync); - u64_stats_inc(&rxq->q_stats.rx.bad_descs); + u64_stats_inc(&rxq->q_stats.bad_descs); u64_stats_update_end(&rxq->stats_sync); continue; } @@ -3166,71 +3287,79 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget) pkt_len = le16_get_bits(rx_desc->pktlen_gen_bufq_id, VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M); - hbo = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M, - rx_desc->status_err0_qw1); - - if (unlikely(hbo)) { - /* If a header buffer overflow, occurs, i.e. header is - * too large to fit in the header split buffer, HW will - * put the entire packet, including headers, in the - * data/payload buffer. 
- */ - u64_stats_update_begin(&rxq->stats_sync); - u64_stats_inc(&rxq->q_stats.rx.hsplit_buf_ovf); - u64_stats_update_end(&rxq->stats_sync); - goto bypass_hsplit; - } - - hdr_len = le16_get_bits(rx_desc->hdrlen_flags, - VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M); - -bypass_hsplit: bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id, VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M); rxq_set = container_of(rxq, struct idpf_rxq_set, rxq); - if (!bufq_id) - refillq = rxq_set->refillq0; - else - refillq = rxq_set->refillq1; + refillq = rxq_set->refillq[bufq_id]; /* retrieve buffer from the rxq */ - rx_bufq = &rxq->rxq_grp->splitq.bufq_sets[bufq_id].bufq; + rx_bufq = &rxq->bufq_sets[bufq_id].bufq; buf_id = le16_to_cpu(rx_desc->buf_id); - rx_buf = &rx_bufq->rx_buf.buf[buf_id]; + rx_buf = &rx_bufq->buf[buf_id]; + + if (!rx_bufq->hdr_pp) + goto payload; + +#define __HBO_BIT VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M +#define __HDR_LEN_MASK VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M + if (likely(!(rx_desc->status_err0_qw1 & __HBO_BIT))) + /* If a header buffer overflow, occurs, i.e. header is + * too large to fit in the header split buffer, HW will + * put the entire packet, including headers, in the + * data/payload buffer. + */ + hdr_len = le16_get_bits(rx_desc->hdrlen_flags, + __HDR_LEN_MASK); +#undef __HDR_LEN_MASK +#undef __HBO_BIT + + hdr = &rx_bufq->hdr_buf[buf_id]; - if (hdr_len) { - const void *va = (u8 *)rx_bufq->rx_buf.hdr_buf_va + - (u32)buf_id * IDPF_HDR_BUF_SIZE; + if (unlikely(!hdr_len && !skb)) { + hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len); + pkt_len -= hdr_len; - skb = idpf_rx_hdr_construct_skb(rxq, va, hdr_len); u64_stats_update_begin(&rxq->stats_sync); - u64_stats_inc(&rxq->q_stats.rx.hsplit_pkts); + u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf); u64_stats_update_end(&rxq->stats_sync); } - if (pkt_len) { - idpf_rx_sync_for_cpu(rx_buf, pkt_len); - if (skb) - idpf_rx_add_frag(rx_buf, skb, pkt_len); - else - skb = idpf_rx_construct_skb(rxq, rx_buf, - pkt_len); - } else { - idpf_rx_put_page(rx_buf); + if (libeth_rx_sync_for_cpu(hdr, hdr_len)) { + skb = idpf_rx_build_skb(hdr, hdr_len); + if (!skb) + break; + + u64_stats_update_begin(&rxq->stats_sync); + u64_stats_inc(&rxq->q_stats.hsplit_pkts); + u64_stats_update_end(&rxq->stats_sync); } + hdr->page = NULL; + +payload: + if (!libeth_rx_sync_for_cpu(rx_buf, pkt_len)) + goto skip_data; + + if (skb) + idpf_rx_add_frag(rx_buf, skb, pkt_len); + else + skb = idpf_rx_build_skb(rx_buf, pkt_len); + /* exit if we failed to retrieve a buffer */ if (!skb) break; - idpf_rx_post_buf_refill(refillq, buf_id); +skip_data: + rx_buf->page = NULL; + idpf_rx_post_buf_refill(refillq, buf_id); IDPF_RX_BUMP_NTC(rxq, ntc); + /* skip if it is non EOP desc */ - if (!idpf_rx_splitq_is_eop(rx_desc)) + if (!idpf_rx_splitq_is_eop(rx_desc) || unlikely(!skb)) continue; /* pad skb if needed (to make valid ethernet frame) */ @@ -3250,7 +3379,7 @@ bypass_hsplit: } /* send completed skb up the stack */ - napi_gro_receive(&rxq->q_vector->napi, skb); + napi_gro_receive(rxq->napi, skb); skb = NULL; /* update budget accounting */ @@ -3261,8 +3390,8 @@ bypass_hsplit: rxq->skb = skb; u64_stats_update_begin(&rxq->stats_sync); - u64_stats_add(&rxq->q_stats.rx.packets, total_rx_pkts); - u64_stats_add(&rxq->q_stats.rx.bytes, total_rx_bytes); + u64_stats_add(&rxq->q_stats.packets, total_rx_pkts); + u64_stats_add(&rxq->q_stats.bytes, total_rx_bytes); u64_stats_update_end(&rxq->stats_sync); /* guarantee a trip back through this routine if there was a failure */ @@ -3272,34 +3401,41 @@ bypass_hsplit: 
/** * idpf_rx_update_bufq_desc - Update buffer queue descriptor * @bufq: Pointer to the buffer queue - * @refill_desc: SW Refill queue descriptor containing buffer ID + * @buf_id: buffer ID * @buf_desc: Buffer queue descriptor * * Return 0 on success and negative on failure. */ -static int idpf_rx_update_bufq_desc(struct idpf_queue *bufq, u16 refill_desc, +static int idpf_rx_update_bufq_desc(struct idpf_buf_queue *bufq, u32 buf_id, struct virtchnl2_splitq_rx_buf_desc *buf_desc) { - struct idpf_rx_buf *buf; + struct libeth_fq_fp fq = { + .pp = bufq->pp, + .fqes = bufq->buf, + .truesize = bufq->truesize, + .count = bufq->desc_count, + }; dma_addr_t addr; - u16 buf_id; - - buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc); - buf = &bufq->rx_buf.buf[buf_id]; - - addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size); - if (unlikely(addr == DMA_MAPPING_ERROR)) + addr = libeth_rx_alloc(&fq, buf_id); + if (addr == DMA_MAPPING_ERROR) return -ENOMEM; buf_desc->pkt_addr = cpu_to_le64(addr); buf_desc->qword0.buf_id = cpu_to_le16(buf_id); - if (!bufq->rx_hsplit_en) + if (!idpf_queue_has(HSPLIT_EN, bufq)) return 0; - buf_desc->hdr_addr = cpu_to_le64(bufq->rx_buf.hdr_buf_pa + - (u32)buf_id * IDPF_HDR_BUF_SIZE); + fq.pp = bufq->hdr_pp; + fq.fqes = bufq->hdr_buf; + fq.truesize = bufq->hdr_truesize; + + addr = libeth_rx_alloc(&fq, buf_id); + if (addr == DMA_MAPPING_ERROR) + return -ENOMEM; + + buf_desc->hdr_addr = cpu_to_le64(addr); return 0; } @@ -3311,38 +3447,37 @@ static int idpf_rx_update_bufq_desc(struct idpf_queue *bufq, u16 refill_desc, * * This function takes care of the buffer refill management */ -static void idpf_rx_clean_refillq(struct idpf_queue *bufq, +static void idpf_rx_clean_refillq(struct idpf_buf_queue *bufq, struct idpf_sw_queue *refillq) { struct virtchnl2_splitq_rx_buf_desc *buf_desc; u16 bufq_nta = bufq->next_to_alloc; u16 ntc = refillq->next_to_clean; int cleaned = 0; - u16 gen; - buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, bufq_nta); + buf_desc = &bufq->split_buf[bufq_nta]; /* make sure we stop at ring wrap in the unlikely case ring is full */ while (likely(cleaned < refillq->desc_count)) { - u16 refill_desc = IDPF_SPLITQ_RX_BI_DESC(refillq, ntc); + u32 buf_id, refill_desc = refillq->ring[ntc]; bool failure; - gen = FIELD_GET(IDPF_RX_BI_GEN_M, refill_desc); - if (test_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags) != gen) + if (idpf_queue_has(RFL_GEN_CHK, refillq) != + !!(refill_desc & IDPF_RX_BI_GEN_M)) break; - failure = idpf_rx_update_bufq_desc(bufq, refill_desc, - buf_desc); + buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc); + failure = idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc); if (failure) break; if (unlikely(++ntc == refillq->desc_count)) { - change_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags); + idpf_queue_change(RFL_GEN_CHK, refillq); ntc = 0; } if (unlikely(++bufq_nta == bufq->desc_count)) { - buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, 0); + buf_desc = &bufq->split_buf[0]; bufq_nta = 0; } else { buf_desc++; @@ -3371,16 +3506,21 @@ static void idpf_rx_clean_refillq(struct idpf_queue *bufq, /** * idpf_rx_clean_refillq_all - Clean all refill queues * @bufq: buffer queue with refill queues + * @nid: ID of the closest NUMA node with memory * * Iterates through all refill queues assigned to the buffer queue assigned to * this vector. Returns true if clean is complete within budget, false * otherwise. 
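idpf_rx_clean_refillq() above is the consumer side of the refill ring: an entry is valid only while its stored GEN bit matches the queue's RFL_GEN_CHK flag, and the flag flips when the read cursor wraps. A standalone sketch under the same hypothetical bit layout as the producer sketch earlier; all names here are illustrative, not the driver's.

#include <stdbool.h>
#include <stdint.h>

#define RFLC_BUFID_MASK	0x7fffu	/* hypothetical, matches the producer sketch */
#define RFLC_GEN_BIT	0x8000u

/* Consumer: return false while the slot still carries the previous lap's
 * generation; otherwise extract the buffer ID and advance, flipping the
 * expected generation when the cursor wraps. */
static bool sw_refillq_pull(const uint16_t *ring, uint32_t count,
			    uint32_t *next_to_clean, bool *gen,
			    uint16_t *buf_id)
{
	uint16_t entry = ring[*next_to_clean];

	if (!!(entry & RFLC_GEN_BIT) != *gen)
		return false;	/* producer has not refilled this slot yet */

	*buf_id = entry & RFLC_BUFID_MASK;

	if (++*next_to_clean == count) {
		*next_to_clean = 0;
		*gen = !*gen;
	}

	return true;
}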
*/ -static void idpf_rx_clean_refillq_all(struct idpf_queue *bufq) +static void idpf_rx_clean_refillq_all(struct idpf_buf_queue *bufq, int nid) { struct idpf_bufq_set *bufq_set; int i; + page_pool_nid_changed(bufq->pp, nid); + if (bufq->hdr_pp) + page_pool_nid_changed(bufq->hdr_pp, nid); + bufq_set = container_of(bufq, struct idpf_bufq_set, bufq); for (i = 0; i < bufq_set->num_refillqs; i++) idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]); @@ -3441,12 +3581,16 @@ void idpf_vport_intr_rel(struct idpf_vport *vport) for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx]; + kfree(q_vector->complq); + q_vector->complq = NULL; kfree(q_vector->bufq); q_vector->bufq = NULL; kfree(q_vector->tx); q_vector->tx = NULL; kfree(q_vector->rx); q_vector->rx = NULL; + + free_cpumask_var(q_vector->affinity_mask); } /* Clean up the mapping of queues to vectors */ @@ -3495,7 +3639,7 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport) /* clear the affinity_mask in the IRQ descriptor */ irq_set_affinity_hint(irq_num, NULL); - free_irq(irq_num, q_vector); + kfree(free_irq(irq_num, q_vector)); } } @@ -3579,13 +3723,13 @@ static void idpf_net_dim(struct idpf_q_vector *q_vector) goto check_rx_itr; for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) { - struct idpf_queue *txq = q_vector->tx[i]; + struct idpf_tx_queue *txq = q_vector->tx[i]; unsigned int start; do { start = u64_stats_fetch_begin(&txq->stats_sync); - packets += u64_stats_read(&txq->q_stats.tx.packets); - bytes += u64_stats_read(&txq->q_stats.tx.bytes); + packets += u64_stats_read(&txq->q_stats.packets); + bytes += u64_stats_read(&txq->q_stats.bytes); } while (u64_stats_fetch_retry(&txq->stats_sync, start)); } @@ -3598,13 +3742,13 @@ check_rx_itr: return; for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) { - struct idpf_queue *rxq = q_vector->rx[i]; + struct idpf_rx_queue *rxq = q_vector->rx[i]; unsigned int start; do { start = u64_stats_fetch_begin(&rxq->stats_sync); - packets += u64_stats_read(&rxq->q_stats.rx.packets); - bytes += u64_stats_read(&rxq->q_stats.rx.bytes); + packets += u64_stats_read(&rxq->q_stats.packets); + bytes += u64_stats_read(&rxq->q_stats.bytes); } while (u64_stats_fetch_retry(&rxq->stats_sync, start)); } @@ -3646,6 +3790,7 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename) for (vector = 0; vector < vport->num_q_vectors; vector++) { struct idpf_q_vector *q_vector = &vport->q_vectors[vector]; + char *name; vidx = vport->q_vector_idxs[vector]; irq_num = adapter->msix_entries[vidx].vector; @@ -3659,18 +3804,18 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename) else continue; - q_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d", - basename, vec_name, vidx); + name = kasprintf(GFP_KERNEL, "%s-%s-%d", basename, vec_name, + vidx); err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0, - q_vector->name, q_vector); + name, q_vector); if (err) { netdev_err(vport->netdev, "Request_irq failed, error: %d\n", err); goto free_q_irqs; } /* assign the mask for this irq */ - irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); + irq_set_affinity_hint(irq_num, q_vector->affinity_mask); } return 0; @@ -3679,7 +3824,7 @@ free_q_irqs: while (--vector >= 0) { vidx = vport->q_vector_idxs[vector]; irq_num = adapter->msix_entries[vidx].vector; - free_irq(irq_num, &vport->q_vectors[vector]); + kfree(free_irq(irq_num, &vport->q_vectors[vector])); } return err; @@ -3846,16 +3991,17 @@ static 
void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport) static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget, int *cleaned) { - u16 num_txq = q_vec->num_txq; + u16 num_complq = q_vec->num_complq; bool clean_complete = true; int i, budget_per_q; - if (unlikely(!num_txq)) + if (unlikely(!num_complq)) return true; - budget_per_q = DIV_ROUND_UP(budget, num_txq); - for (i = 0; i < num_txq; i++) - clean_complete &= idpf_tx_clean_complq(q_vec->tx[i], + budget_per_q = DIV_ROUND_UP(budget, num_complq); + + for (i = 0; i < num_complq; i++) + clean_complete &= idpf_tx_clean_complq(q_vec->complq[i], budget_per_q, cleaned); return clean_complete; @@ -3876,13 +4022,14 @@ static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget, bool clean_complete = true; int pkts_cleaned = 0; int i, budget_per_q; + int nid; /* We attempt to distribute budget to each Rx queue fairly, but don't * allow the budget to go below 1 because that would exit polling early. */ budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0; for (i = 0; i < num_rxq; i++) { - struct idpf_queue *rxq = q_vec->rx[i]; + struct idpf_rx_queue *rxq = q_vec->rx[i]; int pkts_cleaned_per_q; pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q); @@ -3893,8 +4040,10 @@ static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget, } *cleaned = pkts_cleaned; + nid = numa_mem_id(); + for (i = 0; i < q_vec->num_bufq; i++) - idpf_rx_clean_refillq_all(q_vec->bufq[i]); + idpf_rx_clean_refillq_all(q_vec->bufq[i], nid); return clean_complete; } @@ -3937,8 +4086,8 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) * queues virtchnl message, as the interrupts will be disabled after * that */ - if (unlikely(q_vector->num_txq && test_bit(__IDPF_Q_POLL_MODE, - q_vector->tx[0]->flags))) + if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE, + q_vector->tx[0]))) return budget; else return work_done; @@ -3952,27 +4101,28 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) */ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport) { + bool split = idpf_is_queue_model_split(vport->rxq_model); u16 num_txq_grp = vport->num_txq_grp; - int i, j, qv_idx, bufq_vidx = 0; struct idpf_rxq_group *rx_qgrp; struct idpf_txq_group *tx_qgrp; - struct idpf_queue *q, *bufq; - u16 q_index; + u32 i, qv_idx, q_index; for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) { u16 num_rxq; + if (qv_idx >= vport->num_q_vectors) + qv_idx = 0; + rx_qgrp = &vport->rxq_grps[i]; - if (idpf_is_queue_model_split(vport->rxq_model)) + if (split) num_rxq = rx_qgrp->splitq.num_rxq_sets; else num_rxq = rx_qgrp->singleq.num_rxq; - for (j = 0; j < num_rxq; j++) { - if (qv_idx >= vport->num_q_vectors) - qv_idx = 0; + for (u32 j = 0; j < num_rxq; j++) { + struct idpf_rx_queue *q; - if (idpf_is_queue_model_split(vport->rxq_model)) + if (split) q = &rx_qgrp->splitq.rxq_sets[j]->rxq; else q = rx_qgrp->singleq.rxqs[j]; @@ -3980,52 +4130,53 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport) q_index = q->q_vector->num_rxq; q->q_vector->rx[q_index] = q; q->q_vector->num_rxq++; - qv_idx++; + + if (split) + q->napi = &q->q_vector->napi; } - if (idpf_is_queue_model_split(vport->rxq_model)) { - for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + if (split) { + for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) { + struct idpf_buf_queue *bufq; + bufq = &rx_qgrp->splitq.bufq_sets[j].bufq; - bufq->q_vector = &vport->q_vectors[bufq_vidx]; + bufq->q_vector = 
&vport->q_vectors[qv_idx]; q_index = bufq->q_vector->num_bufq; bufq->q_vector->bufq[q_index] = bufq; bufq->q_vector->num_bufq++; } - if (++bufq_vidx >= vport->num_q_vectors) - bufq_vidx = 0; } + + qv_idx++; } + split = idpf_is_queue_model_split(vport->txq_model); + for (i = 0, qv_idx = 0; i < num_txq_grp; i++) { u16 num_txq; + if (qv_idx >= vport->num_q_vectors) + qv_idx = 0; + tx_qgrp = &vport->txq_grps[i]; num_txq = tx_qgrp->num_txq; - if (idpf_is_queue_model_split(vport->txq_model)) { - if (qv_idx >= vport->num_q_vectors) - qv_idx = 0; + for (u32 j = 0; j < num_txq; j++) { + struct idpf_tx_queue *q; - q = tx_qgrp->complq; + q = tx_qgrp->txqs[j]; q->q_vector = &vport->q_vectors[qv_idx]; - q_index = q->q_vector->num_txq; - q->q_vector->tx[q_index] = q; - q->q_vector->num_txq++; - qv_idx++; - } else { - for (j = 0; j < num_txq; j++) { - if (qv_idx >= vport->num_q_vectors) - qv_idx = 0; + q->q_vector->tx[q->q_vector->num_txq++] = q; + } - q = tx_qgrp->txqs[j]; - q->q_vector = &vport->q_vectors[qv_idx]; - q_index = q->q_vector->num_txq; - q->q_vector->tx[q_index] = q; - q->q_vector->num_txq++; + if (split) { + struct idpf_compl_queue *q = tx_qgrp->complq; - qv_idx++; - } + q->q_vector = &vport->q_vectors[qv_idx]; + q->q_vector->complq[q->q_vector->num_complq++] = q; } + + qv_idx++; } } @@ -4086,7 +4237,7 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport) /* only set affinity_mask if the CPU is online */ if (cpu_online(v_idx)) - cpumask_set_cpu(v_idx, &q_vector->affinity_mask); + cpumask_set_cpu(v_idx, q_vector->affinity_mask); } } @@ -4101,18 +4252,22 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport) { u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector; struct idpf_q_vector *q_vector; - int v_idx, err; + u32 complqs_per_vector, v_idx; vport->q_vectors = kcalloc(vport->num_q_vectors, sizeof(struct idpf_q_vector), GFP_KERNEL); if (!vport->q_vectors) return -ENOMEM; - txqs_per_vector = DIV_ROUND_UP(vport->num_txq, vport->num_q_vectors); - rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq, vport->num_q_vectors); + txqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp, + vport->num_q_vectors); + rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq_grp, + vport->num_q_vectors); bufqs_per_vector = vport->num_bufqs_per_qgrp * DIV_ROUND_UP(vport->num_rxq_grp, vport->num_q_vectors); + complqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp, + vport->num_q_vectors); for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { q_vector = &vport->q_vectors[v_idx]; @@ -4126,32 +4281,33 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport) q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC; q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0; - q_vector->tx = kcalloc(txqs_per_vector, - sizeof(struct idpf_queue *), + if (!zalloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL)) + goto error; + + q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx), GFP_KERNEL); - if (!q_vector->tx) { - err = -ENOMEM; + if (!q_vector->tx) goto error; - } - q_vector->rx = kcalloc(rxqs_per_vector, - sizeof(struct idpf_queue *), + q_vector->rx = kcalloc(rxqs_per_vector, sizeof(*q_vector->rx), GFP_KERNEL); - if (!q_vector->rx) { - err = -ENOMEM; + if (!q_vector->rx) goto error; - } if (!idpf_is_queue_model_split(vport->rxq_model)) continue; q_vector->bufq = kcalloc(bufqs_per_vector, - sizeof(struct idpf_queue *), + sizeof(*q_vector->bufq), GFP_KERNEL); - if (!q_vector->bufq) { - err = -ENOMEM; + if (!q_vector->bufq) + goto error; + + q_vector->complq = kcalloc(complqs_per_vector, + sizeof(*q_vector->complq), + GFP_KERNEL); + if 
(!q_vector->complq) goto error; - } } return 0; @@ -4159,7 +4315,7 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport) error: idpf_vport_intr_rel(vport); - return err; + return -ENOMEM; } /** diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h index 551391e20464..6215dbee5546 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h @@ -4,10 +4,13 @@ #ifndef _IDPF_TXRX_H_ #define _IDPF_TXRX_H_ -#include <net/page_pool/helpers.h> +#include <linux/dim.h> + +#include <net/libeth/cache.h> #include <net/tcp.h> #include <net/netdev_queues.h> +#include "idpf_lan_txrx.h" #include "virtchnl2_lan_desc.h" #define IDPF_LARGE_MAX_Q 256 @@ -83,7 +86,7 @@ do { \ if (unlikely(++(ntc) == (rxq)->desc_count)) { \ ntc = 0; \ - change_bit(__IDPF_Q_GEN_CHK, (rxq)->flags); \ + idpf_queue_change(GEN_CHK, rxq); \ } \ } while (0) @@ -93,16 +96,10 @@ do { \ idx = 0; \ } while (0) -#define IDPF_RX_HDR_SIZE 256 -#define IDPF_RX_BUF_2048 2048 -#define IDPF_RX_BUF_4096 4096 #define IDPF_RX_BUF_STRIDE 32 #define IDPF_RX_BUF_POST_STRIDE 16 #define IDPF_LOW_WATERMARK 64 -/* Size of header buffer specifically for header split */ -#define IDPF_HDR_BUF_SIZE 256 -#define IDPF_PACKET_HDR_PAD \ - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN * 2) + #define IDPF_TX_TSO_MIN_MSS 88 /* Minimum number of descriptors between 2 descriptors with the RE bit set; @@ -110,36 +107,17 @@ do { \ */ #define IDPF_TX_SPLITQ_RE_MIN_GAP 64 -#define IDPF_RX_BI_BUFID_S 0 -#define IDPF_RX_BI_BUFID_M GENMASK(14, 0) -#define IDPF_RX_BI_GEN_S 15 -#define IDPF_RX_BI_GEN_M BIT(IDPF_RX_BI_GEN_S) +#define IDPF_RX_BI_GEN_M BIT(16) +#define IDPF_RX_BI_BUFID_M GENMASK(15, 0) + #define IDPF_RXD_EOF_SPLITQ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M #define IDPF_RXD_EOF_SINGLEQ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M -#define IDPF_SINGLEQ_RX_BUF_DESC(rxq, i) \ - (&(((struct virtchnl2_singleq_rx_buf_desc *)((rxq)->desc_ring))[i])) -#define IDPF_SPLITQ_RX_BUF_DESC(rxq, i) \ - (&(((struct virtchnl2_splitq_rx_buf_desc *)((rxq)->desc_ring))[i])) -#define IDPF_SPLITQ_RX_BI_DESC(rxq, i) ((((rxq)->ring))[i]) - -#define IDPF_BASE_TX_DESC(txq, i) \ - (&(((struct idpf_base_tx_desc *)((txq)->desc_ring))[i])) -#define IDPF_BASE_TX_CTX_DESC(txq, i) \ - (&(((struct idpf_base_tx_ctx_desc *)((txq)->desc_ring))[i])) -#define IDPF_SPLITQ_TX_COMPLQ_DESC(txcq, i) \ - (&(((struct idpf_splitq_tx_compl_desc *)((txcq)->desc_ring))[i])) - -#define IDPF_FLEX_TX_DESC(txq, i) \ - (&(((union idpf_tx_flex_desc *)((txq)->desc_ring))[i])) -#define IDPF_FLEX_TX_CTX_DESC(txq, i) \ - (&(((struct idpf_flex_tx_ctx_desc *)((txq)->desc_ring))[i])) - #define IDPF_DESC_UNUSED(txq) \ ((((txq)->next_to_clean > (txq)->next_to_use) ? 
0 : (txq)->desc_count) + \ (txq)->next_to_clean - (txq)->next_to_use - 1) -#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->buf_stack.top) +#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->stash->buf_stack.top) #define IDPF_TX_BUF_RSV_LOW(txq) (IDPF_TX_BUF_RSV_UNUSED(txq) < \ (txq)->desc_count >> 2) @@ -315,16 +293,7 @@ struct idpf_rx_extracted { #define IDPF_TX_MAX_DESC_DATA_ALIGNED \ ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE) -#define IDPF_RX_DMA_ATTR \ - (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) -#define IDPF_RX_DESC(rxq, i) \ - (&(((union virtchnl2_rx_desc *)((rxq)->desc_ring))[i])) - -struct idpf_rx_buf { - struct page *page; - unsigned int page_offset; - u16 truesize; -}; +#define idpf_rx_buf libeth_fqe #define IDPF_RX_MAX_PTYPE_PROTO_IDS 32 #define IDPF_RX_MAX_PTYPE_SZ (sizeof(struct virtchnl2_ptype) + \ @@ -348,72 +317,6 @@ struct idpf_rx_buf { #define IDPF_RX_MAX_BASE_PTYPE 256 #define IDPF_INVALID_PTYPE_ID 0xFFFF -/* Packet type non-ip values */ -enum idpf_rx_ptype_l2 { - IDPF_RX_PTYPE_L2_RESERVED = 0, - IDPF_RX_PTYPE_L2_MAC_PAY2 = 1, - IDPF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, - IDPF_RX_PTYPE_L2_FIP_PAY2 = 3, - IDPF_RX_PTYPE_L2_OUI_PAY2 = 4, - IDPF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, - IDPF_RX_PTYPE_L2_LLDP_PAY2 = 6, - IDPF_RX_PTYPE_L2_ECP_PAY2 = 7, - IDPF_RX_PTYPE_L2_EVB_PAY2 = 8, - IDPF_RX_PTYPE_L2_QCN_PAY2 = 9, - IDPF_RX_PTYPE_L2_EAPOL_PAY2 = 10, - IDPF_RX_PTYPE_L2_ARP = 11, -}; - -enum idpf_rx_ptype_outer_ip { - IDPF_RX_PTYPE_OUTER_L2 = 0, - IDPF_RX_PTYPE_OUTER_IP = 1, -}; - -#define IDPF_RX_PTYPE_TO_IPV(ptype, ipv) \ - (((ptype)->outer_ip == IDPF_RX_PTYPE_OUTER_IP) && \ - ((ptype)->outer_ip_ver == (ipv))) - -enum idpf_rx_ptype_outer_ip_ver { - IDPF_RX_PTYPE_OUTER_NONE = 0, - IDPF_RX_PTYPE_OUTER_IPV4 = 1, - IDPF_RX_PTYPE_OUTER_IPV6 = 2, -}; - -enum idpf_rx_ptype_outer_fragmented { - IDPF_RX_PTYPE_NOT_FRAG = 0, - IDPF_RX_PTYPE_FRAG = 1, -}; - -enum idpf_rx_ptype_tunnel_type { - IDPF_RX_PTYPE_TUNNEL_NONE = 0, - IDPF_RX_PTYPE_TUNNEL_IP_IP = 1, - IDPF_RX_PTYPE_TUNNEL_IP_GRENAT = 2, - IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, - IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, -}; - -enum idpf_rx_ptype_tunnel_end_prot { - IDPF_RX_PTYPE_TUNNEL_END_NONE = 0, - IDPF_RX_PTYPE_TUNNEL_END_IPV4 = 1, - IDPF_RX_PTYPE_TUNNEL_END_IPV6 = 2, -}; - -enum idpf_rx_ptype_inner_prot { - IDPF_RX_PTYPE_INNER_PROT_NONE = 0, - IDPF_RX_PTYPE_INNER_PROT_UDP = 1, - IDPF_RX_PTYPE_INNER_PROT_TCP = 2, - IDPF_RX_PTYPE_INNER_PROT_SCTP = 3, - IDPF_RX_PTYPE_INNER_PROT_ICMP = 4, - IDPF_RX_PTYPE_INNER_PROT_TIMESYNC = 5, -}; - -enum idpf_rx_ptype_payload_layer { - IDPF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, - IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, - IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, - IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, -}; - enum idpf_tunnel_state { IDPF_PTYPE_TUNNEL_IP = BIT(0), IDPF_PTYPE_TUNNEL_IP_GRENAT = BIT(1), @@ -421,22 +324,9 @@ enum idpf_tunnel_state { }; struct idpf_ptype_state { - bool outer_ip; - bool outer_frag; - u8 tunnel_state; -}; - -struct idpf_rx_ptype_decoded { - u32 ptype:10; - u32 known:1; - u32 outer_ip:1; - u32 outer_ip_ver:2; - u32 outer_frag:1; - u32 tunnel_type:3; - u32 tunnel_end_prot:2; - u32 tunnel_end_frag:1; - u32 inner_prot:4; - u32 payload_layer:3; + bool outer_ip:1; + bool outer_frag:1; + u8 tunnel_state:6; }; /** @@ -452,23 +342,37 @@ struct idpf_rx_ptype_decoded { * to 1 and knows that reading a gen bit of 1 in any * descriptor on the initial pass of the ring indicates a * writeback. It also flips on every ring wrap. 
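A minimal illustrative sketch (not part of this patch) of the generation-bit convention documented for __IDPF_Q_GEN_CHK, written against the idpf_queue_has()/idpf_queue_change() helpers this series introduces and the IDPF_RX_BUMP_NTC pattern shown earlier in the diff; the demo_* helpers and the way the gen bit is pulled out of the descriptor are assumptions, not the driver's actual code:

static void demo_walk_rx_ring(struct idpf_rx_queue *rxq)
{
	u16 ntc = rxq->next_to_clean;

	for (;;) {
		const union virtchnl2_rx_desc *desc = &rxq->rx[ntc];

		/* HW writes the current generation value into each completed
		 * descriptor; demo_desc_gen() stands in for extracting it
		 * from the descriptor's status/gen field.
		 */
		if (demo_desc_gen(desc) != idpf_queue_has(GEN_CHK, rxq))
			break;	/* not written back yet on this pass */

		demo_process_desc(rxq, desc);

		if (unlikely(++ntc == rxq->desc_count)) {
			ntc = 0;
			/* Ring wrapped: flip the expected generation. */
			idpf_queue_change(GEN_CHK, rxq);
		}
	}

	rxq->next_to_clean = ntc;
}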
- * @__IDPF_RFLQ_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW bit - * and RFLGQ_GEN is the SW bit. + * @__IDPF_Q_RFL_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW + * bit and Q_RFL_GEN is the SW bit. * @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions * @__IDPF_Q_POLL_MODE: Enable poll mode + * @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode + * @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq) * @__IDPF_Q_FLAGS_NBITS: Must be last */ enum idpf_queue_flags_t { __IDPF_Q_GEN_CHK, - __IDPF_RFLQ_GEN_CHK, + __IDPF_Q_RFL_GEN_CHK, __IDPF_Q_FLOW_SCH_EN, __IDPF_Q_SW_MARKER, __IDPF_Q_POLL_MODE, + __IDPF_Q_CRC_EN, + __IDPF_Q_HSPLIT_EN, __IDPF_Q_FLAGS_NBITS, }; +#define idpf_queue_set(f, q) __set_bit(__IDPF_Q_##f, (q)->flags) +#define idpf_queue_clear(f, q) __clear_bit(__IDPF_Q_##f, (q)->flags) +#define idpf_queue_change(f, q) __change_bit(__IDPF_Q_##f, (q)->flags) +#define idpf_queue_has(f, q) test_bit(__IDPF_Q_##f, (q)->flags) + +#define idpf_queue_has_clear(f, q) \ + __test_and_clear_bit(__IDPF_Q_##f, (q)->flags) +#define idpf_queue_assign(f, q, v) \ + __assign_bit(__IDPF_Q_##f, (q)->flags, v) + /** * struct idpf_vec_regs * @dyn_ctl_reg: Dynamic control interrupt register offset @@ -509,54 +413,68 @@ struct idpf_intr_reg { /** * struct idpf_q_vector * @vport: Vport back pointer - * @affinity_mask: CPU affinity mask - * @napi: napi handler - * @v_idx: Vector index - * @intr_reg: See struct idpf_intr_reg + * @num_rxq: Number of RX queues * @num_txq: Number of TX queues + * @num_bufq: Number of buffer queues + * @num_complq: number of completion queues + * @rx: Array of RX queues to service * @tx: Array of TX queues to service + * @bufq: Array of buffer queues to service + * @complq: array of completion queues + * @intr_reg: See struct idpf_intr_reg + * @napi: napi handler + * @total_events: Number of interrupts processed * @tx_dim: Data for TX net_dim algorithm * @tx_itr_value: TX interrupt throttling rate * @tx_intr_mode: Dynamic ITR or not * @tx_itr_idx: TX ITR index - * @num_rxq: Number of RX queues - * @rx: Array of RX queues to service * @rx_dim: Data for RX net_dim algorithm * @rx_itr_value: RX interrupt throttling rate * @rx_intr_mode: Dynamic ITR or not * @rx_itr_idx: RX ITR index - * @num_bufq: Number of buffer queues - * @bufq: Array of buffer queues to service - * @total_events: Number of interrupts processed - * @name: Queue vector name + * @v_idx: Vector index + * @affinity_mask: CPU affinity mask */ struct idpf_q_vector { + __cacheline_group_begin_aligned(read_mostly); struct idpf_vport *vport; - cpumask_t affinity_mask; - struct napi_struct napi; - u16 v_idx; - struct idpf_intr_reg intr_reg; + u16 num_rxq; u16 num_txq; - struct idpf_queue **tx; + u16 num_bufq; + u16 num_complq; + struct idpf_rx_queue **rx; + struct idpf_tx_queue **tx; + struct idpf_buf_queue **bufq; + struct idpf_compl_queue **complq; + + struct idpf_intr_reg intr_reg; + __cacheline_group_end_aligned(read_mostly); + + __cacheline_group_begin_aligned(read_write); + struct napi_struct napi; + u16 total_events; + struct dim tx_dim; u16 tx_itr_value; bool tx_intr_mode; u32 tx_itr_idx; - u16 num_rxq; - struct idpf_queue **rx; struct dim rx_dim; u16 rx_itr_value; bool rx_intr_mode; u32 rx_itr_idx; + __cacheline_group_end_aligned(read_write); - u16 num_bufq; - struct idpf_queue **bufq; + __cacheline_group_begin_aligned(cold); + u16 v_idx; - u16 total_events; - char *name; + cpumask_var_t affinity_mask; + 
__cacheline_group_end_aligned(cold); }; +libeth_cacheline_set_assert(struct idpf_q_vector, 104, + 424 + 2 * sizeof(struct dim), + 8 + sizeof(cpumask_var_t)); struct idpf_rx_queue_stats { u64_stats_t packets; @@ -583,11 +501,6 @@ struct idpf_cleaned_stats { u32 bytes; }; -union idpf_queue_stats { - struct idpf_rx_queue_stats rx; - struct idpf_tx_queue_stats tx; -}; - #define IDPF_ITR_DYNAMIC 1 #define IDPF_ITR_MAX 0x1FE0 #define IDPF_ITR_20K 0x0032 @@ -603,68 +516,123 @@ union idpf_queue_stats { #define IDPF_DIM_DEFAULT_PROFILE_IX 1 /** - * struct idpf_queue - * @dev: Device back pointer for DMA mapping - * @vport: Back pointer to associated vport - * @txq_grp: See struct idpf_txq_group - * @rxq_grp: See struct idpf_rxq_group - * @idx: For buffer queue, it is used as group id, either 0 or 1. On clean, - * buffer queue uses this index to determine which group of refill queues - * to clean. - * For TX queue, it is used as index to map between TX queue group and - * hot path TX pointers stored in vport. Used in both singleq/splitq. - * For RX queue, it is used to index to total RX queue across groups and + * struct idpf_txq_stash - Tx buffer stash for Flow-based scheduling mode + * @buf_stack: Stack of empty buffers to store buffer info for out of order + * buffer completions. See struct idpf_buf_lifo + * @sched_buf_hash: Hash table to store buffers + */ +struct idpf_txq_stash { + struct idpf_buf_lifo buf_stack; + DECLARE_HASHTABLE(sched_buf_hash, 12); +} ____cacheline_aligned; + +/** + * struct idpf_rx_queue - software structure representing a receive queue + * @rx: universal receive descriptor array + * @single_buf: buffer descriptor array in singleq + * @desc_ring: virtual descriptor ring address + * @bufq_sets: Pointer to the array of buffer queues in splitq mode + * @napi: NAPI instance corresponding to this queue (splitq) + * @rx_buf: See struct &libeth_fqe + * @pp: Page pool pointer in singleq mode + * @netdev: &net_device corresponding to this queue + * @tail: Tail offset. Used for both queue models single and split. + * @flags: See enum idpf_queue_flags_t + * @idx: For RX queue, it is used to index to total RX queue across groups and * used for skb reporting. - * @tail: Tail offset. Used for both queue models single and split. In splitq - * model relevant only for TX queue and RX queue. - * @tx_buf: See struct idpf_tx_buf - * @rx_buf: Struct with RX buffer related members - * @rx_buf.buf: See struct idpf_rx_buf - * @rx_buf.hdr_buf_pa: DMA handle - * @rx_buf.hdr_buf_va: Virtual address - * @pp: Page pool pointer - * @skb: Pointer to the skb - * @q_type: Queue type (TX, RX, TX completion, RX buffer) - * @q_id: Queue id * @desc_count: Number of descriptors - * @next_to_use: Next descriptor to use. Relevant in both split & single txq - * and bufq. - * @next_to_clean: Next descriptor to clean. In split queue model, only - * relevant to TX completion queue and RX queue. - * @next_to_alloc: RX buffer to allocate at. Used only for RX. In splitq model - * only relevant to RX queue. 
- * @flags: See enum idpf_queue_flags_t - * @q_stats: See union idpf_queue_stats + * @rxdids: Supported RX descriptor ids + * @rx_ptype_lkup: LUT of Rx ptypes + * @next_to_use: Next descriptor to use + * @next_to_clean: Next descriptor to clean + * @next_to_alloc: RX buffer to allocate at + * @skb: Pointer to the skb + * @truesize: data buffer truesize in singleq * @stats_sync: See struct u64_stats_sync - * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on - * the TX completion queue, it can be for any TXQ associated - * with that completion queue. This means we can clean up to - * N TXQs during a single call to clean the completion queue. - * cleaned_bytes|pkts tracks the clean stats per TXQ during - * that single call to clean the completion queue. By doing so, - * we can update BQL with aggregate cleaned stats for each TXQ - * only once at the end of the cleaning routine. - * @cleaned_pkts: Number of packets cleaned for the above said case - * @rx_hsplit_en: RX headsplit enable + * @q_stats: See union idpf_rx_queue_stats + * @q_id: Queue id + * @size: Length of descriptor ring in bytes + * @dma: Physical address of ring + * @q_vector: Backreference to associated vector + * @rx_buffer_low_watermark: RX buffer low watermark * @rx_hbuf_size: Header buffer size * @rx_buf_size: Buffer size * @rx_max_pkt_size: RX max packet size - * @rx_buf_stride: RX buffer stride - * @rx_buffer_low_watermark: RX buffer low watermark - * @rxdids: Supported RX descriptor ids - * @q_vector: Backreference to associated vector - * @size: Length of descriptor ring in bytes - * @dma: Physical address of ring - * @desc_ring: Descriptor ring memory - * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather + */ +struct idpf_rx_queue { + __cacheline_group_begin_aligned(read_mostly); + union { + union virtchnl2_rx_desc *rx; + struct virtchnl2_singleq_rx_buf_desc *single_buf; + + void *desc_ring; + }; + union { + struct { + struct idpf_bufq_set *bufq_sets; + struct napi_struct *napi; + }; + struct { + struct libeth_fqe *rx_buf; + struct page_pool *pp; + }; + }; + struct net_device *netdev; + void __iomem *tail; + + DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS); + u16 idx; + u16 desc_count; + + u32 rxdids; + const struct libeth_rx_pt *rx_ptype_lkup; + __cacheline_group_end_aligned(read_mostly); + + __cacheline_group_begin_aligned(read_write); + u16 next_to_use; + u16 next_to_clean; + u16 next_to_alloc; + + struct sk_buff *skb; + u32 truesize; + + struct u64_stats_sync stats_sync; + struct idpf_rx_queue_stats q_stats; + __cacheline_group_end_aligned(read_write); + + __cacheline_group_begin_aligned(cold); + u32 q_id; + u32 size; + dma_addr_t dma; + + struct idpf_q_vector *q_vector; + + u16 rx_buffer_low_watermark; + u16 rx_hbuf_size; + u16 rx_buf_size; + u16 rx_max_pkt_size; + __cacheline_group_end_aligned(cold); +}; +libeth_cacheline_set_assert(struct idpf_rx_queue, 64, + 80 + sizeof(struct u64_stats_sync), + 32); + +/** + * struct idpf_tx_queue - software structure representing a transmit queue + * @base_tx: base Tx descriptor array + * @base_ctx: base Tx context descriptor array + * @flex_tx: flex Tx descriptor array + * @flex_ctx: flex Tx context descriptor array + * @desc_ring: virtual descriptor ring address + * @tx_buf: See struct idpf_tx_buf + * @txq_grp: See struct idpf_txq_group + * @dev: Device back pointer for DMA mapping + * @tail: Tail offset. 
Used for both queue models single and split + * @flags: See enum idpf_queue_flags_t + * @idx: For TX queue, it is used as index to map between TX queue group and + * hot path TX pointers stored in vport. Used in both singleq/splitq. + * @desc_count: Number of descriptors * @tx_min_pkt_len: Min supported packet length - * @num_completions: Only relevant for TX completion queue. It tracks the - * number of completions received to compare against the - * number of completions pending, as accumulated by the - * TX queues. - * @buf_stack: Stack of empty buffers to store buffer info for out of order - * buffer completions. See struct idpf_buf_lifo. - * @compl_tag_bufid_m: Completion tag buffer id mask * @compl_tag_gen_s: Completion tag generation bit * The format of the completion tag will change based on the TXQ * descriptor ring size so that we can maintain roughly the same level @@ -685,108 +653,238 @@ union idpf_queue_stats { * -------------------------------- * * This gives us 8*8160 = 65280 possible unique values. + * @netdev: &net_device corresponding to this queue + * @next_to_use: Next descriptor to use + * @next_to_clean: Next descriptor to clean + * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on + * the TX completion queue, it can be for any TXQ associated + * with that completion queue. This means we can clean up to + * N TXQs during a single call to clean the completion queue. + * cleaned_bytes|pkts tracks the clean stats per TXQ during + * that single call to clean the completion queue. By doing so, + * we can update BQL with aggregate cleaned stats for each TXQ + * only once at the end of the cleaning routine. + * @clean_budget: singleq only, queue cleaning budget + * @cleaned_pkts: Number of packets cleaned for the above said case + * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather + * @stash: Tx buffer stash for Flow-based scheduling mode + * @compl_tag_bufid_m: Completion tag buffer id mask * @compl_tag_cur_gen: Used to keep track of current completion tag generation * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset - * @sched_buf_hash: Hash table to stores buffers + * @stats_sync: See struct u64_stats_sync + * @q_stats: See union idpf_tx_queue_stats + * @q_id: Queue id + * @size: Length of descriptor ring in bytes + * @dma: Physical address of ring + * @q_vector: Backreference to associated vector */ -struct idpf_queue { - struct device *dev; - struct idpf_vport *vport; +struct idpf_tx_queue { + __cacheline_group_begin_aligned(read_mostly); union { - struct idpf_txq_group *txq_grp; - struct idpf_rxq_group *rxq_grp; + struct idpf_base_tx_desc *base_tx; + struct idpf_base_tx_ctx_desc *base_ctx; + union idpf_tx_flex_desc *flex_tx; + struct idpf_flex_tx_ctx_desc *flex_ctx; + + void *desc_ring; }; - u16 idx; + struct idpf_tx_buf *tx_buf; + struct idpf_txq_group *txq_grp; + struct device *dev; void __iomem *tail; - union { - struct idpf_tx_buf *tx_buf; - struct { - struct idpf_rx_buf *buf; - dma_addr_t hdr_buf_pa; - void *hdr_buf_va; - } rx_buf; - }; - struct page_pool *pp; - struct sk_buff *skb; - u16 q_type; - u32 q_id; + + DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS); + u16 idx; u16 desc_count; + u16 tx_min_pkt_len; + u16 compl_tag_gen_s; + + struct net_device *netdev; + __cacheline_group_end_aligned(read_mostly); + + __cacheline_group_begin_aligned(read_write); u16 next_to_use; u16 next_to_clean; - u16 next_to_alloc; - DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS); - union idpf_queue_stats q_stats; + union { 
+ u32 cleaned_bytes; + u32 clean_budget; + }; + u16 cleaned_pkts; + + u16 tx_max_bufs; + struct idpf_txq_stash *stash; + + u16 compl_tag_bufid_m; + u16 compl_tag_cur_gen; + u16 compl_tag_gen_max; + struct u64_stats_sync stats_sync; + struct idpf_tx_queue_stats q_stats; + __cacheline_group_end_aligned(read_write); - u32 cleaned_bytes; - u16 cleaned_pkts; + __cacheline_group_begin_aligned(cold); + u32 q_id; + u32 size; + dma_addr_t dma; - bool rx_hsplit_en; - u16 rx_hbuf_size; - u16 rx_buf_size; - u16 rx_max_pkt_size; - u16 rx_buf_stride; - u8 rx_buffer_low_watermark; - u64 rxdids; struct idpf_q_vector *q_vector; - unsigned int size; + __cacheline_group_end_aligned(cold); +}; +libeth_cacheline_set_assert(struct idpf_tx_queue, 64, + 88 + sizeof(struct u64_stats_sync), + 24); + +/** + * struct idpf_buf_queue - software structure representing a buffer queue + * @split_buf: buffer descriptor array + * @hdr_buf: &libeth_fqe for header buffers + * @hdr_pp: &page_pool for header buffers + * @buf: &libeth_fqe for data buffers + * @pp: &page_pool for data buffers + * @tail: Tail offset + * @flags: See enum idpf_queue_flags_t + * @desc_count: Number of descriptors + * @next_to_use: Next descriptor to use + * @next_to_clean: Next descriptor to clean + * @next_to_alloc: RX buffer to allocate at + * @hdr_truesize: truesize for buffer headers + * @truesize: truesize for data buffers + * @q_id: Queue id + * @size: Length of descriptor ring in bytes + * @dma: Physical address of ring + * @q_vector: Backreference to associated vector + * @rx_buffer_low_watermark: RX buffer low watermark + * @rx_hbuf_size: Header buffer size + * @rx_buf_size: Buffer size + */ +struct idpf_buf_queue { + __cacheline_group_begin_aligned(read_mostly); + struct virtchnl2_splitq_rx_buf_desc *split_buf; + struct libeth_fqe *hdr_buf; + struct page_pool *hdr_pp; + struct libeth_fqe *buf; + struct page_pool *pp; + void __iomem *tail; + + DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS); + u32 desc_count; + __cacheline_group_end_aligned(read_mostly); + + __cacheline_group_begin_aligned(read_write); + u32 next_to_use; + u32 next_to_clean; + u32 next_to_alloc; + + u32 hdr_truesize; + u32 truesize; + __cacheline_group_end_aligned(read_write); + + __cacheline_group_begin_aligned(cold); + u32 q_id; + u32 size; dma_addr_t dma; - void *desc_ring; - u16 tx_max_bufs; - u8 tx_min_pkt_len; + struct idpf_q_vector *q_vector; - u32 num_completions; + u16 rx_buffer_low_watermark; + u16 rx_hbuf_size; + u16 rx_buf_size; + __cacheline_group_end_aligned(cold); +}; +libeth_cacheline_set_assert(struct idpf_buf_queue, 64, 24, 32); - struct idpf_buf_lifo buf_stack; +/** + * struct idpf_compl_queue - software structure representing a completion queue + * @comp: completion descriptor array + * @txq_grp: See struct idpf_txq_group + * @flags: See enum idpf_queue_flags_t + * @desc_count: Number of descriptors + * @clean_budget: queue cleaning budget + * @netdev: &net_device corresponding to this queue + * @next_to_use: Next descriptor to use. Relevant in both split & single txq + * and bufq. + * @next_to_clean: Next descriptor to clean + * @num_completions: Only relevant for TX completion queue. It tracks the + * number of completions received to compare against the + * number of completions pending, as accumulated by the + * TX queues. 
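The @cleaned_bytes/@cleaned_pkts description above (a single completion-queue pass can retire work for any TXQ in its group, and BQL is told about each TXQ only once at the end of the cleaning routine) can be pictured roughly as follows. This is an illustrative sketch, not the patch's idpf_tx_clean_complq(); demo_process_completions() is a hypothetical stand-in, while netdev_get_tx_queue() and netdev_tx_completed_queue() are the standard BQL interfaces:

static void demo_clean_complq(struct idpf_compl_queue *complq, int budget)
{
	struct idpf_txq_group *grp = complq->txq_grp;
	int i;

	/* One pass over the completion ring may complete descriptors for
	 * any TXQ in the group; per-TXQ bytes/packets accumulate in
	 * cleaned_bytes/cleaned_pkts during this pass.
	 */
	demo_process_completions(complq, budget);

	/* BQL is then updated exactly once per TXQ with the aggregate. */
	for (i = 0; i < grp->num_txq; i++) {
		struct idpf_tx_queue *txq = grp->txqs[i];
		struct netdev_queue *nq;

		if (!txq->cleaned_pkts)
			continue;

		nq = netdev_get_tx_queue(txq->netdev, txq->idx);
		netdev_tx_completed_queue(nq, txq->cleaned_pkts,
					  txq->cleaned_bytes);

		txq->cleaned_pkts = 0;
		txq->cleaned_bytes = 0;
	}
}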
+ * @q_id: Queue id + * @size: Length of descriptor ring in bytes + * @dma: Physical address of ring + * @q_vector: Backreference to associated vector + */ +struct idpf_compl_queue { + __cacheline_group_begin_aligned(read_mostly); + struct idpf_splitq_tx_compl_desc *comp; + struct idpf_txq_group *txq_grp; - u16 compl_tag_bufid_m; - u16 compl_tag_gen_s; + DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS); + u32 desc_count; - u16 compl_tag_cur_gen; - u16 compl_tag_gen_max; + u32 clean_budget; + struct net_device *netdev; + __cacheline_group_end_aligned(read_mostly); - DECLARE_HASHTABLE(sched_buf_hash, 12); -} ____cacheline_internodealigned_in_smp; + __cacheline_group_begin_aligned(read_write); + u32 next_to_use; + u32 next_to_clean; + + u32 num_completions; + __cacheline_group_end_aligned(read_write); + + __cacheline_group_begin_aligned(cold); + u32 q_id; + u32 size; + dma_addr_t dma; + + struct idpf_q_vector *q_vector; + __cacheline_group_end_aligned(cold); +}; +libeth_cacheline_set_assert(struct idpf_compl_queue, 40, 16, 24); /** * struct idpf_sw_queue - * @next_to_clean: Next descriptor to clean - * @next_to_alloc: Buffer to allocate at - * @flags: See enum idpf_queue_flags_t * @ring: Pointer to the ring + * @flags: See enum idpf_queue_flags_t * @desc_count: Descriptor count - * @dev: Device back pointer for DMA mapping + * @next_to_use: Buffer to allocate at + * @next_to_clean: Next descriptor to clean * * Software queues are used in splitq mode to manage buffers between rxq * producer and the bufq consumer. These are required in order to maintain a * lockless buffer management system and are strictly software only constructs. */ struct idpf_sw_queue { - u16 next_to_clean; - u16 next_to_alloc; + __cacheline_group_begin_aligned(read_mostly); + u32 *ring; + DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS); - u16 *ring; - u16 desc_count; - struct device *dev; -} ____cacheline_internodealigned_in_smp; + u32 desc_count; + __cacheline_group_end_aligned(read_mostly); + + __cacheline_group_begin_aligned(read_write); + u32 next_to_use; + u32 next_to_clean; + __cacheline_group_end_aligned(read_write); +}; +libeth_cacheline_group_assert(struct idpf_sw_queue, read_mostly, 24); +libeth_cacheline_group_assert(struct idpf_sw_queue, read_write, 8); +libeth_cacheline_struct_assert(struct idpf_sw_queue, 24, 8); /** * struct idpf_rxq_set * @rxq: RX queue - * @refillq0: Pointer to refill queue 0 - * @refillq1: Pointer to refill queue 1 + * @refillq: pointers to refill queues * * Splitq only. idpf_rxq_set associates an rxq with at an array of refillqs. * Each rxq needs a refillq to return used buffers back to the respective bufq. * Bufqs then clean these refillqs for buffers to give to hardware. */ struct idpf_rxq_set { - struct idpf_queue rxq; - struct idpf_sw_queue *refillq0; - struct idpf_sw_queue *refillq1; + struct idpf_rx_queue rxq; + struct idpf_sw_queue *refillq[IDPF_MAX_BUFQS_PER_RXQ_GRP]; }; /** @@ -805,7 +903,7 @@ struct idpf_rxq_set { * managed by at most two bufqs (depending on performance configuration). 
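To make the rxq/refillq/bufq relationship above concrete, here is an illustrative sketch (not the driver's code) of the lockless recycling flow: the Rx queue produces used buffer IDs into a software refill queue, and the buffer queue later drains every refillq in its set and reposts those buffers to hardware, as idpf_rx_clean_refillq_all() does earlier in this diff. The demo_repost_from_refillq() helper is hypothetical, and the real driver also folds a generation bit (IDPF_RX_BI_GEN_M) into each ring entry, which is omitted here:

static void demo_return_buffer(struct idpf_rxq_set *rxq_set, u32 bufq_idx,
			       u32 buf_id)
{
	/* Rx queue side (producer): hand the used buffer ID back to the
	 * refill queue paired with the buffer queue it came from.
	 */
	struct idpf_sw_queue *refillq = rxq_set->refillq[bufq_idx];

	refillq->ring[refillq->next_to_use] = buf_id;
	if (unlikely(++refillq->next_to_use == refillq->desc_count))
		refillq->next_to_use = 0;
}

static void demo_refill_bufq(struct idpf_buf_queue *bufq)
{
	/* Buffer queue side (consumer): drain all refill queues in the set
	 * and repost the returned buffers to hardware.
	 */
	struct idpf_bufq_set *bufq_set =
		container_of(bufq, struct idpf_bufq_set, bufq);
	int i;

	for (i = 0; i < bufq_set->num_refillqs; i++)
		demo_repost_from_refillq(bufq, &bufq_set->refillqs[i]);
}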
*/ struct idpf_bufq_set { - struct idpf_queue bufq; + struct idpf_buf_queue bufq; int num_refillqs; struct idpf_sw_queue *refillqs; }; @@ -831,7 +929,7 @@ struct idpf_rxq_group { union { struct { u16 num_rxq; - struct idpf_queue *rxqs[IDPF_LARGE_MAX_Q]; + struct idpf_rx_queue *rxqs[IDPF_LARGE_MAX_Q]; } singleq; struct { u16 num_rxq_sets; @@ -846,6 +944,7 @@ struct idpf_rxq_group { * @vport: Vport back pointer * @num_txq: Number of TX queues associated * @txqs: Array of TX queue pointers + * @stashes: array of OOO stashes for the queues * @complq: Associated completion queue pointer, split queue only * @num_completions_pending: Total number of completions pending for the * completion queue, acculumated for all TX queues @@ -859,13 +958,26 @@ struct idpf_txq_group { struct idpf_vport *vport; u16 num_txq; - struct idpf_queue *txqs[IDPF_LARGE_MAX_Q]; + struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q]; + struct idpf_txq_stash *stashes; - struct idpf_queue *complq; + struct idpf_compl_queue *complq; u32 num_completions_pending; }; +static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector) +{ + u32 cpu; + + if (!q_vector) + return NUMA_NO_NODE; + + cpu = cpumask_first(q_vector->affinity_mask); + + return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE; +} + /** * idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag * @size: transmit request size in bytes @@ -921,60 +1033,6 @@ static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc, idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size); } -/** - * idpf_alloc_page - Allocate a new RX buffer from the page pool - * @pool: page_pool to allocate from - * @buf: metadata struct to populate with page info - * @buf_size: 2K or 4K - * - * Returns &dma_addr_t to be passed to HW for Rx, %DMA_MAPPING_ERROR otherwise. 
- */ -static inline dma_addr_t idpf_alloc_page(struct page_pool *pool, - struct idpf_rx_buf *buf, - unsigned int buf_size) -{ - if (buf_size == IDPF_RX_BUF_2048) - buf->page = page_pool_dev_alloc_frag(pool, &buf->page_offset, - buf_size); - else - buf->page = page_pool_dev_alloc_pages(pool); - - if (!buf->page) - return DMA_MAPPING_ERROR; - - buf->truesize = buf_size; - - return page_pool_get_dma_addr(buf->page) + buf->page_offset + - pool->p.offset; -} - -/** - * idpf_rx_put_page - Return RX buffer page to pool - * @rx_buf: RX buffer metadata struct - */ -static inline void idpf_rx_put_page(struct idpf_rx_buf *rx_buf) -{ - page_pool_put_page(rx_buf->page->pp, rx_buf->page, - rx_buf->truesize, true); - rx_buf->page = NULL; -} - -/** - * idpf_rx_sync_for_cpu - Synchronize DMA buffer - * @rx_buf: RX buffer metadata struct - * @len: frame length from descriptor - */ -static inline void idpf_rx_sync_for_cpu(struct idpf_rx_buf *rx_buf, u32 len) -{ - struct page *page = rx_buf->page; - struct page_pool *pp = page->pp; - - dma_sync_single_range_for_cpu(pp->p.dev, - page_pool_get_dma_addr(page), - rx_buf->page_offset + pp->p.offset, len, - page_pool_get_dma_dir(pp)); -} - int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget); void idpf_vport_init_num_qs(struct idpf_vport *vport, struct virtchnl2_create_vport *vport_msg); @@ -991,35 +1049,27 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector); void idpf_vport_intr_deinit(struct idpf_vport *vport); int idpf_vport_intr_init(struct idpf_vport *vport); void idpf_vport_intr_ena(struct idpf_vport *vport); -enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded); int idpf_config_rss(struct idpf_vport *vport); int idpf_init_rss(struct idpf_vport *vport); void idpf_deinit_rss(struct idpf_vport *vport); int idpf_rx_bufs_init_all(struct idpf_vport *vport); void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb, unsigned int size); -struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq, - struct idpf_rx_buf *rx_buf, - unsigned int size); -bool idpf_init_rx_buf_hw_alloc(struct idpf_queue *rxq, struct idpf_rx_buf *buf); -void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val); -void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val, +struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size); +void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val, bool xmit_more); unsigned int idpf_size_to_txd_count(unsigned int size); -netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb); -void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb, +netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb); +void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb, struct idpf_tx_buf *first, u16 ring_idx); -unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq, +unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq, struct sk_buff *skb); -bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, - unsigned int count); -int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size); +int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size); void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue); -netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb, - struct net_device *netdev); -netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb, - struct net_device *netdev); -bool idpf_rx_singleq_buf_hw_alloc_all(struct 
idpf_queue *rxq, +netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb, + struct idpf_tx_queue *tx_q); +netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev); +bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq, u16 cleaned_count); int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off); diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index a5f9b7a5effe..70986e12da28 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2023 Intel Corporation */ +#include <net/libeth/rx.h> + #include "idpf.h" #include "idpf_virtchnl.h" @@ -750,7 +752,7 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport) int i; for (i = 0; i < vport->num_txq; i++) - set_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags); + idpf_queue_set(SW_MARKER, vport->txqs[i]); event = wait_event_timeout(vport->sw_marker_wq, test_and_clear_bit(IDPF_VPORT_SW_MARKER, @@ -758,7 +760,7 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport) msecs_to_jiffies(500)); for (i = 0; i < vport->num_txq; i++) - clear_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags); + idpf_queue_clear(POLL_MODE, vport->txqs[i]); if (event) return 0; @@ -1092,7 +1094,6 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals, int num_regs, u32 q_type) { struct idpf_adapter *adapter = vport->adapter; - struct idpf_queue *q; int i, j, k = 0; switch (q_type) { @@ -1111,6 +1112,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals, u16 num_rxq = rx_qgrp->singleq.num_rxq; for (j = 0; j < num_rxq && k < num_regs; j++, k++) { + struct idpf_rx_queue *q; + q = rx_qgrp->singleq.rxqs[j]; q->tail = idpf_get_reg_addr(adapter, reg_vals[k]); @@ -1123,6 +1126,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals, u8 num_bufqs = vport->num_bufqs_per_qgrp; for (j = 0; j < num_bufqs && k < num_regs; j++, k++) { + struct idpf_buf_queue *q; + q = &rx_qgrp->splitq.bufq_sets[j].bufq; q->tail = idpf_get_reg_addr(adapter, reg_vals[k]); @@ -1253,12 +1258,12 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter, vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT); vport_msg->vport_index = cpu_to_le16(idx); - if (adapter->req_tx_splitq) + if (adapter->req_tx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ)) vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT); else vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE); - if (adapter->req_rx_splitq) + if (adapter->req_rx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ)) vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT); else vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE); @@ -1320,10 +1325,17 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport) vport_msg = adapter->vport_params_recvd[vport->idx]; + if (!IS_ENABLED(CONFIG_IDPF_SINGLEQ) && + (vport_msg->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE || + vport_msg->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)) { + pci_err(adapter->pdev, "singleq mode requested, but not compiled-in\n"); + return -EOPNOTSUPP; + } + rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids); tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids); - if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { + if (idpf_is_queue_model_split(vport->rxq_model)) { if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) { 
dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n"); vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); @@ -1333,7 +1345,7 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport) vport->base_rxd = true; } - if (vport->txq_model != VIRTCHNL2_QUEUE_MODEL_SPLIT) + if (!idpf_is_queue_model_split(vport->txq_model)) return 0; if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) { @@ -1449,19 +1461,19 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport) qi[k].model = cpu_to_le16(vport->txq_model); qi[k].type = - cpu_to_le32(tx_qgrp->txqs[j]->q_type); + cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX); qi[k].ring_len = cpu_to_le16(tx_qgrp->txqs[j]->desc_count); qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->txqs[j]->dma); if (idpf_is_queue_model_split(vport->txq_model)) { - struct idpf_queue *q = tx_qgrp->txqs[j]; + struct idpf_tx_queue *q = tx_qgrp->txqs[j]; qi[k].tx_compl_queue_id = cpu_to_le16(tx_qgrp->complq->q_id); qi[k].relative_queue_id = cpu_to_le16(j); - if (test_bit(__IDPF_Q_FLOW_SCH_EN, q->flags)) + if (idpf_queue_has(FLOW_SCH_EN, q)) qi[k].sched_mode = cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW); else @@ -1478,11 +1490,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport) qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id); qi[k].model = cpu_to_le16(vport->txq_model); - qi[k].type = cpu_to_le32(tx_qgrp->complq->q_type); + qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION); qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count); qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma); - if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags)) + if (idpf_queue_has(FLOW_SCH_EN, tx_qgrp->complq)) sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW; else sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE; @@ -1567,17 +1579,18 @@ static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport) goto setup_rxqs; for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) { - struct idpf_queue *bufq = + struct idpf_buf_queue *bufq = &rx_qgrp->splitq.bufq_sets[j].bufq; qi[k].queue_id = cpu_to_le32(bufq->q_id); qi[k].model = cpu_to_le16(vport->rxq_model); - qi[k].type = cpu_to_le32(bufq->q_type); + qi[k].type = + cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER); qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); qi[k].ring_len = cpu_to_le16(bufq->desc_count); qi[k].dma_ring_addr = cpu_to_le64(bufq->dma); qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size); - qi[k].buffer_notif_stride = bufq->rx_buf_stride; + qi[k].buffer_notif_stride = IDPF_RX_BUF_STRIDE; qi[k].rx_buffer_low_watermark = cpu_to_le16(bufq->rx_buffer_low_watermark); if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) @@ -1591,35 +1604,47 @@ setup_rxqs: num_rxq = rx_qgrp->singleq.num_rxq; for (j = 0; j < num_rxq; j++, k++) { - struct idpf_queue *rxq; + const struct idpf_bufq_set *sets; + struct idpf_rx_queue *rxq; if (!idpf_is_queue_model_split(vport->rxq_model)) { rxq = rx_qgrp->singleq.rxqs[j]; goto common_qi_fields; } + rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq; - qi[k].rx_bufq1_id = - cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[0].bufq.q_id); + sets = rxq->bufq_sets; + + /* In splitq mode, RXQ buffer size should be + * set to that of the first buffer queue + * associated with this RXQ. 
+ */ + rxq->rx_buf_size = sets[0].bufq.rx_buf_size; + + qi[k].rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id); if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) { qi[k].bufq2_ena = IDPF_BUFQ2_ENA; qi[k].rx_bufq2_id = - cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[1].bufq.q_id); + cpu_to_le16(sets[1].bufq.q_id); } qi[k].rx_buffer_low_watermark = cpu_to_le16(rxq->rx_buffer_low_watermark); if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC); -common_qi_fields: - if (rxq->rx_hsplit_en) { + rxq->rx_hbuf_size = sets[0].bufq.rx_hbuf_size; + + if (idpf_queue_has(HSPLIT_EN, rxq)) { qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT); qi[k].hdr_buffer_size = cpu_to_le16(rxq->rx_hbuf_size); } + +common_qi_fields: qi[k].queue_id = cpu_to_le32(rxq->q_id); qi[k].model = cpu_to_le16(vport->rxq_model); - qi[k].type = cpu_to_le32(rxq->q_type); + qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX); qi[k].ring_len = cpu_to_le16(rxq->desc_count); qi[k].dma_ring_addr = cpu_to_le64(rxq->dma); qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size); @@ -1706,7 +1731,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena) struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; for (j = 0; j < tx_qgrp->num_txq; j++, k++) { - qc[k].type = cpu_to_le32(tx_qgrp->txqs[j]->q_type); + qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX); qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id); qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); } @@ -1720,7 +1745,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena) for (i = 0; i < vport->num_txq_grp; i++, k++) { struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; - qc[k].type = cpu_to_le32(tx_qgrp->complq->q_type); + qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION); qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id); qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); } @@ -1741,12 +1766,12 @@ setup_rx: qc[k].start_queue_id = cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id); qc[k].type = - cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_type); + cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX); } else { qc[k].start_queue_id = cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id); qc[k].type = - cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_type); + cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX); } qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); } @@ -1761,10 +1786,11 @@ setup_rx: struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) { - struct idpf_queue *q; + const struct idpf_buf_queue *q; q = &rx_qgrp->splitq.bufq_sets[j].bufq; - qc[k].type = cpu_to_le32(q->q_type); + qc[k].type = + cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER); qc[k].start_queue_id = cpu_to_le32(q->q_id); qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); } @@ -1849,7 +1875,8 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map) struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; for (j = 0; j < tx_qgrp->num_txq; j++, k++) { - vqv[k].queue_type = cpu_to_le32(tx_qgrp->txqs[j]->q_type); + vqv[k].queue_type = + cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX); vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id); if (idpf_is_queue_model_split(vport->txq_model)) { @@ -1879,14 +1906,15 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map) num_rxq = rx_qgrp->singleq.num_rxq; for (j = 0; j < num_rxq; j++, k++) { - struct idpf_queue *rxq; + struct idpf_rx_queue *rxq; if (idpf_is_queue_model_split(vport->rxq_model)) rxq = 
&rx_qgrp->splitq.rxq_sets[j]->rxq; else rxq = rx_qgrp->singleq.rxqs[j]; - vqv[k].queue_type = cpu_to_le32(rxq->q_type); + vqv[k].queue_type = + cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX); vqv[k].queue_id = cpu_to_le32(rxq->q_id); vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx); vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx); @@ -1975,7 +2003,7 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport) * queues virtchnl message is sent */ for (i = 0; i < vport->num_txq; i++) - set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags); + idpf_queue_set(POLL_MODE, vport->txqs[i]); /* schedule the napi to receive all the marker packets */ local_bh_disable(); @@ -2469,39 +2497,52 @@ do_memcpy: * @frag: fragmentation allowed * */ -static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype, +static void idpf_fill_ptype_lookup(struct libeth_rx_pt *ptype, struct idpf_ptype_state *pstate, bool ipv4, bool frag) { if (!pstate->outer_ip || !pstate->outer_frag) { - ptype->outer_ip = IDPF_RX_PTYPE_OUTER_IP; pstate->outer_ip = true; if (ipv4) - ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV4; + ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV4; else - ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV6; + ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV6; if (frag) { - ptype->outer_frag = IDPF_RX_PTYPE_FRAG; + ptype->outer_frag = LIBETH_RX_PT_FRAG; pstate->outer_frag = true; } } else { - ptype->tunnel_type = IDPF_RX_PTYPE_TUNNEL_IP_IP; + ptype->tunnel_type = LIBETH_RX_PT_TUNNEL_IP_IP; pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP; if (ipv4) - ptype->tunnel_end_prot = - IDPF_RX_PTYPE_TUNNEL_END_IPV4; + ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV4; else - ptype->tunnel_end_prot = - IDPF_RX_PTYPE_TUNNEL_END_IPV6; + ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV6; if (frag) - ptype->tunnel_end_frag = IDPF_RX_PTYPE_FRAG; + ptype->tunnel_end_frag = LIBETH_RX_PT_FRAG; } } +static void idpf_finalize_ptype_lookup(struct libeth_rx_pt *ptype) +{ + if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 && + ptype->inner_prot) + ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L4; + else if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 && + ptype->outer_ip) + ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L3; + else if (ptype->outer_ip == LIBETH_RX_PT_OUTER_L2) + ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L2; + else + ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_NONE; + + libeth_rx_pt_gen_hash_type(ptype); +} + /** * idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info * @vport: virtual port data structure @@ -2512,7 +2553,7 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) { struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL; struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL; - struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup; + struct libeth_rx_pt *ptype_lkup __free(kfree) = NULL; int max_ptype, ptypes_recvd = 0, ptype_offset; struct idpf_adapter *adapter = vport->adapter; struct idpf_vc_xn_params xn_params = {}; @@ -2520,12 +2561,17 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) ssize_t reply_sz; int i, j, k; + if (vport->rx_ptype_lkup) + return 0; + if (idpf_is_queue_model_split(vport->rxq_model)) max_ptype = IDPF_RX_MAX_PTYPE; else max_ptype = IDPF_RX_MAX_BASE_PTYPE; - memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup)); + ptype_lkup = kcalloc(max_ptype, sizeof(*ptype_lkup), GFP_KERNEL); + if (!ptype_lkup) + return -ENOMEM; get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL); if (!get_ptype_info) @@ -2583,16 +2629,13 @@ 
int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) /* 0xFFFF indicates end of ptypes */ if (le16_to_cpu(ptype->ptype_id_10) == IDPF_INVALID_PTYPE_ID) - return 0; + goto out; if (idpf_is_queue_model_split(vport->rxq_model)) k = le16_to_cpu(ptype->ptype_id_10); else k = ptype->ptype_id_8; - if (ptype->proto_id_count) - ptype_lkup[k].known = 1; - for (j = 0; j < ptype->proto_id_count; j++) { id = le16_to_cpu(ptype->proto_id[j]); switch (id) { @@ -2600,18 +2643,18 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) if (pstate.tunnel_state == IDPF_PTYPE_TUNNEL_IP) { ptype_lkup[k].tunnel_type = - IDPF_RX_PTYPE_TUNNEL_IP_GRENAT; + LIBETH_RX_PT_TUNNEL_IP_GRENAT; pstate.tunnel_state |= IDPF_PTYPE_TUNNEL_IP_GRENAT; } break; case VIRTCHNL2_PROTO_HDR_MAC: ptype_lkup[k].outer_ip = - IDPF_RX_PTYPE_OUTER_L2; + LIBETH_RX_PT_OUTER_L2; if (pstate.tunnel_state == IDPF_TUN_IP_GRE) { ptype_lkup[k].tunnel_type = - IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC; + LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC; pstate.tunnel_state |= IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC; } @@ -2638,23 +2681,23 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) break; case VIRTCHNL2_PROTO_HDR_UDP: ptype_lkup[k].inner_prot = - IDPF_RX_PTYPE_INNER_PROT_UDP; + LIBETH_RX_PT_INNER_UDP; break; case VIRTCHNL2_PROTO_HDR_TCP: ptype_lkup[k].inner_prot = - IDPF_RX_PTYPE_INNER_PROT_TCP; + LIBETH_RX_PT_INNER_TCP; break; case VIRTCHNL2_PROTO_HDR_SCTP: ptype_lkup[k].inner_prot = - IDPF_RX_PTYPE_INNER_PROT_SCTP; + LIBETH_RX_PT_INNER_SCTP; break; case VIRTCHNL2_PROTO_HDR_ICMP: ptype_lkup[k].inner_prot = - IDPF_RX_PTYPE_INNER_PROT_ICMP; + LIBETH_RX_PT_INNER_ICMP; break; case VIRTCHNL2_PROTO_HDR_PAY: ptype_lkup[k].payload_layer = - IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2; + LIBETH_RX_PT_PAYLOAD_L2; break; case VIRTCHNL2_PROTO_HDR_ICMPV6: case VIRTCHNL2_PROTO_HDR_IPV6_EH: @@ -2708,9 +2751,14 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) break; } } + + idpf_finalize_ptype_lookup(&ptype_lkup[k]); } } +out: + vport->rx_ptype_lkup = no_free_ptr(ptype_lkup); + return 0; } @@ -3125,7 +3173,7 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size); ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr); - vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - IDPF_PACKET_HDR_PAD; + vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - LIBETH_RX_LL_LEN; /* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */ memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS); @@ -3242,7 +3290,6 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, int num_qids, u32 q_type) { - struct idpf_queue *q; int i, j, k = 0; switch (q_type) { @@ -3250,11 +3297,8 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, for (i = 0; i < vport->num_txq_grp; i++) { struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; - for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) { + for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) tx_qgrp->txqs[j]->q_id = qids[k]; - tx_qgrp->txqs[j]->q_type = - VIRTCHNL2_QUEUE_TYPE_TX; - } } break; case VIRTCHNL2_QUEUE_TYPE_RX: @@ -3268,12 +3312,13 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, num_rxq = rx_qgrp->singleq.num_rxq; for (j = 0; j < num_rxq && k < num_qids; j++, k++) { + struct idpf_rx_queue *q; + if (idpf_is_queue_model_split(vport->rxq_model)) q = &rx_qgrp->splitq.rxq_sets[j]->rxq; else q = rx_qgrp->singleq.rxqs[j]; q->q_id = qids[k]; - q->q_type = 
VIRTCHNL2_QUEUE_TYPE_RX; } } break; @@ -3282,8 +3327,6 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; tx_qgrp->complq->q_id = qids[k]; - tx_qgrp->complq->q_type = - VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; } break; case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER: @@ -3292,9 +3335,10 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, u8 num_bufqs = vport->num_bufqs_per_qgrp; for (j = 0; j < num_bufqs && k < num_qids; j++, k++) { + struct idpf_buf_queue *q; + q = &rx_qgrp->splitq.bufq_sets[j].bufq; q->q_id = qids[k]; - q->q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; } } break; diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile index 394c1e0656b9..463c0d26b9d4 100644 --- a/drivers/net/ethernet/intel/igb/Makefile +++ b/drivers/net/ethernet/intel/igb/Makefile @@ -6,6 +6,6 @@ obj-$(CONFIG_IGB) += igb.o -igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \ - e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \ - e1000_i210.o igb_ptp.o igb_hwmon.o +igb-y := igb_main.o igb_ethtool.o e1000_82575.o \ + e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \ + e1000_i210.o igb_ptp.o igb_hwmon.o diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 61d72250c0ed..06b9970dffad 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2381,7 +2381,7 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) } static int igb_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct igb_adapter *adapter = netdev_priv(dev); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index fce2930ae6af..11be39f435f3 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -203,7 +203,6 @@ static const struct pci_error_handlers igb_err_handler = { static void igb_init_dmac(struct igb_adapter *adapter, u32 pba); -MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); MODULE_LICENSE("GPL v2"); @@ -9139,6 +9138,10 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) return -EIO; break; case SIOCSMIIREG: + if (igb_write_phy_reg(&adapter->hw, data->reg_num & 0x1F, + data->val_in)) + return -EIO; + break; default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/intel/igbvf/Makefile b/drivers/net/ethernet/intel/igbvf/Makefile index afd3e36eae75..902711d5e691 100644 --- a/drivers/net/ethernet/intel/igbvf/Makefile +++ b/drivers/net/ethernet/intel/igbvf/Makefile @@ -6,8 +6,4 @@ obj-$(CONFIG_IGBVF) += igbvf.o -igbvf-objs := vf.o \ - mbx.o \ - ethtool.o \ - netdev.o - +igbvf-y := vf.o mbx.o ethtool.o netdev.o diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 7661edd7d0f2..925d7286a8ee 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -3001,7 +3001,6 @@ static void __exit igbvf_exit_module(void) } module_exit(igbvf_exit_module); -MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile index 
ebffd3054285..efc5e7983dad 100644 --- a/drivers/net/ethernet/intel/igc/Makefile +++ b/drivers/net/ethernet/intel/igc/Makefile @@ -6,7 +6,7 @@ # obj-$(CONFIG_IGC) += igc.o -igc-$(CONFIG_IGC_LEDS) += igc_leds.o -igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \ -igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o igc_xdp.o +igc-y := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \ + igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o igc_xdp.o +igc-$(CONFIG_IGC_LEDS) += igc_leds.o diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 8b14c029eda1..c38b4d0f00ce 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -202,7 +202,6 @@ struct igc_adapter { struct net_device *netdev; struct ethtool_keee eee; - u16 eee_advert; unsigned long state; unsigned int flags; diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 0cd2bd695db1..3d3ef4e1547c 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -1559,7 +1559,7 @@ static int igc_ethtool_set_channels(struct net_device *netdev, } static int igc_ethtool_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct igc_adapter *adapter = netdev_priv(dev); @@ -1636,10 +1636,6 @@ static int igc_ethtool_get_eee(struct net_device *netdev, linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, edata->supported); - if (hw->dev_spec._base.eee_enable) - mii_eee_cap1_mod_linkmode_t(edata->advertised, - adapter->eee_advert); - eeer = rd32(IGC_EEER); /* EEE status on negotiated link */ @@ -1700,8 +1696,6 @@ static int igc_ethtool_set_eee(struct net_device *netdev, return -EINVAL; } - adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised); - if (hw->dev_spec._base.eee_enable != edata->eee_enabled) { hw->dev_spec._base.eee_enable = edata->eee_enabled; adapter->flags |= IGC_FLAG_EEE; diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 87b655b839c1..cb5c7b09e8a0 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -32,7 +32,6 @@ static int debug = -1; -MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION(DRV_SUMMARY); MODULE_LICENSE("GPL v2"); module_param(debug, int, 0); @@ -4976,9 +4975,6 @@ void igc_up(struct igc_adapter *adapter) /* start the watchdog. 
*/ hw->mac.get_link_status = true; schedule_work(&adapter->watchdog_task); - - adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T | - MDIO_EEE_2_5GT; } /** diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index 4fb0d9e3f2da..965e5ce1b326 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -6,10 +6,10 @@ obj-$(CONFIG_IXGBE) += ixgbe.o -ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ - ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ - ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \ - ixgbe_xsk.o +ixgbe-y := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ + ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ + ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \ + ixgbe_xsk.o ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ ixgbe_dcb_82599.o ixgbe_dcb_nl.o diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 6e6e6f1847b6..4cac76254966 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -3170,7 +3170,7 @@ static int ixgbe_set_rxfh(struct net_device *netdev, } static int ixgbe_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct ixgbe_adapter *adapter = netdev_priv(dev); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 094653e81b97..8057cef61f39 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -162,7 +162,6 @@ static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); -MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile index 186a4bb24fde..01d3e892f3fa 100644 --- a/drivers/net/ethernet/intel/ixgbevf/Makefile +++ b/drivers/net/ethernet/intel/ixgbevf/Makefile @@ -6,9 +6,5 @@ obj-$(CONFIG_IXGBEVF) += ixgbevf.o -ixgbevf-objs := vf.o \ - mbx.o \ - ethtool.o \ - ixgbevf_main.o +ixgbevf-y := vf.o mbx.o ethtool.o ixgbevf_main.o ixgbevf-$(CONFIG_IXGBEVF_IPSEC) += ipsec.o - diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index b938dc06045d..149911e3002a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -76,7 +76,6 @@ static const struct pci_device_id ixgbevf_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); -MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/intel/libeth/Makefile b/drivers/net/ethernet/intel/libeth/Makefile index cb99203d1dd2..52492b081132 100644 --- a/drivers/net/ethernet/intel/libeth/Makefile +++ b/drivers/net/ethernet/intel/libeth/Makefile @@ -3,4 +3,4 @@ obj-$(CONFIG_LIBETH) += libeth.o -libeth-objs += rx.o +libeth-y := rx.o diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c index 6221b88c34ac..f20926669318 100644 --- a/drivers/net/ethernet/intel/libeth/rx.c +++ b/drivers/net/ethernet/intel/libeth/rx.c @@ -6,7 +6,7 
@@ /* Rx buffer management */ /** - * libeth_rx_hw_len - get the actual buffer size to be passed to HW + * libeth_rx_hw_len_mtu - get the actual buffer size to be passed to HW * @pp: &page_pool_params of the netdev to calculate the size for * @max_len: maximum buffer size for a single descriptor * @@ -14,7 +14,7 @@ * MTU the @dev has, HW required alignment, minimum and maximum allowed values, * and system's page size. */ -static u32 libeth_rx_hw_len(const struct page_pool_params *pp, u32 max_len) +static u32 libeth_rx_hw_len_mtu(const struct page_pool_params *pp, u32 max_len) { u32 len; @@ -27,6 +27,118 @@ static u32 libeth_rx_hw_len(const struct page_pool_params *pp, u32 max_len) } /** + * libeth_rx_hw_len_truesize - get the short buffer size to be passed to HW + * @pp: &page_pool_params of the netdev to calculate the size for + * @max_len: maximum buffer size for a single descriptor + * @truesize: desired truesize for the buffers + * + * Return: HW-writeable length per one buffer to pass it to the HW ignoring the + * MTU and closest to the passed truesize. Can be used for "short" buffer + * queues to fragment pages more efficiently. + */ +static u32 libeth_rx_hw_len_truesize(const struct page_pool_params *pp, + u32 max_len, u32 truesize) +{ + u32 min, len; + + min = SKB_HEAD_ALIGN(pp->offset + LIBETH_RX_BUF_STRIDE); + truesize = clamp(roundup_pow_of_two(truesize), roundup_pow_of_two(min), + PAGE_SIZE << LIBETH_RX_PAGE_ORDER); + + len = SKB_WITH_OVERHEAD(truesize - pp->offset); + len = ALIGN_DOWN(len, LIBETH_RX_BUF_STRIDE) ? : LIBETH_RX_BUF_STRIDE; + len = min3(len, ALIGN_DOWN(max_len ? : U32_MAX, LIBETH_RX_BUF_STRIDE), + pp->max_len); + + return len; +} + +/** + * libeth_rx_page_pool_params - calculate params with the stack overhead + * @fq: buffer queue to calculate the size for + * @pp: &page_pool_params of the netdev + * + * Set the PP params to will all needed stack overhead (headroom, tailroom) and + * both the HW buffer length and the truesize for all types of buffers. For + * "short" buffers, truesize never exceeds the "wanted" one; for the rest, + * it can be up to the page size. + * + * Return: true on success, false on invalid input params. + */ +static bool libeth_rx_page_pool_params(struct libeth_fq *fq, + struct page_pool_params *pp) +{ + pp->offset = LIBETH_SKB_HEADROOM; + /* HW-writeable / syncable length per one page */ + pp->max_len = LIBETH_RX_PAGE_LEN(pp->offset); + + /* HW-writeable length per buffer */ + switch (fq->type) { + case LIBETH_FQE_MTU: + fq->buf_len = libeth_rx_hw_len_mtu(pp, fq->buf_len); + break; + case LIBETH_FQE_SHORT: + fq->buf_len = libeth_rx_hw_len_truesize(pp, fq->buf_len, + fq->truesize); + break; + case LIBETH_FQE_HDR: + fq->buf_len = ALIGN(LIBETH_MAX_HEAD, LIBETH_RX_BUF_STRIDE); + break; + default: + return false; + } + + /* Buffer size to allocate */ + fq->truesize = roundup_pow_of_two(SKB_HEAD_ALIGN(pp->offset + + fq->buf_len)); + + return true; +} + +/** + * libeth_rx_page_pool_params_zc - calculate params without the stack overhead + * @fq: buffer queue to calculate the size for + * @pp: &page_pool_params of the netdev + * + * Set the PP params to exclude the stack overhead and both the buffer length + * and the truesize, which are equal for the data buffers. Note that this + * requires separate header buffers to be always active and account the + * overhead. + * With the MTU == ``PAGE_SIZE``, this allows the kernel to enable the zerocopy + * mode. + * + * Return: true on success, false on invalid input params. 
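 *
 * For instance, assuming 4 KiB pages, page order 0 and a 1500 byte MTU: the
 * MTU rounds up to 2048, the cap is min(2^31, PAGE_SIZE) = 4096 when the
 * caller leaves ->buf_len at zero, and clamp() then gives buf_len = truesize
 * = 2048, i.e. two buffers per page with no headroom or tailroom. An MTU
 * close to 4 KiB yields one whole page per buffer, the zerocopy case noted
 * above.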
+ */ +static bool libeth_rx_page_pool_params_zc(struct libeth_fq *fq, + struct page_pool_params *pp) +{ + u32 mtu, max; + + pp->offset = 0; + pp->max_len = PAGE_SIZE << LIBETH_RX_PAGE_ORDER; + + switch (fq->type) { + case LIBETH_FQE_MTU: + mtu = READ_ONCE(pp->netdev->mtu); + break; + case LIBETH_FQE_SHORT: + mtu = fq->truesize; + break; + default: + return false; + } + + mtu = roundup_pow_of_two(mtu); + max = min(rounddown_pow_of_two(fq->buf_len ? : U32_MAX), + pp->max_len); + + fq->buf_len = clamp(mtu, LIBETH_RX_BUF_STRIDE, max); + fq->truesize = fq->buf_len; + + return true; +} + +/** * libeth_rx_fq_create - create a PP with the default libeth settings * @fq: buffer queue struct to fill * @napi: &napi_struct covering this PP (no usage outside its poll loops) @@ -44,19 +156,17 @@ int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi) .netdev = napi->dev, .napi = napi, .dma_dir = DMA_FROM_DEVICE, - .offset = LIBETH_SKB_HEADROOM, }; struct libeth_fqe *fqes; struct page_pool *pool; + bool ret; - /* HW-writeable / syncable length per one page */ - pp.max_len = LIBETH_RX_PAGE_LEN(pp.offset); - - /* HW-writeable length per buffer */ - fq->buf_len = libeth_rx_hw_len(&pp, fq->buf_len); - /* Buffer size to allocate */ - fq->truesize = roundup_pow_of_two(SKB_HEAD_ALIGN(pp.offset + - fq->buf_len)); + if (!fq->hsplit) + ret = libeth_rx_page_pool_params(fq, &pp); + else + ret = libeth_rx_page_pool_params_zc(fq, &pp); + if (!ret) + return -EINVAL; pool = page_pool_create(&pp); if (IS_ERR(pool)) @@ -145,6 +255,5 @@ EXPORT_SYMBOL_NS_GPL(libeth_rx_pt_gen_hash_type, LIBETH); /* Module */ -MODULE_AUTHOR("Intel Corporation"); MODULE_DESCRIPTION("Common Ethernet library"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/intel/libie/Makefile b/drivers/net/ethernet/intel/libie/Makefile index bf42c5aeeedd..ffd27fab916a 100644 --- a/drivers/net/ethernet/intel/libie/Makefile +++ b/drivers/net/ethernet/intel/libie/Makefile @@ -3,4 +3,4 @@ obj-$(CONFIG_LIBIE) += libie.o -libie-objs += rx.o +libie-y := rx.o diff --git a/drivers/net/ethernet/intel/libie/rx.c b/drivers/net/ethernet/intel/libie/rx.c index 38201ee1e891..aceb8d8813c4 100644 --- a/drivers/net/ethernet/intel/libie/rx.c +++ b/drivers/net/ethernet/intel/libie/rx.c @@ -118,7 +118,6 @@ const struct libeth_rx_pt libie_rx_pt_lut[LIBIE_RX_PT_NUM] = { }; EXPORT_SYMBOL_NS_GPL(libie_rx_pt_lut, LIBIE); -MODULE_AUTHOR("Intel Corporation"); MODULE_DESCRIPTION("Intel(R) Ethernet common library"); MODULE_IMPORT_NS(LIBETH); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 0b9982804370..9e6984815386 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -675,7 +675,6 @@ ltq_etop_probe(struct platform_device *pdev) err = -ENOMEM; goto err_out; } - strcpy(dev->name, "eth%d"); dev->netdev_ops = <q_eth_netdev_ops; dev->ethtool_ops = <q_etop_ethtool_ops; priv = netdev_priv(dev); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 9adf4301c9b1..8c45ad983abc 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -5259,7 +5259,7 @@ static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr) } static int mvpp2_ethtool_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct mvpp2_port *port = netdev_priv(dev); diff --git 
a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 05b84581d5c5..ed2160cc5acb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -139,6 +139,7 @@ M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \ M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \ M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \ M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \ +M(NDC_SYNC_OP, 0x009, ndc_sync_op, ndc_sync_op, msg_rsp) \ M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \ msg_rsp) \ M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \ @@ -1716,6 +1717,13 @@ struct lmtst_tbl_setup_req { u64 rsvd[4]; }; +struct ndc_sync_op { + struct mbox_msghdr hdr; + u8 nix_lf_tx_sync; + u8 nix_lf_rx_sync; + u8 npa_lf_sync; +}; + /* CPT mailbox error codes * Range 901 - 1000. */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 5f661e67ccbc..ac7ee3f3598c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -2014,6 +2014,13 @@ int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req, return 0; } +int rvu_ndc_sync(struct rvu *rvu, int lfblkaddr, int lfidx, u64 lfoffset) +{ + /* Sync cached info for this LF in NDC to LLC/DRAM */ + rvu_write64(rvu, lfblkaddr, lfoffset, BIT_ULL(12) | lfidx); + return rvu_poll_reg(rvu, lfblkaddr, lfoffset, BIT_ULL(12), true); +} + int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req, struct get_hw_cap_rsp *rsp) { @@ -2068,6 +2075,65 @@ int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req, return 0; } +int rvu_mbox_handler_ndc_sync_op(struct rvu *rvu, + struct ndc_sync_op *req, + struct msg_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + int err, lfidx, lfblkaddr; + + if (req->npa_lf_sync) { + /* Get NPA LF data */ + lfblkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc); + if (lfblkaddr < 0) + return NPA_AF_ERR_AF_LF_INVALID; + + lfidx = rvu_get_lf(rvu, &hw->block[lfblkaddr], pcifunc, 0); + if (lfidx < 0) + return NPA_AF_ERR_AF_LF_INVALID; + + /* Sync NPA NDC */ + err = rvu_ndc_sync(rvu, lfblkaddr, + lfidx, NPA_AF_NDC_SYNC); + if (err) + dev_err(rvu->dev, + "NDC-NPA sync failed for LF %u\n", lfidx); + } + + if (!req->nix_lf_tx_sync && !req->nix_lf_rx_sync) + return 0; + + /* Get NIX LF data */ + lfblkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (lfblkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + lfidx = rvu_get_lf(rvu, &hw->block[lfblkaddr], pcifunc, 0); + if (lfidx < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + if (req->nix_lf_tx_sync) { + /* Sync NIX TX NDC */ + err = rvu_ndc_sync(rvu, lfblkaddr, + lfidx, NIX_AF_NDC_TX_SYNC); + if (err) + dev_err(rvu->dev, + "NDC-NIX-TX sync fail for LF %u\n", lfidx); + } + + if (req->nix_lf_rx_sync) { + /* Sync NIX RX NDC */ + err = rvu_ndc_sync(rvu, lfblkaddr, + lfidx, NIX_AF_NDC_RX_SYNC); + if (err) + dev_err(rvu->dev, + "NDC-NIX-RX sync failed for LF %u\n", lfidx); + } + + return 0; +} + static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid, struct mbox_msghdr *req) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 35834687e40f..03ee93fd9e94 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -76,6 +76,7 @@ struct rvu_debugfs { struct 
dump_ctx nix_cq_ctx; struct dump_ctx nix_rq_ctx; struct dump_ctx nix_sq_ctx; + struct dump_ctx nix_tm_ctx; struct cpt_ctx cpt_ctx[MAX_CPT_BLKS]; int npa_qsize_id; int nix_qsize_id; @@ -799,6 +800,7 @@ int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf); int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc); int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero); int rvu_get_num_lbk_chans(void); +int rvu_ndc_sync(struct rvu *rvu, int lfblkid, int lfidx, u64 lfoffset); int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc, u16 global_slot, u16 *slot_in_block); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 881d704644fb..4a4ef5bd9e0b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -1603,6 +1603,367 @@ static void print_nix_cn10k_sq_ctx(struct seq_file *m, (u64)sq_ctx->dropped_pkts); } +static void print_tm_tree(struct seq_file *m, + struct nix_aq_enq_rsp *rsp, u64 sq) +{ + struct nix_sq_ctx_s *sq_ctx = &rsp->sq; + struct nix_hw *nix_hw = m->private; + struct rvu *rvu = nix_hw->rvu; + u16 p1, p2, p3, p4, schq; + int blkaddr; + u64 cfg; + + blkaddr = nix_hw->blkaddr; + schq = sq_ctx->smq; + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq)); + p1 = FIELD_GET(NIX_AF_MDQ_PARENT_MASK, cfg); + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(p1)); + p2 = FIELD_GET(NIX_AF_TL4_PARENT_MASK, cfg); + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(p2)); + p3 = FIELD_GET(NIX_AF_TL3_PARENT_MASK, cfg); + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(p3)); + p4 = FIELD_GET(NIX_AF_TL2_PARENT_MASK, cfg); + seq_printf(m, + "SQ(%llu) -> SMQ(%u) -> TL4(%u) -> TL3(%u) -> TL2(%u) -> TL1(%u)\n", + sq, schq, p1, p2, p3, p4); +} + +/*dumps given tm_tree registers*/ +static int rvu_dbg_nix_tm_tree_display(struct seq_file *m, void *unused) +{ + int qidx, nixlf, rc, id, max_id = 0; + struct nix_hw *nix_hw = m->private; + struct rvu *rvu = nix_hw->rvu; + struct nix_aq_enq_req aq_req; + struct nix_aq_enq_rsp rsp; + struct rvu_pfvf *pfvf; + u16 pcifunc; + + nixlf = rvu->rvu_dbg.nix_tm_ctx.lf; + id = rvu->rvu_dbg.nix_tm_ctx.id; + + if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc)) + return -EINVAL; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + max_id = pfvf->sq_ctx->qsize; + + memset(&aq_req, 0, sizeof(struct nix_aq_enq_req)); + aq_req.hdr.pcifunc = pcifunc; + aq_req.ctype = NIX_AQ_CTYPE_SQ; + aq_req.op = NIX_AQ_INSTOP_READ; + seq_printf(m, "pcifunc is 0x%x\n", pcifunc); + for (qidx = id; qidx < max_id; qidx++) { + aq_req.qidx = qidx; + + /* Skip SQ's if not initialized */ + if (!test_bit(qidx, pfvf->sq_bmap)) + continue; + + rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp); + + if (rc) { + seq_printf(m, "Failed to read SQ(%d) context\n", + aq_req.qidx); + continue; + } + print_tm_tree(m, &rsp, aq_req.qidx); + } + return 0; +} + +static ssize_t rvu_dbg_nix_tm_tree_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct nix_hw *nix_hw = m->private; + struct rvu *rvu = nix_hw->rvu; + struct rvu_pfvf *pfvf; + u16 pcifunc; + u64 nixlf; + int ret; + + ret = kstrtoull_from_user(buffer, count, 10, &nixlf); + if (ret) + return ret; + + if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc)) + return -EINVAL; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + if (!pfvf->sq_ctx) { + 
dev_warn(rvu->dev, "SQ context is not initialized\n"); + return -EINVAL; + } + + rvu->rvu_dbg.nix_tm_ctx.lf = nixlf; + return count; +} + +RVU_DEBUG_SEQ_FOPS(nix_tm_tree, nix_tm_tree_display, nix_tm_tree_write); + +static void print_tm_topo(struct seq_file *m, u64 schq, u32 lvl) +{ + struct nix_hw *nix_hw = m->private; + struct rvu *rvu = nix_hw->rvu; + int blkaddr, link, link_level; + struct rvu_hwinfo *hw; + + hw = rvu->hw; + blkaddr = nix_hw->blkaddr; + if (lvl == NIX_TXSCH_LVL_MDQ) { + seq_printf(m, "NIX_AF_SMQ[%llu]_CFG =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq))); + seq_printf(m, "NIX_AF_SMQ[%llu]_STATUS =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_SMQX_STATUS(schq))); + seq_printf(m, "NIX_AF_MDQ[%llu]_OUT_MD_COUNT =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_MDQX_OUT_MD_COUNT(schq))); + seq_printf(m, "NIX_AF_MDQ[%llu]_SCHEDULE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_MDQX_SCHEDULE(schq))); + seq_printf(m, "NIX_AF_MDQ[%llu]_SHAPE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SHAPE(schq))); + seq_printf(m, "NIX_AF_MDQ[%llu]_CIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_MDQX_CIR(schq))); + seq_printf(m, "NIX_AF_MDQ[%llu]_PIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PIR(schq))); + seq_printf(m, "NIX_AF_MDQ[%llu]_SW_XOFF =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SW_XOFF(schq))); + seq_printf(m, "NIX_AF_MDQ[%llu]_PARENT =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq))); + seq_puts(m, "\n"); + } + + if (lvl == NIX_TXSCH_LVL_TL4) { + seq_printf(m, "NIX_AF_TL4[%llu]_SDP_LINK_CFG =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL4X_SDP_LINK_CFG(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_SCHEDULE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL4X_SCHEDULE(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_SHAPE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SHAPE(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_CIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL4X_CIR(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_PIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PIR(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_SW_XOFF =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SW_XOFF(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_TOPOLOGY =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL4X_TOPOLOGY(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_PARENT =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG0 =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL4X_MD_DEBUG0(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG1 =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL4X_MD_DEBUG1(schq))); + seq_puts(m, "\n"); + } + + if (lvl == NIX_TXSCH_LVL_TL3) { + seq_printf(m, "NIX_AF_TL3[%llu]_SCHEDULE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL3X_SCHEDULE(schq))); + seq_printf(m, "NIX_AF_TL3[%llu]_SHAPE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SHAPE(schq))); + seq_printf(m, "NIX_AF_TL3[%llu]_CIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL3X_CIR(schq))); + seq_printf(m, "NIX_AF_TL3[%llu]_PIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PIR(schq))); + seq_printf(m, "NIX_AF_TL3[%llu]_SW_XOFF =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SW_XOFF(schq))); + seq_printf(m, "NIX_AF_TL3[%llu]_TOPOLOGY =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL3X_TOPOLOGY(schq))); + seq_printf(m, "NIX_AF_TL3[%llu]_PARENT =0x%llx\n", 
schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(schq))); + seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG0 =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL3X_MD_DEBUG0(schq))); + seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG1 =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL3X_MD_DEBUG1(schq))); + + link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) + & 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; + if (lvl == link_level) { + seq_printf(m, + "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n", + schq, rvu_read64(rvu, blkaddr, + NIX_AF_TL3_TL2X_BP_STATUS(schq))); + for (link = 0; link < hw->cgx_links; link++) + seq_printf(m, + "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n", + schq, link, + rvu_read64(rvu, blkaddr, + NIX_AF_TL3_TL2X_LINKX_CFG(schq, link))); + } + seq_puts(m, "\n"); + } + + if (lvl == NIX_TXSCH_LVL_TL2) { + seq_printf(m, "NIX_AF_TL2[%llu]_SHAPE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SHAPE(schq))); + seq_printf(m, "NIX_AF_TL2[%llu]_CIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL2X_CIR(schq))); + seq_printf(m, "NIX_AF_TL2[%llu]_PIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PIR(schq))); + seq_printf(m, "NIX_AF_TL2[%llu]_SW_XOFF =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SW_XOFF(schq))); + seq_printf(m, "NIX_AF_TL2[%llu]_TOPOLOGY =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL2X_TOPOLOGY(schq))); + seq_printf(m, "NIX_AF_TL2[%llu]_PARENT =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(schq))); + seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG0 =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL2X_MD_DEBUG0(schq))); + seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG1 =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL2X_MD_DEBUG1(schq))); + + link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) + & 0x01 ? 
NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; + if (lvl == link_level) { + seq_printf(m, + "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n", + schq, rvu_read64(rvu, blkaddr, + NIX_AF_TL3_TL2X_BP_STATUS(schq))); + for (link = 0; link < hw->cgx_links; link++) + seq_printf(m, + "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n", + schq, link, rvu_read64(rvu, blkaddr, + NIX_AF_TL3_TL2X_LINKX_CFG(schq, link))); + } + seq_puts(m, "\n"); + } + + if (lvl == NIX_TXSCH_LVL_TL1) { + seq_printf(m, "NIX_AF_TX_LINK[%llu]_NORM_CREDIT =0x%llx\n", + schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TX_LINKX_NORM_CREDIT(schq))); + seq_printf(m, "NIX_AF_TX_LINK[%llu]_HW_XOFF =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TX_LINKX_HW_XOFF(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_SCHEDULE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_SCHEDULE(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_SHAPE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SHAPE(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_CIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_SW_XOFF =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_TOPOLOGY =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_TOPOLOGY(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG0 =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_MD_DEBUG0(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG1 =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_MD_DEBUG1(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_PACKETS =0x%llx\n", + schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_DROPPED_PACKETS(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_BYTES =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_DROPPED_BYTES(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_RED_PACKETS =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_RED_PACKETS(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_RED_BYTES =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_RED_BYTES(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_PACKETS =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_YELLOW_PACKETS(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_BYTES =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_YELLOW_BYTES(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_PACKETS =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_GREEN_PACKETS(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_BYTES =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_GREEN_BYTES(schq))); + seq_puts(m, "\n"); + } +} + +/*dumps given tm_topo registers*/ +static int rvu_dbg_nix_tm_topo_display(struct seq_file *m, void *unused) +{ + struct nix_hw *nix_hw = m->private; + struct rvu *rvu = nix_hw->rvu; + struct nix_aq_enq_req aq_req; + struct nix_txsch *txsch; + int nixlf, lvl, schq; + u16 pcifunc; + + nixlf = rvu->rvu_dbg.nix_tm_ctx.lf; + + if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc)) + return -EINVAL; + + memset(&aq_req, 0, sizeof(struct nix_aq_enq_req)); + aq_req.hdr.pcifunc = pcifunc; + aq_req.ctype = NIX_AQ_CTYPE_SQ; + aq_req.op = NIX_AQ_INSTOP_READ; + seq_printf(m, "pcifunc is 0x%x\n", pcifunc); + + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + txsch = &nix_hw->txsch[lvl]; + for (schq = 0; schq < txsch->schq.max; schq++) { + if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) == pcifunc) + print_tm_topo(m, schq, lvl); + } + } + return 0; +} + +static ssize_t rvu_dbg_nix_tm_topo_write(struct file *filp, + const char __user *buffer, + size_t 
count, loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct nix_hw *nix_hw = m->private; + struct rvu *rvu = nix_hw->rvu; + struct rvu_pfvf *pfvf; + u16 pcifunc; + u64 nixlf; + int ret; + + ret = kstrtoull_from_user(buffer, count, 10, &nixlf); + if (ret) + return ret; + + if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc)) + return -EINVAL; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + if (!pfvf->sq_ctx) { + dev_warn(rvu->dev, "SQ context is not initialized\n"); + return -EINVAL; + } + + rvu->rvu_dbg.nix_tm_ctx.lf = nixlf; + return count; +} + +RVU_DEBUG_SEQ_FOPS(nix_tm_topo, nix_tm_topo_display, nix_tm_topo_write); + /* Dumps given nix_sq's context */ static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) { @@ -2349,6 +2710,10 @@ static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr) nix_hw = &rvu->hw->nix[1]; } + debugfs_create_file("tm_tree", 0600, rvu->rvu_dbg.nix, nix_hw, + &rvu_dbg_nix_tm_tree_fops); + debugfs_create_file("tm_topo", 0600, rvu->rvu_dbg.nix, nix_hw, + &rvu_dbg_nix_tm_topo_fops); debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, &rvu_dbg_nix_sq_ctx_fops); debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 3dc828cf6c5a..222f9e00b836 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -2497,9 +2497,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) } mutex_unlock(&rvu->rsrc_lock); - /* Sync cached info for this LF in NDC-TX to LLC/DRAM */ - rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf); - err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true); + err = rvu_ndc_sync(rvu, blkaddr, nixlf, NIX_AF_NDC_TX_SYNC); if (err) dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index 086f05c0376f..d56be5fb7eb4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -121,6 +121,7 @@ #define NPA_AF_LF_RST (0x0020) #define NPA_AF_GEN_CFG (0x0030) #define NPA_AF_NDC_CFG (0x0040) +#define NPA_AF_NDC_SYNC (0x0050) #define NPA_AF_INP_CTL (0x00D0) #define NPA_AF_ACTIVE_CYCLES_PC (0x00F0) #define NPA_AF_AVG_DELAY (0x0100) @@ -239,6 +240,7 @@ #define NIX_AF_RX_CPTX_INST_ADDR (0x0310) #define NIX_AF_RX_CPTX_INST_QSEL(a) (0x0320ull | (uint64_t)(a) << 3) #define NIX_AF_RX_CPTX_CREDIT(a) (0x0360ull | (uint64_t)(a) << 3) +#define NIX_AF_NDC_RX_SYNC (0x03E0) #define NIX_AF_NDC_TX_SYNC (0x03F0) #define NIX_AF_AQ_CFG (0x0400) #define NIX_AF_AQ_BASE (0x0410) @@ -429,6 +431,8 @@ #define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16) #define NIX_AF_LINKX_CFG(a) (0x4010 | (a) << 17) #define NIX_AF_MDQX_IN_MD_COUNT(a) (0x14e0 | (a) << 16) +#define NIX_AF_SMQX_STATUS(a) (0x730 | (a) << 16) +#define NIX_AF_MDQX_OUT_MD_COUNT(a) (0xdb0 | (a) << 16) #define NIX_PRIV_AF_INT_CFG (0x8000000) #define NIX_PRIV_LFX_CFG (0x8000010) @@ -442,6 +446,11 @@ #define NIX_CONST_MAX_BPIDS GENMASK_ULL(23, 12) #define NIX_CONST_SDP_CHANS GENMASK_ULL(11, 0) +#define NIX_AF_MDQ_PARENT_MASK GENMASK_ULL(24, 16) +#define NIX_AF_TL4_PARENT_MASK GENMASK_ULL(23, 16) +#define NIX_AF_TL3_PARENT_MASK GENMASK_ULL(23, 16) +#define NIX_AF_TL2_PARENT_MASK GENMASK_ULL(20, 16) + /* SSO */ #define SSO_AF_CONST (0x1000) #define 
SSO_AF_CONST1 (0x1008) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 24fbbef265a6..f27a3456ae64 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -346,12 +346,9 @@ struct otx2_flow_config { u16 *def_ent; u16 nr_flows; #define OTX2_DEFAULT_FLOWCOUNT 16 -#define OTX2_MAX_UNICAST_FLOWS 8 +#define OTX2_DEFAULT_UNICAST_FLOWS 4 #define OTX2_MAX_VLAN_FLOWS 1 #define OTX2_MAX_TC_FLOWS OTX2_DEFAULT_FLOWCOUNT -#define OTX2_MCAM_COUNT (OTX2_DEFAULT_FLOWCOUNT + \ - OTX2_MAX_UNICAST_FLOWS + \ - OTX2_MAX_VLAN_FLOWS) u16 unicast_offset; u16 rx_vlan_offset; u16 vf_vlan_offset; @@ -365,6 +362,7 @@ struct otx2_flow_config { u16 max_flows; refcount_t mark_flows; struct list_head flow_list_tc; + u8 ucast_flt_cnt; bool ntuple; }; @@ -1067,6 +1065,7 @@ int otx2_handle_ntuple_tc_features(struct net_device *netdev, int otx2_smq_flush(struct otx2_nic *pfvf, int smq); void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool, u64 iova, int size); +int otx2_mcam_entry_init(struct otx2_nic *pfvf); /* tc support */ int otx2_init_tc(struct otx2_nic *nic); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c index 458d34a62e18..53f14aa944bd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c @@ -64,9 +64,68 @@ static int otx2_dl_mcam_count_get(struct devlink *devlink, u32 id, return 0; } +static int otx2_dl_ucast_flt_cnt_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) +{ + struct otx2_devlink *otx2_dl = devlink_priv(devlink); + struct otx2_nic *pfvf = otx2_dl->pfvf; + int err; + + pfvf->flow_cfg->ucast_flt_cnt = ctx->val.vu8; + + otx2_mcam_flow_del(pfvf); + err = otx2_mcam_entry_init(pfvf); + if (err) + return err; + + return 0; +} + +static int otx2_dl_ucast_flt_cnt_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct otx2_devlink *otx2_dl = devlink_priv(devlink); + struct otx2_nic *pfvf = otx2_dl->pfvf; + + ctx->val.vu8 = pfvf->flow_cfg ? 
pfvf->flow_cfg->ucast_flt_cnt : 0; + + return 0; +} + +static int otx2_dl_ucast_flt_cnt_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct otx2_devlink *otx2_dl = devlink_priv(devlink); + struct otx2_nic *pfvf = otx2_dl->pfvf; + + /* Check for UNICAST filter support*/ + if (!(pfvf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)) { + NL_SET_ERR_MSG_MOD(extack, + "Unicast filter not enabled"); + return -EINVAL; + } + + if (!pfvf->flow_cfg) { + NL_SET_ERR_MSG_MOD(extack, + "pfvf->flow_cfg not initialized"); + return -EINVAL; + } + + if (pfvf->flow_cfg->nr_flows) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot modify count when there are active rules"); + return -EINVAL; + } + + return 0; +} + enum otx2_dl_param_id { OTX2_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, OTX2_DEVLINK_PARAM_ID_MCAM_COUNT, + OTX2_DEVLINK_PARAM_ID_UCAST_FLT_CNT, }; static const struct devlink_param otx2_dl_params[] = { @@ -75,6 +134,11 @@ static const struct devlink_param otx2_dl_params[] = { BIT(DEVLINK_PARAM_CMODE_RUNTIME), otx2_dl_mcam_count_get, otx2_dl_mcam_count_set, otx2_dl_mcam_count_validate), + DEVLINK_PARAM_DRIVER(OTX2_DEVLINK_PARAM_ID_UCAST_FLT_CNT, + "unicast_filter_count", DEVLINK_PARAM_TYPE_U8, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + otx2_dl_ucast_flt_cnt_get, otx2_dl_ucast_flt_cnt_set, + otx2_dl_ucast_flt_cnt_validate), }; static const struct devlink_ops otx2_devlink_ops = { diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index 7f786de61014..0db62eb0dab3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -954,7 +954,7 @@ static u32 otx2_get_link(struct net_device *netdev) } static int otx2_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct otx2_nic *pfvf = netdev_priv(netdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index bc5819237ed7..98c31a16c70b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -12,8 +12,6 @@ #define OTX2_DEFAULT_ACTION 0x1 -static int otx2_mcam_entry_init(struct otx2_nic *pfvf); - struct otx2_flow { struct ethtool_rx_flow_spec flow_spec; struct list_head list; @@ -161,7 +159,7 @@ exit: } EXPORT_SYMBOL(otx2_alloc_mcam_entries); -static int otx2_mcam_entry_init(struct otx2_nic *pfvf) +int otx2_mcam_entry_init(struct otx2_nic *pfvf) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct npc_get_field_status_req *freq; @@ -172,7 +170,7 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf) int ent, count; vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS; - count = OTX2_MAX_UNICAST_FLOWS + + count = flow_cfg->ucast_flt_cnt + OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows; flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count, @@ -214,7 +212,7 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf) flow_cfg->vf_vlan_offset = 0; flow_cfg->unicast_offset = vf_vlan_max_flows; flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset + - OTX2_MAX_UNICAST_FLOWS; + flow_cfg->ucast_flt_cnt; pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT; /* Check if NPC_DMAC field is supported @@ -255,6 +253,7 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf) refcount_set(&flow_cfg->mark_flows, 1); return 0; } 
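The otx2_mcam_entry_init() changes above size the default MCAM entries from flow_cfg->ucast_flt_cnt instead of the removed OTX2_MAX_UNICAST_FLOWS constant; the count defaults to OTX2_DEFAULT_UNICAST_FLOWS and is exposed through the new "unicast_filter_count" devlink runtime parameter, whose validate callback above rejects changes while filter rules are installed. A minimal userspace sketch of the resulting default-entry layout (the per-VF VLAN flow count is passed in as an illustrative parameter here, not the driver's constant):

#include <stdio.h>

#define OTX2_MAX_VLAN_FLOWS 1

static void mcam_layout(unsigned int total_vfs, unsigned int per_vf_vlan_flows,
                        unsigned int ucast_flt_cnt)
{
        unsigned int vf_vlan_max_flows = total_vfs * per_vf_vlan_flows;
        unsigned int count = ucast_flt_cnt + OTX2_MAX_VLAN_FLOWS +
                             vf_vlan_max_flows;

        printf("%u default entries: vf_vlan @0, unicast @%u (%u slots), rx_vlan @%u\n",
               count, vf_vlan_max_flows, ucast_flt_cnt,
               vf_vlan_max_flows + ucast_flt_cnt);
}

int main(void)
{
        mcam_layout(8, 2, 4);   /* 8 VFs, default unicast_filter_count */
        mcam_layout(8, 2, 8);   /* same VFs, count raised at runtime */
        return 0;
}

At runtime the count would then be tuned with something along the lines of "devlink dev param set pci/<bdf> name unicast_filter_count value 8 cmode runtime", after which the set callback tears down the existing entries and re-runs otx2_mcam_entry_init() to re-carve them.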
+EXPORT_SYMBOL(otx2_mcam_entry_init); /* TODO : revisit on size */ #define OTX2_DMAC_FLTR_BITMAP_SZ (4 * 2048 + 32) @@ -302,6 +301,8 @@ int otx2_mcam_flow_init(struct otx2_nic *pf) INIT_LIST_HEAD(&pf->flow_cfg->flow_list); INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc); + pf->flow_cfg->ucast_flt_cnt = OTX2_DEFAULT_UNICAST_FLOWS; + /* Allocate bare minimum number of MCAM entries needed for * unicast and ntuple filters. */ @@ -314,7 +315,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf) return 0; pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table) - * OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL); + * pf->flow_cfg->ucast_flt_cnt, GFP_KERNEL); if (!pf->mac_table) return -ENOMEM; @@ -356,7 +357,7 @@ static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac) return -ENOMEM; /* dont have free mcam entries or uc list is greater than alloted */ - if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS) + if (netdev_uc_count(pf->netdev) > pf->flow_cfg->ucast_flt_cnt) return -ENOMEM; mutex_lock(&pf->mbox.lock); @@ -367,7 +368,7 @@ static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac) } /* unicast offset starts with 32 0..31 for ntuple */ - for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) { + for (i = 0; i < pf->flow_cfg->ucast_flt_cnt; i++) { if (pf->mac_table[i].inuse) continue; ether_addr_copy(pf->mac_table[i].addr, mac); @@ -410,7 +411,7 @@ static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac, { int i; - for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) { + for (i = 0; i < pf->flow_cfg->ucast_flt_cnt; i++) { if (!pf->mac_table[i].inuse) continue; @@ -1394,6 +1395,7 @@ int otx2_destroy_mcam_flows(struct otx2_nic *pfvf) } pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC; + flow_cfg->max_flows = 0; mutex_unlock(&pfvf->mbox.lock); return 0; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index f5bce3e326cc..5492dea547a1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1714,7 +1714,7 @@ static void otx2_do_set_rx_mode(struct otx2_nic *pf) return; if ((netdev->flags & IFF_PROMISC) || - (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) { + (netdev_uc_count(netdev) > pf->flow_cfg->ucast_flt_cnt)) { promisc = true; } @@ -3245,6 +3245,29 @@ static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs) return otx2_sriov_enable(pdev, numvfs); } +static void otx2_ndc_sync(struct otx2_nic *pf) +{ + struct mbox *mbox = &pf->mbox; + struct ndc_sync_op *req; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_ndc_sync_op(mbox); + if (!req) { + mutex_unlock(&mbox->lock); + return; + } + + req->nix_lf_tx_sync = 1; + req->nix_lf_rx_sync = 1; + req->npa_lf_sync = 1; + + if (!otx2_sync_mbox_msg(mbox)) + dev_err(pf->dev, "NDC sync operation failed\n"); + + mutex_unlock(&mbox->lock); +} + static void otx2_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -3293,6 +3316,7 @@ static void otx2_remove(struct pci_dev *pdev) otx2_mcam_flow_del(pf); otx2_shutdown_tc(pf); otx2_shutdown_qos(pf); + otx2_ndc_sync(pf); otx2_detach_resources(&pf->mbox); if (pf->hw.lmt_info) free_percpu(pf->hw.lmt_info); diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig index da0db417ab69..95c4405b7d7b 100644 --- a/drivers/net/ethernet/mediatek/Kconfig +++ b/drivers/net/ethernet/mediatek/Kconfig @@ -1,12 +1,20 @@ # SPDX-License-Identifier: GPL-2.0-only config 
NET_VENDOR_MEDIATEK bool "MediaTek devices" - depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620 || COMPILE_TEST + depends on ARCH_MEDIATEK || ARCH_AIROHA || SOC_MT7621 || SOC_MT7620 || COMPILE_TEST help If you have a Mediatek SoC with ethernet, say Y. if NET_VENDOR_MEDIATEK +config NET_AIROHA + tristate "Airoha SoC Gigabit Ethernet support" + depends on NET_DSA || !NET_DSA + select PAGE_POOL + help + This driver supports the gigabit ethernet MACs in the + Airoha SoC family. + config NET_MEDIATEK_SOC_WED depends on ARCH_MEDIATEK || COMPILE_TEST def_bool NET_MEDIATEK_SOC != n diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile index 03e008fbc859..ddbb7f4a516c 100644 --- a/drivers/net/ethernet/mediatek/Makefile +++ b/drivers/net/ethernet/mediatek/Makefile @@ -11,3 +11,4 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o endif obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o +obj-$(CONFIG_NET_AIROHA) += airoha_eth.o diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c new file mode 100644 index 000000000000..7967a92803c2 --- /dev/null +++ b/drivers/net/ethernet/mediatek/airoha_eth.c @@ -0,0 +1,2730 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024 AIROHA Inc + * Author: Lorenzo Bianconi <lorenzo@kernel.org> + */ +#include <linux/etherdevice.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/of.h> +#include <linux/of_net.h> +#include <linux/platform_device.h> +#include <linux/reset.h> +#include <linux/tcp.h> +#include <linux/u64_stats_sync.h> +#include <net/dsa.h> +#include <net/page_pool/helpers.h> +#include <uapi/linux/ppp_defs.h> + +#define AIROHA_MAX_NUM_GDM_PORTS 1 +#define AIROHA_MAX_NUM_RSTS 3 +#define AIROHA_MAX_NUM_XSI_RSTS 5 +#define AIROHA_MAX_MTU 2000 +#define AIROHA_MAX_PACKET_SIZE 2048 +#define AIROHA_NUM_TX_RING 32 +#define AIROHA_NUM_RX_RING 32 +#define AIROHA_FE_MC_MAX_VLAN_TABLE 64 +#define AIROHA_FE_MC_MAX_VLAN_PORT 16 +#define AIROHA_NUM_TX_IRQ 2 +#define HW_DSCP_NUM 2048 +#define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048) +#define TX_DSCP_NUM 1024 +#define RX_DSCP_NUM(_n) \ + ((_n) == 2 ? 128 : \ + (_n) == 11 ? 128 : \ + (_n) == 15 ? 128 : \ + (_n) == 0 ? 1024 : 16) + +#define PSE_RSV_PAGES 128 +#define PSE_QUEUE_RSV_PAGES 64 + +/* FE */ +#define PSE_BASE 0x0100 +#define CSR_IFC_BASE 0x0200 +#define CDM1_BASE 0x0400 +#define GDM1_BASE 0x0500 +#define PPE1_BASE 0x0c00 + +#define CDM2_BASE 0x1400 +#define GDM2_BASE 0x1500 + +#define GDM3_BASE 0x1100 +#define GDM4_BASE 0x2500 + +#define GDM_BASE(_n) \ + ((_n) == 4 ? GDM4_BASE : \ + (_n) == 3 ? GDM3_BASE : \ + (_n) == 2 ? 
GDM2_BASE : GDM1_BASE) + +#define REG_FE_DMA_GLO_CFG 0x0000 +#define FE_DMA_GLO_L2_SPACE_MASK GENMASK(7, 4) +#define FE_DMA_GLO_PG_SZ_MASK BIT(3) + +#define REG_FE_RST_GLO_CFG 0x0004 +#define FE_RST_GDM4_MBI_ARB_MASK BIT(3) +#define FE_RST_GDM3_MBI_ARB_MASK BIT(2) +#define FE_RST_CORE_MASK BIT(0) + +#define REG_FE_LAN_MAC_H 0x0040 +#define REG_FE_LAN_MAC_LMIN 0x0044 +#define REG_FE_LAN_MAC_LMAX 0x0048 + +#define REG_FE_CDM1_OQ_MAP0 0x0050 +#define REG_FE_CDM1_OQ_MAP1 0x0054 +#define REG_FE_CDM1_OQ_MAP2 0x0058 +#define REG_FE_CDM1_OQ_MAP3 0x005c + +#define REG_FE_PCE_CFG 0x0070 +#define PCE_DPI_EN_MASK BIT(2) +#define PCE_KA_EN_MASK BIT(1) +#define PCE_MC_EN_MASK BIT(0) + +#define REG_FE_PSE_QUEUE_CFG_WR 0x0080 +#define PSE_CFG_PORT_ID_MASK GENMASK(27, 24) +#define PSE_CFG_QUEUE_ID_MASK GENMASK(20, 16) +#define PSE_CFG_WR_EN_MASK BIT(8) +#define PSE_CFG_OQRSV_SEL_MASK BIT(0) + +#define REG_FE_PSE_QUEUE_CFG_VAL 0x0084 +#define PSE_CFG_OQ_RSV_MASK GENMASK(13, 0) + +#define PSE_FQ_CFG 0x008c +#define PSE_FQ_LIMIT_MASK GENMASK(14, 0) + +#define REG_FE_PSE_BUF_SET 0x0090 +#define PSE_SHARE_USED_LTHD_MASK GENMASK(31, 16) +#define PSE_ALLRSV_MASK GENMASK(14, 0) + +#define REG_PSE_SHARE_USED_THD 0x0094 +#define PSE_SHARE_USED_MTHD_MASK GENMASK(31, 16) +#define PSE_SHARE_USED_HTHD_MASK GENMASK(15, 0) + +#define REG_GDM_MISC_CFG 0x0148 +#define GDM2_RDM_ACK_WAIT_PREF_MASK BIT(9) +#define GDM2_CHN_VLD_MODE_MASK BIT(5) + +#define REG_FE_CSR_IFC_CFG CSR_IFC_BASE +#define FE_IFC_EN_MASK BIT(0) + +#define REG_FE_VIP_PORT_EN 0x01f0 +#define REG_FE_IFC_PORT_EN 0x01f4 + +#define REG_PSE_IQ_REV1 (PSE_BASE + 0x08) +#define PSE_IQ_RES1_P2_MASK GENMASK(23, 16) + +#define REG_PSE_IQ_REV2 (PSE_BASE + 0x0c) +#define PSE_IQ_RES2_P5_MASK GENMASK(15, 8) +#define PSE_IQ_RES2_P4_MASK GENMASK(7, 0) + +#define REG_FE_VIP_EN(_n) (0x0300 + ((_n) << 3)) +#define PATN_FCPU_EN_MASK BIT(7) +#define PATN_SWP_EN_MASK BIT(6) +#define PATN_DP_EN_MASK BIT(5) +#define PATN_SP_EN_MASK BIT(4) +#define PATN_TYPE_MASK GENMASK(3, 1) +#define PATN_EN_MASK BIT(0) + +#define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3)) +#define PATN_DP_MASK GENMASK(31, 16) +#define PATN_SP_MASK GENMASK(15, 0) + +#define REG_CDM1_VLAN_CTRL CDM1_BASE +#define CDM1_VLAN_MASK GENMASK(31, 16) + +#define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08) +#define CDM1_VIP_QSEL_MASK GENMASK(24, 20) + +#define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2)) +#define CDM1_CRSN_QSEL_REASON_MASK(_n) \ + GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3)) + +#define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08) +#define CDM2_OAM_QSEL_MASK GENMASK(31, 27) +#define CDM2_VIP_QSEL_MASK GENMASK(24, 20) + +#define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2)) +#define CDM2_CRSN_QSEL_REASON_MASK(_n) \ + GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3)) + +#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n) +#define GDM_DROP_CRC_ERR BIT(23) +#define GDM_IP4_CKSUM BIT(22) +#define GDM_TCP_CKSUM BIT(21) +#define GDM_UDP_CKSUM BIT(20) +#define GDM_UCFQ_MASK GENMASK(15, 12) +#define GDM_BCFQ_MASK GENMASK(11, 8) +#define GDM_MCFQ_MASK GENMASK(7, 4) +#define GDM_OCFQ_MASK GENMASK(3, 0) + +#define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10) +#define GDM_INGRESS_FC_EN_MASK BIT(1) +#define GDM_STAG_EN_MASK BIT(0) + +#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14) +#define GDM_SHORT_LEN_MASK GENMASK(13, 0) +#define GDM_LONG_LEN_MASK GENMASK(29, 16) + +#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40) +#define FE_CPORT_PAD BIT(26) +#define FE_CPORT_PORT_XFC_MASK BIT(25) +#define FE_CPORT_QUEUE_XFC_MASK BIT(24) + 
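The GDM_BASE() selector and the CDM1/CDM2 CRSN_QSEL field macros above fold several per-port and per-drop-reason registers into one parameterized layout. The small userspace sketch below only re-evaluates those macros (GENMASK() is a local stand-in for the kernel helper, and indexing the QSEL register by reason / 4 is an inference from the % 4 in the field mask) to show that each 32-bit QSEL register carries four 5-bit queue-selection fields:

#include <stdio.h>

#define GENMASK(h, l)   (((~0ULL) >> (63 - (h))) & (~0ULL << (l)))

#define GDM1_BASE       0x0500
#define GDM2_BASE       0x1500
#define GDM3_BASE       0x1100
#define GDM4_BASE       0x2500
#define GDM_BASE(_n)                            \
        ((_n) == 4 ? GDM4_BASE :                \
         (_n) == 3 ? GDM3_BASE :                \
         (_n) == 2 ? GDM2_BASE : GDM1_BASE)

#define CDM1_BASE       0x0400
#define REG_CDM1_CRSN_QSEL(_n)          (CDM1_BASE + 0x10 + ((_n) << 2))
#define CDM1_CRSN_QSEL_REASON_MASK(_n)  \
        GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))

int main(void)
{
        int n;

        for (n = 1; n <= 4; n++)        /* per-port GDM register block */
                printf("GDM%d base 0x%04x\n", n, GDM_BASE(n));

        for (n = 0; n < 4; n++)         /* four 5-bit fields per QSEL register */
                printf("reason %d: reg 0x%04x mask 0x%08llx\n", n,
                       REG_CDM1_CRSN_QSEL(n / 4),
                       CDM1_CRSN_QSEL_REASON_MASK(n));

        return 0;
}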
+#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0) +#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1) +#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0) + +#define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4) +#define FE_STRICT_RFC2819_MODE_MASK BIT(31) +#define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17) +#define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16) +#define FE_TX_MIB_ID_MASK GENMASK(15, 8) +#define FE_RX_MIB_ID_MASK GENMASK(7, 0) + +#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104) +#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c) +#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110) +#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114) +#define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118) +#define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c) +#define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120) +#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124) +#define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128) +#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c) +#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130) +#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134) +#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138) +#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c) +#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140) + +#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148) +#define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c) +#define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150) +#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154) +#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) (GDM_BASE(_n) + 0x158) +#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c) +#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160) +#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x164) +#define REG_FE_GDM_RX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x168) +#define REG_FE_GDM_RX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x16c) +#define REG_FE_GDM_RX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x170) +#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n) (GDM_BASE(_n) + 0x174) +#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n) (GDM_BASE(_n) + 0x178) +#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n) (GDM_BASE(_n) + 0x17c) +#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x180) +#define REG_FE_GDM_RX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x184) +#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x188) +#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x18c) +#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x190) +#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x194) +#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198) +#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c) + +#define REG_PPE1_TB_HASH_CFG (PPE1_BASE + 0x250) +#define PPE1_SRAM_TABLE_EN_MASK BIT(0) +#define PPE1_SRAM_HASH1_EN_MASK BIT(8) +#define PPE1_DRAM_TABLE_EN_MASK BIT(16) +#define PPE1_DRAM_HASH1_EN_MASK BIT(24) + +#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280) +#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284) +#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288) +#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c) + +#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290) +#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294) +#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298) +#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c) +#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) 
(GDM_BASE(_n) + 0x2b8) +#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc) +#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0) +#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4) +#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8) +#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc) +#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8) +#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec) +#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0) +#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4) +#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8) +#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc) + +#define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20) +#define MBI_RX_AGE_SEL_MASK GENMASK(18, 17) +#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17) + +#define REG_GDM3_FWD_CFG GDM3_BASE +#define GDM3_PAD_EN_MASK BIT(28) + +#define REG_GDM4_FWD_CFG (GDM4_BASE + 0x100) +#define GDM4_PAD_EN_MASK BIT(28) +#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8) + +#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x33c) +#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16) +#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12) +#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8) + +#define REG_IP_FRAG_FP 0x2010 +#define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21) +#define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16) +#define IP_FRAGMENT_PORT_MASK GENMASK(8, 5) +#define IP_FRAGMENT_NBQ_MASK GENMASK(4, 0) + +#define REG_MC_VLAN_EN 0x2100 +#define MC_VLAN_EN_MASK BIT(0) + +#define REG_MC_VLAN_CFG 0x2104 +#define MC_VLAN_CFG_CMD_DONE_MASK BIT(31) +#define MC_VLAN_CFG_TABLE_ID_MASK GENMASK(21, 16) +#define MC_VLAN_CFG_PORT_ID_MASK GENMASK(11, 8) +#define MC_VLAN_CFG_TABLE_SEL_MASK BIT(4) +#define MC_VLAN_CFG_RW_MASK BIT(0) + +#define REG_MC_VLAN_DATA 0x2108 + +#define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4 + +/* QDMA */ +#define REG_QDMA_GLOBAL_CFG 0x0004 +#define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31) +#define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29) +#define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28) +#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27) +#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26) +#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25) +#define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24) +#define GLOBAL_CFG_RESET_MASK BIT(23) +#define GLOBAL_CFG_RESET_DONE_MASK BIT(22) +#define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21) +#define GLOBAL_CFG_IRQ1_EN_MASK BIT(20) +#define GLOBAL_CFG_IRQ0_EN_MASK BIT(19) +#define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18) +#define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17) +#define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16) +#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8) +#define GLOBAL_CFG_CHECK_DONE_MASK BIT(7) +#define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6) +#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4) +#define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3) +#define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2) +#define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1) +#define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0) + +#define REG_FWD_DSCP_BASE 0x0010 +#define REG_FWD_BUF_BASE 0x0014 + +#define REG_HW_FWD_DSCP_CFG 0x0018 +#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28) +#define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16) +#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0) + +#define REG_INT_STATUS(_n) \ + (((_n) == 4) ? 0x0730 : \ + ((_n) == 3) ? 0x0724 : \ + ((_n) == 2) ? 0x0720 : \ + ((_n) == 1) ? 0x0024 : 0x0020) + +#define REG_INT_ENABLE(_n) \ + (((_n) == 4) ? 0x0750 : \ + ((_n) == 3) ? 0x0744 : \ + ((_n) == 2) ? 0x0740 : \ + ((_n) == 1) ? 
0x002c : 0x0028) + +/* QDMA_CSR_INT_ENABLE1 */ +#define RX15_COHERENT_INT_MASK BIT(31) +#define RX14_COHERENT_INT_MASK BIT(30) +#define RX13_COHERENT_INT_MASK BIT(29) +#define RX12_COHERENT_INT_MASK BIT(28) +#define RX11_COHERENT_INT_MASK BIT(27) +#define RX10_COHERENT_INT_MASK BIT(26) +#define RX9_COHERENT_INT_MASK BIT(25) +#define RX8_COHERENT_INT_MASK BIT(24) +#define RX7_COHERENT_INT_MASK BIT(23) +#define RX6_COHERENT_INT_MASK BIT(22) +#define RX5_COHERENT_INT_MASK BIT(21) +#define RX4_COHERENT_INT_MASK BIT(20) +#define RX3_COHERENT_INT_MASK BIT(19) +#define RX2_COHERENT_INT_MASK BIT(18) +#define RX1_COHERENT_INT_MASK BIT(17) +#define RX0_COHERENT_INT_MASK BIT(16) +#define TX7_COHERENT_INT_MASK BIT(15) +#define TX6_COHERENT_INT_MASK BIT(14) +#define TX5_COHERENT_INT_MASK BIT(13) +#define TX4_COHERENT_INT_MASK BIT(12) +#define TX3_COHERENT_INT_MASK BIT(11) +#define TX2_COHERENT_INT_MASK BIT(10) +#define TX1_COHERENT_INT_MASK BIT(9) +#define TX0_COHERENT_INT_MASK BIT(8) +#define CNT_OVER_FLOW_INT_MASK BIT(7) +#define IRQ1_FULL_INT_MASK BIT(5) +#define IRQ1_INT_MASK BIT(4) +#define HWFWD_DSCP_LOW_INT_MASK BIT(3) +#define HWFWD_DSCP_EMPTY_INT_MASK BIT(2) +#define IRQ0_FULL_INT_MASK BIT(1) +#define IRQ0_INT_MASK BIT(0) + +#define TX_DONE_INT_MASK(_n) \ + ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \ + : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK) + +#define INT_TX_MASK \ + (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \ + IRQ0_INT_MASK | IRQ0_FULL_INT_MASK) + +#define INT_IDX0_MASK \ + (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \ + TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \ + TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \ + TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \ + RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \ + RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \ + RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \ + RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \ + RX15_COHERENT_INT_MASK | INT_TX_MASK) + +/* QDMA_CSR_INT_ENABLE2 */ +#define RX15_NO_CPU_DSCP_INT_MASK BIT(31) +#define RX14_NO_CPU_DSCP_INT_MASK BIT(30) +#define RX13_NO_CPU_DSCP_INT_MASK BIT(29) +#define RX12_NO_CPU_DSCP_INT_MASK BIT(28) +#define RX11_NO_CPU_DSCP_INT_MASK BIT(27) +#define RX10_NO_CPU_DSCP_INT_MASK BIT(26) +#define RX9_NO_CPU_DSCP_INT_MASK BIT(25) +#define RX8_NO_CPU_DSCP_INT_MASK BIT(24) +#define RX7_NO_CPU_DSCP_INT_MASK BIT(23) +#define RX6_NO_CPU_DSCP_INT_MASK BIT(22) +#define RX5_NO_CPU_DSCP_INT_MASK BIT(21) +#define RX4_NO_CPU_DSCP_INT_MASK BIT(20) +#define RX3_NO_CPU_DSCP_INT_MASK BIT(19) +#define RX2_NO_CPU_DSCP_INT_MASK BIT(18) +#define RX1_NO_CPU_DSCP_INT_MASK BIT(17) +#define RX0_NO_CPU_DSCP_INT_MASK BIT(16) +#define RX15_DONE_INT_MASK BIT(15) +#define RX14_DONE_INT_MASK BIT(14) +#define RX13_DONE_INT_MASK BIT(13) +#define RX12_DONE_INT_MASK BIT(12) +#define RX11_DONE_INT_MASK BIT(11) +#define RX10_DONE_INT_MASK BIT(10) +#define RX9_DONE_INT_MASK BIT(9) +#define RX8_DONE_INT_MASK BIT(8) +#define RX7_DONE_INT_MASK BIT(7) +#define RX6_DONE_INT_MASK BIT(6) +#define RX5_DONE_INT_MASK BIT(5) +#define RX4_DONE_INT_MASK BIT(4) +#define RX3_DONE_INT_MASK BIT(3) +#define RX2_DONE_INT_MASK BIT(2) +#define RX1_DONE_INT_MASK BIT(1) +#define RX0_DONE_INT_MASK BIT(0) + +#define RX_DONE_INT_MASK \ + (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \ + RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \ + RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \ + RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \ + RX15_DONE_INT_MASK) +#define INT_IDX1_MASK \ + (RX_DONE_INT_MASK | \ + RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK | \ + 
RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \ + RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \ + RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \ + RX15_NO_CPU_DSCP_INT_MASK) + +/* QDMA_CSR_INT_ENABLE5 */ +#define TX31_COHERENT_INT_MASK BIT(31) +#define TX30_COHERENT_INT_MASK BIT(30) +#define TX29_COHERENT_INT_MASK BIT(29) +#define TX28_COHERENT_INT_MASK BIT(28) +#define TX27_COHERENT_INT_MASK BIT(27) +#define TX26_COHERENT_INT_MASK BIT(26) +#define TX25_COHERENT_INT_MASK BIT(25) +#define TX24_COHERENT_INT_MASK BIT(24) +#define TX23_COHERENT_INT_MASK BIT(23) +#define TX22_COHERENT_INT_MASK BIT(22) +#define TX21_COHERENT_INT_MASK BIT(21) +#define TX20_COHERENT_INT_MASK BIT(20) +#define TX19_COHERENT_INT_MASK BIT(19) +#define TX18_COHERENT_INT_MASK BIT(18) +#define TX17_COHERENT_INT_MASK BIT(17) +#define TX16_COHERENT_INT_MASK BIT(16) +#define TX15_COHERENT_INT_MASK BIT(15) +#define TX14_COHERENT_INT_MASK BIT(14) +#define TX13_COHERENT_INT_MASK BIT(13) +#define TX12_COHERENT_INT_MASK BIT(12) +#define TX11_COHERENT_INT_MASK BIT(11) +#define TX10_COHERENT_INT_MASK BIT(10) +#define TX9_COHERENT_INT_MASK BIT(9) +#define TX8_COHERENT_INT_MASK BIT(8) + +#define INT_IDX4_MASK \ + (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \ + TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \ + TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \ + TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \ + TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \ + TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \ + TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \ + TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \ + TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \ + TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \ + TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \ + TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK) + +#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050) + +#define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054) +#define TX_IRQ_THR_MASK GENMASK(27, 16) +#define TX_IRQ_DEPTH_MASK GENMASK(11, 0) + +#define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058) +#define IRQ_CLEAR_LEN_MASK GENMASK(7, 0) + +#define REG_IRQ_STATUS(_n) ((_n) ? 0x0068 : 0x005c) +#define IRQ_ENTRY_LEN_MASK GENMASK(27, 16) +#define IRQ_HEAD_IDX_MASK GENMASK(11, 0) + +#define REG_TX_RING_BASE(_n) \ + (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5)) + +#define REG_TX_RING_BLOCKING(_n) \ + (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5)) + +#define TX_RING_IRQ_BLOCKING_MAP_MASK BIT(6) +#define TX_RING_IRQ_BLOCKING_CFG_MASK BIT(4) +#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK BIT(2) +#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK BIT(1) +#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK BIT(0) + +#define REG_TX_CPU_IDX(_n) \ + (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5)) + +#define TX_RING_CPU_IDX_MASK GENMASK(15, 0) + +#define REG_TX_DMA_IDX(_n) \ + (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5)) + +#define TX_RING_DMA_IDX_MASK GENMASK(15, 0) + +#define IRQ_RING_IDX_MASK GENMASK(20, 16) +#define IRQ_DESC_IDX_MASK GENMASK(15, 0) + +#define REG_RX_RING_BASE(_n) \ + (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5)) + +#define REG_RX_RING_SIZE(_n) \ + (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5)) + +#define RX_RING_THR_MASK GENMASK(31, 16) +#define RX_RING_SIZE_MASK GENMASK(15, 0) + +#define REG_RX_CPU_IDX(_n) \ + (((_n) < 16) ? 
0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5)) + +#define RX_RING_CPU_IDX_MASK GENMASK(15, 0) + +#define REG_RX_DMA_IDX(_n) \ + (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5)) + +#define REG_RX_DELAY_INT_IDX(_n) \ + (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5)) + +#define RX_DELAY_INT_MASK GENMASK(15, 0) + +#define RX_RING_DMA_IDX_MASK GENMASK(15, 0) + +#define REG_INGRESS_TRTCM_CFG 0x0070 +#define INGRESS_TRTCM_EN_MASK BIT(31) +#define INGRESS_TRTCM_MODE_MASK BIT(30) +#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16) +#define INGRESS_FAST_TICK_MASK GENMASK(15, 0) + +#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0) +#define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + (_m) << 2) + +#define REG_LMGR_INIT_CFG 0x1000 +#define LMGR_INIT_START BIT(31) +#define LMGR_SRAM_MODE_MASK BIT(30) +#define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20) +#define HW_FWD_DESC_NUM_MASK GENMASK(16, 0) + +#define REG_FWD_DSCP_LOW_THR 0x1004 +#define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0) + +#define REG_EGRESS_RATE_METER_CFG 0x100c +#define EGRESS_RATE_METER_EN_MASK BIT(29) +#define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17) +#define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12) +#define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0) + +#define REG_EGRESS_TRTCM_CFG 0x1010 +#define EGRESS_TRTCM_EN_MASK BIT(31) +#define EGRESS_TRTCM_MODE_MASK BIT(30) +#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16) +#define EGRESS_FAST_TICK_MASK GENMASK(15, 0) + +#define REG_TXWRR_MODE_CFG 0x1020 +#define TWRR_WEIGHT_SCALE_MASK BIT(31) +#define TWRR_WEIGHT_BASE_MASK BIT(3) + +#define REG_PSE_BUF_USAGE_CFG 0x1028 +#define PSE_BUF_ESTIMATE_EN_MASK BIT(29) + +#define REG_GLB_TRTCM_CFG 0x1080 +#define GLB_TRTCM_EN_MASK BIT(31) +#define GLB_TRTCM_MODE_MASK BIT(30) +#define GLB_SLOW_TICK_RATIO_MASK GENMASK(29, 16) +#define GLB_FAST_TICK_MASK GENMASK(15, 0) + +#define REG_TXQ_CNGST_CFG 0x10a0 +#define TXQ_CNGST_DROP_EN BIT(31) +#define TXQ_CNGST_DEI_DROP_EN BIT(30) + +#define REG_SLA_TRTCM_CFG 0x1150 +#define SLA_TRTCM_EN_MASK BIT(31) +#define SLA_TRTCM_MODE_MASK BIT(30) +#define SLA_SLOW_TICK_RATIO_MASK GENMASK(29, 16) +#define SLA_FAST_TICK_MASK GENMASK(15, 0) + +/* CTRL */ +#define QDMA_DESC_DONE_MASK BIT(31) +#define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */ +#define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */ +#define QDMA_DESC_DEI_MASK BIT(25) +#define QDMA_DESC_NO_DROP_MASK BIT(24) +#define QDMA_DESC_LEN_MASK GENMASK(15, 0) +/* DATA */ +#define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0) +/* TX MSG0 */ +#define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30) +#define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14) +#define QDMA_ETH_TXMSG_ICO_MASK BIT(13) +#define QDMA_ETH_TXMSG_UCO_MASK BIT(12) +#define QDMA_ETH_TXMSG_TCO_MASK BIT(11) +#define QDMA_ETH_TXMSG_TSO_MASK BIT(10) +#define QDMA_ETH_TXMSG_FAST_MASK BIT(9) +#define QDMA_ETH_TXMSG_OAM_MASK BIT(8) +#define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3) +#define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0) +/* TX MSG1 */ +#define QDMA_ETH_TXMSG_NO_DROP BIT(31) +#define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */ +#define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20) +#define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15) +#define QDMA_ETH_TXMSG_HWF_MASK BIT(14) +#define QDMA_ETH_TXMSG_HOP_MASK BIT(13) +#define QDMA_ETH_TXMSG_PTP_MASK BIT(12) +#define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */ +#define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */ + +/* RX MSG1 */ 
+#define QDMA_ETH_RXMSG_DEI_MASK BIT(31) +#define QDMA_ETH_RXMSG_IP6_MASK BIT(30) +#define QDMA_ETH_RXMSG_IP4_MASK BIT(29) +#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28) +#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27) +#define QDMA_ETH_RXMSG_L4F_MASK BIT(26) +#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21) +#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16) +#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0) + +struct airoha_qdma_desc { + __le32 rsv; + __le32 ctrl; + __le32 addr; + __le32 data; + __le32 msg0; + __le32 msg1; + __le32 msg2; + __le32 msg3; +}; + +/* CTRL0 */ +#define QDMA_FWD_DESC_CTX_MASK BIT(31) +#define QDMA_FWD_DESC_RING_MASK GENMASK(30, 28) +#define QDMA_FWD_DESC_IDX_MASK GENMASK(27, 16) +#define QDMA_FWD_DESC_LEN_MASK GENMASK(15, 0) +/* CTRL1 */ +#define QDMA_FWD_DESC_FIRST_IDX_MASK GENMASK(15, 0) +/* CTRL2 */ +#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK GENMASK(2, 0) + +struct airoha_qdma_fwd_desc { + __le32 addr; + __le32 ctrl0; + __le32 ctrl1; + __le32 ctrl2; + __le32 msg0; + __le32 msg1; + __le32 rsv0; + __le32 rsv1; +}; + +enum { + QDMA_INT_REG_IDX0, + QDMA_INT_REG_IDX1, + QDMA_INT_REG_IDX2, + QDMA_INT_REG_IDX3, + QDMA_INT_REG_IDX4, + QDMA_INT_REG_MAX +}; + +enum { + XSI_PCIE0_PORT, + XSI_PCIE1_PORT, + XSI_USB_PORT, + XSI_AE_PORT, + XSI_ETH_PORT, +}; + +enum { + XSI_PCIE0_VIP_PORT_MASK = BIT(22), + XSI_PCIE1_VIP_PORT_MASK = BIT(23), + XSI_USB_VIP_PORT_MASK = BIT(25), + XSI_ETH_VIP_PORT_MASK = BIT(24), +}; + +enum { + DEV_STATE_INITIALIZED, +}; + +enum { + CDM_CRSN_QSEL_Q1 = 1, + CDM_CRSN_QSEL_Q5 = 5, + CDM_CRSN_QSEL_Q6 = 6, + CDM_CRSN_QSEL_Q15 = 15, +}; + +enum { + CRSN_08 = 0x8, + CRSN_21 = 0x15, /* KA */ + CRSN_22 = 0x16, /* hit bind and force route to CPU */ + CRSN_24 = 0x18, + CRSN_25 = 0x19, +}; + +enum { + FE_PSE_PORT_CDM1, + FE_PSE_PORT_GDM1, + FE_PSE_PORT_GDM2, + FE_PSE_PORT_GDM3, + FE_PSE_PORT_PPE1, + FE_PSE_PORT_CDM2, + FE_PSE_PORT_CDM3, + FE_PSE_PORT_CDM4, + FE_PSE_PORT_PPE2, + FE_PSE_PORT_GDM4, + FE_PSE_PORT_CDM5, + FE_PSE_PORT_DROP = 0xf, +}; + +struct airoha_queue_entry { + union { + void *buf; + struct sk_buff *skb; + }; + dma_addr_t dma_addr; + u16 dma_len; +}; + +struct airoha_queue { + struct airoha_eth *eth; + + /* protect concurrent queue accesses */ + spinlock_t lock; + struct airoha_queue_entry *entry; + struct airoha_qdma_desc *desc; + u16 head; + u16 tail; + + int queued; + int ndesc; + int free_thr; + int buf_size; + + struct napi_struct napi; + struct page_pool *page_pool; +}; + +struct airoha_tx_irq_queue { + struct airoha_eth *eth; + + struct napi_struct napi; + u32 *q; + + int size; + int queued; + u16 head; +}; + +struct airoha_hw_stats { + /* protect concurrent hw_stats accesses */ + spinlock_t lock; + struct u64_stats_sync syncp; + + /* get_stats64 */ + u64 rx_ok_pkts; + u64 tx_ok_pkts; + u64 rx_ok_bytes; + u64 tx_ok_bytes; + u64 rx_multicast; + u64 rx_errors; + u64 rx_drops; + u64 tx_drops; + u64 rx_crc_error; + u64 rx_over_errors; + /* ethtool stats */ + u64 tx_broadcast; + u64 tx_multicast; + u64 tx_len[7]; + u64 rx_broadcast; + u64 rx_fragment; + u64 rx_jabber; + u64 rx_len[7]; +}; + +struct airoha_gdm_port { + struct net_device *dev; + struct airoha_eth *eth; + int id; + + struct airoha_hw_stats stats; +}; + +struct airoha_eth { + struct device *dev; + + unsigned long state; + + void __iomem *qdma_regs; + void __iomem *fe_regs; + + /* protect concurrent irqmask accesses */ + spinlock_t irq_lock; + u32 irqmask[QDMA_INT_REG_MAX]; + int irq; + + struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS]; + struct reset_control_bulk_data 
xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS]; + + struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS]; + + struct net_device *napi_dev; + struct airoha_queue q_tx[AIROHA_NUM_TX_RING]; + struct airoha_queue q_rx[AIROHA_NUM_RX_RING]; + + struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ]; + + /* descriptor and packet buffers for qdma hw forward */ + struct { + void *desc; + void *q; + } hfwd; +}; + +static u32 airoha_rr(void __iomem *base, u32 offset) +{ + return readl(base + offset); +} + +static void airoha_wr(void __iomem *base, u32 offset, u32 val) +{ + writel(val, base + offset); +} + +static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val) +{ + val |= (airoha_rr(base, offset) & ~mask); + airoha_wr(base, offset, val); + + return val; +} + +#define airoha_fe_rr(eth, offset) \ + airoha_rr((eth)->fe_regs, (offset)) +#define airoha_fe_wr(eth, offset, val) \ + airoha_wr((eth)->fe_regs, (offset), (val)) +#define airoha_fe_rmw(eth, offset, mask, val) \ + airoha_rmw((eth)->fe_regs, (offset), (mask), (val)) +#define airoha_fe_set(eth, offset, val) \ + airoha_rmw((eth)->fe_regs, (offset), 0, (val)) +#define airoha_fe_clear(eth, offset, val) \ + airoha_rmw((eth)->fe_regs, (offset), (val), 0) + +#define airoha_qdma_rr(eth, offset) \ + airoha_rr((eth)->qdma_regs, (offset)) +#define airoha_qdma_wr(eth, offset, val) \ + airoha_wr((eth)->qdma_regs, (offset), (val)) +#define airoha_qdma_rmw(eth, offset, mask, val) \ + airoha_rmw((eth)->qdma_regs, (offset), (mask), (val)) +#define airoha_qdma_set(eth, offset, val) \ + airoha_rmw((eth)->qdma_regs, (offset), 0, (val)) +#define airoha_qdma_clear(eth, offset, val) \ + airoha_rmw((eth)->qdma_regs, (offset), (val), 0) + +static void airoha_qdma_set_irqmask(struct airoha_eth *eth, int index, + u32 clear, u32 set) +{ + unsigned long flags; + + if (WARN_ON_ONCE(index >= ARRAY_SIZE(eth->irqmask))) + return; + + spin_lock_irqsave(&eth->irq_lock, flags); + + eth->irqmask[index] &= ~clear; + eth->irqmask[index] |= set; + airoha_qdma_wr(eth, REG_INT_ENABLE(index), eth->irqmask[index]); + /* Read irq_enable register in order to guarantee the update above + * completes in the spinlock critical section. + */ + airoha_qdma_rr(eth, REG_INT_ENABLE(index)); + + spin_unlock_irqrestore(&eth->irq_lock, flags); +} + +static void airoha_qdma_irq_enable(struct airoha_eth *eth, int index, + u32 mask) +{ + airoha_qdma_set_irqmask(eth, index, 0, mask); +} + +static void airoha_qdma_irq_disable(struct airoha_eth *eth, int index, + u32 mask) +{ + airoha_qdma_set_irqmask(eth, index, mask, 0); +} + +static void airoha_set_macaddr(struct airoha_eth *eth, const u8 *addr) +{ + u32 val; + + val = (addr[0] << 16) | (addr[1] << 8) | addr[2]; + airoha_fe_wr(eth, REG_FE_LAN_MAC_H, val); + + val = (addr[3] << 16) | (addr[4] << 8) | addr[5]; + airoha_fe_wr(eth, REG_FE_LAN_MAC_LMIN, val); + airoha_fe_wr(eth, REG_FE_LAN_MAC_LMAX, val); +} + +static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr, + u32 val) +{ + airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK, + FIELD_PREP(GDM_OCFQ_MASK, val)); + airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK, + FIELD_PREP(GDM_MCFQ_MASK, val)); + airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK, + FIELD_PREP(GDM_BCFQ_MASK, val)); + airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK, + FIELD_PREP(GDM_UCFQ_MASK, val)); +} + +static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable) +{ + u32 val = enable ?
FE_PSE_PORT_PPE1 : FE_PSE_PORT_DROP; + u32 vip_port, cfg_addr; + + switch (port) { + case XSI_PCIE0_PORT: + vip_port = XSI_PCIE0_VIP_PORT_MASK; + cfg_addr = REG_GDM_FWD_CFG(3); + break; + case XSI_PCIE1_PORT: + vip_port = XSI_PCIE1_VIP_PORT_MASK; + cfg_addr = REG_GDM_FWD_CFG(3); + break; + case XSI_USB_PORT: + vip_port = XSI_USB_VIP_PORT_MASK; + cfg_addr = REG_GDM_FWD_CFG(4); + break; + case XSI_ETH_PORT: + vip_port = XSI_ETH_VIP_PORT_MASK; + cfg_addr = REG_GDM_FWD_CFG(4); + break; + default: + return -EINVAL; + } + + if (enable) { + airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port); + airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port); + } else { + airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, vip_port); + airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port); + } + + airoha_set_gdm_port_fwd_cfg(eth, cfg_addr, val); + + return 0; +} + +static int airoha_set_gdm_ports(struct airoha_eth *eth, bool enable) +{ + const int port_list[] = { + XSI_PCIE0_PORT, + XSI_PCIE1_PORT, + XSI_USB_PORT, + XSI_ETH_PORT + }; + int i, err; + + for (i = 0; i < ARRAY_SIZE(port_list); i++) { + err = airoha_set_gdm_port(eth, port_list[i], enable); + if (err) + goto error; + } + + return 0; + +error: + for (i--; i >= 0; i--) + airoha_set_gdm_port(eth, port_list[i], false); + + return err; +} + +static void airoha_fe_maccr_init(struct airoha_eth *eth) +{ + int p; + + for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) { + airoha_fe_set(eth, REG_GDM_FWD_CFG(p), + GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM | + GDM_DROP_CRC_ERR); + airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(p), + FE_PSE_PORT_CDM1); + airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p), + GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK, + FIELD_PREP(GDM_SHORT_LEN_MASK, 60) | + FIELD_PREP(GDM_LONG_LEN_MASK, 4004)); + } + + airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK, + FIELD_PREP(CDM1_VLAN_MASK, 0x8100)); + + airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD); +} + +static void airoha_fe_vip_setup(struct airoha_eth *eth) +{ + airoha_fe_wr(eth, REG_FE_VIP_PATN(3), ETH_P_PPP_DISC); + airoha_fe_wr(eth, REG_FE_VIP_EN(3), PATN_FCPU_EN_MASK | PATN_EN_MASK); + + airoha_fe_wr(eth, REG_FE_VIP_PATN(4), PPP_LCP); + airoha_fe_wr(eth, REG_FE_VIP_EN(4), + PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | + PATN_EN_MASK); + + airoha_fe_wr(eth, REG_FE_VIP_PATN(6), PPP_IPCP); + airoha_fe_wr(eth, REG_FE_VIP_EN(6), + PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | + PATN_EN_MASK); + + airoha_fe_wr(eth, REG_FE_VIP_PATN(7), PPP_CHAP); + airoha_fe_wr(eth, REG_FE_VIP_EN(7), + PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | + PATN_EN_MASK); + + /* BOOTP (0x43) */ + airoha_fe_wr(eth, REG_FE_VIP_PATN(8), 0x43); + airoha_fe_wr(eth, REG_FE_VIP_EN(8), + PATN_FCPU_EN_MASK | PATN_SP_EN_MASK | + FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK); + + /* BOOTP (0x44) */ + airoha_fe_wr(eth, REG_FE_VIP_PATN(9), 0x44); + airoha_fe_wr(eth, REG_FE_VIP_EN(9), + PATN_FCPU_EN_MASK | PATN_SP_EN_MASK | + FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK); + + /* ISAKMP */ + airoha_fe_wr(eth, REG_FE_VIP_PATN(10), 0x1f401f4); + airoha_fe_wr(eth, REG_FE_VIP_EN(10), + PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK | + FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK); + + airoha_fe_wr(eth, REG_FE_VIP_PATN(11), PPP_IPV6CP); + airoha_fe_wr(eth, REG_FE_VIP_EN(11), + PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | + PATN_EN_MASK); + + /* DHCPv6 */ + airoha_fe_wr(eth, REG_FE_VIP_PATN(12), 0x2220223); + airoha_fe_wr(eth, REG_FE_VIP_EN(12), + PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK | +
FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK); + + airoha_fe_wr(eth, REG_FE_VIP_PATN(19), PPP_PAP); + airoha_fe_wr(eth, REG_FE_VIP_EN(19), + PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | + PATN_EN_MASK); + + /* ETH->ETH_P_1905 (0x893a) */ + airoha_fe_wr(eth, REG_FE_VIP_PATN(20), 0x893a); + airoha_fe_wr(eth, REG_FE_VIP_EN(20), + PATN_FCPU_EN_MASK | PATN_EN_MASK); + + airoha_fe_wr(eth, REG_FE_VIP_PATN(21), ETH_P_LLDP); + airoha_fe_wr(eth, REG_FE_VIP_EN(21), + PATN_FCPU_EN_MASK | PATN_EN_MASK); +} + +static u32 airoha_fe_get_pse_queue_rsv_pages(struct airoha_eth *eth, + u32 port, u32 queue) +{ + u32 val; + + airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR, + PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK, + FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) | + FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue)); + val = airoha_fe_rr(eth, REG_FE_PSE_QUEUE_CFG_VAL); + + return FIELD_GET(PSE_CFG_OQ_RSV_MASK, val); +} + +static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth, + u32 port, u32 queue, u32 val) +{ + airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_VAL, PSE_CFG_OQ_RSV_MASK, + FIELD_PREP(PSE_CFG_OQ_RSV_MASK, val)); + airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR, + PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK | + PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK, + FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) | + FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue) | + PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK); +} + +static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth, + u32 port, u32 queue, u32 val) +{ + u32 orig_val, tmp, all_rsv, fq_limit; + + airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val); + + /* modify all rsv */ + orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue); + tmp = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET); + all_rsv = FIELD_GET(PSE_ALLRSV_MASK, tmp); + all_rsv += (val - orig_val); + airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK, + FIELD_PREP(PSE_ALLRSV_MASK, all_rsv)); + + /* modify hthd */ + tmp = airoha_fe_rr(eth, PSE_FQ_CFG); + fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp); + tmp = fq_limit - all_rsv - 0x20; + airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD, + PSE_SHARE_USED_HTHD_MASK, + FIELD_PREP(PSE_SHARE_USED_HTHD_MASK, tmp)); + + tmp = fq_limit - all_rsv - 0x100; + airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD, + PSE_SHARE_USED_MTHD_MASK, + FIELD_PREP(PSE_SHARE_USED_MTHD_MASK, tmp)); + tmp = (3 * tmp) >> 2; + airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, + PSE_SHARE_USED_LTHD_MASK, + FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp)); + + return 0; +} + +static void airoha_fe_pse_ports_init(struct airoha_eth *eth) +{ + const u32 pse_port_num_queues[] = { + [FE_PSE_PORT_CDM1] = 6, + [FE_PSE_PORT_GDM1] = 6, + [FE_PSE_PORT_GDM2] = 32, + [FE_PSE_PORT_GDM3] = 6, + [FE_PSE_PORT_PPE1] = 4, + [FE_PSE_PORT_CDM2] = 6, + [FE_PSE_PORT_CDM3] = 8, + [FE_PSE_PORT_CDM4] = 10, + [FE_PSE_PORT_PPE2] = 4, + [FE_PSE_PORT_GDM4] = 2, + [FE_PSE_PORT_CDM5] = 2, + }; + int q; + + /* hw misses PPE2 oq rsv */ + airoha_fe_set(eth, REG_FE_PSE_BUF_SET, + PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2]); + + /* CMD1 */ + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++) + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q, + PSE_QUEUE_RSV_PAGES); + /* GMD1 */ + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++) + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q, + PSE_QUEUE_RSV_PAGES); + /* GMD2 */ + for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++) + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0); + /* GMD3 */ + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++) + 
airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q, + PSE_QUEUE_RSV_PAGES); + /* PPE1 */ + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) { + if (q < pse_port_num_queues[FE_PSE_PORT_PPE1] / 2) + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, + PSE_QUEUE_RSV_PAGES); + else + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0); + } + /* CDM2 */ + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++) + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q, + PSE_QUEUE_RSV_PAGES); + /* CDM3 */ + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++) + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0); + /* CDM4 */ + for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++) + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q, + PSE_QUEUE_RSV_PAGES); + /* PPE2 */ + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) { + if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2) + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, + PSE_QUEUE_RSV_PAGES); + else + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 0); + } + /* GMD4 */ + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++) + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q, + PSE_QUEUE_RSV_PAGES); + /* CDM5 */ + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM5]; q++) + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q, + PSE_QUEUE_RSV_PAGES); +} + +static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth) +{ + int i; + + for (i = 0; i < AIROHA_FE_MC_MAX_VLAN_TABLE; i++) { + int err, j; + u32 val; + + airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0); + + val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) | + MC_VLAN_CFG_TABLE_SEL_MASK | MC_VLAN_CFG_RW_MASK; + airoha_fe_wr(eth, REG_MC_VLAN_CFG, val); + err = read_poll_timeout(airoha_fe_rr, val, + val & MC_VLAN_CFG_CMD_DONE_MASK, + USEC_PER_MSEC, 5 * USEC_PER_MSEC, + false, eth, REG_MC_VLAN_CFG); + if (err) + return err; + + for (j = 0; j < AIROHA_FE_MC_MAX_VLAN_PORT; j++) { + airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0); + + val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) | + FIELD_PREP(MC_VLAN_CFG_PORT_ID_MASK, j) | + MC_VLAN_CFG_RW_MASK; + airoha_fe_wr(eth, REG_MC_VLAN_CFG, val); + err = read_poll_timeout(airoha_fe_rr, val, + val & MC_VLAN_CFG_CMD_DONE_MASK, + USEC_PER_MSEC, + 5 * USEC_PER_MSEC, false, eth, + REG_MC_VLAN_CFG); + if (err) + return err; + } + } + + return 0; +} + +static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth) +{ + /* CDM1_CRSN_QSEL */ + airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_22 >> 2), + CDM1_CRSN_QSEL_REASON_MASK(CRSN_22), + FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_22), + CDM_CRSN_QSEL_Q1)); + airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_08 >> 2), + CDM1_CRSN_QSEL_REASON_MASK(CRSN_08), + FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_08), + CDM_CRSN_QSEL_Q1)); + airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_21 >> 2), + CDM1_CRSN_QSEL_REASON_MASK(CRSN_21), + FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_21), + CDM_CRSN_QSEL_Q1)); + airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_24 >> 2), + CDM1_CRSN_QSEL_REASON_MASK(CRSN_24), + FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_24), + CDM_CRSN_QSEL_Q6)); + airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_25 >> 2), + CDM1_CRSN_QSEL_REASON_MASK(CRSN_25), + FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_25), + CDM_CRSN_QSEL_Q1)); + /* CDM2_CRSN_QSEL */ + airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_08 >> 2), + CDM2_CRSN_QSEL_REASON_MASK(CRSN_08), + FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_08), + CDM_CRSN_QSEL_Q1)); + airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_21 >> 2), + CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
+ FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_21), + CDM_CRSN_QSEL_Q1)); + airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_22 >> 2), + CDM2_CRSN_QSEL_REASON_MASK(CRSN_22), + FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_22), + CDM_CRSN_QSEL_Q1)); + airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_24 >> 2), + CDM2_CRSN_QSEL_REASON_MASK(CRSN_24), + FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_24), + CDM_CRSN_QSEL_Q6)); + airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_25 >> 2), + CDM2_CRSN_QSEL_REASON_MASK(CRSN_25), + FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_25), + CDM_CRSN_QSEL_Q1)); +} + +static int airoha_fe_init(struct airoha_eth *eth) +{ + airoha_fe_maccr_init(eth); + + /* PSE IQ reserve */ + airoha_fe_rmw(eth, REG_PSE_IQ_REV1, PSE_IQ_RES1_P2_MASK, + FIELD_PREP(PSE_IQ_RES1_P2_MASK, 0x10)); + airoha_fe_rmw(eth, REG_PSE_IQ_REV2, + PSE_IQ_RES2_P5_MASK | PSE_IQ_RES2_P4_MASK, + FIELD_PREP(PSE_IQ_RES2_P5_MASK, 0x40) | + FIELD_PREP(PSE_IQ_RES2_P4_MASK, 0x34)); + + /* enable FE copy engine for MC/KA/DPI */ + airoha_fe_wr(eth, REG_FE_PCE_CFG, + PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK); + /* set vip queue selection to ring 1 */ + airoha_fe_rmw(eth, REG_CDM1_FWD_CFG, CDM1_VIP_QSEL_MASK, + FIELD_PREP(CDM1_VIP_QSEL_MASK, 0x4)); + airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_VIP_QSEL_MASK, + FIELD_PREP(CDM2_VIP_QSEL_MASK, 0x4)); + /* set GDM4 source interface offset to 8 */ + airoha_fe_rmw(eth, REG_GDM4_SRC_PORT_SET, + GDM4_SPORT_OFF2_MASK | + GDM4_SPORT_OFF1_MASK | + GDM4_SPORT_OFF0_MASK, + FIELD_PREP(GDM4_SPORT_OFF2_MASK, 8) | + FIELD_PREP(GDM4_SPORT_OFF1_MASK, 8) | + FIELD_PREP(GDM4_SPORT_OFF0_MASK, 8)); + + /* set PSE Page as 128B */ + airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG, + FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK, + FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) | + FE_DMA_GLO_PG_SZ_MASK); + airoha_fe_wr(eth, REG_FE_RST_GLO_CFG, + FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK | + FE_RST_GDM4_MBI_ARB_MASK); + usleep_range(1000, 2000); + + /* connect RxRing1 and RxRing15 to PSE Port0 OQ-1 + * connect other rings to PSE Port0 OQ-0 + */ + airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP0, BIT(4)); + airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP1, BIT(28)); + airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP2, BIT(4)); + airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP3, BIT(28)); + + airoha_fe_vip_setup(eth); + airoha_fe_pse_ports_init(eth); + + airoha_fe_set(eth, REG_GDM_MISC_CFG, + GDM2_RDM_ACK_WAIT_PREF_MASK | + GDM2_CHN_VLD_MODE_MASK); + airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK, 15); + + /* init fragment and assemble Force Port */ + /* NPU Core-3, NPU Bridge Channel-3 */ + airoha_fe_rmw(eth, REG_IP_FRAG_FP, + IP_FRAGMENT_PORT_MASK | IP_FRAGMENT_NBQ_MASK, + FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) | + FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3)); + /* QDMA LAN, RX Ring-22 */ + airoha_fe_rmw(eth, REG_IP_FRAG_FP, + IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK, + FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) | + FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22)); + + airoha_fe_set(eth, REG_GDM3_FWD_CFG, GDM3_PAD_EN_MASK); + airoha_fe_set(eth, REG_GDM4_FWD_CFG, GDM4_PAD_EN_MASK); + + airoha_fe_crsn_qsel_init(eth); + + airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK); + airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK); + + /* default aging mode for mbi unlock issue */ + airoha_fe_rmw(eth, REG_GDM2_CHN_RLS, + MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK, + FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) | + FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3)); + + /* disable IFC by default */ + airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK); + + /* enable 1:N vlan action, 
init vlan table */ + airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK); + + return airoha_fe_mc_vlan_clear(eth); +} + +static int airoha_qdma_fill_rx_queue(struct airoha_queue *q) +{ + enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool); + struct airoha_eth *eth = q->eth; + int qid = q - &eth->q_rx[0]; + int nframes = 0; + + while (q->queued < q->ndesc - 1) { + struct airoha_queue_entry *e = &q->entry[q->head]; + struct airoha_qdma_desc *desc = &q->desc[q->head]; + struct page *page; + int offset; + u32 val; + + page = page_pool_dev_alloc_frag(q->page_pool, &offset, + q->buf_size); + if (!page) + break; + + q->head = (q->head + 1) % q->ndesc; + q->queued++; + nframes++; + + e->buf = page_address(page) + offset; + e->dma_addr = page_pool_get_dma_addr(page) + offset; + e->dma_len = SKB_WITH_OVERHEAD(q->buf_size); + + dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len, + dir); + + val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len); + WRITE_ONCE(desc->ctrl, cpu_to_le32(val)); + WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr)); + val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head); + WRITE_ONCE(desc->data, cpu_to_le32(val)); + WRITE_ONCE(desc->msg0, 0); + WRITE_ONCE(desc->msg1, 0); + WRITE_ONCE(desc->msg2, 0); + WRITE_ONCE(desc->msg3, 0); + + airoha_qdma_rmw(eth, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK, + FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head)); + } + + return nframes; +} + +static int airoha_qdma_get_gdm_port(struct airoha_eth *eth, + struct airoha_qdma_desc *desc) +{ + u32 port, sport, msg1 = le32_to_cpu(desc->msg1); + + sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1); + switch (sport) { + case 0x10 ... 0x13: + port = 0; + break; + case 0x2 ... 0x4: + port = sport - 1; + break; + default: + return -EINVAL; + } + + return port >= ARRAY_SIZE(eth->ports) ?
-EINVAL : port; +} + +static int airoha_qdma_rx_process(struct airoha_queue *q, int budget) +{ + enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool); + struct airoha_eth *eth = q->eth; + int qid = q - &eth->q_rx[0]; + int done = 0; + + while (done < budget) { + struct airoha_queue_entry *e = &q->entry[q->tail]; + struct airoha_qdma_desc *desc = &q->desc[q->tail]; + dma_addr_t dma_addr = le32_to_cpu(desc->addr); + u32 desc_ctrl = le32_to_cpu(desc->ctrl); + struct sk_buff *skb; + int len, p; + + if (!(desc_ctrl & QDMA_DESC_DONE_MASK)) + break; + + if (!dma_addr) + break; + + len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl); + if (!len) + break; + + q->tail = (q->tail + 1) % q->ndesc; + q->queued--; + + dma_sync_single_for_cpu(eth->dev, dma_addr, + SKB_WITH_OVERHEAD(q->buf_size), dir); + + p = airoha_qdma_get_gdm_port(eth, desc); + if (p < 0 || !eth->ports[p]) { + page_pool_put_full_page(q->page_pool, + virt_to_head_page(e->buf), + true); + continue; + } + + skb = napi_build_skb(e->buf, q->buf_size); + if (!skb) { + page_pool_put_full_page(q->page_pool, + virt_to_head_page(e->buf), + true); + break; + } + + skb_reserve(skb, 2); + __skb_put(skb, len); + skb_mark_for_recycle(skb); + skb->dev = eth->ports[p]->dev; + skb->protocol = eth_type_trans(skb, skb->dev); + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_record_rx_queue(skb, qid); + napi_gro_receive(&q->napi, skb); + + done++; + } + airoha_qdma_fill_rx_queue(q); + + return done; +} + +static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget) +{ + struct airoha_queue *q = container_of(napi, struct airoha_queue, napi); + struct airoha_eth *eth = q->eth; + int cur, done = 0; + + do { + cur = airoha_qdma_rx_process(q, budget - done); + done += cur; + } while (cur && done < budget); + + if (done < budget && napi_complete(napi)) + airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1, + RX_DONE_INT_MASK); + + return done; +} + +static int airoha_qdma_init_rx_queue(struct airoha_eth *eth, + struct airoha_queue *q, int ndesc) +{ + const struct page_pool_params pp_params = { + .order = 0, + .pool_size = 256, + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .dma_dir = DMA_FROM_DEVICE, + .max_len = PAGE_SIZE, + .nid = NUMA_NO_NODE, + .dev = eth->dev, + .napi = &q->napi, + }; + int qid = q - &eth->q_rx[0], thr; + dma_addr_t dma_addr; + + q->buf_size = PAGE_SIZE / 2; + q->ndesc = ndesc; + q->eth = eth; + + q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry), + GFP_KERNEL); + if (!q->entry) + return -ENOMEM; + + q->page_pool = page_pool_create(&pp_params); + if (IS_ERR(q->page_pool)) { + int err = PTR_ERR(q->page_pool); + + q->page_pool = NULL; + return err; + } + + q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc), + &dma_addr, GFP_KERNEL); + if (!q->desc) + return -ENOMEM; + + netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll); + + airoha_qdma_wr(eth, REG_RX_RING_BASE(qid), dma_addr); + airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_SIZE_MASK, + FIELD_PREP(RX_RING_SIZE_MASK, ndesc)); + + thr = clamp(ndesc >> 3, 1, 32); + airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK, + FIELD_PREP(RX_RING_THR_MASK, thr)); + airoha_qdma_rmw(eth, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK, + FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head)); + + airoha_qdma_fill_rx_queue(q); + + return 0; +} + +static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q) +{ + enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool); + struct airoha_eth *eth = q->eth; + + while (q->queued) { + struct
airoha_queue_entry *e = &q->entry[q->tail]; + struct page *page = virt_to_head_page(e->buf); + + dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len, + dir); + page_pool_put_full_page(q->page_pool, page, false); + q->tail = (q->tail + 1) % q->ndesc; + q->queued--; + } +} + +static int airoha_qdma_init_rx(struct airoha_eth *eth) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) { + int err; + + if (!(RX_DONE_INT_MASK & BIT(i))) { + /* rx-queue not binded to irq */ + continue; + } + + err = airoha_qdma_init_rx_queue(eth, &eth->q_rx[i], + RX_DSCP_NUM(i)); + if (err) + return err; + } + + return 0; +} + +static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget) +{ + struct airoha_tx_irq_queue *irq_q; + struct airoha_eth *eth; + int id, done = 0; + + irq_q = container_of(napi, struct airoha_tx_irq_queue, napi); + eth = irq_q->eth; + id = irq_q - &eth->q_tx_irq[0]; + + while (irq_q->queued > 0 && done < budget) { + u32 qid, last, val = irq_q->q[irq_q->head]; + struct airoha_queue *q; + + if (val == 0xff) + break; + + irq_q->q[irq_q->head] = 0xff; /* mark as done */ + irq_q->head = (irq_q->head + 1) % irq_q->size; + irq_q->queued--; + done++; + + last = FIELD_GET(IRQ_DESC_IDX_MASK, val); + qid = FIELD_GET(IRQ_RING_IDX_MASK, val); + + if (qid >= ARRAY_SIZE(eth->q_tx)) + continue; + + q = &eth->q_tx[qid]; + if (!q->ndesc) + continue; + + spin_lock_bh(&q->lock); + + while (q->queued > 0) { + struct airoha_qdma_desc *desc = &q->desc[q->tail]; + struct airoha_queue_entry *e = &q->entry[q->tail]; + u32 desc_ctrl = le32_to_cpu(desc->ctrl); + struct sk_buff *skb = e->skb; + u16 index = q->tail; + + if (!(desc_ctrl & QDMA_DESC_DONE_MASK) && + !(desc_ctrl & QDMA_DESC_DROP_MASK)) + break; + + q->tail = (q->tail + 1) % q->ndesc; + q->queued--; + + dma_unmap_single(eth->dev, e->dma_addr, e->dma_len, + DMA_TO_DEVICE); + + WRITE_ONCE(desc->msg0, 0); + WRITE_ONCE(desc->msg1, 0); + + if (skb) { + struct netdev_queue *txq; + + txq = netdev_get_tx_queue(skb->dev, qid); + if (netif_tx_queue_stopped(txq) && + q->ndesc - q->queued >= q->free_thr) + netif_tx_wake_queue(txq); + + dev_kfree_skb_any(skb); + e->skb = NULL; + } + + if (index == last) + break; + } + + spin_unlock_bh(&q->lock); + } + + if (done) { + int i, len = done >> 7; + + for (i = 0; i < len; i++) + airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id), + IRQ_CLEAR_LEN_MASK, 0x80); + airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id), + IRQ_CLEAR_LEN_MASK, (done & 0x7f)); + } + + if (done < budget && napi_complete(napi)) + airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0, + TX_DONE_INT_MASK(id)); + + return done; +} + +static int airoha_qdma_init_tx_queue(struct airoha_eth *eth, + struct airoha_queue *q, int size) +{ + int i, qid = q - &eth->q_tx[0]; + dma_addr_t dma_addr; + + spin_lock_init(&q->lock); + q->ndesc = size; + q->eth = eth; + q->free_thr = 1 + MAX_SKB_FRAGS; + + q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry), + GFP_KERNEL); + if (!q->entry) + return -ENOMEM; + + q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc), + &dma_addr, GFP_KERNEL); + if (!q->desc) + return -ENOMEM; + + for (i = 0; i < q->ndesc; i++) { + u32 val; + + val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1); + WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val)); + } + + airoha_qdma_wr(eth, REG_TX_RING_BASE(qid), dma_addr); + airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK, + FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head)); + airoha_qdma_rmw(eth, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK, + FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head)); + + return 0; +} +
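+/* Each TX IRQ queue set up below is, as the code above suggests, a ring of + * u32 completion entries written back by the QDMA engine: every entry carries + * the TX ring index (IRQ_RING_IDX_MASK) and the index of the last completed + * descriptor (IRQ_DESC_IDX_MASK), and is consumed by + * airoha_qdma_tx_napi_poll() above. + */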
+static int airoha_qdma_tx_irq_init(struct airoha_eth *eth, + struct airoha_tx_irq_queue *irq_q, + int size) +{ + int id = irq_q - &eth->q_tx_irq[0]; + dma_addr_t dma_addr; + + netif_napi_add_tx(eth->napi_dev, &irq_q->napi, + airoha_qdma_tx_napi_poll); + irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32), + &dma_addr, GFP_KERNEL); + if (!irq_q->q) + return -ENOMEM; + + memset(irq_q->q, 0xff, size * sizeof(u32)); + irq_q->size = size; + irq_q->eth = eth; + + airoha_qdma_wr(eth, REG_TX_IRQ_BASE(id), dma_addr); + airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK, + FIELD_PREP(TX_IRQ_DEPTH_MASK, size)); + airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK, + FIELD_PREP(TX_IRQ_THR_MASK, 1)); + + return 0; +} + +static int airoha_qdma_init_tx(struct airoha_eth *eth) +{ + int i, err; + + for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) { + err = airoha_qdma_tx_irq_init(eth, &eth->q_tx_irq[i], + IRQ_QUEUE_LEN(i)); + if (err) + return err; + } + + for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) { + err = airoha_qdma_init_tx_queue(eth, &eth->q_tx[i], + TX_DSCP_NUM); + if (err) + return err; + } + + return 0; +} + +static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q) +{ + struct airoha_eth *eth = q->eth; + + spin_lock_bh(&q->lock); + while (q->queued) { + struct airoha_queue_entry *e = &q->entry[q->tail]; + + dma_unmap_single(eth->dev, e->dma_addr, e->dma_len, + DMA_TO_DEVICE); + dev_kfree_skb_any(e->skb); + e->skb = NULL; + + q->tail = (q->tail + 1) % q->ndesc; + q->queued--; + } + spin_unlock_bh(&q->lock); +} + +static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth) +{ + dma_addr_t dma_addr; + u32 status; + int size; + + size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc); + eth->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr, + GFP_KERNEL); + if (!eth->hfwd.desc) + return -ENOMEM; + + airoha_qdma_wr(eth, REG_FWD_DSCP_BASE, dma_addr); + + size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM; + eth->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr, + GFP_KERNEL); + if (!eth->hfwd.q) + return -ENOMEM; + + airoha_qdma_wr(eth, REG_FWD_BUF_BASE, dma_addr); + + airoha_qdma_rmw(eth, REG_HW_FWD_DSCP_CFG, + HW_FWD_DSCP_PAYLOAD_SIZE_MASK, + FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0)); + airoha_qdma_rmw(eth, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK, + FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128)); + airoha_qdma_rmw(eth, REG_LMGR_INIT_CFG, + LMGR_INIT_START | LMGR_SRAM_MODE_MASK | + HW_FWD_DESC_NUM_MASK, + FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) | + LMGR_INIT_START); + + return read_poll_timeout(airoha_qdma_rr, status, + !(status & LMGR_INIT_START), USEC_PER_MSEC, + 30 * USEC_PER_MSEC, true, eth, + REG_LMGR_INIT_CFG); +} + +static void airoha_qdma_init_qos(struct airoha_eth *eth) +{ + airoha_qdma_clear(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK); + airoha_qdma_set(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK); + + airoha_qdma_clear(eth, REG_PSE_BUF_USAGE_CFG, + PSE_BUF_ESTIMATE_EN_MASK); + + airoha_qdma_set(eth, REG_EGRESS_RATE_METER_CFG, + EGRESS_RATE_METER_EN_MASK | + EGRESS_RATE_METER_EQ_RATE_EN_MASK); + /* 2047us x 31 = 63.457ms */ + airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG, + EGRESS_RATE_METER_WINDOW_SZ_MASK, + FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f)); + airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG, + EGRESS_RATE_METER_TIMESLICE_MASK, + FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff)); + + /* ratelimit init */ + airoha_qdma_set(eth, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK); + /* fast-tick 25us */ + airoha_qdma_rmw(eth,
REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK, + FIELD_PREP(GLB_FAST_TICK_MASK, 25)); + airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK, + FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40)); + + airoha_qdma_set(eth, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK); + airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK, + FIELD_PREP(EGRESS_FAST_TICK_MASK, 25)); + airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG, + EGRESS_SLOW_TICK_RATIO_MASK, + FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40)); + + airoha_qdma_set(eth, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK); + airoha_qdma_clear(eth, REG_INGRESS_TRTCM_CFG, + INGRESS_TRTCM_MODE_MASK); + airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK, + FIELD_PREP(INGRESS_FAST_TICK_MASK, 125)); + airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG, + INGRESS_SLOW_TICK_RATIO_MASK, + FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8)); + + airoha_qdma_set(eth, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK); + airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK, + FIELD_PREP(SLA_FAST_TICK_MASK, 25)); + airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK, + FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40)); +} + +static int airoha_qdma_hw_init(struct airoha_eth *eth) +{ + int i; + + /* clear pending irqs */ + for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++) + airoha_qdma_wr(eth, REG_INT_STATUS(i), 0xffffffff); + + /* setup irqs */ + airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0, INT_IDX0_MASK); + airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1, INT_IDX1_MASK); + airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX4, INT_IDX4_MASK); + + /* setup irq binding */ + for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) { + if (!eth->q_tx[i].ndesc) + continue; + + if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i)) + airoha_qdma_set(eth, REG_TX_RING_BLOCKING(i), + TX_RING_IRQ_BLOCKING_CFG_MASK); + else + airoha_qdma_clear(eth, REG_TX_RING_BLOCKING(i), + TX_RING_IRQ_BLOCKING_CFG_MASK); + } + + airoha_qdma_wr(eth, REG_QDMA_GLOBAL_CFG, + GLOBAL_CFG_RX_2B_OFFSET_MASK | + FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) | + GLOBAL_CFG_CPU_TXR_RR_MASK | + GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK | + GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK | + GLOBAL_CFG_MULTICAST_EN_MASK | + GLOBAL_CFG_IRQ0_EN_MASK | GLOBAL_CFG_IRQ1_EN_MASK | + GLOBAL_CFG_TX_WB_DONE_MASK | + FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2)); + + airoha_qdma_init_qos(eth); + + /* disable qdma rx delay interrupt */ + for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) { + if (!eth->q_rx[i].ndesc) + continue; + + airoha_qdma_clear(eth, REG_RX_DELAY_INT_IDX(i), + RX_DELAY_INT_MASK); + } + + airoha_qdma_set(eth, REG_TXQ_CNGST_CFG, + TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN); + + return 0; +} + +static irqreturn_t airoha_irq_handler(int irq, void *dev_instance) +{ + struct airoha_eth *eth = dev_instance; + u32 intr[ARRAY_SIZE(eth->irqmask)]; + int i; + + for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++) { + intr[i] = airoha_qdma_rr(eth, REG_INT_STATUS(i)); + intr[i] &= eth->irqmask[i]; + airoha_qdma_wr(eth, REG_INT_STATUS(i), intr[i]); + } + + if (!test_bit(DEV_STATE_INITIALIZED, &eth->state)) + return IRQ_NONE; + + if (intr[1] & RX_DONE_INT_MASK) { + airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX1, + RX_DONE_INT_MASK); + + for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) { + if (!eth->q_rx[i].ndesc) + continue; + + if (intr[1] & BIT(i)) + napi_schedule(&eth->q_rx[i].napi); + } + } + + if (intr[0] & INT_TX_MASK) { + for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) { + struct airoha_tx_irq_queue *irq_q = &eth->q_tx_irq[i]; + u32 status, head; + + if (!(intr[0] &
TX_DONE_INT_MASK(i))) + continue; + + airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX0, + TX_DONE_INT_MASK(i)); + + status = airoha_qdma_rr(eth, REG_IRQ_STATUS(i)); + head = FIELD_GET(IRQ_HEAD_IDX_MASK, status); + irq_q->head = head % irq_q->size; + irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status); + + napi_schedule(&eth->q_tx_irq[i].napi); + } + } + + return IRQ_HANDLED; +} + +static int airoha_qdma_init(struct airoha_eth *eth) +{ + int err; + + err = devm_request_irq(eth->dev, eth->irq, airoha_irq_handler, + IRQF_SHARED, KBUILD_MODNAME, eth); + if (err) + return err; + + err = airoha_qdma_init_rx(eth); + if (err) + return err; + + err = airoha_qdma_init_tx(eth); + if (err) + return err; + + err = airoha_qdma_init_hfwd_queues(eth); + if (err) + return err; + + err = airoha_qdma_hw_init(eth); + if (err) + return err; + + set_bit(DEV_STATE_INITIALIZED, &eth->state); + + return 0; +} + +static int airoha_hw_init(struct airoha_eth *eth) +{ + int err; + + /* disable xsi */ + reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), eth->xsi_rsts); + + reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts); + msleep(20); + reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts); + msleep(20); + + err = airoha_fe_init(eth); + if (err) + return err; + + return airoha_qdma_init(eth); +} + +static void airoha_hw_cleanup(struct airoha_eth *eth) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) { + if (!eth->q_rx[i].ndesc) + continue; + + napi_disable(&eth->q_rx[i].napi); + netif_napi_del(&eth->q_rx[i].napi); + airoha_qdma_cleanup_rx_queue(&eth->q_rx[i]); + if (eth->q_rx[i].page_pool) + page_pool_destroy(eth->q_rx[i].page_pool); + } + + for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) { + napi_disable(&eth->q_tx_irq[i].napi); + netif_napi_del(&eth->q_tx_irq[i].napi); + } + + for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) { + if (!eth->q_tx[i].ndesc) + continue; + + airoha_qdma_cleanup_tx_queue(&eth->q_tx[i]); + } +} + +static void airoha_qdma_start_napi(struct airoha_eth *eth) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) + napi_enable(&eth->q_tx_irq[i].napi); + + for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) { + if (!eth->q_rx[i].ndesc) + continue; + + napi_enable(&eth->q_rx[i].napi); + } +} + +static void airoha_update_hw_stats(struct airoha_gdm_port *port) +{ + struct airoha_eth *eth = port->eth; + u32 val, i = 0; + + spin_lock(&port->stats.lock); + u64_stats_update_begin(&port->stats.syncp); + + /* TX */ + val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id)); + port->stats.tx_ok_pkts += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id)); + port->stats.tx_ok_pkts += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id)); + port->stats.tx_ok_bytes += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id)); + port->stats.tx_ok_bytes += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id)); + port->stats.tx_drops += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id)); + port->stats.tx_broadcast += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id)); + port->stats.tx_multicast += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id)); + port->stats.tx_len[i] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id)); + port->stats.tx_len[i] += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id)); + port->stats.tx_len[i++] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id)); +
port->stats.tx_len[i] += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id)); + port->stats.tx_len[i++] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id)); + port->stats.tx_len[i] += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id)); + port->stats.tx_len[i++] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id)); + port->stats.tx_len[i] += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id)); + port->stats.tx_len[i++] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id)); + port->stats.tx_len[i] += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id)); + port->stats.tx_len[i++] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id)); + port->stats.tx_len[i] += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id)); + port->stats.tx_len[i++] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id)); + port->stats.tx_len[i++] += val; + + /* RX */ + val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id)); + port->stats.rx_ok_pkts += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id)); + port->stats.rx_ok_pkts += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id)); + port->stats.rx_ok_bytes += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id)); + port->stats.rx_ok_bytes += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id)); + port->stats.rx_drops += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id)); + port->stats.rx_broadcast += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id)); + port->stats.rx_multicast += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id)); + port->stats.rx_errors += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id)); + port->stats.rx_crc_error += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id)); + port->stats.rx_over_errors += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id)); + port->stats.rx_fragment += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id)); + port->stats.rx_jabber += val; + + i = 0; + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id)); + port->stats.rx_len[i] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id)); + port->stats.rx_len[i] += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id)); + port->stats.rx_len[i++] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id)); + port->stats.rx_len[i] += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id)); + port->stats.rx_len[i++] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id)); + port->stats.rx_len[i] += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id)); + port->stats.rx_len[i++] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id)); + port->stats.rx_len[i] += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id)); + port->stats.rx_len[i++] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id)); + port->stats.rx_len[i] += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id)); + port->stats.rx_len[i++] += val; + + val = 
airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id)); + port->stats.rx_len[i] += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id)); + port->stats.rx_len[i++] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id)); + port->stats.rx_len[i++] += val; + + /* reset mib counters */ + airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id), + FE_GDM_MIB_RX_CLEAR_MASK | FE_GDM_MIB_TX_CLEAR_MASK); + + u64_stats_update_end(&port->stats.syncp); + spin_unlock(&port->stats.lock); +} + +static int airoha_dev_open(struct net_device *dev) +{ + struct airoha_gdm_port *port = netdev_priv(dev); + struct airoha_eth *eth = port->eth; + int err; + + netif_tx_start_all_queues(dev); + err = airoha_set_gdm_ports(eth, true); + if (err) + return err; + + if (netdev_uses_dsa(dev)) + airoha_fe_set(eth, REG_GDM_INGRESS_CFG(port->id), + GDM_STAG_EN_MASK); + else + airoha_fe_clear(eth, REG_GDM_INGRESS_CFG(port->id), + GDM_STAG_EN_MASK); + + airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK); + airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK); + + return 0; +} + +static int airoha_dev_stop(struct net_device *dev) +{ + struct airoha_gdm_port *port = netdev_priv(dev); + struct airoha_eth *eth = port->eth; + int err; + + netif_tx_disable(dev); + err = airoha_set_gdm_ports(eth, false); + if (err) + return err; + + airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK); + airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK); + + return 0; +} + +static int airoha_dev_set_macaddr(struct net_device *dev, void *p) +{ + struct airoha_gdm_port *port = netdev_priv(dev); + int err; + + err = eth_mac_addr(dev, p); + if (err) + return err; + + airoha_set_macaddr(port->eth, dev->dev_addr); + + return 0; +} + +static int airoha_dev_init(struct net_device *dev) +{ + struct airoha_gdm_port *port = netdev_priv(dev); + + airoha_set_macaddr(port->eth, dev->dev_addr); + + return 0; +} + +static void airoha_dev_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *storage) +{ + struct airoha_gdm_port *port = netdev_priv(dev); + unsigned int start; + + airoha_update_hw_stats(port); + do { + start = u64_stats_fetch_begin(&port->stats.syncp); + storage->rx_packets = port->stats.rx_ok_pkts; + storage->tx_packets = port->stats.tx_ok_pkts; + storage->rx_bytes = port->stats.rx_ok_bytes; + storage->tx_bytes = port->stats.tx_ok_bytes; + storage->multicast = port->stats.rx_multicast; + storage->rx_errors = port->stats.rx_errors; + storage->rx_dropped = port->stats.rx_drops; + storage->tx_dropped = port->stats.tx_drops; + storage->rx_crc_errors = port->stats.rx_crc_error; + storage->rx_over_errors = port->stats.rx_over_errors; + } while (u64_stats_fetch_retry(&port->stats.syncp, start)); +} + +static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct skb_shared_info *sinfo = skb_shinfo(skb); + struct airoha_gdm_port *port = netdev_priv(dev); + u32 msg0 = 0, msg1, len = skb_headlen(skb); + int i, qid = skb_get_queue_mapping(skb); + struct airoha_eth *eth = port->eth; + u32 nr_frags = 1 + sinfo->nr_frags; + struct netdev_queue *txq; + struct airoha_queue *q; + void *data = skb->data; + u16 index; + u8 fport; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) | + FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) | + FIELD_PREP(QDMA_ETH_TXMSG_ICO_MASK, 1); + + /* TSO: fill MSS info in tcp checksum field */ + if (skb_is_gso(skb)) { + if (skb_cow_head(skb, 0)) + 
goto error; + + if (sinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { + __be16 csum = cpu_to_be16(sinfo->gso_size); + + tcp_hdr(skb)->check = (__force __sum16)csum; + msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1); + } + } + + fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id; + msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) | + FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f); + + q = &eth->q_tx[qid]; + if (WARN_ON_ONCE(!q->ndesc)) + goto error; + + spin_lock_bh(&q->lock); + + txq = netdev_get_tx_queue(dev, qid); + if (q->queued + nr_frags > q->ndesc) { + /* not enough space in the queue */ + netif_tx_stop_queue(txq); + spin_unlock_bh(&q->lock); + return NETDEV_TX_BUSY; + } + + index = q->head; + for (i = 0; i < nr_frags; i++) { + struct airoha_qdma_desc *desc = &q->desc[index]; + struct airoha_queue_entry *e = &q->entry[index]; + skb_frag_t *frag = &sinfo->frags[i]; + dma_addr_t addr; + u32 val; + + addr = dma_map_single(dev->dev.parent, data, len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev->dev.parent, addr))) + goto error_unmap; + + index = (index + 1) % q->ndesc; + + val = FIELD_PREP(QDMA_DESC_LEN_MASK, len); + if (i < nr_frags - 1) + val |= FIELD_PREP(QDMA_DESC_MORE_MASK, 1); + WRITE_ONCE(desc->ctrl, cpu_to_le32(val)); + WRITE_ONCE(desc->addr, cpu_to_le32(addr)); + val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index); + WRITE_ONCE(desc->data, cpu_to_le32(val)); + WRITE_ONCE(desc->msg0, cpu_to_le32(msg0)); + WRITE_ONCE(desc->msg1, cpu_to_le32(msg1)); + WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff)); + + e->skb = i ? NULL : skb; + e->dma_addr = addr; + e->dma_len = len; + + airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK, + FIELD_PREP(TX_RING_CPU_IDX_MASK, index)); + + data = skb_frag_address(frag); + len = skb_frag_size(frag); + } + + q->head = index; + q->queued += i; + + skb_tx_timestamp(skb); + if (q->ndesc - q->queued < q->free_thr) + netif_tx_stop_queue(txq); + + spin_unlock_bh(&q->lock); + + return NETDEV_TX_OK; + +error_unmap: + for (i--; i >= 0; i--) + dma_unmap_single(dev->dev.parent, q->entry[i].dma_addr, + q->entry[i].dma_len, DMA_TO_DEVICE); + + spin_unlock_bh(&q->lock); +error: + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + + return NETDEV_TX_OK; +} + +static void airoha_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct airoha_gdm_port *port = netdev_priv(dev); + struct airoha_eth *eth = port->eth; + + strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver)); + strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info)); +} + +static void airoha_ethtool_get_mac_stats(struct net_device *dev, + struct ethtool_eth_mac_stats *stats) +{ + struct airoha_gdm_port *port = netdev_priv(dev); + unsigned int start; + + airoha_update_hw_stats(port); + do { + start = u64_stats_fetch_begin(&port->stats.syncp); + stats->MulticastFramesXmittedOK = port->stats.tx_multicast; + stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast; + stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast; + } while (u64_stats_fetch_retry(&port->stats.syncp, start)); +} + +static const struct ethtool_rmon_hist_range airoha_ethtool_rmon_ranges[] = { + { 0, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1518 }, + { 1519, 10239 }, + {}, +}; + +static void +airoha_ethtool_get_rmon_stats(struct net_device *dev, + struct ethtool_rmon_stats *stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct airoha_gdm_port *port = netdev_priv(dev); + struct airoha_hw_stats *hw_stats =
&port->stats; + unsigned int start; + + BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) != + ARRAY_SIZE(hw_stats->tx_len) + 1); + BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) != + ARRAY_SIZE(hw_stats->rx_len) + 1); + + *ranges = airoha_ethtool_rmon_ranges; + airoha_update_hw_stats(port); + do { + int i; + + start = u64_stats_fetch_begin(&port->stats.syncp); + stats->fragments = hw_stats->rx_fragment; + stats->jabbers = hw_stats->rx_jabber; + for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1; + i++) { + stats->hist[i] = hw_stats->rx_len[i]; + stats->hist_tx[i] = hw_stats->tx_len[i]; + } + } while (u64_stats_fetch_retry(&port->stats.syncp, start)); +} + +static const struct net_device_ops airoha_netdev_ops = { + .ndo_init = airoha_dev_init, + .ndo_open = airoha_dev_open, + .ndo_stop = airoha_dev_stop, + .ndo_start_xmit = airoha_dev_xmit, + .ndo_get_stats64 = airoha_dev_get_stats64, + .ndo_set_mac_address = airoha_dev_set_macaddr, +}; + +static const struct ethtool_ops airoha_ethtool_ops = { + .get_drvinfo = airoha_ethtool_get_drvinfo, + .get_eth_mac_stats = airoha_ethtool_get_mac_stats, + .get_rmon_stats = airoha_ethtool_get_rmon_stats, +}; + +static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np) +{ + const __be32 *id_ptr = of_get_property(np, "reg", NULL); + struct airoha_gdm_port *port; + struct net_device *dev; + int err, index; + u32 id; + + if (!id_ptr) { + dev_err(eth->dev, "missing gdm port id\n"); + return -EINVAL; + } + + id = be32_to_cpup(id_ptr); + index = id - 1; + + if (!id || id > ARRAY_SIZE(eth->ports)) { + dev_err(eth->dev, "invalid gdm port id: %d\n", id); + return -EINVAL; + } + + if (eth->ports[index]) { + dev_err(eth->dev, "duplicate gdm port id: %d\n", id); + return -EINVAL; + } + + dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port), + AIROHA_NUM_TX_RING, AIROHA_NUM_RX_RING); + if (!dev) { + dev_err(eth->dev, "alloc_etherdev failed\n"); + return -ENOMEM; + } + + dev->netdev_ops = &airoha_netdev_ops; + dev->ethtool_ops = &airoha_ethtool_ops; + dev->max_mtu = AIROHA_MAX_MTU; + dev->watchdog_timeo = 5 * HZ; + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_TSO6 | NETIF_F_IPV6_CSUM | + NETIF_F_SG | NETIF_F_TSO; + dev->features |= dev->hw_features; + dev->dev.of_node = np; + SET_NETDEV_DEV(dev, eth->dev); + + err = of_get_ethdev_address(np, dev); + if (err) { + if (err == -EPROBE_DEFER) + return err; + + eth_hw_addr_random(dev); + dev_info(eth->dev, "generated random MAC address %pM\n", + dev->dev_addr); + } + + port = netdev_priv(dev); + u64_stats_init(&port->stats.syncp); + spin_lock_init(&port->stats.lock); + port->dev = dev; + port->eth = eth; + port->id = id; + eth->ports[index] = port; + + return register_netdev(dev); +} + +static int airoha_probe(struct platform_device *pdev) +{ + struct device_node *np; + struct airoha_eth *eth; + int i, err; + + eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); + if (!eth) + return -ENOMEM; + + eth->dev = &pdev->dev; + + err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(eth->dev, "failed configuring DMA mask\n"); + return err; + } + + eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe"); + if (IS_ERR(eth->fe_regs)) + return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs), + "failed to iomap fe regs\n"); + + eth->qdma_regs = devm_platform_ioremap_resource_byname(pdev, "qdma0"); + if (IS_ERR(eth->qdma_regs)) + return dev_err_probe(eth->dev, PTR_ERR(eth->qdma_regs), + "failed to iomap qdma regs\n"); + + eth->rsts[0].id = 
"fe"; + eth->rsts[1].id = "pdma"; + eth->rsts[2].id = "qdma"; + err = devm_reset_control_bulk_get_exclusive(eth->dev, + ARRAY_SIZE(eth->rsts), + eth->rsts); + if (err) { + dev_err(eth->dev, "failed to get bulk reset lines\n"); + return err; + } + + eth->xsi_rsts[0].id = "xsi-mac"; + eth->xsi_rsts[1].id = "hsi0-mac"; + eth->xsi_rsts[2].id = "hsi1-mac"; + eth->xsi_rsts[3].id = "hsi-mac"; + eth->xsi_rsts[4].id = "xfp-mac"; + err = devm_reset_control_bulk_get_exclusive(eth->dev, + ARRAY_SIZE(eth->xsi_rsts), + eth->xsi_rsts); + if (err) { + dev_err(eth->dev, "failed to get bulk xsi reset lines\n"); + return err; + } + + spin_lock_init(ð->irq_lock); + eth->irq = platform_get_irq(pdev, 0); + if (eth->irq < 0) + return eth->irq; + + eth->napi_dev = alloc_netdev_dummy(0); + if (!eth->napi_dev) + return -ENOMEM; + + /* Enable threaded NAPI by default */ + eth->napi_dev->threaded = true; + strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name)); + platform_set_drvdata(pdev, eth); + + err = airoha_hw_init(eth); + if (err) + goto error; + + airoha_qdma_start_napi(eth); + for_each_child_of_node(pdev->dev.of_node, np) { + if (!of_device_is_compatible(np, "airoha,eth-mac")) + continue; + + if (!of_device_is_available(np)) + continue; + + err = airoha_alloc_gdm_port(eth, np); + if (err) { + of_node_put(np); + goto error; + } + } + + return 0; + +error: + airoha_hw_cleanup(eth); + for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { + struct airoha_gdm_port *port = eth->ports[i]; + + if (port && port->dev->reg_state == NETREG_REGISTERED) + unregister_netdev(port->dev); + } + free_netdev(eth->napi_dev); + platform_set_drvdata(pdev, NULL); + + return err; +} + +static void airoha_remove(struct platform_device *pdev) +{ + struct airoha_eth *eth = platform_get_drvdata(pdev); + int i; + + airoha_hw_cleanup(eth); + for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { + struct airoha_gdm_port *port = eth->ports[i]; + + if (!port) + continue; + + airoha_dev_stop(port->dev); + unregister_netdev(port->dev); + } + free_netdev(eth->napi_dev); + + platform_set_drvdata(pdev, NULL); +} + +static const struct of_device_id of_airoha_match[] = { + { .compatible = "airoha,en7581-eth" }, + { /* sentinel */ } +}; + +static struct platform_driver airoha_driver = { + .probe = airoha_probe, + .remove_new = airoha_remove, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = of_airoha_match, + }, +}; +module_platform_driver(airoha_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); +MODULE_DESCRIPTION("Ethernet driver for Airoha SoC"); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index c84ce54a84a0..0cc2dd85652f 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -80,7 +80,9 @@ static const struct mtk_reg_map mtk_reg_map = { .fq_blen = 0x1b2c, }, .gdm1_cnt = 0x2400, - .gdma_to_ppe = 0x4444, + .gdma_to_ppe = { + [0] = 0x4444, + }, .ppe_base = 0x0c00, .wdma_base = { [0] = 0x2800, @@ -144,7 +146,10 @@ static const struct mtk_reg_map mt7986_reg_map = { .tx_sch_rate = 0x4798, }, .gdm1_cnt = 0x1c00, - .gdma_to_ppe = 0x3333, + .gdma_to_ppe = { + [0] = 0x3333, + [1] = 0x4444, + }, .ppe_base = 0x2000, .wdma_base = { [0] = 0x4800, @@ -192,7 +197,11 @@ static const struct mtk_reg_map mt7988_reg_map = { .tx_sch_rate = 0x4798, }, .gdm1_cnt = 0x1c00, - .gdma_to_ppe = 0x3333, + .gdma_to_ppe = { + [0] = 0x3333, + [1] = 0x4444, + [2] = 0xcccc, + }, .ppe_base = 0x2000, .wdma_base = { [0] 
= 0x4800, @@ -2015,6 +2024,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, struct mtk_rx_dma_v2 *rxd, trxd; int done = 0, bytes = 0; dma_addr_t dma_addr = DMA_MAPPING_ERROR; + int ppe_idx = 0; while (done < budget) { unsigned int pktlen, *rxdcsum; @@ -2058,6 +2068,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, goto release_desc; netdev = eth->netdev[mac]; + ppe_idx = eth->mac[mac]->ppe_idx; if (unlikely(test_bit(MTK_RESETTING, &eth->state))) goto release_desc; @@ -2181,7 +2192,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, } if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) - mtk_ppe_check_skb(eth->ppe[0], skb, hash); + mtk_ppe_check_skb(eth->ppe[ppe_idx], skb, hash); skb_record_rx_queue(skb, 0); napi_gro_receive(napi, skb); @@ -3276,37 +3287,27 @@ static int mtk_start_dma(struct mtk_eth *eth) return 0; } -static void mtk_gdm_config(struct mtk_eth *eth, u32 config) +static void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config) { - int i; + u32 val; if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) return; - for (i = 0; i < MTK_MAX_DEVS; i++) { - u32 val; - - if (!eth->netdev[i]) - continue; - - val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); + val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id)); - /* default setup the forward port to send frame to PDMA */ - val &= ~0xffff; + /* default setup the forward port to send frame to PDMA */ + val &= ~0xffff; - /* Enable RX checksum */ - val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN; + /* Enable RX checksum */ + val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN; - val |= config; + val |= config; - if (netdev_uses_dsa(eth->netdev[i])) - val |= MTK_GDMA_SPECIAL_TAG; + if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id])) + val |= MTK_GDMA_SPECIAL_TAG; - mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i)); - } - /* Reset and enable PSE */ - mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); - mtk_w32(eth, 0, MTK_RST_GL); + mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id)); } @@ -3366,7 +3367,10 @@ static int mtk_open(struct net_device *dev) { struct mtk_mac *mac = netdev_priv(dev); struct mtk_eth *eth = mac->hw; - int i, err; + struct mtk_mac *target_mac; + int i, err, ppe_num; + + ppe_num = eth->soc->ppe_num; err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0); if (err) { @@ -3390,18 +3394,38 @@ static int mtk_open(struct net_device *dev) for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) mtk_ppe_start(eth->ppe[i]); - gdm_config = soc->offload_version ?
soc->reg_map->gdma_to_ppe - : MTK_GDMA_TO_PDMA; - mtk_gdm_config(eth, gdm_config); + for (i = 0; i < MTK_MAX_DEVS; i++) { + if (!eth->netdev[i]) + continue; + + target_mac = netdev_priv(eth->netdev[i]); + if (!soc->offload_version) { + target_mac->ppe_idx = 0; + gdm_config = MTK_GDMA_TO_PDMA; + } else if (ppe_num >= 3 && target_mac->id == 2) { + target_mac->ppe_idx = 2; + gdm_config = soc->reg_map->gdma_to_ppe[2]; + } else if (ppe_num >= 2 && target_mac->id == 1) { + target_mac->ppe_idx = 1; + gdm_config = soc->reg_map->gdma_to_ppe[1]; + } else { + target_mac->ppe_idx = 0; + gdm_config = soc->reg_map->gdma_to_ppe[0]; + } + mtk_gdm_config(eth, target_mac->id, gdm_config); + } + /* Reset and enable PSE */ + mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); + mtk_w32(eth, 0, MTK_RST_GL); napi_enable(&eth->tx_napi); napi_enable(&eth->rx_napi); mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); mtk_rx_irq_enable(eth, soc->rx.irq_done_mask); refcount_set(&eth->dma_refcnt, 1); - } - else + } else { refcount_inc(&eth->dma_refcnt); + } phylink_start(mac->phylink); netif_tx_start_all_queues(dev); @@ -3478,7 +3502,8 @@ static int mtk_stop(struct net_device *dev) if (!refcount_dec_and_test(&eth->dma_refcnt)) return 0; - mtk_gdm_config(eth, MTK_GDMA_DROP_ALL); + for (i = 0; i < MTK_MAX_DEVS; i++) + mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL); mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask); @@ -4439,6 +4464,20 @@ static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return ret; } +static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause) +{ + struct mtk_mac *mac = netdev_priv(dev); + + phylink_ethtool_get_pauseparam(mac->phylink, pause); +} + +static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause) +{ + struct mtk_mac *mac = netdev_priv(dev); + + return phylink_ethtool_set_pauseparam(mac->phylink, pause); +} + static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) { @@ -4467,8 +4506,10 @@ static const struct ethtool_ops mtk_ethtool_ops = { .get_strings = mtk_get_strings, .get_sset_count = mtk_get_sset_count, .get_ethtool_stats = mtk_get_ethtool_stats, + .get_pauseparam = mtk_get_pauseparam, + .set_pauseparam = mtk_set_pauseparam, .get_rxnfc = mtk_get_rxnfc, - .set_rxnfc = mtk_set_rxnfc, + .set_rxnfc = mtk_set_rxnfc, }; static const struct net_device_ops mtk_netdev_ops = { @@ -4959,23 +5000,24 @@ static int mtk_probe(struct platform_device *pdev) } if (eth->soc->offload_version) { - u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 2 : 1; + u8 ppe_num = eth->soc->ppe_num; - num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe); - for (i = 0; i < num_ppe; i++) { - u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400; + ppe_num = min_t(u8, ARRAY_SIZE(eth->ppe), ppe_num); + for (i = 0; i < ppe_num; i++) { + u32 ppe_addr = eth->soc->reg_map->ppe_base; + ppe_addr += (i == 2 ?
0xc00 : i * 0x400); eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i); if (!eth->ppe[i]) { err = -ENOMEM; goto err_deinit_ppe; } - } + err = mtk_eth_offload_init(eth, i); - err = mtk_eth_offload_init(eth); - if (err) - goto err_deinit_ppe; + if (err) + goto err_deinit_ppe; + } } for (i = 0; i < MTK_MAX_DEVS; i++) { @@ -5083,6 +5125,7 @@ static const struct mtk_soc_data mt7621_data = { .required_pctl = false, .version = 1, .offload_version = 1, + .ppe_num = 1, .hash_offset = 2, .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE, .tx = { @@ -5111,6 +5154,7 @@ static const struct mtk_soc_data mt7622_data = { .required_pctl = false, .version = 1, .offload_version = 2, + .ppe_num = 1, .hash_offset = 2, .has_accounting = true, .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE, @@ -5139,6 +5183,7 @@ static const struct mtk_soc_data mt7623_data = { .required_pctl = true, .version = 1, .offload_version = 1, + .ppe_num = 1, .hash_offset = 2, .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE, .disable_pll_modes = true, @@ -5194,6 +5239,7 @@ static const struct mtk_soc_data mt7981_data = { .required_pctl = false, .version = 2, .offload_version = 2, + .ppe_num = 2, .hash_offset = 4, .has_accounting = true, .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE, @@ -5223,6 +5269,7 @@ static const struct mtk_soc_data mt7986_data = { .required_pctl = false, .version = 2, .offload_version = 2, + .ppe_num = 2, .hash_offset = 4, .has_accounting = true, .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE, @@ -5252,6 +5299,7 @@ static const struct mtk_soc_data mt7988_data = { .required_pctl = false, .version = 3, .offload_version = 2, + .ppe_num = 3, .hash_offset = 4, .has_accounting = true, .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE, diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index f5174f6cb1bb..eb1708b43aa3 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -1132,7 +1132,7 @@ struct mtk_reg_map { u32 tx_sch_rate; /* tx scheduler rate control registers */ } qdma; u32 gdm1_cnt; - u32 gdma_to_ppe; + u32 gdma_to_ppe[3]; u32 ppe_base; u32 wdma_base[3]; u32 pse_iq_sta; @@ -1170,6 +1170,7 @@ struct mtk_soc_data { u8 offload_version; u8 hash_offset; u8 version; + u8 ppe_num; u16 foe_entry_size; netdev_features_t hw_features; bool has_accounting; @@ -1294,7 +1295,7 @@ struct mtk_eth { struct metadata_dst *dsa_meta[MTK_MAX_DSA_PORTS]; - struct mtk_ppe *ppe[2]; + struct mtk_ppe *ppe[3]; struct rhashtable flow_table; struct bpf_prog __rcu *prog; @@ -1319,6 +1320,7 @@ struct mtk_eth { struct mtk_mac { int id; phy_interface_t interface; + u8 ppe_idx; int speed; struct device_node *of_node; struct phylink *phylink; @@ -1440,7 +1442,7 @@ int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id); int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id); int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id); -int mtk_eth_offload_init(struct mtk_eth *eth); +int mtk_eth_offload_init(struct mtk_eth *eth, u8 id); int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data); int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls, diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h index 691806bca372..223f709e2704 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.h +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h @@ -8,7 +8,7 @@ #include <linux/bitfield.h> #include <linux/rhashtable.h> -#define MTK_PPE_ENTRIES_SHIFT 3 +#define MTK_PPE_ENTRIES_SHIFT 4 #define 
MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT) #define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1) #define MTK_PPE_WAIT_TIMEOUT_US 1000000 diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c index aa262e6f4b85..f20bb390df3a 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -245,10 +245,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f, int ppe_index) { struct flow_rule *rule = flow_cls_offload_flow_rule(f); + struct net_device *idev = NULL, *odev = NULL; struct flow_action_entry *act; struct mtk_flow_data data = {}; struct mtk_foe_entry foe; - struct net_device *odev = NULL; struct mtk_flow_entry *entry; int offload_type = 0; int wed_index = -1; @@ -264,6 +264,17 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f, struct flow_match_meta match; flow_rule_match_meta(rule, &match); + if (mtk_is_netsys_v2_or_greater(eth)) { + idev = __dev_get_by_index(&init_net, match.key->ingress_ifindex); + if (idev && idev->netdev_ops == eth->netdev[0]->netdev_ops) { + struct mtk_mac *mac = netdev_priv(idev); + + if (WARN_ON(mac->ppe_idx >= eth->soc->ppe_num)) + return -EINVAL; + + ppe_index = mac->ppe_idx; + } + } } else { return -EOPNOTSUPP; } @@ -637,7 +648,9 @@ int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, } } -int mtk_eth_offload_init(struct mtk_eth *eth) +int mtk_eth_offload_init(struct mtk_eth *eth, u8 id) { + if (!eth->ppe[id] || !eth->ppe[id]->foe_table) + return 0; return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params); } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index 1184ac5751e1..461cc2c79c71 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -126,6 +126,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, cq_idx = cq_idx % priv->rx_ring_num; rx_cq = priv->rx_cq[cq_idx]; cq->vector = rx_cq->vector; + irq = mlx4_eq_get_irq(mdev->dev, cq->vector); } if (cq->type == RX) @@ -142,18 +143,23 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, if (err) goto free_eq; + cq->cq_idx = cq_idx; cq->mcq.event = mlx4_en_cq_event; switch (cq->type) { case TX: cq->mcq.comp = mlx4_en_tx_irq; netif_napi_add_tx(cq->dev, &cq->napi, mlx4_en_poll_tx_cq); + netif_napi_set_irq(&cq->napi, irq); napi_enable(&cq->napi); + netif_queue_set_napi(cq->dev, cq_idx, NETDEV_QUEUE_TYPE_TX, &cq->napi); break; case RX: cq->mcq.comp = mlx4_en_rx_irq; netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq); + netif_napi_set_irq(&cq->napi, irq); napi_enable(&cq->napi); + netif_queue_set_napi(cq->dev, cq_idx, NETDEV_QUEUE_TYPE_RX, &cq->napi); break; case TX_XDP: /* nothing regarding napi, it's shared with rx ring */ @@ -189,6 +195,14 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq) void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) { if (cq->type != TX_XDP) { + enum netdev_queue_type qtype; + + if (cq->type == RX) + qtype = NETDEV_QUEUE_TYPE_RX; + else + qtype = NETDEV_QUEUE_TYPE_TX; + + netif_queue_set_napi(cq->dev, cq->cq_idx, qtype, NULL); napi_disable(&cq->napi); netif_napi_del(&cq->napi); } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 619e1c3ef7f9..943d6918c2ec 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++
b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -450,7 +450,6 @@ static void mlx4_en_get_strings(struct net_device *dev, uint32_t stringset, uint8_t *data) { struct mlx4_en_priv *priv = netdev_priv(dev); - int index = 0; int i, strings = 0; struct bitmap_iterator it; @@ -459,10 +458,10 @@ static void mlx4_en_get_strings(struct net_device *dev, switch (stringset) { case ETH_SS_TEST: for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++) - strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]); + ethtool_puts(&data, mlx4_en_test_names[i]); if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) for (; i < MLX4_EN_NUM_SELF_TEST; i++) - strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]); + ethtool_puts(&data, mlx4_en_test_names[i]); break; case ETH_SS_STATS: @@ -470,74 +469,56 @@ static void mlx4_en_get_strings(struct net_device *dev, for (i = 0; i < NUM_MAIN_STATS; i++, strings++, bitmap_iterator_inc(&it)) if (bitmap_iterator_test(&it)) - strcpy(data + (index++) * ETH_GSTRING_LEN, - main_strings[strings]); + ethtool_puts(&data, main_strings[strings]); for (i = 0; i < NUM_PORT_STATS; i++, strings++, bitmap_iterator_inc(&it)) if (bitmap_iterator_test(&it)) - strcpy(data + (index++) * ETH_GSTRING_LEN, - main_strings[strings]); + ethtool_puts(&data, main_strings[strings]); for (i = 0; i < NUM_PF_STATS; i++, strings++, bitmap_iterator_inc(&it)) if (bitmap_iterator_test(&it)) - strcpy(data + (index++) * ETH_GSTRING_LEN, - main_strings[strings]); + ethtool_puts(&data, main_strings[strings]); for (i = 0; i < NUM_FLOW_STATS; i++, strings++, bitmap_iterator_inc(&it)) if (bitmap_iterator_test(&it)) - strcpy(data + (index++) * ETH_GSTRING_LEN, - main_strings[strings]); + ethtool_puts(&data, main_strings[strings]); for (i = 0; i < NUM_PKT_STATS; i++, strings++, bitmap_iterator_inc(&it)) if (bitmap_iterator_test(&it)) - strcpy(data + (index++) * ETH_GSTRING_LEN, - main_strings[strings]); + ethtool_puts(&data, main_strings[strings]); for (i = 0; i < NUM_XDP_STATS; i++, strings++, bitmap_iterator_inc(&it)) if (bitmap_iterator_test(&it)) - strcpy(data + (index++) * ETH_GSTRING_LEN, - main_strings[strings]); + ethtool_puts(&data, main_strings[strings]); for (i = 0; i < NUM_PHY_STATS; i++, strings++, bitmap_iterator_inc(&it)) if (bitmap_iterator_test(&it)) - strcpy(data + (index++) * ETH_GSTRING_LEN, - main_strings[strings]); + ethtool_puts(&data, main_strings[strings]); for (i = 0; i < priv->tx_ring_num[TX]; i++) { - sprintf(data + (index++) * ETH_GSTRING_LEN, - "tx%d_packets", i); - sprintf(data + (index++) * ETH_GSTRING_LEN, - "tx%d_bytes", i); + ethtool_sprintf(&data, "tx%d_packets", i); + ethtool_sprintf(&data, "tx%d_bytes", i); } for (i = 0; i < priv->rx_ring_num; i++) { - sprintf(data + (index++) * ETH_GSTRING_LEN, - "rx%d_packets", i); - sprintf(data + (index++) * ETH_GSTRING_LEN, - "rx%d_bytes", i); - sprintf(data + (index++) * ETH_GSTRING_LEN, - "rx%d_dropped", i); - sprintf(data + (index++) * ETH_GSTRING_LEN, - "rx%d_xdp_drop", i); - sprintf(data + (index++) * ETH_GSTRING_LEN, - "rx%d_xdp_redirect", i); - sprintf(data + (index++) * ETH_GSTRING_LEN, - "rx%d_xdp_redirect_fail", i); - sprintf(data + (index++) * ETH_GSTRING_LEN, - "rx%d_xdp_tx", i); - sprintf(data + (index++) * ETH_GSTRING_LEN, - "rx%d_xdp_tx_full", i); + ethtool_sprintf(&data, "rx%d_packets", i); + ethtool_sprintf(&data, "rx%d_bytes", i); + ethtool_sprintf(&data, "rx%d_dropped", i); + ethtool_sprintf(&data, "rx%d_xdp_drop", i); + ethtool_sprintf(&data, "rx%d_xdp_redirect", i); + ethtool_sprintf(&data, 
"rx%d_xdp_redirect_fail", i); + ethtool_sprintf(&data, "rx%d_xdp_tx", i); + ethtool_sprintf(&data, "rx%d_xdp_tx_full", i); } break; case ETH_SS_PRIV_FLAGS: for (i = 0; i < ARRAY_SIZE(mlx4_en_priv_flags); i++) - strcpy(data + i * ETH_GSTRING_LEN, - mlx4_en_priv_flags[i]); + ethtool_puts(&data, mlx4_en_priv_flags[i]); break; } @@ -1903,7 +1884,7 @@ out: } static int mlx4_en_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 4c089cfa027a..281b34af0bb4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -43,6 +43,7 @@ #include <net/vxlan.h> #include <net/devlink.h> #include <net/rps.h> +#include <net/netdev_queues.h> #include <linux/mlx4/driver.h> #include <linux/mlx4/device.h> @@ -2073,6 +2074,7 @@ static void mlx4_en_clear_stats(struct net_device *dev) priv->rx_ring[i]->csum_ok = 0; priv->rx_ring[i]->csum_none = 0; priv->rx_ring[i]->csum_complete = 0; + priv->rx_ring[i]->alloc_fail = 0; } } @@ -3099,6 +3101,77 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev, last_i += NUM_PHY_STATS; } +static void mlx4_get_queue_stats_rx(struct net_device *dev, int i, + struct netdev_queue_stats_rx *stats) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + const struct mlx4_en_rx_ring *ring; + + spin_lock_bh(&priv->stats_lock); + + if (!priv->port_up || mlx4_is_master(priv->mdev->dev)) + goto out_unlock; + + ring = priv->rx_ring[i]; + stats->packets = READ_ONCE(ring->packets); + stats->bytes = READ_ONCE(ring->bytes); + stats->alloc_fail = READ_ONCE(ring->alloc_fail); + +out_unlock: + spin_unlock_bh(&priv->stats_lock); +} + +static void mlx4_get_queue_stats_tx(struct net_device *dev, int i, + struct netdev_queue_stats_tx *stats) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + const struct mlx4_en_tx_ring *ring; + + spin_lock_bh(&priv->stats_lock); + + if (!priv->port_up || mlx4_is_master(priv->mdev->dev)) + goto out_unlock; + + ring = priv->tx_ring[TX][i]; + stats->packets = READ_ONCE(ring->packets); + stats->bytes = READ_ONCE(ring->bytes); + +out_unlock: + spin_unlock_bh(&priv->stats_lock); +} + +static void mlx4_get_base_stats(struct net_device *dev, + struct netdev_queue_stats_rx *rx, + struct netdev_queue_stats_tx *tx) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + spin_lock_bh(&priv->stats_lock); + + if (!priv->port_up || mlx4_is_master(priv->mdev->dev)) + goto out_unlock; + + if (priv->rx_ring_num) { + rx->packets = 0; + rx->bytes = 0; + rx->alloc_fail = 0; + } + + if (priv->tx_ring_num[TX]) { + tx->packets = 0; + tx->bytes = 0; + } + +out_unlock: + spin_unlock_bh(&priv->stats_lock); +} + +static const struct netdev_stat_ops mlx4_stat_ops = { + .get_queue_stats_rx = mlx4_get_queue_stats_rx, + .get_queue_stats_tx = mlx4_get_queue_stats_tx, + .get_base_stats = mlx4_get_base_stats, +}; + int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, struct mlx4_en_port_profile *prof) { @@ -3262,6 +3335,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]); netif_set_real_num_rx_queues(dev, priv->rx_ring_num); + dev->stat_ops = &mlx4_stat_ops; dev->ethtool_ops = &mlx4_en_ethtool_ops; /* diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 
8328df8645d5..15c57e9517e9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -82,8 +82,10 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv, for (i = 0; i < priv->num_frags; i++, frags++) { if (!frags->page) { - if (mlx4_alloc_page(priv, frags, gfp)) + if (mlx4_alloc_page(priv, frags, gfp)) { + ring->alloc_fail++; return -ENOMEM; + } ring->rx_alloc_pages++; } rx_desc->data[i].addr = cpu_to_be64(frags->dma + diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 98688e4dbec5..febeadfdd5a5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -169,12 +169,6 @@ module_param_array(port_type_array, int, &arr_argc, 0444); MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default " "1 for IB, 2 for Ethernet"); -struct mlx4_port_config { - struct list_head list; - enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; - struct pci_dev *pdev; -}; - static atomic_t pf_loading = ATOMIC_INIT(0); static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id, diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index efe3f97b874f..28b70dcc652e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -355,6 +355,7 @@ struct mlx4_en_rx_ring { unsigned long xdp_tx; unsigned long xdp_tx_full; unsigned long dropped; + unsigned long alloc_fail; int hwtstamp_rx_filter; cpumask_var_t affinity_mask; struct xdp_rxq_info xdp_rxq; @@ -379,6 +380,7 @@ struct mlx4_en_cq { #define MLX4_EN_OPCODE_ERROR 0x1e const struct cpumask *aff_mask; + int cq_idx; }; struct mlx4_en_port_profile { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 76dc5a9b9648..1289475e7be7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -17,7 +17,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \ lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \ diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o diag/reporter_vnic.o \ - fw_reset.o qos.o lib/tout.o lib/aso.o + fw_reset.o qos.o lib/tout.o lib/aso.o wc.o # # Netdev basic diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index e85fb71bf0b4..5fd82c67b6ab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -80,6 +80,7 @@ struct page_pool; SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) #define MLX5E_RX_MAX_HEAD (256) +#define MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE (8) #define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9) #define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE) #define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64) @@ -146,25 +147,6 @@ struct page_pool; #define MLX5E_TX_XSK_POLL_BUDGET 64 #define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */ -#define MLX5E_KLM_UMR_WQE_SZ(sgl_len)\ - (sizeof(struct mlx5e_umr_wqe) +\ - (sizeof(struct mlx5_klm) * (sgl_len))) - -#define MLX5E_KLM_UMR_WQEBBS(klm_entries) \ - (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_BB)) - -#define MLX5E_KLM_UMR_DS_CNT(klm_entries)\ - (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), 
MLX5_SEND_WQE_DS)) - -#define MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size)\ - (((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_klm)) - -#define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\ - ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT) - -#define MLX5E_MAX_KLM_PER_WQE(mdev) \ - MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev)) - #define mlx5e_state_dereference(priv, p) \ rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock)) @@ -885,6 +867,8 @@ struct mlx5e_priv { /* priv data path fields - start */ struct mlx5e_selq selq; struct mlx5e_txqsq **txq2sq; + struct mlx5e_sq_stats **txq2sq_stats; + #ifdef CONFIG_MLX5_CORE_EN_DCB struct mlx5e_dcbx_dp dcbx_dp; #endif @@ -1014,7 +998,7 @@ void mlx5e_build_ptys2ethtool_map(void); bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift, enum mlx5e_mpwrq_umr_mode umr_mode); -void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close); +void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq); void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s); @@ -1207,7 +1191,7 @@ int mlx5e_set_per_queue_coalesce(struct net_device *dev, u32 queue, u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv); u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv); int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, - struct ethtool_ts_info *info); + struct kernel_ethtool_ts_info *info); int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv, struct ethtool_flash *flash); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index 4d6225e0eec7..1e8b7d330701 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -154,6 +154,19 @@ struct mlx5e_tc_table *mlx5e_fs_get_tc(struct mlx5e_flow_steering *fs); struct mlx5e_l2_table *mlx5e_fs_get_l2(struct mlx5e_flow_steering *fs); struct mlx5_flow_namespace *mlx5e_fs_get_ns(struct mlx5e_flow_steering *fs, bool egress); void mlx5e_fs_set_ns(struct mlx5e_flow_steering *fs, struct mlx5_flow_namespace *ns, bool egress); + +static inline bool mlx5e_fs_has_arfs(struct net_device *netdev) +{ + return IS_ENABLED(CONFIG_MLX5_EN_ARFS) && + netdev->hw_features & NETIF_F_NTUPLE; +} + +static inline bool mlx5e_fs_want_arfs(struct net_device *netdev) +{ + return IS_ENABLED(CONFIG_MLX5_EN_ARFS) && + netdev->features & NETIF_F_NTUPLE; +} + #ifdef CONFIG_MLX5_EN_RXNFC struct mlx5e_ethtool_steering *mlx5e_fs_get_ethtool(struct mlx5e_flow_steering *fs); #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index ec819dfc98be..6c9ccccca81e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -1071,18 +1071,18 @@ static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_rq_param *rq_param) { - int max_num_of_umr_per_wqe, max_hd_per_wqe, max_klm_per_umr, rest; + int max_num_of_umr_per_wqe, max_hd_per_wqe, max_ksm_per_umr, rest; void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq); int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz)); u32 wqebbs; - max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev); + max_ksm_per_umr = MLX5E_MAX_KSM_PER_WQE(mdev); max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, 
rq_param); - max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr; - rest = max_hd_per_wqe % max_klm_per_umr; - wqebbs = MLX5E_KLM_UMR_WQEBBS(max_klm_per_umr) * max_num_of_umr_per_wqe; + max_num_of_umr_per_wqe = max_hd_per_wqe / max_ksm_per_umr; + rest = max_hd_per_wqe % max_ksm_per_umr; + wqebbs = MLX5E_KSM_UMR_WQEBBS(max_ksm_per_umr) * max_num_of_umr_per_wqe; if (rest) - wqebbs += MLX5E_KLM_UMR_WQEBBS(rest); + wqebbs += MLX5E_KSM_UMR_WQEBBS(rest); wqebbs *= wq_size; return wqebbs; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c index 6743806b8480..f0744a45db92 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c @@ -170,6 +170,7 @@ int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id) mlx5e_tx_disable_queue(netdev_get_tx_queue(priv->netdev, qid)); priv->txq2sq[qid] = sq; + priv->txq2sq_stats[qid] = sq->stats; /* Make the change to txq2sq visible before the queue is started. * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE, @@ -186,6 +187,7 @@ int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id) void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid) { struct mlx5e_txqsq *sq; + u16 txq_ix; sq = mlx5e_get_qos_sq(priv, qid); if (!sq) /* Handle the case when the SQ failed to open. */ @@ -194,7 +196,10 @@ void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid) qos_dbg(sq->mdev, "Deactivate QoS SQ qid %u\n", qid); mlx5e_deactivate_txqsq(sq); - priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL; + txq_ix = mlx5e_qid_from_qos(&priv->channels, qid); + + priv->txq2sq[txq_ix] = NULL; + priv->txq2sq_stats[txq_ix] = NULL; /* Make the change to txq2sq visible before the queue is started again. * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE, @@ -325,6 +330,7 @@ void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c) { struct mlx5e_params *params = &c->priv->channels.params; struct mlx5e_txqsq __rcu **qos_sqs; + u16 txq_ix; int i; qos_sqs = mlx5e_state_dereference(c->priv, c->qos_sqs); @@ -342,8 +348,11 @@ void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c) qos_dbg(c->mdev, "Deactivate QoS SQ qid %u\n", qid); mlx5e_deactivate_txqsq(sq); + txq_ix = mlx5e_qid_from_qos(&c->priv->channels, qid); + /* The queue is disabled, no synchronization with datapath is needed. 
*/ - c->priv->txq2sq[mlx5e_qid_from_qos(&c->priv->channels, qid)] = NULL; + c->priv->txq2sq[txq_ix] = NULL; + c->priv->txq2sq_stats[txq_ix] = NULL; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index fadfa8b50beb..8cf8ba2622f2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -69,6 +69,8 @@ struct mlx5_tc_ct_priv { struct rhashtable ct_tuples_nat_ht; struct mlx5_flow_table *ct; struct mlx5_flow_table *ct_nat; + struct mlx5_flow_group *ct_nat_miss_group; + struct mlx5_flow_handle *ct_nat_miss_rule; struct mlx5e_post_act *post_act; struct mutex control_lock; /* guards parallel adds/dels */ struct mapping_ctx *zone_mapping; @@ -141,6 +143,8 @@ struct mlx5_ct_counter { enum { MLX5_CT_ENTRY_FLAG_VALID, + MLX5_CT_ENTRY_IN_CT_TABLE, + MLX5_CT_ENTRY_IN_CT_NAT_TABLE, }; struct mlx5_ct_entry { @@ -198,9 +202,15 @@ static const struct rhashtable_params tuples_nat_ht_params = { }; static bool -mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry) +mlx5_tc_ct_entry_in_ct_table(struct mlx5_ct_entry *entry) { - return !!(entry->tuple_nat_node.next); + return test_bit(MLX5_CT_ENTRY_IN_CT_TABLE, &entry->flags); +} + +static bool +mlx5_tc_ct_entry_in_ct_nat_table(struct mlx5_ct_entry *entry) +{ + return test_bit(MLX5_CT_ENTRY_IN_CT_NAT_TABLE, &entry->flags); } static int @@ -526,8 +536,10 @@ static void mlx5_tc_ct_entry_del_rules(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry) { - mlx5_tc_ct_entry_del_rule(ct_priv, entry, true); - mlx5_tc_ct_entry_del_rule(ct_priv, entry, false); + if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) + mlx5_tc_ct_entry_del_rule(ct_priv, entry, true); + if (mlx5_tc_ct_entry_in_ct_table(entry)) + mlx5_tc_ct_entry_del_rule(ct_priv, entry, false); atomic_dec(&ct_priv->debugfs.stats.offloaded); } @@ -814,7 +826,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, &zone_rule->mh, zone_restore_id, nat, - mlx5_tc_ct_entry_has_nat(entry)); + mlx5_tc_ct_entry_in_ct_nat_table(entry)); if (err) { ct_dbg("Failed to create ct entry mod hdr"); goto err_mod_hdr; @@ -888,7 +900,7 @@ mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv, *old_attr = *attr; err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, &mh, zone_restore_id, - nat, mlx5_tc_ct_entry_has_nat(entry)); + nat, mlx5_tc_ct_entry_in_ct_nat_table(entry)); if (err) { ct_dbg("Failed to create ct entry mod hdr"); goto err_mod_hdr; @@ -957,11 +969,13 @@ static void mlx5_tc_ct_entry_remove_from_tuples(struct mlx5_ct_entry *entry) { struct mlx5_tc_ct_priv *ct_priv = entry->ct_priv; - rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht, - &entry->tuple_nat_node, - tuples_nat_ht_params); - rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node, - tuples_ht_params); + if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) + rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht, + &entry->tuple_nat_node, + tuples_nat_ht_params); + if (mlx5_tc_ct_entry_in_ct_table(entry)) + rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node, + tuples_ht_params); } static void mlx5_tc_ct_entry_del(struct mlx5_ct_entry *entry) @@ -1100,21 +1114,26 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv, return err; } - err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false, - zone_restore_id); - if (err) - goto err_orig; + if (mlx5_tc_ct_entry_in_ct_table(entry)) { + err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false, + zone_restore_id); 
+ if (err) + goto err_orig; + } - err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true, - zone_restore_id); - if (err) - goto err_nat; + if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) { + err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true, + zone_restore_id); + if (err) + goto err_nat; + } atomic_inc(&ct_priv->debugfs.stats.offloaded); return 0; err_nat: - mlx5_tc_ct_entry_del_rule(ct_priv, entry, false); + if (mlx5_tc_ct_entry_in_ct_table(entry)) + mlx5_tc_ct_entry_del_rule(ct_priv, entry, false); err_orig: mlx5_tc_ct_counter_put(ct_priv, entry); return err; @@ -1126,17 +1145,21 @@ mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry, u8 zone_restore_id) { - int err; + int err = 0; - err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false, - zone_restore_id); - if (err) - return err; + if (mlx5_tc_ct_entry_in_ct_table(entry)) { + err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false, + zone_restore_id); + if (err) + return err; + } - err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true, - zone_restore_id); - if (err) - mlx5_tc_ct_entry_del_rule(ct_priv, entry, false); + if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) { + err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true, + zone_restore_id); + if (err && mlx5_tc_ct_entry_in_ct_table(entry)) + mlx5_tc_ct_entry_del_rule(ct_priv, entry, false); + } return err; } @@ -1224,18 +1247,24 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft, if (err) goto err_entries; - err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_ht, - &entry->tuple_node, - tuples_ht_params); - if (err) - goto err_tuple; - if (memcmp(&entry->tuple, &entry->tuple_nat, sizeof(entry->tuple))) { err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_nat_ht, &entry->tuple_nat_node, tuples_nat_ht_params); if (err) goto err_tuple_nat; + + set_bit(MLX5_CT_ENTRY_IN_CT_NAT_TABLE, &entry->flags); + } + + if (!mlx5_tc_ct_entry_in_ct_nat_table(entry)) { + err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_ht, + &entry->tuple_node, + tuples_ht_params); + if (err) + goto err_tuple; + + set_bit(MLX5_CT_ENTRY_IN_CT_TABLE, &entry->flags); } spin_unlock_bh(&ct_priv->ht_lock); @@ -1251,17 +1280,10 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft, err_rules: spin_lock_bh(&ct_priv->ht_lock); - if (mlx5_tc_ct_entry_has_nat(entry)) - rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht, - &entry->tuple_nat_node, tuples_nat_ht_params); -err_tuple_nat: - rhashtable_remove_fast(&ct_priv->ct_tuples_ht, - &entry->tuple_node, - tuples_ht_params); err_tuple: - rhashtable_remove_fast(&ft->ct_entries_ht, - &entry->node, - cts_ht_params); + mlx5_tc_ct_entry_remove_from_tuples(entry); +err_tuple_nat: + rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params); err_entries: spin_unlock_bh(&ct_priv->ht_lock); err_set: @@ -2149,6 +2171,76 @@ mlx5_ct_tc_remove_dbgfs(struct mlx5_tc_ct_priv *ct_priv) debugfs_remove_recursive(ct_priv->debugfs.root); } +static struct mlx5_flow_handle * +tc_ct_add_miss_rule(struct mlx5_flow_table *ft, + struct mlx5_flow_table *next_ft) +{ + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act act = {}; + + act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND; + act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = next_ft; + + return mlx5_add_flow_rules(ft, NULL, &act, &dest, 1); +} + +static int +tc_ct_add_ct_table_miss_rule(struct mlx5_flow_table *from, 
+ struct mlx5_flow_table *to, + struct mlx5_flow_group **miss_group, + struct mlx5_flow_handle **miss_rule) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_group *group; + struct mlx5_flow_handle *rule; + unsigned int max_fte = from->max_fte; + u32 *flow_group_in; + int err = 0; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) + return -ENOMEM; + + /* create miss group */ + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, + max_fte - 2); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, + max_fte - 1); + group = mlx5_create_flow_group(from, flow_group_in); + if (IS_ERR(group)) { + err = PTR_ERR(group); + goto err_miss_grp; + } + + /* add miss rule to next fdb */ + rule = tc_ct_add_miss_rule(from, to); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + goto err_miss_rule; + } + + *miss_group = group; + *miss_rule = rule; + kvfree(flow_group_in); + return 0; + +err_miss_rule: + mlx5_destroy_flow_group(group); +err_miss_grp: + kvfree(flow_group_in); + return err; +} + +static void +tc_ct_del_ct_table_miss_rule(struct mlx5_flow_group *miss_group, + struct mlx5_flow_handle *miss_rule) +{ + mlx5_del_flow_rules(miss_rule); + mlx5_destroy_flow_group(miss_group); +} + #define INIT_ERR_PREFIX "tc ct offload init failed" struct mlx5_tc_ct_priv * @@ -2212,6 +2304,12 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, goto err_ct_nat_tbl; } + err = tc_ct_add_ct_table_miss_rule(ct_priv->ct_nat, ct_priv->ct, + &ct_priv->ct_nat_miss_group, + &ct_priv->ct_nat_miss_rule); + if (err) + goto err_ct_zone_ht; + ct_priv->post_act = post_act; mutex_init(&ct_priv->control_lock); if (rhashtable_init(&ct_priv->zone_ht, &zone_params)) @@ -2273,6 +2371,7 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv) ct_priv->fs_ops->destroy(ct_priv->fs); kfree(ct_priv->fs); + tc_ct_del_ct_table_miss_rule(ct_priv->ct_nat_miss_group, ct_priv->ct_nat_miss_rule); mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat); mlx5_chains_destroy_global_table(chains, ct_priv->ct); mapping_destroy(ct_priv->zone_mapping); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index 8dfb57f712b0..721f35e59757 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -850,6 +850,12 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev, flow_rule_match_enc_control(rule, &match); addr_type = match.key->addr_type; + if (flow_rule_has_enc_control_flags(match.mask->flags, + extack)) { + err = -EOPNOTSUPP; + goto out; + } + /* For tunnel addr_type used same key id`s as for non-tunnel */ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { struct flow_match_ipv4_addrs match; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 879d698b6119..5ec468268d1a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -6,6 +6,8 @@ #include "en.h" #include <linux/indirect_call_wrapper.h> +#include <net/ip6_checksum.h> +#include <net/tcp.h> #define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) @@ -34,6 +36,25 @@ #define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND) +#define MLX5E_KSM_UMR_WQE_SZ(sgl_len)\ + (sizeof(struct mlx5e_umr_wqe) +\ + (sizeof(struct mlx5_ksm) * (sgl_len))) + +#define MLX5E_KSM_UMR_WQEBBS(ksm_entries) \ + 
(DIV_ROUND_UP(MLX5E_KSM_UMR_WQE_SZ(ksm_entries), MLX5_SEND_WQE_BB)) + +#define MLX5E_KSM_UMR_DS_CNT(ksm_entries)\ + (DIV_ROUND_UP(MLX5E_KSM_UMR_WQE_SZ(ksm_entries), MLX5_SEND_WQE_DS)) + +#define MLX5E_KSM_MAX_ENTRIES_PER_WQE(wqe_size)\ + (((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_ksm)) + +#define MLX5E_KSM_ENTRIES_PER_WQE(wqe_size)\ + ALIGN_DOWN(MLX5E_KSM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT) + +#define MLX5E_MAX_KSM_PER_WQE(mdev) \ + MLX5E_KSM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev)) + static inline ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_ts) { @@ -460,6 +481,41 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, } } +static inline void +mlx5e_swp_encap_csum_partial(struct mlx5_core_dev *mdev, struct sk_buff *skb, bool tunnel) +{ + const struct iphdr *ip = tunnel ? inner_ip_hdr(skb) : ip_hdr(skb); + const struct ipv6hdr *ip6; + struct tcphdr *th; + struct udphdr *uh; + int len; + + if (!MLX5_CAP_ETH(mdev, swp_csum_l4_partial) || !skb_is_gso(skb)) + return; + + if (skb_is_gso_tcp(skb)) { + th = inner_tcp_hdr(skb); + len = skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb); + + if (ip->version == 4) { + th->check = ~tcp_v4_check(len, ip->saddr, ip->daddr, 0); + } else { + ip6 = tunnel ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); + th->check = ~tcp_v6_check(len, &ip6->saddr, &ip6->daddr, 0); + } + } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + uh = (struct udphdr *)skb_inner_transport_header(skb); + len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr); + + if (ip->version == 4) { + uh->check = ~udp_v4_check(len, ip->saddr, ip->daddr, 0); + } else { + ip6 = tunnel ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); + uh->check = ~udp_v6_check(len, &ip6->saddr, &ip6->daddr, 0); + } + } +} + #define MLX5E_STOP_ROOM(wqebbs) ((wqebbs) * 2 - 1) static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h index 359050f0b54d..3cc640669247 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h @@ -116,6 +116,7 @@ static inline bool mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg) { + struct mlx5_core_dev *mdev = sq->mdev; u8 inner_ipproto; if (!mlx5e_ipsec_eseg_meta(eseg)) @@ -125,9 +126,12 @@ mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, inner_ipproto = xfrm_offload(skb)->inner_ipproto; if (inner_ipproto) { eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM; - if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP) + if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP) { + mlx5e_swp_encap_csum_partial(mdev, skb, true); eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM; + } } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + mlx5e_swp_encap_csum_partial(mdev, skb, false); eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; sq->stats->csum_partial_inner++; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 3320f12ba2db..279dcb54af14 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -525,7 +525,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, opened = 
test_bit(MLX5E_STATE_OPENED, &priv->state); - arfs_enabled = opened && (priv->netdev->features & NETIF_F_NTUPLE); + arfs_enabled = opened && mlx5e_fs_want_arfs(priv->netdev); if (arfs_enabled) mlx5e_arfs_disable(priv->fs); @@ -1658,7 +1658,7 @@ static int mlx5e_set_pauseparam(struct net_device *netdev, } int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct mlx5_core_dev *mdev = priv->mdev; @@ -1682,7 +1682,7 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, } static int mlx5e_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct mlx5e_priv *priv = netdev_priv(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 8c5b291a171f..05058710d2c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -1307,8 +1307,7 @@ int mlx5e_create_flow_steering(struct mlx5e_flow_steering *fs, return -EOPNOTSUPP; mlx5e_fs_set_ns(fs, ns, false); - err = mlx5e_arfs_create_tables(fs, rx_res, - !!(netdev->hw_features & NETIF_F_NTUPLE)); + err = mlx5e_arfs_create_tables(fs, rx_res, mlx5e_fs_has_arfs(netdev)); if (err) { fs_err(fs, "Failed to create arfs tables, err=%d\n", err); netdev->hw_features &= ~NETIF_F_NTUPLE; @@ -1355,7 +1354,7 @@ err_destroy_ttc_table: err_destroy_inner_ttc_table: mlx5e_destroy_inner_ttc_table(fs); err_destroy_arfs_tables: - mlx5e_arfs_destroy_tables(fs, !!(netdev->hw_features & NETIF_F_NTUPLE)); + mlx5e_arfs_destroy_tables(fs, mlx5e_fs_has_arfs(netdev)); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index eedbcba22689..6f686fabed44 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -39,6 +39,7 @@ #include <linux/debugfs.h> #include <linux/if_bridge.h> #include <linux/filter.h> +#include <net/netdev_queues.h> #include <net/page_pool/types.h> #include <net/pkt_sched.h> #include <net/xdp_sock_drv.h> @@ -74,6 +75,27 @@ #include "lib/devcom.h" #include "lib/sd.h" +static bool mlx5e_hw_gro_supported(struct mlx5_core_dev *mdev) +{ + if (!MLX5_CAP_GEN(mdev, shampo)) + return false; + + /* Our HW-GRO implementation relies on "KSM Mkey" for + * SHAMPO headers buffer mapping + */ + if (!MLX5_CAP_GEN(mdev, fixed_buffer_size)) + return false; + + if (!MLX5_CAP_GEN_2(mdev, min_mkey_log_entity_size_fixed_buffer_valid)) + return false; + + if (MLX5_CAP_GEN_2(mdev, min_mkey_log_entity_size_fixed_buffer) > + MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE) + return false; + + return true; +} + bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift, enum mlx5e_mpwrq_umr_mode umr_mode) { @@ -504,8 +526,8 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, return err; } -static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev, - u64 nentries, +static int mlx5e_create_umr_ksm_mkey(struct mlx5_core_dev *mdev, + u64 nentries, u8 log_entry_size, u32 *umr_mkey) { int inlen; @@ -525,12 +547,13 @@ static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev, MLX5_SET(mkc, mkc, umr_en, 1); MLX5_SET(mkc, mkc, lw, 1); MLX5_SET(mkc, mkc, lr, 1); - MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS); + MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KSM); mlx5e_mkey_set_relaxed_ordering(mdev, mkc); MLX5_SET(mkc, mkc, qpn, 
0xffffff); MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn); MLX5_SET(mkc, mkc, translations_octword_size, nentries); - MLX5_SET(mkc, mkc, length64, 1); + MLX5_SET(mkc, mkc, log_page_size, log_entry_size); + MLX5_SET64(mkc, mkc, len, nentries << log_entry_size); err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen); kvfree(in); @@ -565,14 +588,16 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq) { - u32 max_klm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size)); + u32 max_ksm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size)); - if (max_klm_size < rq->mpwqe.shampo->hd_per_wq) { - mlx5_core_err(mdev, "max klm list size 0x%x is smaller than shampo header buffer list size 0x%x\n", - max_klm_size, rq->mpwqe.shampo->hd_per_wq); + if (max_ksm_size < rq->mpwqe.shampo->hd_per_wq) { + mlx5_core_err(mdev, "max ksm list size 0x%x is smaller than shampo header buffer list size 0x%x\n", + max_ksm_size, rq->mpwqe.shampo->hd_per_wq); return -EINVAL; } - return mlx5e_create_umr_klm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq, + + return mlx5e_create_umr_ksm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq, + MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE, &rq->mpwqe.shampo->mkey); } @@ -1208,15 +1233,6 @@ void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq) head = mlx5_wq_ll_get_wqe_next_ix(wq, head); } - if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) { - u16 len; - - len = (rq->mpwqe.shampo->pi - rq->mpwqe.shampo->ci) & - (rq->mpwqe.shampo->hd_per_wq - 1); - mlx5e_shampo_dealloc_hd(rq, len, rq->mpwqe.shampo->ci, false); - rq->mpwqe.shampo->pi = rq->mpwqe.shampo->ci; - } - rq->mpwqe.actual_wq_head = wq->head; rq->mpwqe.umr_in_progress = 0; rq->mpwqe.umr_completed = 0; @@ -1244,8 +1260,7 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq) } if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) - mlx5e_shampo_dealloc_hd(rq, rq->mpwqe.shampo->hd_per_wq, - 0, true); + mlx5e_shampo_dealloc_hd(rq); } else { struct mlx5_wq_cyc *wq = &rq->wqe.wq; u16 missing = mlx5_wq_cyc_missing(wq); @@ -3111,6 +3126,7 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) struct mlx5e_txqsq *sq = &c->sq[tc]; priv->txq2sq[sq->txq_ix] = sq; + priv->txq2sq_stats[sq->txq_ix] = sq->stats; } } @@ -3125,6 +3141,7 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq; priv->txq2sq[sq->txq_ix] = sq; + priv->txq2sq_stats[sq->txq_ix] = sq->stats; } out: @@ -4259,13 +4276,19 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features) #define MLX5E_HANDLE_FEATURE(feature, handler) \ mlx5e_handle_feature(netdev, &oper_features, feature, handler) - err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro); - err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro); + if (features & (NETIF_F_GRO_HW | NETIF_F_LRO)) { + err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro); + } else { + err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs); + } err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER, set_feature_cvlan_filter); err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_hw_tc); err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all); - err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, 
set_feature_rx_fcs); err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan); #ifdef CONFIG_MLX5_EN_ARFS err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs); @@ -4890,7 +4913,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, } out: - /* Disable CSUM and GSO if the udp dport is not offloaded by HW */ + /* Disable CSUM and GSO if skb cannot be offloaded by HW */ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } @@ -5276,6 +5299,136 @@ static bool mlx5e_tunnel_any_tx_proto_supported(struct mlx5_core_dev *mdev) return (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)); } +static void mlx5e_get_queue_stats_rx(struct net_device *dev, int i, + struct netdev_queue_stats_rx *stats) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_channel_stats *channel_stats; + struct mlx5e_rq_stats *xskrq_stats; + struct mlx5e_rq_stats *rq_stats; + + ASSERT_RTNL(); + if (mlx5e_is_uplink_rep(priv)) + return; + + channel_stats = priv->channel_stats[i]; + xskrq_stats = &channel_stats->xskrq; + rq_stats = &channel_stats->rq; + + stats->packets = rq_stats->packets + xskrq_stats->packets; + stats->bytes = rq_stats->bytes + xskrq_stats->bytes; + stats->alloc_fail = rq_stats->buff_alloc_err + + xskrq_stats->buff_alloc_err; +} + +static void mlx5e_get_queue_stats_tx(struct net_device *dev, int i, + struct netdev_queue_stats_tx *stats) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_sq_stats *sq_stats; + + ASSERT_RTNL(); + /* no special case needed for ptp htb etc since txq2sq_stats is kept up + * to date for active sq_stats, otherwise get_base_stats takes care of + * inactive sqs. + */ + sq_stats = priv->txq2sq_stats[i]; + stats->packets = sq_stats->packets; + stats->bytes = sq_stats->bytes; +} + +static void mlx5e_get_base_stats(struct net_device *dev, + struct netdev_queue_stats_rx *rx, + struct netdev_queue_stats_tx *tx) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_ptp *ptp_channel; + int i, tc; + + ASSERT_RTNL(); + if (!mlx5e_is_uplink_rep(priv)) { + rx->packets = 0; + rx->bytes = 0; + rx->alloc_fail = 0; + + for (i = priv->channels.params.num_channels; i < priv->stats_nch; i++) { + struct netdev_queue_stats_rx rx_i = {0}; + + mlx5e_get_queue_stats_rx(dev, i, &rx_i); + + rx->packets += rx_i.packets; + rx->bytes += rx_i.bytes; + rx->alloc_fail += rx_i.alloc_fail; + } + + /* always report PTP RX stats from base as there is no + * corresponding channel to report them under in + * mlx5e_get_queue_stats_rx. + */ + if (priv->rx_ptp_opened) { + struct mlx5e_rq_stats *rq_stats = &priv->ptp_stats.rq; + + rx->packets += rq_stats->packets; + rx->bytes += rq_stats->bytes; + } + } + + tx->packets = 0; + tx->bytes = 0; + + for (i = 0; i < priv->stats_nch; i++) { + struct mlx5e_channel_stats *channel_stats = priv->channel_stats[i]; + + /* handle two cases: + * + * 1. channels which are active. In this case, + * report only deactivated TCs on these channels. + * + * 2. channels which were deactivated + * (i > priv->channels.params.num_channels) + * must have all of their TCs [0 .. priv->max_opened_tc) + * examined because deactivated channels will not be in the + * range of [0..real_num_tx_queues) and will not have their + * stats reported by mlx5e_get_queue_stats_tx. 
+ */ + if (i < priv->channels.params.num_channels) + tc = mlx5e_get_dcb_num_tc(&priv->channels.params); + else + tc = 0; + + for (; tc < priv->max_opened_tc; tc++) { + struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[tc]; + + tx->packets += sq_stats->packets; + tx->bytes += sq_stats->bytes; + } + } + + /* if PTP TX was opened at some point and has since either: + * - been shutdown and set to NULL, or + * - simply disabled (bit unset) + * + * report stats directly from the ptp_stats structures as these queues + * are now unavailable and there is no txq index to retrieve these + * stats via calls to mlx5e_get_queue_stats_tx. + */ + ptp_channel = priv->channels.ptp; + if (priv->tx_ptp_opened && (!ptp_channel || !test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state))) { + for (tc = 0; tc < priv->max_opened_tc; tc++) { + struct mlx5e_sq_stats *sq_stats = &priv->ptp_stats.sq[tc]; + + tx->packets += sq_stats->packets; + tx->bytes += sq_stats->bytes; + } + } +} + +static const struct netdev_stat_ops mlx5e_stat_ops = { + .get_queue_stats_rx = mlx5e_get_queue_stats_rx, + .get_queue_stats_tx = mlx5e_get_queue_stats_tx, + .get_base_stats = mlx5e_get_base_stats, +}; + static void mlx5e_build_nic_netdev(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -5293,6 +5446,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->watchdog_timeo = 15 * HZ; + netdev->stat_ops = &mlx5e_stat_ops; netdev->ethtool_ops = &mlx5e_ethtool_ops; netdev->vlan_features |= NETIF_F_SG; @@ -5331,6 +5485,11 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; + if (mlx5e_hw_gro_supported(mdev) && + mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT, + MLX5E_MPWRQ_UMR_MODE_ALIGNED)) + netdev->hw_features |= NETIF_F_GRO_HW; + if (mlx5e_tunnel_any_tx_proto_supported(mdev)) { netdev->hw_enc_features |= NETIF_F_HW_CSUM; netdev->hw_enc_features |= NETIF_F_TSO; @@ -5397,8 +5556,10 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) netdev->hw_features |= NETIF_F_HW_TC; #endif -#ifdef CONFIG_MLX5_EN_ARFS +#if IS_ENABLED(CONFIG_MLX5_EN_ARFS) netdev->hw_features |= NETIF_F_NTUPLE; +#elif IS_ENABLED(CONFIG_MLX5_EN_RXNFC) + netdev->features |= NETIF_F_NTUPLE; #endif } @@ -5572,7 +5733,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) err_tc_nic_cleanup: mlx5e_tc_nic_cleanup(priv); err_destroy_flow_steering: - mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE), + mlx5e_destroy_flow_steering(priv->fs, mlx5e_fs_has_arfs(priv->netdev), priv->profile); err_destroy_rx_res: mlx5e_rx_res_destroy(priv->rx_res); @@ -5588,7 +5749,7 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) { mlx5e_accel_cleanup_rx(priv); mlx5e_tc_nic_cleanup(priv); - mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE), + mlx5e_destroy_flow_steering(priv->fs, mlx5e_fs_has_arfs(priv->netdev), priv->profile); mlx5e_rx_res_destroy(priv->rx_res); priv->rx_res = NULL; @@ -5823,9 +5984,13 @@ int mlx5e_priv_init(struct mlx5e_priv *priv, if (!priv->txq2sq) goto err_destroy_workqueue; + priv->txq2sq_stats = kcalloc_node(num_txqs, sizeof(*priv->txq2sq_stats), GFP_KERNEL, node); + if (!priv->txq2sq_stats) + goto err_free_txq2sq; + priv->tx_rates = kcalloc_node(num_txqs, sizeof(*priv->tx_rates), GFP_KERNEL, node); if (!priv->tx_rates) - goto err_free_txq2sq; + goto err_free_txq2sq_stats; 
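/*
 * [Editor's aside — illustrative sketch, not part of the mlx5 patch text
 * above or below.] The en_main.c hunks in this section wire mlx5e into the
 * generic per-queue statistics interface: three callbacks plus a
 * "netdev->stat_ops = &mlx5e_stat_ops" assignment. Stripped of the mlx5e
 * bookkeeping, a minimal driver hook-up could look roughly like the sketch
 * below. Assumptions: the callback signatures are taken from the mlx5e
 * functions shown in this diff, the ops structure is presumed to come from
 * <net/netdev_queues.h>, and every foo_* name is hypothetical.
 */
#include <linux/netdevice.h>
#include <net/netdev_queues.h>

/* Hypothetical per-queue counters maintained by the driver's datapath. */
struct foo_queue_counters {
        u64 packets;
        u64 bytes;
        u64 alloc_fail;
};

struct foo_priv {
        struct foo_queue_counters *rxq; /* one entry per real RX queue */
        struct foo_queue_counters *txq; /* one entry per real TX queue */
};

static void foo_get_queue_stats_rx(struct net_device *dev, int idx,
                                   struct netdev_queue_stats_rx *rx)
{
        struct foo_priv *priv = netdev_priv(dev);

        rx->packets = priv->rxq[idx].packets;
        rx->bytes = priv->rxq[idx].bytes;
        rx->alloc_fail = priv->rxq[idx].alloc_fail;
}

static void foo_get_queue_stats_tx(struct net_device *dev, int idx,
                                   struct netdev_queue_stats_tx *tx)
{
        struct foo_priv *priv = netdev_priv(dev);

        tx->packets = priv->txq[idx].packets;
        tx->bytes = priv->txq[idx].bytes;
}

static void foo_get_base_stats(struct net_device *dev,
                               struct netdev_queue_stats_rx *rx,
                               struct netdev_queue_stats_tx *tx)
{
        /* Report here whatever no currently visible queue accounts for
         * (e.g. queues or TCs that have been deactivated); the core sums
         * these base values with the per-queue callbacks, which is why
         * mlx5e_get_base_stats above walks inactive channels and the PTP
         * queues.
         */
        rx->packets = 0;
        rx->bytes = 0;
        rx->alloc_fail = 0;
        tx->packets = 0;
        tx->bytes = 0;
}

static const struct netdev_stat_ops foo_stat_ops = {
        .get_queue_stats_rx = foo_get_queue_stats_rx,
        .get_queue_stats_tx = foo_get_queue_stats_tx,
        .get_base_stats     = foo_get_base_stats,
};

/* In the netdev setup path, mirroring the mlx5e hunk above:
 * netdev->stat_ops = &foo_stat_ops;
 */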
priv->channel_stats = kcalloc_node(nch, sizeof(*priv->channel_stats), GFP_KERNEL, node); @@ -5836,6 +6001,8 @@ int mlx5e_priv_init(struct mlx5e_priv *priv, err_free_tx_rates: kfree(priv->tx_rates); +err_free_txq2sq_stats: + kfree(priv->txq2sq_stats); err_free_txq2sq: kfree(priv->txq2sq); err_destroy_workqueue: @@ -5859,6 +6026,7 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv) kvfree(priv->channel_stats[i]); kfree(priv->channel_stats); kfree(priv->tx_rates); + kfree(priv->txq2sq_stats); kfree(priv->txq2sq); destroy_workqueue(priv->wq); mlx5e_selq_cleanup(&priv->selq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index b5333da20e8a..225da8d691fc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -523,15 +523,23 @@ mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinf static inline void mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb, - struct page *page, u32 frag_offset, u32 len, + struct mlx5e_frag_page *frag_page, + u32 frag_offset, u32 len, unsigned int truesize) { - dma_addr_t addr = page_pool_get_dma_addr(page); + dma_addr_t addr = page_pool_get_dma_addr(frag_page->page); + u8 next_frag = skb_shinfo(skb)->nr_frags; dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, rq->buff.map_dir); - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - page, frag_offset, len, truesize); + + if (skb_can_coalesce(skb, next_frag, frag_page->page, frag_offset)) { + skb_coalesce_rx_frag(skb, next_frag - 1, len, truesize); + } else { + frag_page->frags++; + skb_add_rx_frag(skb, next_frag, frag_page->page, + frag_offset, len, truesize); + } } static inline void @@ -619,25 +627,25 @@ static int bitmap_find_window(unsigned long *bitmap, int len, return min(len, count); } -static void build_klm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe, - __be32 key, u16 offset, u16 klm_len, u16 wqe_bbs) +static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe, + __be32 key, u16 offset, u16 ksm_len) { - memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_klms)); + memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_ksms)); umr_wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_UMR); umr_wqe->ctrl.umr_mkey = key; umr_wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) - | MLX5E_KLM_UMR_DS_CNT(klm_len)); + | MLX5E_KSM_UMR_DS_CNT(ksm_len)); umr_wqe->uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE; umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset); - umr_wqe->uctrl.xlt_octowords = cpu_to_be16(klm_len); + umr_wqe->uctrl.xlt_octowords = cpu_to_be16(ksm_len); umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); } static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, struct mlx5e_icosq *sq, - u16 klm_entries, u16 index) + u16 ksm_entries, u16 index) { struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; u16 entries, pi, header_offset, err, wqe_bbs, new_entries; @@ -650,20 +658,20 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, int headroom, i; headroom = rq->buff.headroom; - new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1)); - entries = ALIGN(klm_entries, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT); - wqe_bbs = MLX5E_KLM_UMR_WQEBBS(entries); + new_entries = ksm_entries - (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1)); + entries = ALIGN(ksm_entries, 
MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT); + wqe_bbs = MLX5E_KSM_UMR_WQEBBS(entries); pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs); umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi); - build_klm_umr(sq, umr_wqe, shampo->key, index, entries, wqe_bbs); + build_ksm_umr(sq, umr_wqe, shampo->key, index, entries); frag_page = &shampo->pages[page_index]; for (i = 0; i < entries; i++, index++) { dma_info = &shampo->info[index]; - if (i >= klm_entries || (index < shampo->pi && shampo->pi - index < - MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT)) - goto update_klm; + if (i >= ksm_entries || (index < shampo->pi && shampo->pi - index < + MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT)) + goto update_ksm; header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; if (!(header_offset & (PAGE_SIZE - 1))) { @@ -683,12 +691,11 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, dma_info->frag_page = frag_page; } -update_klm: - umr_wqe->inline_klms[i].bcount = - cpu_to_be32(MLX5E_RX_MAX_HEAD); - umr_wqe->inline_klms[i].key = cpu_to_be32(lkey); - umr_wqe->inline_klms[i].va = - cpu_to_be64(dma_info->addr + headroom); +update_ksm: + umr_wqe->inline_ksms[i] = (struct mlx5_ksm) { + .key = cpu_to_be32(lkey), + .va = cpu_to_be64(dma_info->addr + headroom), + }; } sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { @@ -720,37 +727,37 @@ err_unmap: static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq) { struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; - u16 klm_entries, num_wqe, index, entries_before; + u16 ksm_entries, num_wqe, index, entries_before; struct mlx5e_icosq *sq = rq->icosq; - int i, err, max_klm_entries, len; + int i, err, max_ksm_entries, len; - max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev); - klm_entries = bitmap_find_window(shampo->bitmap, + max_ksm_entries = MLX5E_MAX_KSM_PER_WQE(rq->mdev); + ksm_entries = bitmap_find_window(shampo->bitmap, shampo->hd_per_wqe, shampo->hd_per_wq, shampo->pi); - if (!klm_entries) + if (!ksm_entries) return 0; - klm_entries += (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1)); - index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT); + ksm_entries += (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1)); + index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT); entries_before = shampo->hd_per_wq - index; - if (unlikely(entries_before < klm_entries)) - num_wqe = DIV_ROUND_UP(entries_before, max_klm_entries) + - DIV_ROUND_UP(klm_entries - entries_before, max_klm_entries); + if (unlikely(entries_before < ksm_entries)) + num_wqe = DIV_ROUND_UP(entries_before, max_ksm_entries) + + DIV_ROUND_UP(ksm_entries - entries_before, max_ksm_entries); else - num_wqe = DIV_ROUND_UP(klm_entries, max_klm_entries); + num_wqe = DIV_ROUND_UP(ksm_entries, max_ksm_entries); for (i = 0; i < num_wqe; i++) { - len = (klm_entries > max_klm_entries) ? max_klm_entries : - klm_entries; + len = (ksm_entries > max_ksm_entries) ? max_ksm_entries : + ksm_entries; if (unlikely(index + len > shampo->hd_per_wq)) len = shampo->hd_per_wq - index; err = mlx5e_build_shampo_hd_umr(rq, sq, len, index); if (unlikely(err)) return err; index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1); - klm_entries -= len; + ksm_entries -= len; } return 0; @@ -839,44 +846,28 @@ err: return err; } -/* This function is responsible to dealloc SHAMPO header buffer. 
- * close == true specifies that we are in the middle of closing RQ operation so - * we go over all the entries and if they are not in use we free them, - * otherwise we only go over a specific range inside the header buffer that are - * not in use. - */ -void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close) +static void +mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index) { struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; - struct mlx5e_frag_page *deleted_page = NULL; - int hd_per_wq = shampo->hd_per_wq; - struct mlx5e_dma_info *hd_info; - int i, index = start; - - for (i = 0; i < len; i++, index++) { - if (index == hd_per_wq) - index = 0; - - if (close && !test_bit(index, shampo->bitmap)) - continue; + u64 addr = shampo->info[header_index].addr; - hd_info = &shampo->info[index]; - hd_info->addr = ALIGN_DOWN(hd_info->addr, PAGE_SIZE); - if (hd_info->frag_page && hd_info->frag_page != deleted_page) { - deleted_page = hd_info->frag_page; - mlx5e_page_release_fragmented(rq, hd_info->frag_page); - } + if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) { + struct mlx5e_dma_info *dma_info = &shampo->info[header_index]; - hd_info->frag_page = NULL; + dma_info->addr = ALIGN_DOWN(addr, PAGE_SIZE); + mlx5e_page_release_fragmented(rq, dma_info->frag_page); } + clear_bit(header_index, shampo->bitmap); +} - if (start + len > hd_per_wq) { - len -= hd_per_wq - start; - bitmap_clear(shampo->bitmap, start, hd_per_wq - start); - start = 0; - } +void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq) +{ + struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; + int i; - bitmap_clear(shampo->bitmap, start, len); + for_each_set_bit(i, shampo->bitmap, rq->mpwqe.shampo->hd_per_wq) + mlx5e_free_rx_shampo_hd_entry(rq, i); } static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) @@ -1191,9 +1182,8 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, check = csum_partial(tcp, tcp->doff * 4, csum_unfold((__force __sum16)cqe->check_sum)); /* Almost done, don't forget the pseudo header */ - tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr, - tot_len - sizeof(struct iphdr), - IPPROTO_TCP, check); + tcp->check = tcp_v4_check(tot_len - sizeof(struct iphdr), + ipv4->saddr, ipv4->daddr, check); } else { u16 payload_len = tot_len - sizeof(struct ipv6hdr); struct ipv6hdr *ipv6 = ip_p; @@ -1208,8 +1198,8 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, check = csum_partial(tcp, tcp->doff * 4, csum_unfold((__force __sum16)cqe->check_sum)); /* Almost done, don't forget the pseudo header */ - tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len, - IPPROTO_TCP, check); + tcp->check = tcp_v6_check(payload_len, &ipv6->saddr, + &ipv6->daddr, check); } } @@ -1612,9 +1602,7 @@ static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq, struct mlx5e_rq_stats *stats = rq->stats; stats->packets++; - stats->gro_packets++; stats->bytes += cqe_bcnt; - stats->gro_bytes += cqe_bcnt; if (NAPI_GRO_CB(skb)->count != 1) return; mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb); @@ -1964,30 +1952,24 @@ const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = { #endif static void -mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq, - struct mlx5e_frag_page *frag_page, - u32 data_bcnt, u32 data_offset) +mlx5e_shampo_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq, + struct mlx5e_frag_page *frag_page, + u32 data_bcnt, u32 data_offset) { net_prefetchw(skb->data); - while (data_bcnt) { + do { /* 
Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */ u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt); - unsigned int truesize; - - if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) - truesize = pg_consumed_bytes; - else - truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz)); + unsigned int truesize = pg_consumed_bytes; - frag_page->frags++; - mlx5e_add_skb_frag(rq, skb, frag_page->page, data_offset, + mlx5e_add_skb_frag(rq, skb, frag_page, data_offset, pg_consumed_bytes, truesize); data_bcnt -= pg_consumed_bytes; data_offset = 0; frag_page++; - } + } while (data_bcnt); } static struct sk_buff * @@ -2212,8 +2194,8 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) { /* build SKB around header */ dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir); - prefetchw(hdr); - prefetch(data); + net_prefetchw(hdr); + net_prefetch(data); skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0); if (unlikely(!skb)) @@ -2230,7 +2212,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, return NULL; } - prefetchw(skb->data); + net_prefetchw(skb->data); mlx5e_copy_skb_header(rq, skb, head->frag_page->page, head->addr, head_offset + rx_headroom, rx_headroom, head_size); @@ -2261,12 +2243,19 @@ mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match) { struct sk_buff *skb = rq->hw_gro_data->skb; struct mlx5e_rq_stats *stats = rq->stats; + u16 gro_count = NAPI_GRO_CB(skb)->count; - stats->gro_skbs++; if (likely(skb_shinfo(skb)->nr_frags)) mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz); - if (NAPI_GRO_CB(skb)->count > 1) + if (gro_count > 1) { + stats->gro_skbs++; + stats->gro_packets += gro_count; + stats->gro_bytes += skb->data_len + skb_headlen(skb) * gro_count; + mlx5e_shampo_update_hdr(rq, cqe, match); + } else { + skb_shinfo(skb)->gso_size = 0; + } napi_gro_receive(rq->cq.napi, skb); rq->hw_gro_data->skb = NULL; } @@ -2279,21 +2268,6 @@ mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt) return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE; } -static void -mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index) -{ - struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; - u64 addr = shampo->info[header_index].addr; - - if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) { - struct mlx5e_dma_info *dma_info = &shampo->info[header_index]; - - dma_info->addr = ALIGN_DOWN(addr, PAGE_SIZE); - mlx5e_page_release_fragmented(rq, dma_info->frag_page); - } - bitmap_clear(shampo->bitmap, header_index, 1); -} - static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { u16 data_bcnt = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size; @@ -2327,8 +2301,6 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq goto mpwrq_cqe_out; } - stats->gro_match_packets += match; - if (*skb && (!match || !(mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt)))) { match = false; mlx5e_shampo_flush_skb(rq, cqe, match); @@ -2359,21 +2331,30 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq } if (likely(head_size)) { - struct mlx5e_frag_page *frag_page; + if (data_bcnt) { + struct mlx5e_frag_page *frag_page; - frag_page = &wi->alloc_units.frag_pages[page_idx]; - mlx5e_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset); + 
frag_page = &wi->alloc_units.frag_pages[page_idx]; + mlx5e_shampo_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset); + } else { + stats->hds_nodata_packets++; + stats->hds_nodata_bytes += head_size; + } } mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb); - if (flush) + if (flush && rq->hw_gro_data->skb) mlx5e_shampo_flush_skb(rq, cqe, match); free_hd_entry: - mlx5e_free_rx_shampo_hd_entry(rq, header_index); + if (likely(head_size)) + mlx5e_free_rx_shampo_hd_entry(rq, header_index); mpwrq_cqe_out: if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) return; + if (unlikely(!cstrides)) + return; + wq = &rq->mpwqe.wq; wqe = mlx5_wq_ll_get_wqe(wq, wqe_id); mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index e1ed214e8651..e7a3290a708a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -141,8 +141,9 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_match_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_bytes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) }, @@ -343,8 +344,9 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s, s->rx_gro_packets += rq_stats->gro_packets; s->rx_gro_bytes += rq_stats->gro_bytes; s->rx_gro_skbs += rq_stats->gro_skbs; - s->rx_gro_match_packets += rq_stats->gro_match_packets; s->rx_gro_large_hds += rq_stats->gro_large_hds; + s->rx_hds_nodata_packets += rq_stats->hds_nodata_packets; + s->rx_hds_nodata_bytes += rq_stats->hds_nodata_bytes; s->rx_ecn_mark += rq_stats->ecn_mark; s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; s->rx_csum_none += rq_stats->csum_none; @@ -2057,8 +2059,9 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_match_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_packets) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_bytes) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 650732288616..4c5858c1dd82 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -153,8 +153,9 @@ struct mlx5e_sw_stats { u64 rx_gro_packets; u64 rx_gro_bytes; u64 rx_gro_skbs; - u64 rx_gro_match_packets; u64 rx_gro_large_hds; + u64 rx_hds_nodata_packets; + u64 rx_hds_nodata_bytes; u64 rx_mcast_packets; 
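/*
 * [Editor's aside — illustrative note, not part of the surrounding
 * en_stats.h hunk.] The en_rx.c and en_main.c hunks above convert the
 * SHAMPO header UMRs from KLM to KSM translation entries. The idea: every
 * SHAMPO header slot has the same size, so the per-entry byte count that a
 * KLM carries is redundant; a KSM entry only needs a key and an address,
 * with the fixed entry size apparently recorded once on the mkey itself
 * (the log_entry_size and MLX5_SET64(..., len, ...) lines near the top of
 * this section, in the renamed mlx5e_create_umr_ksm_mkey helper). The
 * layouts below are inferred from the usage visible in this diff
 * (bcount/key/va vs key/va); the struct names are invented for
 * illustration, and the authoritative definitions live in the mlx5 headers.
 */
#include <linux/types.h>

struct klm_like_entry {                 /* variable-sized region per entry */
        __be32 bcount;                  /* byte count of this region */
        __be32 key;                     /* memory key covering the region */
        __be64 va;                      /* start address */
};

struct ksm_like_entry {                 /* fixed-size region per entry */
        __be32 reserved;                /* assumed padding, see note above */
        __be32 key;
        __be64 va;
};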
u64 rx_ecn_mark; u64 rx_removed_vlan_packets; @@ -352,8 +353,9 @@ struct mlx5e_rq_stats { u64 gro_packets; u64 gro_bytes; u64 gro_skbs; - u64 gro_match_packets; u64 gro_large_hds; + u64 hds_nodata_packets; + u64 hds_nodata_bytes; u64 mcast_packets; u64 ecn_mark; u64 removed_vlan_packets; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index ac1565c0c8af..cb7e7e4104af 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -714,7 +714,7 @@ err2: err1: mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL); mlx5_eq_notifier_unregister(dev, &table->cq_err_nb); - mlx5_ctrl_irq_release(table->ctrl_irq); + mlx5_ctrl_irq_release(dev, table->ctrl_irq); return err; } @@ -730,7 +730,7 @@ static void destroy_async_eqs(struct mlx5_core_dev *dev) cleanup_async_eq(dev, &table->cmd_eq, "cmd"); mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL); mlx5_eq_notifier_unregister(dev, &table->cq_err_nb); - mlx5_ctrl_irq_release(table->ctrl_irq); + mlx5_ctrl_irq_release(dev, table->ctrl_irq); } struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev) @@ -918,7 +918,7 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx) af_desc.is_managed = 1; cpumask_copy(&af_desc.mask, cpu_online_mask); cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus); - irq = mlx5_irq_affinity_request(pool, &af_desc); + irq = mlx5_irq_affinity_request(dev, pool, &af_desc); if (IS_ERR(irq)) return PTR_ERR(irq); @@ -1187,7 +1187,6 @@ static int get_num_eqs(struct mlx5_core_dev *dev) { struct mlx5_eq_table *eq_table = dev->priv.eq_table; int max_dev_eqs; - int max_eqs_sf; int num_eqs; /* If ethernet is disabled we use just a single completion vector to @@ -1202,7 +1201,11 @@ static int get_num_eqs(struct mlx5_core_dev *dev) num_eqs = min_t(int, mlx5_irq_table_get_num_comp(eq_table->irq_table), max_dev_eqs - MLX5_MAX_ASYNC_EQS); if (mlx5_core_is_sf(dev)) { - max_eqs_sf = min_t(int, MLX5_COMP_EQS_PER_SF, + int max_eqs_sf = MLX5_CAP_GEN_2(dev, sf_eq_usage) ? 
+ MLX5_CAP_GEN_2(dev, max_num_eqs_24b) : + MLX5_COMP_EQS_PER_SF; + + max_eqs_sf = min_t(int, max_eqs_sf, mlx5_irq_table_get_sfs_vec(eq_table->irq_table)); num_eqs = min_t(int, num_eqs, max_eqs_sf); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index d2ebe56c3977..20146a2dc7f4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -531,7 +531,7 @@ static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type) switch (type) { case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR: return MLX5_CAP_QOS(dev, esw_element_type) & - ELEMENT_TYPE_CAP_MASK_TASR; + ELEMENT_TYPE_CAP_MASK_TSAR; case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT: return MLX5_CAP_QOS(dev, esw_element_type) & ELEMENT_TYPE_CAP_MASK_VPORT; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 88745dc6aed5..578466d69f21 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -223,6 +223,7 @@ struct mlx5_vport { u16 vport; bool enabled; + bool max_eqs_set; enum mlx5_eswitch_vport_event enabled_events; int index; struct mlx5_devlink_port *dl_port; @@ -579,6 +580,8 @@ int mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port, int mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port, u32 max_io_eqs, struct netlink_ext_ack *extack); +int mlx5_devlink_port_fn_max_io_eqs_set_sf_default(struct devlink_port *port, + struct netlink_ext_ack *extack); void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 72949cb85244..768199d2255a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -68,6 +68,7 @@ #define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1) #define MLX5_ESW_MAX_CTRL_EQS 4 +#define MLX5_ESW_DEFAULT_SF_COMP_EQS 8 static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = { .max_fte = MLX5_ESW_VPORT_TBL_SIZE, @@ -4676,13 +4677,25 @@ mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port, u32 max_io_eqs, hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); MLX5_SET(cmd_hca_cap_2, hca_caps, max_num_eqs_24b, max_eqs); + if (mlx5_esw_is_sf_vport(esw, vport_num)) + MLX5_SET(cmd_hca_cap_2, hca_caps, sf_eq_usage, 1); + err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2); if (err) NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA caps"); - + vport->max_eqs_set = true; out: mutex_unlock(&esw->state_lock); kfree(query_ctx); return err; } + +int +mlx5_devlink_port_fn_max_io_eqs_set_sf_default(struct devlink_port *port, + struct netlink_ext_ack *extack) +{ + return mlx5_devlink_port_fn_max_io_eqs_set(port, + MLX5_ESW_DEFAULT_SF_COMP_EQS, + extack); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 32cdacc34a0d..a47d6419160d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -3353,9 +3353,9 @@ static int mlx5_fs_mode_get(struct devlink *devlink, u32 id, struct mlx5_core_dev *dev = devlink_priv(devlink); if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) - strcpy(ctx->val.vstr, "smfs"); + 
strscpy(ctx->val.vstr, "smfs", sizeof(ctx->val.vstr)); else - strcpy(ctx->val.vstr, "dmfs"); + strscpy(ctx->val.vstr, "dmfs", sizeof(ctx->val.vstr)); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index 779d92b762d3..905bdbaffb9a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -136,7 +136,7 @@ static int mlx5i_get_coalesce(struct net_device *netdev, } static int mlx5i_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 8e0404c0d1ca..0979d672d47f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -372,7 +372,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) mlx5e_fs_set_ns(priv->fs, ns, false); err = mlx5e_arfs_create_tables(priv->fs, priv->rx_res, - !!(priv->netdev->hw_features & NETIF_F_NTUPLE)); + mlx5e_fs_has_arfs(priv->netdev)); if (err) { netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n", err); @@ -391,8 +391,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) return 0; err_destroy_arfs_tables: - mlx5e_arfs_destroy_tables(priv->fs, - !!(priv->netdev->hw_features & NETIF_F_NTUPLE)); + mlx5e_arfs_destroy_tables(priv->fs, mlx5e_fs_has_arfs(priv->netdev)); return err; } @@ -400,8 +399,7 @@ err_destroy_arfs_tables: static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv) { mlx5e_destroy_ttc_table(priv->fs); - mlx5e_arfs_destroy_tables(priv->fs, - !!(priv->netdev->hw_features & NETIF_F_NTUPLE)); + mlx5e_arfs_destroy_tables(priv->fs, mlx5e_fs_has_arfs(priv->netdev)); mlx5e_ethtool_cleanup_steering(priv->fs); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c index 612e666ec263..f7b01b3f0cba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c @@ -112,15 +112,18 @@ irq_pool_find_least_loaded(struct mlx5_irq_pool *pool, const struct cpumask *req /** * mlx5_irq_affinity_request - request an IRQ according to the given mask. + * @dev: mlx5 core device which is requesting the IRQ. * @pool: IRQ pool to request from. * @af_desc: affinity descriptor for this IRQ. * * This function returns a pointer to IRQ, or ERR_PTR in case of error. 
*/ struct mlx5_irq * -mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc) +mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool, + struct irq_affinity_desc *af_desc) { struct mlx5_irq *least_loaded_irq, *new_irq; + int ret; mutex_lock(&pool->lock); least_loaded_irq = irq_pool_find_least_loaded(pool, &af_desc->mask); @@ -153,6 +156,16 @@ out: mlx5_irq_read_locked(least_loaded_irq) / MLX5_EQ_REFS_PER_IRQ); unlock: mutex_unlock(&pool->lock); + if (mlx5_irq_pool_is_sf_pool(pool)) { + ret = auxiliary_device_sysfs_irq_add(mlx5_sf_coredev_to_adev(dev), + mlx5_irq_get_irq(least_loaded_irq)); + if (ret) { + mlx5_core_err(dev, "Failed to create sysfs entry for irq %d, ret = %d\n", + mlx5_irq_get_irq(least_loaded_irq), ret); + mlx5_irq_put(least_loaded_irq); + least_loaded_irq = ERR_PTR(ret); + } + } return least_loaded_irq; } @@ -164,6 +177,9 @@ void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *i cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq)); synchronize_irq(pci_irq_vector(pool->dev->pdev, mlx5_irq_get_index(irq))); + if (mlx5_irq_pool_is_sf_pool(pool)) + auxiliary_device_sysfs_irq_remove(mlx5_sf_coredev_to_adev(dev), + mlx5_irq_get_irq(irq)); if (mlx5_irq_put(irq)) if (pool->irqs_per_cpu) cpu_put(pool, cpu); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 459a836a5d9c..527da58c7953 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1819,6 +1819,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) mutex_init(&dev->intf_state_mutex); lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key); mutex_init(&dev->mlx5e_res.uplink_netdev_lock); + mutex_init(&dev->wc_state_lock); mutex_init(&priv->bfregs.reg_head.lock); mutex_init(&priv->bfregs.wc_head.lock); @@ -1916,6 +1917,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev) mutex_destroy(&priv->alloc_mutex); mutex_destroy(&priv->bfregs.wc_head.lock); mutex_destroy(&priv->bfregs.reg_head.lock); + mutex_destroy(&dev->wc_state_lock); mutex_destroy(&dev->mlx5e_res.uplink_netdev_lock); mutex_destroy(&dev->intf_state_mutex); lockdep_unregister_key(&dev->lock_key); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index a7fd18888b6e..62c770b0eaa8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -320,6 +320,12 @@ static inline bool mlx5_core_is_sf(const struct mlx5_core_dev *dev) return dev->coredev_type == MLX5_COREDEV_SF; } +static inline struct auxiliary_device * +mlx5_sf_coredev_to_adev(struct mlx5_core_dev *mdev) +{ + return container_of(mdev->device, struct auxiliary_device, dev); +} + int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx); void mlx5_mdev_uninit(struct mlx5_core_dev *dev); int mlx5_init_one(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h index 1088114e905d..0881e961d8b1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h @@ -25,7 +25,7 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int devfn, int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs); struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev); -void mlx5_ctrl_irq_release(struct 
mlx5_irq *ctrl_irq); +void mlx5_ctrl_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *ctrl_irq); struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx, struct irq_affinity_desc *af_desc, struct cpu_rmap **rmap); @@ -36,13 +36,15 @@ int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb); int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb); struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq); int mlx5_irq_get_index(struct mlx5_irq *irq); +int mlx5_irq_get_irq(const struct mlx5_irq *irq); struct mlx5_irq_pool; #ifdef CONFIG_MLX5_SF struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev, struct cpumask *used_cpus, u16 vecidx); -struct mlx5_irq *mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, - struct irq_affinity_desc *af_desc); +struct mlx5_irq * +mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool, + struct irq_affinity_desc *af_desc); void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq); #else static inline @@ -53,7 +55,8 @@ struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev, } static inline struct mlx5_irq * -mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc) +mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool, + struct irq_affinity_desc *af_desc) { return ERR_PTR(-EOPNOTSUPP); } @@ -61,6 +64,7 @@ mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc * static inline void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq) { + mlx5_irq_release_vector(irq); } #endif #endif /* __MLX5_IRQ_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 401d39069680..81a9232a03e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -16,6 +16,7 @@ #endif #define MLX5_SFS_PER_CTRL_IRQ 64 +#define MLX5_MAX_MSIX_PER_SF 256 #define MLX5_IRQ_CTRL_SF_MAX 8 /* min num of vectors for SFs to be enabled */ #define MLX5_IRQ_VEC_COMP_BASE_SF 2 @@ -367,6 +368,11 @@ struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq) return irq->mask; } +int mlx5_irq_get_irq(const struct mlx5_irq *irq) +{ + return irq->map.virq; +} + int mlx5_irq_get_index(struct mlx5_irq *irq) { return irq->map.index; @@ -440,11 +446,12 @@ static void _mlx5_irq_release(struct mlx5_irq *irq) /** * mlx5_ctrl_irq_release - release a ctrl IRQ back to the system. + * @dev: mlx5 device that releasing the IRQ. * @ctrl_irq: ctrl IRQ to be released. */ -void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq) +void mlx5_ctrl_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *ctrl_irq) { - _mlx5_irq_release(ctrl_irq); + mlx5_irq_affinity_irq_release(dev, ctrl_irq); } /** @@ -473,7 +480,7 @@ struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev) /* Allocate the IRQ in index 0. 
The vector was already allocated */ irq = irq_pool_request_vector(pool, 0, &af_desc, NULL); } else { - irq = mlx5_irq_affinity_request(pool, &af_desc); + irq = mlx5_irq_affinity_request(dev, pool, &af_desc); } return irq; @@ -589,8 +596,6 @@ static void irq_pool_free(struct mlx5_irq_pool *pool) static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec) { struct mlx5_irq_table *table = dev->priv.irq_table; - int num_sf_ctrl_by_msix; - int num_sf_ctrl_by_sfs; int num_sf_ctrl; int err; @@ -608,10 +613,8 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec) } /* init sf_ctrl_pool */ - num_sf_ctrl_by_msix = DIV_ROUND_UP(sf_vec, MLX5_COMP_EQS_PER_SF); - num_sf_ctrl_by_sfs = DIV_ROUND_UP(mlx5_sf_max_functions(dev), - MLX5_SFS_PER_CTRL_IRQ); - num_sf_ctrl = min_t(int, num_sf_ctrl_by_msix, num_sf_ctrl_by_sfs); + num_sf_ctrl = DIV_ROUND_UP(mlx5_sf_max_functions(dev), + MLX5_SFS_PER_CTRL_IRQ); num_sf_ctrl = min_t(int, MLX5_IRQ_CTRL_SF_MAX, num_sf_ctrl); table->sf_ctrl_pool = irq_pool_alloc(dev, pcif_vec, num_sf_ctrl, "mlx5_sf_ctrl", @@ -726,8 +729,7 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev) total_vec = pcif_vec; if (mlx5_sf_max_functions(dev)) - total_vec += MLX5_IRQ_CTRL_SF_MAX + - MLX5_COMP_EQS_PER_SF * mlx5_sf_max_functions(dev); + total_vec += MLX5_MAX_MSIX_PER_SF * mlx5_sf_max_functions(dev); total_vec = min_t(int, total_vec, pci_msix_vec_count(dev->pdev)); pcif_vec = min_t(int, pcif_vec, pci_msix_vec_count(dev->pdev)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c index 6c11e075cab0..a96be98be032 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c @@ -161,6 +161,7 @@ int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port, static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf, struct netlink_ext_ack *extack) { + struct mlx5_vport *vport; int err; if (mlx5_sf_is_active(sf)) @@ -170,6 +171,13 @@ static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf, return -EBUSY; } + vport = mlx5_devlink_port_vport_get(&sf->dl_port.dl_port); + if (!vport->max_eqs_set && MLX5_CAP_GEN_2(dev, max_num_eqs_24b)) { + err = mlx5_devlink_port_fn_max_io_eqs_set_sf_default(&sf->dl_port.dl_port, + extack); + if (err) + return err; + } err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id); if (err) return err; @@ -318,7 +326,11 @@ int mlx5_devlink_sf_port_new(struct devlink *devlink, static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf) { + struct mlx5_vport *vport; + mutex_lock(&table->sf_state_lock); + vport = mlx5_devlink_port_vport_get(&sf->dl_port.dl_port); + vport->max_eqs_set = false; mlx5_sf_function_id_erase(table, sf); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 81eff6c410ce..7618c6147f86 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -1379,6 +1379,11 @@ int mlx5dr_cmd_create_modify_header_arg(struct mlx5_core_dev *dev, void mlx5dr_cmd_destroy_modify_header_arg(struct mlx5_core_dev *dev, u32 obj_id); +int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id, + u8 *dw_selectors, u8 *byte_selectors, + u8 *match_mask, u32 *definer_id); +void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id); + struct mlx5dr_icm_pool 
*mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn, enum mlx5dr_icm_type icm_type); void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h index 89fced86936f..3ac7dc67509f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h @@ -153,11 +153,6 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action); u32 mlx5dr_action_get_pkt_reformat_id(struct mlx5dr_action *action); -int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id, - u8 *dw_selectors, u8 *byte_selectors, - u8 *match_mask, u32 *definer_id); -void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id); - static inline bool mlx5dr_is_supported(struct mlx5_core_dev *dev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wc.c b/drivers/net/ethernet/mellanox/mlx5/core/wc.c new file mode 100644 index 000000000000..1bed75eca97d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/wc.c @@ -0,0 +1,434 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include <linux/io.h> +#include <linux/mlx5/transobj.h> +#include "lib/clock.h" +#include "mlx5_core.h" +#include "wq.h" + +#define TEST_WC_NUM_WQES 255 +#define TEST_WC_LOG_CQ_SZ (order_base_2(TEST_WC_NUM_WQES)) +#define TEST_WC_SQ_LOG_WQ_SZ TEST_WC_LOG_CQ_SZ +#define TEST_WC_POLLING_MAX_TIME_JIFFIES msecs_to_jiffies(100) + +struct mlx5_wc_cq { + /* data path - accessed per cqe */ + struct mlx5_cqwq wq; + + /* data path - accessed per napi poll */ + struct mlx5_core_cq mcq; + + /* control */ + struct mlx5_core_dev *mdev; + struct mlx5_wq_ctrl wq_ctrl; +}; + +struct mlx5_wc_sq { + /* data path */ + u16 cc; + u16 pc; + + /* read only */ + struct mlx5_wq_cyc wq; + u32 sqn; + + /* control path */ + struct mlx5_wq_ctrl wq_ctrl; + + struct mlx5_wc_cq cq; + struct mlx5_sq_bfreg bfreg; +}; + +static int mlx5_wc_create_cqwq(struct mlx5_core_dev *mdev, void *cqc, + struct mlx5_wc_cq *cq) +{ + struct mlx5_core_cq *mcq = &cq->mcq; + struct mlx5_wq_param param = {}; + int err; + u32 i; + + err = mlx5_cqwq_create(mdev, ¶m, cqc, &cq->wq, &cq->wq_ctrl); + if (err) + return err; + + mcq->cqe_sz = 64; + mcq->set_ci_db = cq->wq_ctrl.db.db; + mcq->arm_db = cq->wq_ctrl.db.db + 1; + + for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { + struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i); + + cqe->op_own = 0xf1; + } + + cq->mdev = mdev; + + return 0; +} + +static int create_wc_cq(struct mlx5_wc_cq *cq, void *cqc_data) +{ + u32 out[MLX5_ST_SZ_DW(create_cq_out)]; + struct mlx5_core_dev *mdev = cq->mdev; + struct mlx5_core_cq *mcq = &cq->mcq; + int err, inlen, eqn; + void *in, *cqc; + + err = mlx5_comp_eqn_get(mdev, 0, &eqn); + if (err) + return err; + + inlen = MLX5_ST_SZ_BYTES(create_cq_in) + + sizeof(u64) * cq->wq_ctrl.buf.npages; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); + + memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc)); + + mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, + (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); + + MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); + MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); + MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); + MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - + MLX5_ADAPTER_PAGE_SHIFT); + MLX5_SET64(cqc, cqc, 
dbr_addr, cq->wq_ctrl.db.dma); + + err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out)); + + kvfree(in); + + return err; +} + +static int mlx5_wc_create_cq(struct mlx5_core_dev *mdev, struct mlx5_wc_cq *cq) +{ + void *cqc; + int err; + + cqc = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL); + if (!cqc) + return -ENOMEM; + + MLX5_SET(cqc, cqc, log_cq_size, TEST_WC_LOG_CQ_SZ); + MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); + if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128) + MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD); + + err = mlx5_wc_create_cqwq(mdev, cqc, cq); + if (err) { + mlx5_core_err(mdev, "Failed to create wc cq wq, err=%d\n", err); + goto err_create_cqwq; + } + + err = create_wc_cq(cq, cqc); + if (err) { + mlx5_core_err(mdev, "Failed to create wc cq, err=%d\n", err); + goto err_create_cq; + } + + kvfree(cqc); + return 0; + +err_create_cq: + mlx5_wq_destroy(&cq->wq_ctrl); +err_create_cqwq: + kvfree(cqc); + return err; +} + +static void mlx5_wc_destroy_cq(struct mlx5_wc_cq *cq) +{ + mlx5_core_destroy_cq(cq->mdev, &cq->mcq); + mlx5_wq_destroy(&cq->wq_ctrl); +} + +static int create_wc_sq(struct mlx5_core_dev *mdev, void *sqc_data, + struct mlx5_wc_sq *sq) +{ + void *in, *sqc, *wq; + int inlen, err; + u8 ts_format; + + inlen = MLX5_ST_SZ_BYTES(create_sq_in) + + sizeof(u64) * sq->wq_ctrl.buf.npages; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + sqc = MLX5_ADDR_OF(create_sq_in, in, ctx); + wq = MLX5_ADDR_OF(sqc, sqc, wq); + + memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc)); + MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn); + + MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); + MLX5_SET(sqc, sqc, flush_in_error_en, 1); + + ts_format = mlx5_is_real_time_sq(mdev) ? + MLX5_TIMESTAMP_FORMAT_REAL_TIME : + MLX5_TIMESTAMP_FORMAT_FREE_RUNNING; + MLX5_SET(sqc, sqc, ts_format, ts_format); + + MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); + MLX5_SET(wq, wq, uar_page, sq->bfreg.index); + MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - + MLX5_ADAPTER_PAGE_SHIFT); + MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma); + + mlx5_fill_page_frag_array(&sq->wq_ctrl.buf, + (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); + + err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn); + if (err) { + mlx5_core_err(mdev, "Failed to create wc sq, err=%d\n", err); + goto err_create_sq; + } + + memset(in, 0, MLX5_ST_SZ_BYTES(modify_sq_in)); + MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST); + sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); + MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY); + + err = mlx5_core_modify_sq(mdev, sq->sqn, in); + if (err) { + mlx5_core_err(mdev, "Failed to set wc sq(sqn=0x%x) ready, err=%d\n", + sq->sqn, err); + goto err_modify_sq; + } + + kvfree(in); + return 0; + +err_modify_sq: + mlx5_core_destroy_sq(mdev, sq->sqn); +err_create_sq: + kvfree(in); + return err; +} + +static int mlx5_wc_create_sq(struct mlx5_core_dev *mdev, struct mlx5_wc_sq *sq) +{ + struct mlx5_wq_param param = {}; + void *sqc_data, *wq; + int err; + + sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL); + if (!sqc_data) + return -ENOMEM; + + wq = MLX5_ADDR_OF(sqc, sqc_data, wq); + MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); + MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn); + MLX5_SET(wq, wq, log_wq_sz, TEST_WC_SQ_LOG_WQ_SZ); + + err = mlx5_wq_cyc_create(mdev, ¶m, wq, &sq->wq, &sq->wq_ctrl); + if (err) { + mlx5_core_err(mdev, "Failed to create wc sq wq, err=%d\n", err); + goto err_create_wq_cyc; + } + + err = create_wc_sq(mdev, sqc_data, sq); + if 
(err) + goto err_create_sq; + + mlx5_core_dbg(mdev, "wc sq->sqn = 0x%x created\n", sq->sqn); + + kvfree(sqc_data); + return 0; + +err_create_sq: + mlx5_wq_destroy(&sq->wq_ctrl); +err_create_wq_cyc: + kvfree(sqc_data); + return err; +} + +static void mlx5_wc_destroy_sq(struct mlx5_wc_sq *sq) +{ + mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn); + mlx5_wq_destroy(&sq->wq_ctrl); +} + +static void mlx5_wc_post_nop(struct mlx5_wc_sq *sq, bool signaled) +{ + int buf_size = (1 << MLX5_CAP_GEN(sq->cq.mdev, log_bf_reg_size)) / 2; + struct mlx5_wqe_ctrl_seg *ctrl; + __be32 mmio_wqe[16] = {}; + u16 pi; + + pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc); + ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, pi); + memset(ctrl, 0, sizeof(*ctrl)); + ctrl->opmod_idx_opcode = + cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_NOP); + ctrl->qpn_ds = + cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | + DIV_ROUND_UP(sizeof(struct mlx5_wqe_ctrl_seg), MLX5_SEND_WQE_DS)); + if (signaled) + ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE; + + memcpy(mmio_wqe, ctrl, sizeof(*ctrl)); + ((struct mlx5_wqe_ctrl_seg *)&mmio_wqe)->fm_ce_se |= + MLX5_WQE_CTRL_CQ_UPDATE; + + /* ensure wqe is visible to device before updating doorbell record */ + dma_wmb(); + + sq->pc++; + sq->wq.db[MLX5_SND_DBR] = cpu_to_be32(sq->pc); + + /* ensure doorbell record is visible to device before ringing the + * doorbell + */ + wmb(); + + __iowrite64_copy(sq->bfreg.map + sq->bfreg.offset, mmio_wqe, + sizeof(mmio_wqe) / 8); + + sq->bfreg.offset ^= buf_size; +} + +static int mlx5_wc_poll_cq(struct mlx5_wc_sq *sq) +{ + struct mlx5_wc_cq *cq = &sq->cq; + struct mlx5_cqe64 *cqe; + + cqe = mlx5_cqwq_get_cqe(&cq->wq); + if (!cqe) + return -ETIMEDOUT; + + /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), + * otherwise a cq overrun may occur + */ + mlx5_cqwq_pop(&cq->wq); + + if (get_cqe_opcode(cqe) == MLX5_CQE_REQ) { + int wqe_counter = be16_to_cpu(cqe->wqe_counter); + struct mlx5_core_dev *mdev = cq->mdev; + + if (wqe_counter == TEST_WC_NUM_WQES - 1) + mdev->wc_state = MLX5_WC_STATE_UNSUPPORTED; + else + mdev->wc_state = MLX5_WC_STATE_SUPPORTED; + + mlx5_core_dbg(mdev, "wc wqe_counter = 0x%x\n", wqe_counter); + } + + mlx5_cqwq_update_db_record(&cq->wq); + + /* ensure cq space is freed before enabling more cqes */ + wmb(); + + sq->cc++; + + return 0; +} + +static void mlx5_core_test_wc(struct mlx5_core_dev *mdev) +{ + unsigned long expires; + struct mlx5_wc_sq *sq; + int i, err; + + if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED) + return; + + sq = kzalloc(sizeof(*sq), GFP_KERNEL); + if (!sq) + return; + + err = mlx5_alloc_bfreg(mdev, &sq->bfreg, true, false); + if (err) { + mlx5_core_err(mdev, "Failed to alloc bfreg for wc, err=%d\n", err); + goto err_alloc_bfreg; + } + + err = mlx5_wc_create_cq(mdev, &sq->cq); + if (err) + goto err_create_cq; + + err = mlx5_wc_create_sq(mdev, sq); + if (err) + goto err_create_sq; + + for (i = 0; i < TEST_WC_NUM_WQES - 1; i++) + mlx5_wc_post_nop(sq, false); + + mlx5_wc_post_nop(sq, true); + + expires = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES; + do { + err = mlx5_wc_poll_cq(sq); + if (err) + usleep_range(2, 10); + } while (mdev->wc_state == MLX5_WC_STATE_UNINITIALIZED && + time_is_after_jiffies(expires)); + + mlx5_wc_destroy_sq(sq); + +err_create_sq: + mlx5_wc_destroy_cq(&sq->cq); +err_create_cq: + mlx5_free_bfreg(mdev, &sq->bfreg); +err_alloc_bfreg: + kfree(sq); +} + +bool mlx5_wc_support_get(struct mlx5_core_dev *mdev) +{ + struct mlx5_core_dev *parent = NULL; + + if (!MLX5_CAP_GEN(mdev, bf)) { + 
mlx5_core_dbg(mdev, "BlueFlame not supported\n"); + goto out; + } + + if (!MLX5_CAP_GEN(mdev, log_max_sq)) { + mlx5_core_dbg(mdev, "SQ not supported\n"); + goto out; + } + + if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED) + /* No need to lock anything as we perform WC test only + * once for whole device and was already done. + */ + goto out; + + mutex_lock(&mdev->wc_state_lock); + + if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED) + goto unlock; + +#ifdef CONFIG_MLX5_SF + if (mlx5_core_is_sf(mdev)) + parent = mdev->priv.parent_mdev; +#endif + + if (parent) { + mutex_lock(&parent->wc_state_lock); + + mlx5_core_test_wc(parent); + + mlx5_core_dbg(mdev, "parent set wc_state=%d\n", + parent->wc_state); + mdev->wc_state = parent->wc_state; + + mutex_unlock(&parent->wc_state_lock); + } + + mlx5_core_test_wc(mdev); + +unlock: + mutex_unlock(&mdev->wc_state_lock); +out: + mlx5_core_dbg(mdev, "wc_state=%d\n", mdev->wc_state); + + return mdev->wc_state == MLX5_WC_STATE_SUPPORTED; +} +EXPORT_SYMBOL(mlx5_wc_support_get); diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index a510bf2cff2f..74f7e27b490f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -33,6 +33,7 @@ config MLXSW_CORE_THERMAL config MLXSW_PCI tristate "PCI bus implementation for Mellanox Technologies Switch ASICs" depends on PCI && HAS_IOMEM && MLXSW_CORE + select PAGE_POOL default m help This is PCI bus implementation for Mellanox Technologies Switch ASICs. diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 6c06b0592760..294e758f1067 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -513,6 +513,63 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_env_get_module_eeprom_by_page); +int +mlxsw_env_set_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, + u8 slot_index, u8 module, + const struct ethtool_module_eeprom *page, + struct netlink_ext_ack *extack) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + u32 bytes_written = 0; + u16 device_addr; + int err; + + if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot write to EEPROM of a module on an inactive line card"); + return -EIO; + } + + err = mlxsw_env_validate_module_type(mlxsw_core, slot_index, module); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "EEPROM is not equipped on port module type"); + return err; + } + + device_addr = page->offset; + + while (bytes_written < page->length) { + char mcia_pl[MLXSW_REG_MCIA_LEN]; + char eeprom_tmp[128] = {}; + u8 size; + + size = min_t(u8, page->length - bytes_written, + mlxsw_env->max_eeprom_len); + + mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, page->page, + device_addr + bytes_written, size, + page->i2c_address); + mlxsw_reg_mcia_bank_number_set(mcia_pl, page->bank); + memcpy(eeprom_tmp, page->data + bytes_written, size); + mlxsw_reg_mcia_eeprom_memcpy_to(mcia_pl, eeprom_tmp); + + err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcia), mcia_pl); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to access module's EEPROM"); + return err; + } + + err = mlxsw_env_mcia_status_process(mcia_pl, extack); + if (err) + return err; + + bytes_written += size; + } + + return 0; +} +EXPORT_SYMBOL(mlxsw_env_set_module_eeprom_by_page); + static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 
slot_index, u8 module) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h index a197e3ae069c..e4ff17869400 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h @@ -28,6 +28,12 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, const struct ethtool_module_eeprom *page, struct netlink_ext_ack *extack); +int +mlxsw_env_set_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, + u8 slot_index, u8 module, + const struct ethtool_module_eeprom *page, + struct netlink_ext_ack *extack); + int mlxsw_env_reset_module(struct net_device *netdev, struct mlxsw_core *mlxsw_core, u8 slot_index, u8 module, u32 *flags); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index 5c511e1a8efa..d61478c0c632 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -100,6 +100,12 @@ static const struct mlxsw_cooling_states default_cooling_states[] = { struct mlxsw_thermal; +struct mlxsw_thermal_cooling_device { + struct mlxsw_thermal *thermal; + struct thermal_cooling_device *cdev; + unsigned int idx; +}; + struct mlxsw_thermal_module { struct mlxsw_thermal *parent; struct thermal_zone_device *tzdev; @@ -123,7 +129,7 @@ struct mlxsw_thermal { const struct mlxsw_bus_info *bus_info; struct thermal_zone_device *tzdev; int polling_delay; - struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX]; + struct mlxsw_thermal_cooling_device cdevs[MLXSW_MFCR_PWMS_MAX]; struct thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS]; struct mlxsw_cooling_states cooling_states[MLXSW_THERMAL_NUM_TRIPS]; struct mlxsw_thermal_area line_cards[]; @@ -147,7 +153,7 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal, int i; for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) - if (thermal->cdevs[i] == cdev) + if (thermal->cdevs[i].cdev == cdev) return i; /* Allow mlxsw thermal zone binding to an external cooling device */ @@ -352,17 +358,14 @@ static int mlxsw_thermal_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *p_state) { - struct mlxsw_thermal *thermal = cdev->devdata; + struct mlxsw_thermal_cooling_device *mlxsw_cdev = cdev->devdata; + struct mlxsw_thermal *thermal = mlxsw_cdev->thermal; struct device *dev = thermal->bus_info->dev; char mfsc_pl[MLXSW_REG_MFSC_LEN]; - int err, idx; u8 duty; + int err; - idx = mlxsw_get_cooling_device_idx(thermal, cdev); - if (idx < 0) - return idx; - - mlxsw_reg_mfsc_pack(mfsc_pl, idx, 0); + mlxsw_reg_mfsc_pack(mfsc_pl, mlxsw_cdev->idx, 0); err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsc), mfsc_pl); if (err) { dev_err(dev, "Failed to query PWM duty\n"); @@ -378,22 +381,19 @@ static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state) { - struct mlxsw_thermal *thermal = cdev->devdata; + struct mlxsw_thermal_cooling_device *mlxsw_cdev = cdev->devdata; + struct mlxsw_thermal *thermal = mlxsw_cdev->thermal; struct device *dev = thermal->bus_info->dev; char mfsc_pl[MLXSW_REG_MFSC_LEN]; - int idx; int err; if (state > MLXSW_THERMAL_MAX_STATE) return -EINVAL; - idx = mlxsw_get_cooling_device_idx(thermal, cdev); - if (idx < 0) - return idx; - /* Normalize the state to the valid speed range. 
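The cooling-device rework above replaces a linear search for the PWM index on every get/set callback with a small per-device context (thermal pointer plus index) registered as devdata. A plain-C sketch of that pattern is shown below; fan_ctx and get_cur_state() here are illustrative names, not driver symbols.

/* Sketch (not driver code): per-instance context carries its own index,
 * so the callback never has to search an array for it. */
#include <stdio.h>

struct fan_ctx {
        int idx;        /* PWM index, known at registration time */
        void *parent;   /* would point back at the shared state */
};

static int get_cur_state(void *devdata, unsigned long *state)
{
        struct fan_ctx *ctx = devdata;

        *state = 100 + ctx->idx;        /* stand-in for the MFSC query */
        return 0;
}

int main(void)
{
        struct fan_ctx fans[4];
        unsigned long state;
        int i;

        for (i = 0; i < 4; i++) {
                fans[i].idx = i;
                fans[i].parent = NULL;
                /* registration would pass &fans[i] as devdata */
        }

        get_cur_state(&fans[2], &state);
        printf("fan2 state=%lu\n", state);
        return 0;
}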
*/ state = max_t(unsigned long, MLXSW_THERMAL_MIN_STATE, state); - mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state)); + mlxsw_reg_mfsc_pack(mfsc_pl, mlxsw_cdev->idx, + mlxsw_state_to_duty(state)); err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsc), mfsc_pl); if (err) { dev_err(dev, "Failed to write PWM duty\n"); @@ -753,17 +753,21 @@ int mlxsw_thermal_init(struct mlxsw_core *core, } for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) { if (pwm_active & BIT(i)) { + struct mlxsw_thermal_cooling_device *mlxsw_cdev; struct thermal_cooling_device *cdev; + mlxsw_cdev = &thermal->cdevs[i]; + mlxsw_cdev->thermal = thermal; + mlxsw_cdev->idx = i; cdev = thermal_cooling_device_register("mlxsw_fan", - thermal, + mlxsw_cdev, &mlxsw_cooling_ops); if (IS_ERR(cdev)) { err = PTR_ERR(cdev); dev_err(dev, "Failed to register cooling device\n"); goto err_thermal_cooling_device_register; } - thermal->cdevs[i] = cdev; + mlxsw_cdev->cdev = cdev; } } @@ -824,8 +828,7 @@ err_thermal_modules_init: err_thermal_zone_device_register: err_thermal_cooling_device_register: for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) - if (thermal->cdevs[i]) - thermal_cooling_device_unregister(thermal->cdevs[i]); + thermal_cooling_device_unregister(thermal->cdevs[i].cdev); err_reg_write: err_reg_query: kfree(thermal); @@ -847,12 +850,8 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal) thermal->tzdev = NULL; } - for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) { - if (thermal->cdevs[i]) { - thermal_cooling_device_unregister(thermal->cdevs[i]); - thermal->cdevs[i] = NULL; - } - } + for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) + thermal_cooling_device_unregister(thermal->cdevs[i].cdev); kfree(thermal); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h index cfafbeb42586..a619a0736bd1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/item.h +++ b/drivers/net/ethernet/mellanox/mlxsw/item.h @@ -218,6 +218,10 @@ __mlxsw_item_bit_array_offset(const struct mlxsw_item *item, } max_index = (item->size.bytes << 3) / item->element_size - 1; + if (WARN_ONCE(index > max_index, + "name=%s,index=%u,max_index=%u\n", item->name, index, + max_index)) + index = 0; be_index = max_index - index; offset = be_index * item->element_size >> 3; in_byte_index = index % (BITS_PER_BYTE / item->element_size); diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c index f0ceb196a6ce..828c65036a4c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c @@ -140,6 +140,20 @@ mlxsw_m_get_module_eeprom_by_page(struct net_device *netdev, page, extack); } +static int +mlxsw_m_set_module_eeprom_by_page(struct net_device *netdev, + const struct ethtool_module_eeprom *page, + struct netlink_ext_ack *extack) +{ + struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); + struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; + + return mlxsw_env_set_module_eeprom_by_page(core, + mlxsw_m_port->slot_index, + mlxsw_m_port->module, + page, extack); +} + static int mlxsw_m_reset(struct net_device *netdev, u32 *flags) { struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); @@ -181,6 +195,7 @@ static const struct ethtool_ops mlxsw_m_port_ethtool_ops = { .get_module_info = mlxsw_m_get_module_info, .get_module_eeprom = mlxsw_m_get_module_eeprom, .get_module_eeprom_by_page = mlxsw_m_get_module_eeprom_by_page, + .set_module_eeprom_by_page = mlxsw_m_set_module_eeprom_by_page, .reset = mlxsw_m_reset, .get_module_power_mode = 
mlxsw_m_get_module_power_mode, .set_module_power_mode = mlxsw_m_set_module_power_mode, @@ -702,8 +717,8 @@ static struct mlxsw_driver mlxsw_m_driver = { }; static const struct i2c_device_id mlxsw_m_i2c_id[] = { - { "mlxsw_minimal", 0}, - { }, + { "mlxsw_minimal" }, + { } }; static struct i2c_driver mlxsw_m_i2c_driver = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index c0ced4d315f3..060e5b939211 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -13,6 +13,7 @@ #include <linux/if_vlan.h> #include <linux/log2.h> #include <linux/string.h> +#include <net/page_pool/helpers.h> #include "pci_hw.h" #include "pci.h" @@ -61,15 +62,11 @@ struct mlxsw_pci_mem_item { }; struct mlxsw_pci_queue_elem_info { + struct page *pages[MLXSW_PCI_WQE_SG_ENTRIES]; char *elem; /* pointer to actual dma mapped element mem chunk */ - union { - struct { - struct sk_buff *skb; - } sdq; - struct { - struct sk_buff *skb; - } rdq; - } u; + struct { + struct sk_buff *skb; + } sdq; }; struct mlxsw_pci_queue { @@ -88,10 +85,14 @@ struct mlxsw_pci_queue { enum mlxsw_pci_cqe_v v; struct mlxsw_pci_queue *dq; struct napi_struct napi; + struct page_pool *page_pool; } cq; struct { struct tasklet_struct tasklet; } eq; + struct { + struct mlxsw_pci_queue *cq; + } rdq; } u; }; @@ -110,6 +111,7 @@ struct mlxsw_pci { bool cff_support; enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode; enum mlxsw_cmd_mbox_config_profile_flood_mode flood_mode; + u8 num_sg_entries; /* Number of scatter/gather entries for packets. */ struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT]; u32 doorbell_offset; struct mlxsw_core *core; @@ -335,6 +337,29 @@ static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci, mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num); } +#define MLXSW_PCI_SKB_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN) + +#define MLXSW_PCI_RX_BUF_SW_OVERHEAD \ + (MLXSW_PCI_SKB_HEADROOM + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) + +static void +mlxsw_pci_wqe_rx_frag_set(struct mlxsw_pci *mlxsw_pci, struct page *page, + char *wqe, int index, size_t frag_len) +{ + dma_addr_t mapaddr; + + mapaddr = page_pool_get_dma_addr(page); + + if (index == 0) { + mapaddr += MLXSW_PCI_SKB_HEADROOM; + frag_len = frag_len - MLXSW_PCI_RX_BUF_SW_OVERHEAD; + } + + mlxsw_pci_wqe_address_set(wqe, index, mapaddr); + mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len); +} + static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe, int index, char *frag_data, size_t frag_len, int direction) @@ -364,43 +389,140 @@ static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe, dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction); } -static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci, - struct mlxsw_pci_queue_elem_info *elem_info, - gfp_t gfp) +static struct sk_buff *mlxsw_pci_rdq_build_skb(struct page *pages[], + u16 byte_count) { - size_t buf_len = MLXSW_PORT_MAX_MTU; - char *wqe = elem_info->elem; + unsigned int linear_data_size; struct sk_buff *skb; - int err; + int page_index = 0; + bool linear_only; + void *data; + + data = page_address(pages[page_index]); + net_prefetch(data); + + skb = napi_build_skb(data, PAGE_SIZE); + if (unlikely(!skb)) + return ERR_PTR(-ENOMEM); + + linear_only = byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD <= PAGE_SIZE; + linear_data_size = linear_only ? 
byte_count : + PAGE_SIZE - + MLXSW_PCI_RX_BUF_SW_OVERHEAD; + + skb_reserve(skb, MLXSW_PCI_SKB_HEADROOM); + skb_put(skb, linear_data_size); + + if (linear_only) + return skb; + + byte_count -= linear_data_size; + page_index++; + + while (byte_count > 0) { + unsigned int frag_size; + struct page *page; + + page = pages[page_index]; + frag_size = min(byte_count, PAGE_SIZE); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + page, 0, frag_size, PAGE_SIZE); + byte_count -= frag_size; + page_index++; + } - skb = __netdev_alloc_skb_ip_align(NULL, buf_len, gfp); - if (!skb) + return skb; +} + +static int mlxsw_pci_rdq_page_alloc(struct mlxsw_pci_queue *q, + struct mlxsw_pci_queue_elem_info *elem_info, + int index) +{ + struct mlxsw_pci_queue *cq = q->u.rdq.cq; + char *wqe = elem_info->elem; + struct page *page; + + page = page_pool_dev_alloc_pages(cq->u.cq.page_pool); + if (unlikely(!page)) return -ENOMEM; - err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data, - buf_len, DMA_FROM_DEVICE); - if (err) - goto err_frag_map; + mlxsw_pci_wqe_rx_frag_set(q->pci, page, wqe, index, PAGE_SIZE); + elem_info->pages[index] = page; + return 0; +} + +static void mlxsw_pci_rdq_page_free(struct mlxsw_pci_queue *q, + struct mlxsw_pci_queue_elem_info *elem_info, + int index) +{ + struct mlxsw_pci_queue *cq = q->u.rdq.cq; + + page_pool_put_page(cq->u.cq.page_pool, elem_info->pages[index], -1, + false); +} + +static u8 mlxsw_pci_num_sg_entries_get(u16 byte_count) +{ + return DIV_ROUND_UP(byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD, + PAGE_SIZE); +} + +static int +mlxsw_pci_elem_info_pages_ref_store(const struct mlxsw_pci_queue *q, + const struct mlxsw_pci_queue_elem_info *el, + u16 byte_count, struct page *pages[], + u8 *p_num_sg_entries) +{ + u8 num_sg_entries; + int i; + + num_sg_entries = mlxsw_pci_num_sg_entries_get(byte_count); + if (WARN_ON_ONCE(num_sg_entries > q->pci->num_sg_entries)) + return -EINVAL; + + for (i = 0; i < num_sg_entries; i++) + pages[i] = el->pages[i]; + + *p_num_sg_entries = num_sg_entries; + return 0; +} + +static int +mlxsw_pci_rdq_pages_alloc(struct mlxsw_pci_queue *q, + struct mlxsw_pci_queue_elem_info *elem_info, + u8 num_sg_entries) +{ + struct page *old_pages[MLXSW_PCI_WQE_SG_ENTRIES]; + struct mlxsw_pci_queue *cq = q->u.rdq.cq; + int i, err; + + for (i = 0; i < num_sg_entries; i++) { + old_pages[i] = elem_info->pages[i]; + err = mlxsw_pci_rdq_page_alloc(q, elem_info, i); + if (err) { + dev_err_ratelimited(&q->pci->pdev->dev, "Failed to alloc page\n"); + goto err_page_alloc; + } + } - elem_info->u.rdq.skb = skb; return 0; -err_frag_map: - dev_kfree_skb_any(skb); +err_page_alloc: + for (i--; i >= 0; i--) + page_pool_recycle_direct(cq->u.cq.page_pool, old_pages[i]); + return err; } -static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci, - struct mlxsw_pci_queue_elem_info *elem_info) +static void +mlxsw_pci_rdq_pages_recycle(struct mlxsw_pci_queue *q, struct page *pages[], + u8 num_sg_entries) { - struct sk_buff *skb; - char *wqe; - - skb = elem_info->u.rdq.skb; - wqe = elem_info->elem; + struct mlxsw_pci_queue *cq = q->u.rdq.cq; + int i; - mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE); - dev_kfree_skb_any(skb); + for (i = 0; i < num_sg_entries; i++) + page_pool_recycle_direct(cq->u.cq.page_pool, pages[i]); } static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, @@ -410,7 +532,7 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, u8 sdq_count = mlxsw_pci->num_sdqs; struct mlxsw_pci_queue *cq; u8 cq_num; - int i; + int i, 
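To make the page-pool receive path above concrete, the sketch below works through how one completion's byte count is split into a linear part plus full-page fragments, and how many scatter/gather entries that implies. PAGE_SZ and SW_OVERHEAD are illustrative placeholders for PAGE_SIZE and MLXSW_PCI_RX_BUF_SW_OVERHEAD; the real overhead depends on NET_SKB_PAD, NET_IP_ALIGN and skb_shared_info alignment.

/* Worked sketch (not driver code) of the split done by
 * mlxsw_pci_rdq_build_skb() and mlxsw_pci_num_sg_entries_get(). */
#include <stdio.h>

#define PAGE_SZ         4096u
#define SW_OVERHEAD     384u    /* headroom + shared_info, illustrative */

int main(void)
{
        unsigned int byte_count = 9000; /* e.g. a jumbo frame */
        unsigned int linear, remaining, frags = 0;
        /* DIV_ROUND_UP(byte_count + overhead, PAGE_SIZE) in the driver */
        unsigned int sg_entries = (byte_count + SW_OVERHEAD + PAGE_SZ - 1) / PAGE_SZ;

        if (byte_count + SW_OVERHEAD <= PAGE_SZ)
                linear = byte_count;            /* linear-only skb */
        else
                linear = PAGE_SZ - SW_OVERHEAD; /* rest goes into page frags */

        remaining = byte_count - linear;
        while (remaining) {
                unsigned int frag = remaining < PAGE_SZ ? remaining : PAGE_SZ;

                frags++;
                remaining -= frag;
        }

        printf("sg_entries=%u linear=%u frags=%u\n", sg_entries, linear, frags);
        return 0;
}

With these example numbers a 9000-byte frame needs 3 pages: one for headroom plus 3712 linear bytes, then a full-page fragment and a 1192-byte fragment.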
j; int err; q->producer_counter = 0; @@ -434,15 +556,19 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, cq = mlxsw_pci_cq_get(mlxsw_pci, cq_num); cq->u.cq.dq = q; + q->u.rdq.cq = cq; mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); for (i = 0; i < q->count; i++) { elem_info = mlxsw_pci_queue_elem_info_producer_get(q); BUG_ON(!elem_info); - err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_KERNEL); - if (err) - goto rollback; + + for (j = 0; j < mlxsw_pci->num_sg_entries; j++) { + err = mlxsw_pci_rdq_page_alloc(q, elem_info, j); + if (err) + goto rollback; + } /* Everything is set up, ring doorbell to pass elem to HW */ q->producer_counter++; mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); @@ -453,8 +579,11 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, rollback: for (i--; i >= 0; i--) { elem_info = mlxsw_pci_queue_elem_info_get(q, i); - mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info); + for (j--; j >= 0; j--) + mlxsw_pci_rdq_page_free(q, elem_info, j); + j = mlxsw_pci->num_sg_entries; } + q->u.rdq.cq = NULL; cq->u.cq.dq = NULL; mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num); @@ -465,12 +594,13 @@ static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci, struct mlxsw_pci_queue *q) { struct mlxsw_pci_queue_elem_info *elem_info; - int i; + int i, j; mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num); for (i = 0; i < q->count; i++) { elem_info = mlxsw_pci_queue_elem_info_get(q, i); - mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info); + for (j = 0; j < mlxsw_pci->num_sg_entries; j++) + mlxsw_pci_rdq_page_free(q, elem_info, j); } } @@ -515,7 +645,7 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci, struct mlxsw_pci_queue *q, u16 consumer_counter_limit, enum mlxsw_pci_cqe_v cqe_v, - char *cqe) + char *cqe, int budget) { struct pci_dev *pdev = mlxsw_pci->pdev; struct mlxsw_pci_queue_elem_info *elem_info; @@ -526,8 +656,8 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci, spin_lock(&q->lock); elem_info = mlxsw_pci_queue_elem_info_consumer_get(q); - tx_info = mlxsw_skb_cb(elem_info->u.sdq.skb)->tx_info; - skb = elem_info->u.sdq.skb; + tx_info = mlxsw_skb_cb(elem_info->sdq.skb)->tx_info; + skb = elem_info->sdq.skb; wqe = elem_info->elem; for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++) mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE); @@ -541,8 +671,8 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci, } if (skb) - dev_kfree_skb_any(skb); - elem_info->u.sdq.skb = NULL; + napi_consume_skb(skb, budget); + elem_info->sdq.skb = NULL; if (q->consumer_counter++ != consumer_counter_limit) dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n"); @@ -604,27 +734,40 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, enum mlxsw_pci_cqe_v cqe_v, char *cqe) { struct pci_dev *pdev = mlxsw_pci->pdev; + struct page *pages[MLXSW_PCI_WQE_SG_ENTRIES]; struct mlxsw_pci_queue_elem_info *elem_info; struct mlxsw_rx_info rx_info = {}; - char wqe[MLXSW_PCI_WQE_SIZE]; struct sk_buff *skb; + u8 num_sg_entries; u16 byte_count; int err; elem_info = mlxsw_pci_queue_elem_info_consumer_get(q); - skb = elem_info->u.rdq.skb; - memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE); if (q->consumer_counter++ != consumer_counter_limit) dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n"); - err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_ATOMIC); - if (err) { - dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n"); + byte_count = 
mlxsw_pci_cqe_byte_count_get(cqe); + if (mlxsw_pci_cqe_crc_get(cqe_v, cqe)) + byte_count -= ETH_FCS_LEN; + + err = mlxsw_pci_elem_info_pages_ref_store(q, elem_info, byte_count, + pages, &num_sg_entries); + if (err) + goto out; + + err = mlxsw_pci_rdq_pages_alloc(q, elem_info, num_sg_entries); + if (err) + goto out; + + skb = mlxsw_pci_rdq_build_skb(pages, byte_count); + if (IS_ERR(skb)) { + dev_err_ratelimited(&pdev->dev, "Failed to build skb for RDQ\n"); + mlxsw_pci_rdq_pages_recycle(q, pages, num_sg_entries); goto out; } - mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE); + skb_mark_for_recycle(skb); if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) { rx_info.is_lag = true; @@ -657,10 +800,6 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe); - byte_count = mlxsw_pci_cqe_byte_count_get(cqe); - if (mlxsw_pci_cqe_crc_get(cqe_v, cqe)) - byte_count -= ETH_FCS_LEN; - skb_put(skb, byte_count); mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info); out: @@ -785,7 +924,7 @@ static int mlxsw_pci_napi_poll_cq_tx(struct napi_struct *napi, int budget) mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq, - wqe_counter, q->u.cq.v, ncqe); + wqe_counter, q->u.cq.v, ncqe, budget); work_done++; } @@ -832,19 +971,51 @@ static void mlxsw_pci_cq_napi_setup(struct mlxsw_pci_queue *q, mlxsw_pci_napi_poll_cq_rx); break; } - - napi_enable(&q->u.cq.napi); } static void mlxsw_pci_cq_napi_teardown(struct mlxsw_pci_queue *q) { - napi_disable(&q->u.cq.napi); netif_napi_del(&q->u.cq.napi); } +static int mlxsw_pci_cq_page_pool_init(struct mlxsw_pci_queue *q, + enum mlxsw_pci_cq_type cq_type) +{ + struct page_pool_params pp_params = {}; + struct mlxsw_pci *mlxsw_pci = q->pci; + struct page_pool *page_pool; + + if (cq_type != MLXSW_PCI_CQ_RDQ) + return 0; + + pp_params.flags = PP_FLAG_DMA_MAP; + pp_params.pool_size = MLXSW_PCI_WQE_COUNT * mlxsw_pci->num_sg_entries; + pp_params.nid = dev_to_node(&mlxsw_pci->pdev->dev); + pp_params.dev = &mlxsw_pci->pdev->dev; + pp_params.napi = &q->u.cq.napi; + pp_params.dma_dir = DMA_FROM_DEVICE; + + page_pool = page_pool_create(&pp_params); + if (IS_ERR(page_pool)) + return PTR_ERR(page_pool); + + q->u.cq.page_pool = page_pool; + return 0; +} + +static void mlxsw_pci_cq_page_pool_fini(struct mlxsw_pci_queue *q, + enum mlxsw_pci_cq_type cq_type) +{ + if (cq_type != MLXSW_PCI_CQ_RDQ) + return; + + page_pool_destroy(q->u.cq.page_pool); +} + static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, struct mlxsw_pci_queue *q) { + enum mlxsw_pci_cq_type cq_type = mlxsw_pci_cq_type(mlxsw_pci, q); int i; int err; @@ -874,15 +1045,29 @@ static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num); if (err) return err; - mlxsw_pci_cq_napi_setup(q, mlxsw_pci_cq_type(mlxsw_pci, q)); + mlxsw_pci_cq_napi_setup(q, cq_type); + + err = mlxsw_pci_cq_page_pool_init(q, cq_type); + if (err) + goto err_page_pool_init; + + napi_enable(&q->u.cq.napi); mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); return 0; + +err_page_pool_init: + mlxsw_pci_cq_napi_teardown(q); + return err; } static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci, struct mlxsw_pci_queue *q) { + enum mlxsw_pci_cq_type cq_type = mlxsw_pci_cq_type(mlxsw_pci, q); + + napi_disable(&q->u.cq.napi); + mlxsw_pci_cq_page_pool_fini(q, cq_type); mlxsw_pci_cq_napi_teardown(q); 
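The CQ init path above now creates the per-RX-CQ page pool after the NAPI context is registered and only then enables NAPI; the fini path reverses that order, disabling NAPI before the pool is destroyed. A small standalone sketch of that bring-up/unwind ordering, with stub functions standing in for the real kernel calls, follows.

/* Sketch (plain C, not driver code) of ordered init with reverse teardown
 * and goto-based unwinding on failure. */
#include <stdio.h>

static int setup_napi(void)     { puts("napi add"); return 0; }
static void teardown_napi(void) { puts("napi del"); }
static int create_pool(void)    { puts("page pool create"); return 0; }
static void destroy_pool(void)  { puts("page pool destroy"); }
static void enable_napi(void)   { puts("napi enable"); }
static void disable_napi(void)  { puts("napi disable"); }

static int cq_init(void)
{
        int err;

        err = setup_napi();
        if (err)
                return err;

        err = create_pool();    /* the pool references the NAPI context */
        if (err)
                goto err_pool;

        enable_napi();          /* only after the pool exists */
        return 0;

err_pool:
        teardown_napi();
        return err;
}

static void cq_fini(void)
{
        disable_napi();         /* stop polling before freeing pages */
        destroy_pool();
        teardown_napi();
}

int main(void)
{
        if (!cq_init())
                cq_fini();
        return 0;
}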
mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num); } @@ -1599,6 +1784,7 @@ static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci, { struct pci_dev *pdev = mlxsw_pci->pdev; char mrsr_pl[MLXSW_REG_MRSR_LEN]; + struct pci_dev *bridge; int err; if (!pci_reset_sbr_supported) { @@ -1615,6 +1801,9 @@ static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci, sbr: device_lock_assert(&pdev->dev); + bridge = pci_upstream_bridge(pdev); + if (bridge) + pci_cfg_access_lock(bridge); pci_cfg_access_lock(pdev); pci_save_state(pdev); @@ -1624,6 +1813,8 @@ sbr: pci_restore_state(pdev); pci_cfg_access_unlock(pdev); + if (bridge) + pci_cfg_access_unlock(bridge); return err; } @@ -1703,6 +1894,17 @@ static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci) pci_free_irq_vectors(mlxsw_pci->pdev); } +static void mlxsw_pci_num_sg_entries_set(struct mlxsw_pci *mlxsw_pci) +{ + u8 num_sg_entries; + + num_sg_entries = mlxsw_pci_num_sg_entries_get(MLXSW_PORT_MAX_MTU); + mlxsw_pci->num_sg_entries = min(num_sg_entries, + MLXSW_PCI_WQE_SG_ENTRIES); + + WARN_ON(num_sg_entries > MLXSW_PCI_WQE_SG_ENTRIES); +} + static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, const struct mlxsw_config_profile *profile, struct mlxsw_res *res) @@ -1825,6 +2027,8 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, if (err) goto err_requery_resources; + mlxsw_pci_num_sg_entries_set(mlxsw_pci); + err = mlxsw_pci_napi_devs_init(mlxsw_pci); if (err) goto err_napi_devs_init; @@ -1931,7 +2135,7 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb, goto unlock; } mlxsw_skb_cb(skb)->tx_info = *tx_info; - elem_info->u.sdq.skb = skb; + elem_info->sdq.skb = skb; wqe = elem_info->elem; mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h index ac4d4ea51597..0a73b1a4526e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/port.h +++ b/drivers/net/ethernet/mellanox/mlxsw/port.h @@ -6,7 +6,8 @@ #include <linux/types.h> -#define MLXSW_PORT_MAX_MTU 10000 +#define MLXSW_PORT_MAX_MTU (10 * 1024) +#define MLXSW_PORT_ETH_FRAME_HDR (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) #define MLXSW_PORT_DEFAULT_VID 1 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 030ed71f945d..f064789f3240 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -405,29 +405,12 @@ static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) mlxsw_sp_port->dev->dev_addr); } -static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu) -{ - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char pmtu_pl[MLXSW_REG_PMTU_LEN]; - int err; - - mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0); - err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); - if (err) - return err; - - *p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); - return 0; -} - static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char pmtu_pl[MLXSW_REG_PMTU_LEN]; - mtu += MLXSW_TXHDR_LEN + ETH_HLEN; - if (mtu > mlxsw_sp_port->max_mtu) - return -EINVAL; + mtu += MLXSW_PORT_ETH_FRAME_HDR; mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); @@ -1697,8 +1680,8 @@ static int 
mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port, NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK; - dev->min_mtu = 0; - dev->max_mtu = ETH_MAX_MTU; + dev->min_mtu = ETH_MIN_MTU; + dev->max_mtu = MLXSW_PORT_MAX_MTU - MLXSW_PORT_ETH_FRAME_HDR; /* Each packet needs to have a Tx header (metadata) on top all other * headers. @@ -1727,13 +1710,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port, goto err_max_speed_get; } - err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n", - mlxsw_sp_port->local_port); - goto err_port_max_mtu_get; - } - err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", @@ -1877,7 +1853,6 @@ err_port_ets_init: err_port_buffers_init: err_port_admin_status_set: err_port_mtu_set: -err_port_max_mtu_get: err_max_speed_get: err_port_speed_by_width_set: err_port_system_port_mapping_set: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 3beb5d0847ab..8d3c61287696 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -238,7 +238,7 @@ struct mlxsw_sp_ptp_ops { struct hwtstamp_config *config); void (*shaper_work)(struct work_struct *work); int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp, - struct ethtool_ts_info *info); + struct kernel_ethtool_ts_info *info); int (*get_stats_count)(void); void (*get_stats_strings)(u8 **p); void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port, @@ -359,7 +359,6 @@ struct mlxsw_sp_port { u16 egr_types; struct mlxsw_sp_ptp_port_stats stats; } ptp; - int max_mtu; u32 max_speed; struct mlxsw_sp_hdroom *hdroom; u64 module_overheat_initial_val; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c index 4b713832fdd5..07cb1e26ca3e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c @@ -391,7 +391,8 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, if (err) return err; - lkey_id = aregion->ops->lkey_id_get(aregion, aentry->enc_key, erp_id); + lkey_id = aregion->ops->lkey_id_get(aregion, aentry->ht_key.enc_key, + erp_id); if (IS_ERR(lkey_id)) return PTR_ERR(lkey_id); aentry->lkey_id = lkey_id; @@ -399,7 +400,7 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block); mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE, priority, region->tcam_region_info, - aentry->enc_key, erp_id, + aentry->ht_key.enc_key, erp_id, aentry->delta_info.start, aentry->delta_info.mask, aentry->delta_info.value, @@ -428,7 +429,7 @@ mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0, region->tcam_region_info, - aentry->enc_key, erp_id, + aentry->ht_key.enc_key, erp_id, aentry->delta_info.start, aentry->delta_info.mask, aentry->delta_info.value, @@ -457,7 +458,7 @@ mlxsw_sp_acl_atcam_region_entry_action_replace(struct mlxsw_sp *mlxsw_sp, kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block); mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_UPDATE, priority, region->tcam_region_info, - aentry->enc_key, erp_id, + 
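With the MTU hunks above, the netdev MTU bounds are derived from the fixed hardware frame maximum rather than a queried per-port value, and the PMTU register is programmed with the L2 framing added back on top of the MTU. The worked numbers below simply evaluate those expressions, assuming the standard 14/4/4 byte values for ETH_HLEN, VLAN_HLEN and ETH_FCS_LEN.

/* Worked numbers (not driver code) for the new MTU bookkeeping. */
#include <stdio.h>

#define ETH_HLEN        14
#define VLAN_HLEN       4
#define ETH_FCS_LEN     4
#define PORT_MAX_MTU    (10 * 1024)                             /* 10240 */
#define ETH_FRAME_HDR   (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)    /* 22 */

int main(void)
{
        int max_mtu = PORT_MAX_MTU - ETH_FRAME_HDR;     /* dev->max_mtu */
        int mtu = 1500;                                 /* ETH_DATA_LEN */

        printf("dev->max_mtu = %d\n", max_mtu);
        printf("PMTU written for MTU %d = %d\n", mtu, mtu + ETH_FRAME_HDR);
        return 0;
}

dev->max_mtu therefore comes out to 10218 bytes, and the default MTU of 1500 programs 1522 into PMTU.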
aentry->ht_key.enc_key, erp_id, aentry->delta_info.start, aentry->delta_info.mask, aentry->delta_info.value, @@ -480,26 +481,23 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp, int err; mlxsw_afk_encode(afk, region->key_info, &rulei->values, - aentry->ht_key.full_enc_key, mask); + aentry->ht_key.enc_key, mask); erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, false); if (IS_ERR(erp_mask)) return PTR_ERR(erp_mask); aentry->erp_mask = erp_mask; aentry->ht_key.erp_id = mlxsw_sp_acl_erp_mask_erp_id(erp_mask); - memcpy(aentry->enc_key, aentry->ht_key.full_enc_key, - sizeof(aentry->enc_key)); /* Compute all needed delta information and clear the delta bits - * from the encrypted key. + * from the encoded key. */ delta = mlxsw_sp_acl_erp_delta(aentry->erp_mask); aentry->delta_info.start = mlxsw_sp_acl_erp_delta_start(delta); aentry->delta_info.mask = mlxsw_sp_acl_erp_delta_mask(delta); aentry->delta_info.value = - mlxsw_sp_acl_erp_delta_value(delta, - aentry->ht_key.full_enc_key); - mlxsw_sp_acl_erp_delta_clear(delta, aentry->enc_key); + mlxsw_sp_acl_erp_delta_value(delta, aentry->ht_key.enc_key); + mlxsw_sp_acl_erp_delta_clear(delta, aentry->ht_key.enc_key); /* Add rule to the list of A-TCAM rules, assuming this * rule is intended to A-TCAM. In case this rule does diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c index 95f63fcf4ba1..a54eedb69a3f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c @@ -249,7 +249,7 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion, memcpy(chunk + pad_bytes, &erp_region_id, sizeof(erp_region_id)); memcpy(chunk + key_offset, - &aentry->enc_key[chunk_key_offsets[chunk_index]], + &aentry->ht_key.enc_key[chunk_key_offsets[chunk_index]], chunk_key_len); chunk += chunk_len; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c index d231f4d2888b..9eee229303cc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c @@ -1217,18 +1217,6 @@ static bool mlxsw_sp_acl_erp_delta_check(void *priv, const void *parent_obj, return err ? false : true; } -static int mlxsw_sp_acl_erp_hints_obj_cmp(const void *obj1, const void *obj2) -{ - const struct mlxsw_sp_acl_erp_key *key1 = obj1; - const struct mlxsw_sp_acl_erp_key *key2 = obj2; - - /* For hints purposes, two objects are considered equal - * in case the masks are the same. Does not matter what - * the "ctcam" value is. 
- */ - return memcmp(key1->mask, key2->mask, sizeof(key1->mask)); -} - static void *mlxsw_sp_acl_erp_delta_create(void *priv, void *parent_obj, void *obj) { @@ -1308,7 +1296,6 @@ static void mlxsw_sp_acl_erp_root_destroy(void *priv, void *root_priv) static const struct objagg_ops mlxsw_sp_acl_erp_objagg_ops = { .obj_size = sizeof(struct mlxsw_sp_acl_erp_key), .delta_check = mlxsw_sp_acl_erp_delta_check, - .hints_obj_cmp = mlxsw_sp_acl_erp_hints_obj_cmp, .delta_create = mlxsw_sp_acl_erp_delta_create, .delta_destroy = mlxsw_sp_acl_erp_delta_destroy, .root_create = mlxsw_sp_acl_erp_root_create, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h index 79a1d8606512..010204f73ea4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h @@ -167,9 +167,9 @@ struct mlxsw_sp_acl_atcam_region { }; struct mlxsw_sp_acl_atcam_entry_ht_key { - char full_enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded - * key. - */ + char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key, minus + * delta bits. + */ u8 erp_id; }; @@ -181,9 +181,6 @@ struct mlxsw_sp_acl_atcam_entry { struct rhash_head ht_node; struct list_head list; /* Member in entries_list */ struct mlxsw_sp_acl_atcam_entry_ht_key ht_key; - char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key, - * minus delta bits. - */ struct { u16 start; u8 mask; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index ba090262e27e..2c0cfa79d138 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -399,11 +399,13 @@ void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_hdroom *hdroom) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + unsigned int max_mtu = mlxsw_sp_port->dev->max_mtu; u16 reserve_cells; int i; + max_mtu += MLXSW_PORT_ETH_FRAME_HDR; /* Internal buffer. */ - reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, mlxsw_sp_port->max_mtu, + reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, max_mtu, mlxsw_sp_port->max_speed); reserve_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, reserve_cells); hdroom->int_buf.reserve_cells = reserve_cells; @@ -613,7 +615,9 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port) mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom); /* Buffer 9 is used for control traffic. 
*/ - size9 = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, mlxsw_sp_port->max_mtu); + size9 = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, + mlxsw_sp_port->dev->max_mtu + + MLXSW_PORT_ETH_FRAME_HDR); hdroom.bufs.buf[9].size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size9); return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom, true); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c index ca80af06465f..fa6eddd27ecf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c @@ -283,7 +283,7 @@ static u64 mlxsw_sp_dpipe_table_erif_size_get(void *priv) return MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); } -static struct devlink_dpipe_table_ops mlxsw_sp_erif_ops = { +static const struct devlink_dpipe_table_ops mlxsw_sp_erif_ops = { .matches_dump = mlxsw_sp_dpipe_table_erif_matches_dump, .actions_dump = mlxsw_sp_dpipe_table_erif_actions_dump, .entries_dump = mlxsw_sp_dpipe_table_erif_entries_dump, @@ -734,7 +734,7 @@ static u64 mlxsw_sp_dpipe_table_host4_size_get(void *priv) return mlxsw_sp_dpipe_table_host_size_get(mlxsw_sp, AF_INET); } -static struct devlink_dpipe_table_ops mlxsw_sp_host4_ops = { +static const struct devlink_dpipe_table_ops mlxsw_sp_host4_ops = { .matches_dump = mlxsw_sp_dpipe_table_host4_matches_dump, .actions_dump = mlxsw_sp_dpipe_table_host_actions_dump, .entries_dump = mlxsw_sp_dpipe_table_host4_entries_dump, @@ -811,7 +811,7 @@ static u64 mlxsw_sp_dpipe_table_host6_size_get(void *priv) return mlxsw_sp_dpipe_table_host_size_get(mlxsw_sp, AF_INET6); } -static struct devlink_dpipe_table_ops mlxsw_sp_host6_ops = { +static const struct devlink_dpipe_table_ops mlxsw_sp_host6_ops = { .matches_dump = mlxsw_sp_dpipe_table_host6_matches_dump, .actions_dump = mlxsw_sp_dpipe_table_host_actions_dump, .entries_dump = mlxsw_sp_dpipe_table_host6_entries_dump, @@ -1230,7 +1230,7 @@ mlxsw_sp_dpipe_table_adj_size_get(void *priv) return size; } -static struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = { +static const struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = { .matches_dump = mlxsw_sp_dpipe_table_adj_matches_dump, .actions_dump = mlxsw_sp_dpipe_table_adj_actions_dump, .entries_dump = mlxsw_sp_dpipe_table_adj_entries_dump, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c index a755b0a901d3..2bed8c86b7cf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -1068,7 +1068,21 @@ mlxsw_sp_get_module_eeprom_by_page(struct net_device *dev, } static int -mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) +mlxsw_sp_set_module_eeprom_by_page(struct net_device *dev, + const struct ethtool_module_eeprom *page, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 slot_index = mlxsw_sp_port->mapping.slot_index; + u8 module = mlxsw_sp_port->mapping.module; + + return mlxsw_env_set_module_eeprom_by_page(mlxsw_sp->core, slot_index, + module, page, extack); +} + +static int +mlxsw_sp_get_ts_info(struct net_device *netdev, struct kernel_ethtool_ts_info *info) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; @@ -1256,6 +1270,7 @@ const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 
.get_module_info = mlxsw_sp_get_module_info, .get_module_eeprom = mlxsw_sp_get_module_eeprom, .get_module_eeprom_by_page = mlxsw_sp_get_module_eeprom_by_page, + .set_module_eeprom_by_page = mlxsw_sp_set_module_eeprom_by_page, .get_ts_info = mlxsw_sp_get_ts_info, .get_eth_phy_stats = mlxsw_sp_get_eth_phy_stats, .get_eth_mac_stats = mlxsw_sp_get_eth_mac_stats, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c index cbb6c75a6620..5b174cb95eb8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c @@ -1276,7 +1276,7 @@ int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, } int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp); @@ -1661,7 +1661,7 @@ err_get_message_types: } int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h index a8b88230959a..769095d4932d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h @@ -11,7 +11,7 @@ struct mlxsw_sp; struct mlxsw_sp_port; struct mlxsw_sp_ptp_clock; -static inline int mlxsw_sp_ptp_get_ts_info_noptp(struct ethtool_ts_info *info) +static inline int mlxsw_sp_ptp_get_ts_info_noptp(struct kernel_ethtool_ts_info *info) { info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE; @@ -50,7 +50,7 @@ int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, void mlxsw_sp1_ptp_shaper_work(struct work_struct *work); int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp, - struct ethtool_ts_info *info); + struct kernel_ethtool_ts_info *info); int mlxsw_sp1_get_stats_count(void); void mlxsw_sp1_get_stats_strings(u8 **p); @@ -84,7 +84,7 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, struct hwtstamp_config *config); int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp, - struct ethtool_ts_info *info); + struct kernel_ethtool_ts_info *info); int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core, struct mlxsw_sp_port *mlxsw_sp_port, @@ -152,7 +152,7 @@ static inline void mlxsw_sp1_ptp_shaper_work(struct work_struct *work) } static inline int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { return mlxsw_sp_ptp_get_ts_info_noptp(info); } @@ -227,7 +227,7 @@ mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, } static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { return mlxsw_sp_ptp_get_ts_info_noptp(info); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 40ba314fbc72..800dfb64ec83 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -11450,12 +11450,16 @@ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp) { bool old_inc_parsing_depth, new_inc_parsing_depth; struct mlxsw_sp_mp_hash_config config = {}; + struct net *net = mlxsw_sp_net(mlxsw_sp); char 
recr2_pl[MLXSW_REG_RECR2_LEN]; unsigned long bit; u32 seed; int err; - seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0); + seed = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed).user_seed; + if (!seed) + seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0); + mlxsw_reg_recr2_pack(recr2_pl, seed); mlxsw_sp_mp4_hash_init(mlxsw_sp, &config); mlxsw_sp_mp6_hash_init(mlxsw_sp, &config); diff --git a/drivers/net/ethernet/meta/Kconfig b/drivers/net/ethernet/meta/Kconfig new file mode 100644 index 000000000000..d8f5e9f9bb33 --- /dev/null +++ b/drivers/net/ethernet/meta/Kconfig @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Meta Platforms network device configuration +# + +config NET_VENDOR_META + bool "Meta Platforms devices" + default y + help + If you have a network (Ethernet) card designed by Meta, say Y. + That's Meta as in the parent company of Facebook. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Meta cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_META + +config FBNIC + tristate "Meta Platforms Host Network Interface" + depends on X86_64 || COMPILE_TEST + depends on PCI_MSI + select PHYLINK + help + This driver supports Meta Platforms Host Network Interface. + + To compile this driver as a module, choose M here. The module + will be called fbnic. MSI-X interrupt support is required. + +endif # NET_VENDOR_META diff --git a/drivers/net/ethernet/meta/Makefile b/drivers/net/ethernet/meta/Makefile new file mode 100644 index 000000000000..88804f3de963 --- /dev/null +++ b/drivers/net/ethernet/meta/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Meta Platforms network device drivers. +# + +obj-$(CONFIG_FBNIC) += fbnic/ diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile new file mode 100644 index 000000000000..9373b558fdc9 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/Makefile @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) Meta Platforms, Inc. and affiliates. + +# +# Makefile for the Meta(R) Host Network Interface +# + +obj-$(CONFIG_FBNIC) += fbnic.o + +fbnic-y := fbnic_devlink.o \ + fbnic_fw.o \ + fbnic_irq.o \ + fbnic_mac.o \ + fbnic_netdev.o \ + fbnic_pci.o \ + fbnic_phylink.o \ + fbnic_rpc.o \ + fbnic_tlv.o \ + fbnic_txrx.o diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h new file mode 100644 index 000000000000..ad2689bfd6cb --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic.h @@ -0,0 +1,144 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Meta Platforms, Inc. and affiliates. 
*/ + +#ifndef _FBNIC_H_ +#define _FBNIC_H_ + +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +#include "fbnic_csr.h" +#include "fbnic_fw.h" +#include "fbnic_mac.h" +#include "fbnic_rpc.h" + +struct fbnic_dev { + struct device *dev; + struct net_device *netdev; + + u32 __iomem *uc_addr0; + u32 __iomem *uc_addr4; + const struct fbnic_mac *mac; + unsigned int fw_msix_vector; + unsigned int pcs_msix_vector; + unsigned short num_irqs; + + struct delayed_work service_task; + + struct fbnic_fw_mbx mbx[FBNIC_IPC_MBX_INDICES]; + struct fbnic_fw_cap fw_cap; + /* Lock protecting Tx Mailbox queue to prevent possible races */ + spinlock_t fw_tx_lock; + + unsigned long last_heartbeat_request; + unsigned long last_heartbeat_response; + u8 fw_heartbeat_enabled; + + u64 dsn; + u32 mps; + u32 readrq; + + /* Local copy of the devices TCAM */ + struct fbnic_act_tcam act_tcam[FBNIC_RPC_TCAM_ACT_NUM_ENTRIES]; + struct fbnic_mac_addr mac_addr[FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES]; + u8 mac_addr_boundary; + + /* Number of TCQs/RCQs available on hardware */ + u16 max_num_queues; +}; + +/* Reserve entry 0 in the MSI-X "others" array until we have filled all + * 32 of the possible interrupt slots. By doing this we can avoid any + * potential conflicts should we need to enable one of the debug interrupt + * causes later. + */ +enum { + FBNIC_FW_MSIX_ENTRY, + FBNIC_PCS_MSIX_ENTRY, + FBNIC_NON_NAPI_VECTORS +}; + +static inline bool fbnic_present(struct fbnic_dev *fbd) +{ + return !!READ_ONCE(fbd->uc_addr0); +} + +static inline void fbnic_wr32(struct fbnic_dev *fbd, u32 reg, u32 val) +{ + u32 __iomem *csr = READ_ONCE(fbd->uc_addr0); + + if (csr) + writel(val, csr + reg); +} + +u32 fbnic_rd32(struct fbnic_dev *fbd, u32 reg); + +static inline void fbnic_wrfl(struct fbnic_dev *fbd) +{ + fbnic_rd32(fbd, FBNIC_MASTER_SPARE_0); +} + +static inline void +fbnic_rmw32(struct fbnic_dev *fbd, u32 reg, u32 mask, u32 val) +{ + u32 v; + + v = fbnic_rd32(fbd, reg); + v &= ~mask; + v |= val; + fbnic_wr32(fbd, reg, v); +} + +#define wr32(_f, _r, _v) fbnic_wr32(_f, _r, _v) +#define rd32(_f, _r) fbnic_rd32(_f, _r) +#define wrfl(_f) fbnic_wrfl(_f) + +bool fbnic_fw_present(struct fbnic_dev *fbd); +u32 fbnic_fw_rd32(struct fbnic_dev *fbd, u32 reg); +void fbnic_fw_wr32(struct fbnic_dev *fbd, u32 reg, u32 val); + +#define fw_rd32(_f, _r) fbnic_fw_rd32(_f, _r) +#define fw_wr32(_f, _r, _v) fbnic_fw_wr32(_f, _r, _v) +#define fw_wrfl(_f) fbnic_fw_rd32(_f, FBNIC_FW_ZERO_REG) + +static inline bool fbnic_bmc_present(struct fbnic_dev *fbd) +{ + return fbd->fw_cap.bmc_present; +} + +static inline bool fbnic_init_failure(struct fbnic_dev *fbd) +{ + return !fbd->netdev; +} + +extern char fbnic_driver_name[]; + +void fbnic_devlink_free(struct fbnic_dev *fbd); +struct fbnic_dev *fbnic_devlink_alloc(struct pci_dev *pdev); +void fbnic_devlink_register(struct fbnic_dev *fbd); +void fbnic_devlink_unregister(struct fbnic_dev *fbd); + +int fbnic_fw_enable_mbx(struct fbnic_dev *fbd); +void fbnic_fw_disable_mbx(struct fbnic_dev *fbd); + +int fbnic_pcs_irq_enable(struct fbnic_dev *fbd); +void fbnic_pcs_irq_disable(struct fbnic_dev *fbd); + +int fbnic_request_irq(struct fbnic_dev *dev, int nr, irq_handler_t handler, + unsigned long flags, const char *name, void *data); +void fbnic_free_irq(struct fbnic_dev *dev, int nr, void *data); +void fbnic_free_irqs(struct fbnic_dev *fbd); +int fbnic_alloc_irqs(struct fbnic_dev *fbd); + +enum fbnic_boards { + fbnic_board_asic +}; + +struct fbnic_info { + unsigned int 
max_num_queues; + unsigned int bar_mask; +}; + +#endif /* _FBNIC_H_ */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h new file mode 100644 index 000000000000..a64360de0552 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h @@ -0,0 +1,838 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#ifndef _FBNIC_CSR_H_ +#define _FBNIC_CSR_H_ + +#include <linux/bitops.h> + +#define CSR_BIT(nr) (1u << (nr)) +#define CSR_GENMASK(h, l) GENMASK(h, l) + +#define DESC_BIT(nr) BIT_ULL(nr) +#define DESC_GENMASK(h, l) GENMASK_ULL(h, l) + +/* Defines the minimum firmware version required by the driver */ +#define MIN_FW_MAJOR_VERSION 0 +#define MIN_FW_MINOR_VERSION 10 +#define MIN_FW_BUILD_VERSION 6 +#define MIN_FW_VERSION_CODE (MIN_FW_MAJOR_VERSION * (1u << 24) + \ + MIN_FW_MINOR_VERSION * (1u << 16) + \ + MIN_FW_BUILD_VERSION) + +#define PCI_DEVICE_ID_META_FBNIC_ASIC 0x0013 + +#define FBNIC_CLOCK_FREQ (600 * (1000 * 1000)) + +/* Transmit Work Descriptor Format */ +/* Length, Type, Offset Masks and Shifts */ +#define FBNIC_TWD_L2_HLEN_MASK DESC_GENMASK(5, 0) + +#define FBNIC_TWD_L3_TYPE_MASK DESC_GENMASK(7, 6) +enum { + FBNIC_TWD_L3_TYPE_OTHER = 0, + FBNIC_TWD_L3_TYPE_IPV4 = 1, + FBNIC_TWD_L3_TYPE_IPV6 = 2, + FBNIC_TWD_L3_TYPE_V6V6 = 3, +}; + +#define FBNIC_TWD_L3_OHLEN_MASK DESC_GENMASK(15, 8) +#define FBNIC_TWD_L3_IHLEN_MASK DESC_GENMASK(23, 16) + +enum { + FBNIC_TWD_L4_TYPE_OTHER = 0, + FBNIC_TWD_L4_TYPE_TCP = 1, + FBNIC_TWD_L4_TYPE_UDP = 2, +}; + +#define FBNIC_TWD_CSUM_OFFSET_MASK DESC_GENMASK(27, 24) +#define FBNIC_TWD_L4_HLEN_MASK DESC_GENMASK(31, 28) + +/* Flags and Type */ +#define FBNIC_TWD_L4_TYPE_MASK DESC_GENMASK(33, 32) +#define FBNIC_TWD_FLAG_REQ_TS DESC_BIT(34) +#define FBNIC_TWD_FLAG_REQ_LSO DESC_BIT(35) +#define FBNIC_TWD_FLAG_REQ_CSO DESC_BIT(36) +#define FBNIC_TWD_FLAG_REQ_COMPLETION DESC_BIT(37) +#define FBNIC_TWD_FLAG_DEST_MAC DESC_BIT(43) +#define FBNIC_TWD_FLAG_DEST_BMC DESC_BIT(44) +#define FBNIC_TWD_FLAG_DEST_FW DESC_BIT(45) +#define FBNIC_TWD_TYPE_MASK DESC_GENMASK(47, 46) +enum { + FBNIC_TWD_TYPE_META = 0, + FBNIC_TWD_TYPE_OPT_META = 1, + FBNIC_TWD_TYPE_AL = 2, + FBNIC_TWD_TYPE_LAST_AL = 3, +}; + +/* MSS and Completion Req */ +#define FBNIC_TWD_MSS_MASK DESC_GENMASK(61, 48) + +#define FBNIC_TWD_TS_MASK DESC_GENMASK(39, 0) +#define FBNIC_TWD_ADDR_MASK DESC_GENMASK(45, 0) +#define FBNIC_TWD_LEN_MASK DESC_GENMASK(63, 48) + +/* Tx Completion Descriptor Format */ +#define FBNIC_TCD_TYPE0_HEAD0_MASK DESC_GENMASK(15, 0) +#define FBNIC_TCD_TYPE0_HEAD1_MASK DESC_GENMASK(31, 16) + +#define FBNIC_TCD_TYPE1_TS_MASK DESC_GENMASK(39, 0) + +#define FBNIC_TCD_STATUS_MASK DESC_GENMASK(59, 48) +#define FBNIC_TCD_STATUS_TS_INVALID DESC_BIT(48) +#define FBNIC_TCD_STATUS_ILLEGAL_TS_REQ DESC_BIT(49) +#define FBNIC_TCD_TWQ1 DESC_BIT(60) +#define FBNIC_TCD_TYPE_MASK DESC_GENMASK(62, 61) +enum { + FBNIC_TCD_TYPE_0 = 0, + FBNIC_TCD_TYPE_1 = 1, +}; + +#define FBNIC_TCD_DONE DESC_BIT(63) + +/* Rx Buffer Descriptor Format + * + * The layout of this can vary depending on the page size of the system. + * + * If the page size is 4K then the layout will simply consist of ID for + * the 16 most significant bits, and the lower 46 are essentially the page + * address with the lowest 12 bits being reserved 0 due to the fact that + * a page will be aligned. + * + * If the page size is larger than 4K then the lower n bits of the ID and + * page address will be reserved for the fragment ID. 
This fragment will + * be 4K in size and will be used to index both the DMA address and the ID + * by the same amount. + */ +#define FBNIC_BD_DESC_ADDR_MASK DESC_GENMASK(45, 12) +#define FBNIC_BD_DESC_ID_MASK DESC_GENMASK(63, 48) +#define FBNIC_BD_FRAG_SIZE \ + (FBNIC_BD_DESC_ADDR_MASK & ~(FBNIC_BD_DESC_ADDR_MASK - 1)) +#define FBNIC_BD_FRAG_COUNT \ + (PAGE_SIZE / FBNIC_BD_FRAG_SIZE) +#define FBNIC_BD_FRAG_ADDR_MASK \ + (FBNIC_BD_DESC_ADDR_MASK & \ + ~(FBNIC_BD_DESC_ADDR_MASK * FBNIC_BD_FRAG_COUNT)) +#define FBNIC_BD_FRAG_ID_MASK \ + (FBNIC_BD_DESC_ID_MASK & \ + ~(FBNIC_BD_DESC_ID_MASK * FBNIC_BD_FRAG_COUNT)) +#define FBNIC_BD_PAGE_ADDR_MASK \ + (FBNIC_BD_DESC_ADDR_MASK & ~FBNIC_BD_FRAG_ADDR_MASK) +#define FBNIC_BD_PAGE_ID_MASK \ + (FBNIC_BD_DESC_ID_MASK & ~FBNIC_BD_FRAG_ID_MASK) + +/* Rx Completion Queue Descriptors */ +#define FBNIC_RCD_TYPE_MASK DESC_GENMASK(62, 61) +enum { + FBNIC_RCD_TYPE_HDR_AL = 0, + FBNIC_RCD_TYPE_PAY_AL = 1, + FBNIC_RCD_TYPE_OPT_META = 2, + FBNIC_RCD_TYPE_META = 3, +}; + +#define FBNIC_RCD_DONE DESC_BIT(63) + +/* Address/Length Completion Descriptors */ +#define FBNIC_RCD_AL_BUFF_ID_MASK DESC_GENMASK(15, 0) +#define FBNIC_RCD_AL_BUFF_FRAG_MASK (FBNIC_BD_FRAG_COUNT - 1) +#define FBNIC_RCD_AL_BUFF_PAGE_MASK \ + (FBNIC_RCD_AL_BUFF_ID_MASK & ~FBNIC_RCD_AL_BUFF_FRAG_MASK) +#define FBNIC_RCD_AL_BUFF_LEN_MASK DESC_GENMASK(28, 16) +#define FBNIC_RCD_AL_BUFF_OFF_MASK DESC_GENMASK(43, 32) +#define FBNIC_RCD_AL_PAGE_FIN DESC_BIT(60) + +/* Header AL specific values */ +#define FBNIC_RCD_HDR_AL_OVERFLOW DESC_BIT(53) +#define FBNIC_RCD_HDR_AL_DMA_HINT_MASK DESC_GENMASK(59, 54) +enum { + FBNIC_RCD_HDR_AL_DMA_HINT_NONE = 0, + FBNIC_RCD_HDR_AL_DMA_HINT_L2 = 1, + FBNIC_RCD_HDR_AL_DMA_HINT_L3 = 2, + FBNIC_RCD_HDR_AL_DMA_HINT_L4 = 4, +}; + +/* Optional Metadata Completion Descriptors */ +#define FBNIC_RCD_OPT_META_TS_MASK DESC_GENMASK(39, 0) +#define FBNIC_RCD_OPT_META_ACTION_MASK DESC_GENMASK(45, 40) +#define FBNIC_RCD_OPT_META_ACTION DESC_BIT(57) +#define FBNIC_RCD_OPT_META_TS DESC_BIT(58) +#define FBNIC_RCD_OPT_META_TYPE_MASK DESC_GENMASK(60, 59) + +/* Metadata Completion Descriptors */ +#define FBNIC_RCD_META_RSS_HASH_MASK DESC_GENMASK(31, 0) +#define FBNIC_RCD_META_L2_CSUM_MASK DESC_GENMASK(47, 32) +#define FBNIC_RCD_META_L3_TYPE_MASK DESC_GENMASK(49, 48) +enum { + FBNIC_RCD_META_L3_TYPE_OTHER = 0, + FBNIC_RCD_META_L3_TYPE_IPV4 = 1, + FBNIC_RCD_META_L3_TYPE_IPV6 = 2, + FBNIC_RCD_META_L3_TYPE_V6V6 = 3, +}; + +#define FBNIC_RCD_META_L4_TYPE_MASK DESC_GENMASK(51, 50) +enum { + FBNIC_RCD_META_L4_TYPE_OTHER = 0, + FBNIC_RCD_META_L4_TYPE_TCP = 1, + FBNIC_RCD_META_L4_TYPE_UDP = 2, +}; + +#define FBNIC_RCD_META_L4_CSUM_UNNECESSARY DESC_BIT(52) +#define FBNIC_RCD_META_ERR_MAC_EOP DESC_BIT(53) +#define FBNIC_RCD_META_ERR_TRUNCATED_FRAME DESC_BIT(54) +#define FBNIC_RCD_META_ERR_PARSER DESC_BIT(55) +#define FBNIC_RCD_META_UNCORRECTABLE_ERR_MASK \ + (FBNIC_RCD_META_ERR_MAC_EOP | FBNIC_RCD_META_ERR_TRUNCATED_FRAME) +#define FBNIC_RCD_META_ECN DESC_BIT(60) + +/* Register Definitions + * + * The registers are laid as indexes into an le32 array. As such the actual + * address is 4 times the index value. Below each register is defined as 3 + * fields, name, index, and Address. 
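To illustrate the fragment layout described in the buffer descriptor comment above, the standalone sketch below evaluates the mask arithmetic for a hypothetical 16K page size, where the fragment fields are non-trivial. GENMASK_ULL() is open-coded so the example builds outside the kernel; none of this is fbnic driver code.

/* Worked sketch (not driver code) of the FBNIC_BD_* mask derivation. */
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
        (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))

#define BD_DESC_ADDR_MASK       GENMASK_ULL(45, 12)
#define PAGE_SZ                 16384ULL        /* pretend PAGE_SIZE is 16K */

int main(void)
{
        uint64_t frag_size  = BD_DESC_ADDR_MASK & ~(BD_DESC_ADDR_MASK - 1);
        uint64_t frag_count = PAGE_SZ / frag_size;
        uint64_t frag_addr_mask = BD_DESC_ADDR_MASK &
                                  ~(BD_DESC_ADDR_MASK * frag_count);
        uint64_t page_addr_mask = BD_DESC_ADDR_MASK & ~frag_addr_mask;

        /* frag_size is always 4K (lowest bit of the address field is bit 12);
         * with 16K pages there are 4 fragments per page, so bits 13:12 become
         * the fragment index and bits 45:14 address the page itself. */
        printf("frag_size      = %llu\n", (unsigned long long)frag_size);
        printf("frag_count     = %llu\n", (unsigned long long)frag_count);
        printf("frag_addr_mask = 0x%llx\n", (unsigned long long)frag_addr_mask);
        printf("page_addr_mask = 0x%llx\n", (unsigned long long)page_addr_mask);
        return 0;
}

For the plain 4K page case, frag_count is 1 and both fragment masks evaluate to 0, which matches the "ID plus page address" layout the comment describes.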
+ * + * Name Index Address + *************************************************************************/ +/* Interrupt Registers */ +#define FBNIC_CSR_START_INTR 0x00000 /* CSR section delimiter */ +#define FBNIC_INTR_STATUS(n) (0x00000 + (n)) /* 0x00000 + 4*n */ +#define FBNIC_INTR_STATUS_CNT 8 +#define FBNIC_INTR_MASK(n) (0x00008 + (n)) /* 0x00020 + 4*n */ +#define FBNIC_INTR_MASK_CNT 8 +#define FBNIC_INTR_SET(n) (0x00010 + (n)) /* 0x00040 + 4*n */ +#define FBNIC_INTR_SET_CNT 8 +#define FBNIC_INTR_CLEAR(n) (0x00018 + (n)) /* 0x00060 + 4*n */ +#define FBNIC_INTR_CLEAR_CNT 8 +#define FBNIC_INTR_SW_STATUS(n) (0x00020 + (n)) /* 0x00080 + 4*n */ +#define FBNIC_INTR_SW_STATUS_CNT 8 +#define FBNIC_INTR_SW_AC_MODE(n) (0x00028 + (n)) /* 0x000a0 + 4*n */ +#define FBNIC_INTR_SW_AC_MODE_CNT 8 +#define FBNIC_INTR_MASK_SET(n) (0x00030 + (n)) /* 0x000c0 + 4*n */ +#define FBNIC_INTR_MASK_SET_CNT 8 +#define FBNIC_INTR_MASK_CLEAR(n) (0x00038 + (n)) /* 0x000e0 + 4*n */ +#define FBNIC_INTR_MASK_CLEAR_CNT 8 +#define FBNIC_MAX_MSIX_VECS 256U +#define FBNIC_INTR_MSIX_CTRL(n) (0x00040 + (n)) /* 0x00100 + 4*n */ +#define FBNIC_INTR_MSIX_CTRL_VECTOR_MASK CSR_GENMASK(7, 0) +#define FBNIC_INTR_MSIX_CTRL_ENABLE CSR_BIT(31) +enum { + FBNIC_INTR_MSIX_CTRL_PCS_IDX = 34, +}; + +#define FBNIC_CSR_END_INTR 0x0005f /* CSR section delimiter */ + +/* Interrupt MSIX Registers */ +#define FBNIC_CSR_START_INTR_CQ 0x00400 /* CSR section delimiter */ +#define FBNIC_INTR_CQ_REARM(n) \ + (0x00400 + 4 * (n)) /* 0x01000 + 16*n */ +#define FBNIC_INTR_CQ_REARM_CNT 256 +#define FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT CSR_GENMASK(13, 0) +#define FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT_UPD_EN CSR_BIT(14) +#define FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT CSR_GENMASK(28, 15) +#define FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT_UPD_EN CSR_BIT(29) +#define FBNIC_INTR_CQ_REARM_INTR_RELOAD CSR_BIT(30) +#define FBNIC_INTR_CQ_REARM_INTR_UNMASK CSR_BIT(31) + +#define FBNIC_INTR_RCQ_TIMEOUT(n) \ + (0x00401 + 4 * (n)) /* 0x01004 + 16*n */ +#define FBNIC_INTR_RCQ_TIMEOUT_CNT 256 +#define FBNIC_INTR_TCQ_TIMEOUT(n) \ + (0x00402 + 4 * (n)) /* 0x01008 + 16*n */ +#define FBNIC_INTR_TCQ_TIMEOUT_CNT 256 +#define FBNIC_CSR_END_INTR_CQ 0x007fe /* CSR section delimiter */ + +/* Global QM Tx registers */ +#define FBNIC_CSR_START_QM_TX 0x00800 /* CSR section delimiter */ +#define FBNIC_QM_TWQ_IDLE(n) (0x00800 + (n)) /* 0x02000 + 4*n */ +#define FBNIC_QM_TWQ_IDLE_CNT 8 +#define FBNIC_QM_TWQ_DEFAULT_META_L 0x00818 /* 0x02060 */ +#define FBNIC_QM_TWQ_DEFAULT_META_H 0x00819 /* 0x02064 */ + +#define FBNIC_QM_TQS_CTL0 0x0081b /* 0x0206c */ +#define FBNIC_QM_TQS_CTL0_LSO_TS_MASK CSR_BIT(0) +enum { + FBNIC_QM_TQS_CTL0_LSO_TS_FIRST = 0, + FBNIC_QM_TQS_CTL0_LSO_TS_LAST = 1, +}; + +#define FBNIC_QM_TQS_CTL0_PREFETCH_THRESH CSR_GENMASK(7, 1) +enum { + FBNIC_QM_TQS_CTL0_PREFETCH_THRESH_MIN = 16, +}; + +#define FBNIC_QM_TQS_CTL1 0x0081c /* 0x02070 */ +#define FBNIC_QM_TQS_CTL1_MC_MAX_CREDITS CSR_GENMASK(7, 0) +#define FBNIC_QM_TQS_CTL1_BULK_MAX_CREDITS CSR_GENMASK(15, 8) +#define FBNIC_QM_TQS_MTU_CTL0 0x0081d /* 0x02074 */ +#define FBNIC_QM_TQS_MTU_CTL1 0x0081e /* 0x02078 */ +#define FBNIC_QM_TQS_MTU_CTL1_BULK CSR_GENMASK(13, 0) +#define FBNIC_QM_TCQ_IDLE(n) (0x00821 + (n)) /* 0x02084 + 4*n */ +#define FBNIC_QM_TCQ_IDLE_CNT 4 +#define FBNIC_QM_TCQ_CTL0 0x0082d /* 0x020b4 */ +#define FBNIC_QM_TCQ_CTL0_COAL_WAIT CSR_GENMASK(15, 0) +#define FBNIC_QM_TCQ_CTL0_TICK_CYCLES CSR_GENMASK(26, 16) +#define FBNIC_QM_TQS_IDLE(n) (0x00830 + (n)) /* 0x020c0 + 4*n */ +#define FBNIC_QM_TQS_IDLE_CNT 8 +#define 
FBNIC_QM_TQS_EDT_TS_RANGE 0x00849 /* 0x2124 */ +#define FBNIC_QM_TDE_IDLE(n) (0x00853 + (n)) /* 0x0214c + 4*n */ +#define FBNIC_QM_TDE_IDLE_CNT 8 +#define FBNIC_QM_TNI_TDF_CTL 0x0086c /* 0x021b0 */ +#define FBNIC_QM_TNI_TDF_CTL_MRRS CSR_GENMASK(1, 0) +#define FBNIC_QM_TNI_TDF_CTL_CLS CSR_GENMASK(3, 2) +#define FBNIC_QM_TNI_TDF_CTL_MAX_OT CSR_GENMASK(11, 4) +#define FBNIC_QM_TNI_TDF_CTL_MAX_OB CSR_GENMASK(23, 12) +#define FBNIC_QM_TNI_TDE_CTL 0x0086d /* 0x021b4 */ +#define FBNIC_QM_TNI_TDE_CTL_MRRS CSR_GENMASK(1, 0) +#define FBNIC_QM_TNI_TDE_CTL_CLS CSR_GENMASK(3, 2) +#define FBNIC_QM_TNI_TDE_CTL_MAX_OT CSR_GENMASK(11, 4) +#define FBNIC_QM_TNI_TDE_CTL_MAX_OB CSR_GENMASK(24, 12) +#define FBNIC_QM_TNI_TDE_CTL_MRRS_1K CSR_BIT(25) +#define FBNIC_QM_TNI_TCM_CTL 0x0086e /* 0x021b8 */ +#define FBNIC_QM_TNI_TCM_CTL_MPS CSR_GENMASK(1, 0) +#define FBNIC_QM_TNI_TCM_CTL_CLS CSR_GENMASK(3, 2) +#define FBNIC_QM_TNI_TCM_CTL_MAX_OT CSR_GENMASK(11, 4) +#define FBNIC_QM_TNI_TCM_CTL_MAX_OB CSR_GENMASK(23, 12) +#define FBNIC_CSR_END_QM_TX 0x00873 /* CSR section delimiter */ + +/* Global QM Rx registers */ +#define FBNIC_CSR_START_QM_RX 0x00c00 /* CSR section delimiter */ +#define FBNIC_QM_RCQ_IDLE(n) (0x00c00 + (n)) /* 0x03000 + 4*n */ +#define FBNIC_QM_RCQ_IDLE_CNT 4 +#define FBNIC_QM_RCQ_CTL0 0x00c0c /* 0x03030 */ +#define FBNIC_QM_RCQ_CTL0_COAL_WAIT CSR_GENMASK(15, 0) +#define FBNIC_QM_RCQ_CTL0_TICK_CYCLES CSR_GENMASK(26, 16) +#define FBNIC_QM_HPQ_IDLE(n) (0x00c0f + (n)) /* 0x0303c + 4*n */ +#define FBNIC_QM_HPQ_IDLE_CNT 4 +#define FBNIC_QM_PPQ_IDLE(n) (0x00c13 + (n)) /* 0x0304c + 4*n */ +#define FBNIC_QM_PPQ_IDLE_CNT 4 +#define FBNIC_QM_RNI_RBP_CTL 0x00c2d /* 0x030b4 */ +#define FBNIC_QM_RNI_RBP_CTL_MRRS CSR_GENMASK(1, 0) +#define FBNIC_QM_RNI_RBP_CTL_CLS CSR_GENMASK(3, 2) +#define FBNIC_QM_RNI_RBP_CTL_MAX_OT CSR_GENMASK(11, 4) +#define FBNIC_QM_RNI_RBP_CTL_MAX_OB CSR_GENMASK(23, 12) +#define FBNIC_QM_RNI_RDE_CTL 0x00c2e /* 0x030b8 */ +#define FBNIC_QM_RNI_RDE_CTL_MPS CSR_GENMASK(1, 0) +#define FBNIC_QM_RNI_RDE_CTL_CLS CSR_GENMASK(3, 2) +#define FBNIC_QM_RNI_RDE_CTL_MAX_OT CSR_GENMASK(11, 4) +#define FBNIC_QM_RNI_RDE_CTL_MAX_OB CSR_GENMASK(23, 12) +#define FBNIC_QM_RNI_RCM_CTL 0x00c2f /* 0x030bc */ +#define FBNIC_QM_RNI_RCM_CTL_MPS CSR_GENMASK(1, 0) +#define FBNIC_QM_RNI_RCM_CTL_CLS CSR_GENMASK(3, 2) +#define FBNIC_QM_RNI_RCM_CTL_MAX_OT CSR_GENMASK(11, 4) +#define FBNIC_QM_RNI_RCM_CTL_MAX_OB CSR_GENMASK(23, 12) +#define FBNIC_CSR_END_QM_RX 0x00c34 /* CSR section delimiter */ + +/* TCE registers */ +#define FBNIC_CSR_START_TCE 0x04000 /* CSR section delimiter */ +#define FBNIC_TCE_REG_BASE 0x04000 /* 0x10000 */ + +#define FBNIC_TCE_LSO_CTRL 0x04000 /* 0x10000 */ +#define FBNIC_TCE_LSO_CTRL_TCPF_CLR_1ST CSR_GENMASK(8, 0) +#define FBNIC_TCE_LSO_CTRL_TCPF_CLR_MID CSR_GENMASK(17, 9) +#define FBNIC_TCE_LSO_CTRL_TCPF_CLR_END CSR_GENMASK(26, 18) +#define FBNIC_TCE_LSO_CTRL_IPID_MODE_INC CSR_BIT(27) + +#define FBNIC_TCE_CSO_CTRL 0x04001 /* 0x10004 */ +#define FBNIC_TCE_CSO_CTRL_TCP_ZERO_CSUM CSR_BIT(0) + +#define FBNIC_TCE_TXB_CTRL 0x04002 /* 0x10008 */ +#define FBNIC_TCE_TXB_CTRL_LOAD CSR_BIT(0) +#define FBNIC_TCE_TXB_CTRL_TCAM_ENABLE CSR_BIT(1) +#define FBNIC_TCE_TXB_CTRL_DISABLE CSR_BIT(2) + +#define FBNIC_TCE_TXB_ENQ_WRR_CTRL 0x04003 /* 0x1000c */ +#define FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT0 CSR_GENMASK(7, 0) +#define FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT1 CSR_GENMASK(15, 8) +#define FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT2 CSR_GENMASK(23, 16) + +#define FBNIC_TCE_TXB_TEI_Q0_CTRL 0x04004 /* 0x10010 */ +#define 
FBNIC_TCE_TXB_TEI_Q1_CTRL 0x04005 /* 0x10014 */ +#define FBNIC_TCE_TXB_MC_Q_CTRL 0x04006 /* 0x10018 */ +#define FBNIC_TCE_TXB_RX_TEI_Q_CTRL 0x04007 /* 0x1001c */ +#define FBNIC_TCE_TXB_RX_BMC_Q_CTRL 0x04008 /* 0x10020 */ +#define FBNIC_TCE_TXB_Q_CTRL_START CSR_GENMASK(10, 0) +#define FBNIC_TCE_TXB_Q_CTRL_SIZE CSR_GENMASK(22, 11) + +#define FBNIC_TCE_TXB_TEI_DWRR_CTRL 0x04009 /* 0x10024 */ +#define FBNIC_TCE_TXB_TEI_DWRR_CTRL_QUANTUM0 CSR_GENMASK(7, 0) +#define FBNIC_TCE_TXB_TEI_DWRR_CTRL_QUANTUM1 CSR_GENMASK(15, 8) +#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL 0x0400a /* 0x10028 */ +#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM0 CSR_GENMASK(7, 0) +#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM1 CSR_GENMASK(15, 8) +#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM2 CSR_GENMASK(23, 16) + +#define FBNIC_TCE_TXB_CLDR_CFG 0x0400b /* 0x1002c */ +#define FBNIC_TCE_TXB_CLDR_CFG_NUM_SLOT CSR_GENMASK(5, 0) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG(n) (0x0400c + (n)) /* 0x10030 + 4*n */ +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_CNT 16 +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_0_0 CSR_GENMASK(1, 0) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_0_1 CSR_GENMASK(3, 2) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_0_2 CSR_GENMASK(5, 4) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_0_3 CSR_GENMASK(7, 6) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_1_0 CSR_GENMASK(9, 8) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_1_1 CSR_GENMASK(11, 10) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_1_2 CSR_GENMASK(13, 12) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_1_3 CSR_GENMASK(15, 14) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_2_0 CSR_GENMASK(17, 16) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_2_1 CSR_GENMASK(19, 18) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_2_2 CSR_GENMASK(21, 20) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_2_3 CSR_GENMASK(23, 22) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_3_0 CSR_GENMASK(25, 24) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_3_1 CSR_GENMASK(27, 26) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_3_2 CSR_GENMASK(29, 28) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_3_3 CSR_GENMASK(31, 30) + +#define FBNIC_TCE_BMC_MAX_PKTSZ 0x0403a /* 0x100e8 */ +#define FBNIC_TCE_BMC_MAX_PKTSZ_TX CSR_GENMASK(13, 0) +#define FBNIC_TCE_BMC_MAX_PKTSZ_RX CSR_GENMASK(27, 14) +#define FBNIC_TCE_MC_MAX_PKTSZ 0x0403b /* 0x100ec */ +#define FBNIC_TCE_MC_MAX_PKTSZ_TMI CSR_GENMASK(13, 0) + +#define FBNIC_TCE_SOP_PROT_CTRL 0x0403c /* 0x100f0 */ +#define FBNIC_TCE_SOP_PROT_CTRL_TBI CSR_GENMASK(7, 0) +#define FBNIC_TCE_SOP_PROT_CTRL_TTI_FRM CSR_GENMASK(14, 8) +#define FBNIC_TCE_SOP_PROT_CTRL_TTI_CM CSR_GENMASK(18, 15) + +#define FBNIC_TCE_DROP_CTRL 0x0403d /* 0x100f4 */ +#define FBNIC_TCE_DROP_CTRL_TTI_CM_DROP_EN CSR_BIT(0) +#define FBNIC_TCE_DROP_CTRL_TTI_FRM_DROP_EN CSR_BIT(1) +#define FBNIC_TCE_DROP_CTRL_TTI_TBI_DROP_EN CSR_BIT(2) + +#define FBNIC_TCE_TXB_TX_BMC_Q_CTRL 0x0404B /* 0x1012c */ +#define FBNIC_TCE_TXB_BMC_DWRR_CTRL 0x0404C /* 0x10130 */ +#define FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM0 CSR_GENMASK(7, 0) +#define FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM1 CSR_GENMASK(15, 8) +#define FBNIC_TCE_TXB_TEI_DWRR_CTRL_EXT 0x0404D /* 0x10134 */ +#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_EXT \ + 0x0404E /* 0x10138 */ +#define FBNIC_TCE_TXB_BMC_DWRR_CTRL_EXT 0x0404F /* 0x1013c */ +#define FBNIC_CSR_END_TCE 0x04050 /* CSR section delimiter */ + +/* TMI registers */ +#define FBNIC_CSR_START_TMI 0x04400 /* CSR section delimiter */ +#define FBNIC_TMI_SOP_PROT_CTRL 0x04400 /* 0x11000 */ +#define FBNIC_TMI_DROP_CTRL 0x04401 /* 0x11004 */ +#define 
FBNIC_TMI_DROP_CTRL_EN CSR_BIT(0) +#define FBNIC_CSR_END_TMI 0x0443f /* CSR section delimiter */ +/* Rx Buffer Registers */ +#define FBNIC_CSR_START_RXB 0x08000 /* CSR section delimiter */ +enum { + FBNIC_RXB_FIFO_MC = 0, + /* Unused */ + /* Unused */ + FBNIC_RXB_FIFO_NET_TO_BMC = 3, + FBNIC_RXB_FIFO_HOST = 4, + /* Unused */ + FBNIC_RXB_FIFO_BMC_TO_HOST = 6, + /* Unused */ + FBNIC_RXB_FIFO_INDICES = 8 +}; + +#define FBNIC_RXB_CT_SIZE(n) (0x08000 + (n)) /* 0x20000 + 4*n */ +#define FBNIC_RXB_CT_SIZE_CNT 8 +#define FBNIC_RXB_CT_SIZE_HEADER CSR_GENMASK(5, 0) +#define FBNIC_RXB_CT_SIZE_PAYLOAD CSR_GENMASK(11, 6) +#define FBNIC_RXB_CT_SIZE_ENABLE CSR_BIT(12) +#define FBNIC_RXB_PAUSE_DROP_CTRL 0x08008 /* 0x20020 */ +#define FBNIC_RXB_PAUSE_DROP_CTRL_DROP_ENABLE CSR_GENMASK(7, 0) +#define FBNIC_RXB_PAUSE_DROP_CTRL_PAUSE_ENABLE CSR_GENMASK(15, 8) +#define FBNIC_RXB_PAUSE_DROP_CTRL_ECN_ENABLE CSR_GENMASK(23, 16) +#define FBNIC_RXB_PAUSE_DROP_CTRL_PS_ENABLE CSR_GENMASK(27, 24) +#define FBNIC_RXB_PAUSE_THLD(n) (0x08009 + (n)) /* 0x20024 + 4*n */ +#define FBNIC_RXB_PAUSE_THLD_CNT 8 +#define FBNIC_RXB_PAUSE_THLD_ON CSR_GENMASK(12, 0) +#define FBNIC_RXB_PAUSE_THLD_OFF CSR_GENMASK(25, 13) +#define FBNIC_RXB_DROP_THLD(n) (0x08011 + (n)) /* 0x20044 + 4*n */ +#define FBNIC_RXB_DROP_THLD_CNT 8 +#define FBNIC_RXB_DROP_THLD_ON CSR_GENMASK(12, 0) +#define FBNIC_RXB_DROP_THLD_OFF CSR_GENMASK(25, 13) +#define FBNIC_RXB_ECN_THLD(n) (0x0801e + (n)) /* 0x20078 + 4*n */ +#define FBNIC_RXB_ECN_THLD_CNT 8 +#define FBNIC_RXB_ECN_THLD_ON CSR_GENMASK(12, 0) +#define FBNIC_RXB_ECN_THLD_OFF CSR_GENMASK(25, 13) +#define FBNIC_RXB_PBUF_CFG(n) (0x08027 + (n)) /* 0x2009c + 4*n */ +#define FBNIC_RXB_PBUF_CFG_CNT 8 +#define FBNIC_RXB_PBUF_BASE_ADDR CSR_GENMASK(12, 0) +#define FBNIC_RXB_PBUF_SIZE CSR_GENMASK(21, 13) +#define FBNIC_RXB_DWRR_RDE_WEIGHT0 0x0802f /* 0x200bc */ +#define FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM0 CSR_GENMASK(7, 0) +#define FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM1 CSR_GENMASK(15, 8) +#define FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM2 CSR_GENMASK(23, 16) +#define FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM3 CSR_GENMASK(31, 24) +#define FBNIC_RXB_DWRR_RDE_WEIGHT1 0x08030 /* 0x200c0 */ +#define FBNIC_RXB_DWRR_RDE_WEIGHT1_QUANTUM4 CSR_GENMASK(7, 0) +#define FBNIC_RXB_DWRR_BMC_WEIGHT 0x08031 /* 0x200c4 */ +#define FBNIC_RXB_CLDR_PRIO_CFG(n) (0x8034 + (n)) /* 0x200d0 + 4*n */ +#define FBNIC_RXB_CLDR_PRIO_CFG_CNT 16 +#define FBNIC_RXB_ENDIAN_FCS 0x08044 /* 0x20110 */ +enum { + /* Unused */ + /* Unused */ + FBNIC_RXB_DEQUEUE_BMC = 2, + FBNIC_RXB_DEQUEUE_HOST = 3, + FBNIC_RXB_DEQUEUE_INDICES = 4 +}; + +#define FBNIC_RXB_PBUF_CREDIT(n) (0x08047 + (n)) /* 0x2011C + 4*n */ +#define FBNIC_RXB_PBUF_CREDIT_CNT 8 +#define FBNIC_RXB_PBUF_CREDIT_MASK CSR_GENMASK(13, 0) +#define FBNIC_RXB_INTF_CREDIT 0x0804f /* 0x2013C */ +#define FBNIC_RXB_INTF_CREDIT_MASK0 CSR_GENMASK(3, 0) +#define FBNIC_RXB_INTF_CREDIT_MASK1 CSR_GENMASK(7, 4) +#define FBNIC_RXB_INTF_CREDIT_MASK2 CSR_GENMASK(11, 8) +#define FBNIC_RXB_INTF_CREDIT_MASK3 CSR_GENMASK(15, 12) + +#define FBNIC_RXB_PAUSE_EVENT_CNT(n) (0x08053 + (n)) /* 0x2014c + 4*n */ +#define FBNIC_RXB_DROP_FRMS_STS(n) (0x08057 + (n)) /* 0x2015c + 4*n */ +#define FBNIC_RXB_DROP_BYTES_STS_L(n) \ + (0x08080 + 2 * (n)) /* 0x20200 + 8*n */ +#define FBNIC_RXB_DROP_BYTES_STS_H(n) \ + (0x08081 + 2 * (n)) /* 0x20204 + 8*n */ +#define FBNIC_RXB_TRUN_FRMS_STS(n) (0x08091 + (n)) /* 0x20244 + 4*n */ +#define FBNIC_RXB_TRUN_BYTES_STS_L(n) \ + (0x080c0 + 2 * (n)) /* 0x20300 + 8*n */ +#define FBNIC_RXB_TRUN_BYTES_STS_H(n) \ + 
(0x080c1 + 2 * (n)) /* 0x20304 + 8*n */ +#define FBNIC_RXB_TRANS_PAUSE_STS(n) (0x080d1 + (n)) /* 0x20344 + 4*n */ +#define FBNIC_RXB_TRANS_DROP_STS(n) (0x080d9 + (n)) /* 0x20364 + 4*n */ +#define FBNIC_RXB_TRANS_ECN_STS(n) (0x080e1 + (n)) /* 0x20384 + 4*n */ +enum { + FBNIC_RXB_ENQUEUE_NET = 0, + FBNIC_RXB_ENQUEUE_BMC = 1, + /* Unused */ + /* Unused */ + FBNIC_RXB_ENQUEUE_INDICES = 4 +}; + +#define FBNIC_RXB_DRBO_FRM_CNT_SRC(n) (0x080f9 + (n)) /* 0x203e4 + 4*n */ +#define FBNIC_RXB_DRBO_BYTE_CNT_SRC_L(n) \ + (0x080fd + (n)) /* 0x203f4 + 4*n */ +#define FBNIC_RXB_DRBO_BYTE_CNT_SRC_H(n) \ + (0x08101 + (n)) /* 0x20404 + 4*n */ +#define FBNIC_RXB_INTF_FRM_CNT_DST(n) (0x08105 + (n)) /* 0x20414 + 4*n */ +#define FBNIC_RXB_INTF_BYTE_CNT_DST_L(n) \ + (0x08109 + (n)) /* 0x20424 + 4*n */ +#define FBNIC_RXB_INTF_BYTE_CNT_DST_H(n) \ + (0x0810d + (n)) /* 0x20434 + 4*n */ +#define FBNIC_RXB_PBUF_FRM_CNT_DST(n) (0x08111 + (n)) /* 0x20444 + 4*n */ +#define FBNIC_RXB_PBUF_BYTE_CNT_DST_L(n) \ + (0x08115 + (n)) /* 0x20454 + 4*n */ +#define FBNIC_RXB_PBUF_BYTE_CNT_DST_H(n) \ + (0x08119 + (n)) /* 0x20464 + 4*n */ + +#define FBNIC_RXB_PBUF_FIFO_LEVEL(n) (0x0811d + (n)) /* 0x20474 + 4*n */ + +#define FBNIC_RXB_INTEGRITY_ERR(n) (0x0812f + (n)) /* 0x204bc + 4*n */ +#define FBNIC_RXB_MAC_ERR(n) (0x08133 + (n)) /* 0x204cc + 4*n */ +#define FBNIC_RXB_PARSER_ERR(n) (0x08137 + (n)) /* 0x204dc + 4*n */ +#define FBNIC_RXB_FRM_ERR(n) (0x0813b + (n)) /* 0x204ec + 4*n */ + +#define FBNIC_RXB_DWRR_RDE_WEIGHT0_EXT 0x08143 /* 0x2050c */ +#define FBNIC_RXB_DWRR_RDE_WEIGHT1_EXT 0x08144 /* 0x20510 */ +#define FBNIC_CSR_END_RXB 0x081b1 /* CSR section delimiter */ + +/* Rx Parser and Classifier Registers */ +#define FBNIC_CSR_START_RPC 0x08400 /* CSR section delimiter */ +#define FBNIC_RPC_RMI_CONFIG 0x08400 /* 0x21000 */ +#define FBNIC_RPC_RMI_CONFIG_OH_BYTES CSR_GENMASK(4, 0) +#define FBNIC_RPC_RMI_CONFIG_FCS_PRESENT CSR_BIT(8) +#define FBNIC_RPC_RMI_CONFIG_ENABLE CSR_BIT(12) +#define FBNIC_RPC_RMI_CONFIG_MTU CSR_GENMASK(31, 16) + +#define FBNIC_RPC_ACT_TBL0_DEFAULT 0x0840a /* 0x21028 */ +#define FBNIC_RPC_ACT_TBL0_DROP CSR_BIT(0) +#define FBNIC_RPC_ACT_TBL0_DEST_MASK CSR_GENMASK(3, 1) +enum { + FBNIC_RPC_ACT_TBL0_DEST_HOST = 1, + FBNIC_RPC_ACT_TBL0_DEST_BMC = 2, + FBNIC_RPC_ACT_TBL0_DEST_EI = 4, +}; + +#define FBNIC_RPC_ACT_TBL0_DMA_HINT CSR_GENMASK(24, 16) +#define FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID CSR_BIT(30) + +#define FBNIC_RPC_ACT_TBL1_DEFAULT 0x0840b /* 0x2102c */ +#define FBNIC_RPC_ACT_TBL1_RSS_ENA_MASK CSR_GENMASK(15, 0) +enum { + FBNIC_RPC_ACT_TBL1_RSS_ENA_IP_SRC = 1, + FBNIC_RPC_ACT_TBL1_RSS_ENA_IP_DST = 2, + FBNIC_RPC_ACT_TBL1_RSS_ENA_L4_SRC = 4, + FBNIC_RPC_ACT_TBL1_RSS_ENA_L4_DST = 8, + FBNIC_RPC_ACT_TBL1_RSS_ENA_L2_DA = 16, + FBNIC_RPC_ACT_TBL1_RSS_ENA_L4_RSS_BYTE = 32, + FBNIC_RPC_ACT_TBL1_RSS_ENA_IV6_FL_LBL = 64, + FBNIC_RPC_ACT_TBL1_RSS_ENA_OV6_FL_LBL = 128, + FBNIC_RPC_ACT_TBL1_RSS_ENA_DSCP = 256, + FBNIC_RPC_ACT_TBL1_RSS_ENA_L3_PROT = 512, + FBNIC_RPC_ACT_TBL1_RSS_ENA_L4_PROT = 1024, +}; + +#define FBNIC_RPC_RSS_KEY(n) (0x0840c + (n)) /* 0x21030 + 4*n */ +#define FBNIC_RPC_RSS_KEY_BIT_LEN 425 +#define FBNIC_RPC_RSS_KEY_BYTE_LEN \ + DIV_ROUND_UP(FBNIC_RPC_RSS_KEY_BIT_LEN, 8) +#define FBNIC_RPC_RSS_KEY_DWORD_LEN \ + DIV_ROUND_UP(FBNIC_RPC_RSS_KEY_BIT_LEN, 32) +#define FBNIC_RPC_RSS_KEY_LAST_IDX \ + (FBNIC_RPC_RSS_KEY_DWORD_LEN - 1) +#define FBNIC_RPC_RSS_KEY_LAST_MASK \ + CSR_GENMASK(31, \ + FBNIC_RPC_RSS_KEY_DWORD_LEN * 32 - \ + FBNIC_RPC_RSS_KEY_BIT_LEN) + +#define FBNIC_RPC_TCAM_MACDA_VALIDATE 0x0852d /* 0x214b4 
*/ +#define FBNIC_CSR_END_RPC 0x0856b /* CSR section delimiter */ + +/* RPC RAM Registers */ + +#define FBNIC_CSR_START_RPC_RAM 0x08800 /* CSR section delimiter */ +#define FBNIC_RPC_ACT_TBL0(n) (0x08800 + (n)) /* 0x22000 + 4*n */ +#define FBNIC_RPC_ACT_TBL1(n) (0x08840 + (n)) /* 0x22100 + 4*n */ +#define FBNIC_RPC_ACT_TBL_NUM_ENTRIES 64 + +/* TCAM Tables */ +#define FBNIC_RPC_TCAM_VALIDATE CSR_BIT(31) + +/* 64 Action TCAM Entries, 12 registers + * 3 mixed, src port, dst port, 6 L4 words, and Validate + */ +#define FBNIC_RPC_TCAM_ACT(m, n) \ + (0x08880 + 0x40 * (n) + (m)) /* 0x22200 + 256*n + 4*m */ + +#define FBNIC_RPC_TCAM_ACT_VALUE CSR_GENMASK(15, 0) +#define FBNIC_RPC_TCAM_ACT_MASK CSR_GENMASK(31, 16) + +#define FBNIC_RPC_TCAM_MACDA(m, n) \ + (0x08b80 + 0x20 * (n) + (m)) /* 0x022e00 + 128*n + 4*m */ +#define FBNIC_RPC_TCAM_MACDA_VALUE CSR_GENMASK(15, 0) +#define FBNIC_RPC_TCAM_MACDA_MASK CSR_GENMASK(31, 16) + +#define FBNIC_RPC_RSS_TBL(n, m) \ + (0x08d20 + 0x100 * (n) + (m)) /* 0x023480 + 1024*n + 4*m */ +#define FBNIC_RPC_RSS_TBL_COUNT 2 +#define FBNIC_RPC_RSS_TBL_SIZE 256 +#define FBNIC_CSR_END_RPC_RAM 0x08f1f /* CSR section delimiter */ + +/* Fab Registers */ +#define FBNIC_CSR_START_FAB 0x0C000 /* CSR section delimiter */ +#define FBNIC_FAB_AXI4_AR_SPACER_2_CFG 0x0C005 /* 0x30014 */ +#define FBNIC_FAB_AXI4_AR_SPACER_MASK CSR_BIT(16) +#define FBNIC_FAB_AXI4_AR_SPACER_THREADSHOLD CSR_GENMASK(15, 0) +#define FBNIC_CSR_END_FAB 0x0C020 /* CSR section delimiter */ + +/* Master Registers */ +#define FBNIC_CSR_START_MASTER 0x0C400 /* CSR section delimiter */ +#define FBNIC_MASTER_SPARE_0 0x0C41B /* 0x3106c */ +#define FBNIC_CSR_END_MASTER 0x0C452 /* CSR section delimiter */ + +/* MAC MAC registers (ASIC only) */ +#define FBNIC_CSR_START_MAC_MAC 0x11000 /* CSR section delimiter */ +#define FBNIC_MAC_COMMAND_CONFIG 0x11002 /* 0x44008 */ +#define FBNIC_MAC_COMMAND_CONFIG_RX_PAUSE_DIS CSR_BIT(29) +#define FBNIC_MAC_COMMAND_CONFIG_TX_PAUSE_DIS CSR_BIT(28) +#define FBNIC_MAC_COMMAND_CONFIG_FLT_HDL_DIS CSR_BIT(27) +#define FBNIC_MAC_COMMAND_CONFIG_TX_PAD_EN CSR_BIT(11) +#define FBNIC_MAC_COMMAND_CONFIG_LOOPBACK_EN CSR_BIT(10) +#define FBNIC_MAC_COMMAND_CONFIG_PROMISC_EN CSR_BIT(4) +#define FBNIC_MAC_COMMAND_CONFIG_RX_ENA CSR_BIT(1) +#define FBNIC_MAC_COMMAND_CONFIG_TX_ENA CSR_BIT(0) +#define FBNIC_MAC_CL01_PAUSE_QUANTA 0x11015 /* 0x44054 */ +#define FBNIC_MAC_CL01_QUANTA_THRESH 0x11019 /* 0x44064 */ +#define FBNIC_CSR_END_MAC_MAC 0x11028 /* CSR section delimiter */ + +/* Signals from MAC, AN, PCS, and LED CSR registers (ASIC only) */ +#define FBNIC_CSR_START_SIG 0x11800 /* CSR section delimiter */ +#define FBNIC_SIG_MAC_IN0 0x11800 /* 0x46000 */ +#define FBNIC_SIG_MAC_IN0_RESET_FF_TX_CLK CSR_BIT(14) +#define FBNIC_SIG_MAC_IN0_RESET_FF_RX_CLK CSR_BIT(13) +#define FBNIC_SIG_MAC_IN0_RESET_TX_CLK CSR_BIT(12) +#define FBNIC_SIG_MAC_IN0_RESET_RX_CLK CSR_BIT(11) +#define FBNIC_SIG_MAC_IN0_TX_CRC CSR_BIT(8) +#define FBNIC_SIG_MAC_IN0_CFG_MODE128 CSR_BIT(10) +#define FBNIC_SIG_PCS_OUT0 0x11808 /* 0x46020 */ +#define FBNIC_SIG_PCS_OUT0_LINK CSR_BIT(27) +#define FBNIC_SIG_PCS_OUT0_BLOCK_LOCK CSR_GENMASK(24, 5) +#define FBNIC_SIG_PCS_OUT0_AMPS_LOCK CSR_GENMASK(4, 1) +#define FBNIC_SIG_PCS_OUT1 0x11809 /* 0x46024 */ +#define FBNIC_SIG_PCS_OUT1_FCFEC_LOCK CSR_GENMASK(11, 8) +#define FBNIC_SIG_PCS_INTR_STS 0x11814 /* 0x46050 */ +#define FBNIC_SIG_PCS_INTR_LINK_DOWN CSR_BIT(1) +#define FBNIC_SIG_PCS_INTR_LINK_UP CSR_BIT(0) +#define FBNIC_SIG_PCS_INTR_MASK 0x11816 /* 0x46058 */ +#define FBNIC_CSR_END_SIG 0x1184e 
/* CSR section delimiter */ + +/* PUL User Registers */ +#define FBNIC_CSR_START_PUL_USER 0x31000 /* CSR section delimiter */ +#define FBNIC_PUL_OB_TLP_HDR_AW_CFG 0x3103d /* 0xc40f4 */ +#define FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME CSR_BIT(18) +#define FBNIC_PUL_OB_TLP_HDR_AR_CFG 0x3103e /* 0xc40f8 */ +#define FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME CSR_BIT(18) +#define FBNIC_CSR_END_PUL_USER 0x31080 /* CSR section delimiter */ + +/* Queue Registers + * + * The queue register offsets are specific for a given queue grouping. So to + * find the actual register offset it is necessary to combine FBNIC_QUEUE(n) + * with the register to get the actual register offset like so: + * FBNIC_QUEUE_TWQ0_CTL(n) == FBNIC_QUEUE(n) + FBNIC_QUEUE_TWQ0_CTL + */ +#define FBNIC_CSR_START_QUEUE 0x40000 /* CSR section delimiter */ +#define FBNIC_QUEUE_STRIDE 0x400 /* 0x1000 */ +#define FBNIC_QUEUE(n)\ + (0x40000 + FBNIC_QUEUE_STRIDE * (n)) /* 0x100000 + 4096*n */ + +#define FBNIC_QUEUE_TWQ0_CTL 0x000 /* 0x000 */ +#define FBNIC_QUEUE_TWQ1_CTL 0x001 /* 0x004 */ +#define FBNIC_QUEUE_TWQ_CTL_RESET CSR_BIT(0) +#define FBNIC_QUEUE_TWQ_CTL_ENABLE CSR_BIT(1) +#define FBNIC_QUEUE_TWQ0_TAIL 0x002 /* 0x008 */ +#define FBNIC_QUEUE_TWQ1_TAIL 0x003 /* 0x00c */ + +#define FBNIC_QUEUE_TWQ0_SIZE 0x00a /* 0x028 */ +#define FBNIC_QUEUE_TWQ1_SIZE 0x00b /* 0x02c */ +#define FBNIC_QUEUE_TWQ_SIZE_MASK CSR_GENMASK(3, 0) + +#define FBNIC_QUEUE_TWQ0_BAL 0x020 /* 0x080 */ +#define FBNIC_QUEUE_BAL_MASK CSR_GENMASK(31, 7) +#define FBNIC_QUEUE_TWQ0_BAH 0x021 /* 0x084 */ +#define FBNIC_QUEUE_TWQ1_BAL 0x022 /* 0x088 */ +#define FBNIC_QUEUE_TWQ1_BAH 0x023 /* 0x08c */ + +/* Tx Completion Queue Registers */ +#define FBNIC_QUEUE_TCQ_CTL 0x080 /* 0x200 */ +#define FBNIC_QUEUE_TCQ_CTL_RESET CSR_BIT(0) +#define FBNIC_QUEUE_TCQ_CTL_ENABLE CSR_BIT(1) + +#define FBNIC_QUEUE_TCQ_HEAD 0x081 /* 0x204 */ + +#define FBNIC_QUEUE_TCQ_SIZE 0x084 /* 0x210 */ +#define FBNIC_QUEUE_TCQ_SIZE_MASK CSR_GENMASK(3, 0) + +#define FBNIC_QUEUE_TCQ_BAL 0x0a0 /* 0x280 */ +#define FBNIC_QUEUE_TCQ_BAH 0x0a1 /* 0x284 */ + +/* Tx Interrupt Manager Registers */ +#define FBNIC_QUEUE_TIM_CTL 0x0c0 /* 0x300 */ +#define FBNIC_QUEUE_TIM_CTL_MSIX_MASK CSR_GENMASK(7, 0) + +#define FBNIC_QUEUE_TIM_THRESHOLD 0x0c1 /* 0x304 */ +#define FBNIC_QUEUE_TIM_THRESHOLD_TWD_MASK CSR_GENMASK(14, 0) + +#define FBNIC_QUEUE_TIM_CLEAR 0x0c2 /* 0x308 */ +#define FBNIC_QUEUE_TIM_CLEAR_MASK CSR_BIT(0) +#define FBNIC_QUEUE_TIM_SET 0x0c3 /* 0x30c */ +#define FBNIC_QUEUE_TIM_SET_MASK CSR_BIT(0) +#define FBNIC_QUEUE_TIM_MASK 0x0c4 /* 0x310 */ +#define FBNIC_QUEUE_TIM_MASK_MASK CSR_BIT(0) + +#define FBNIC_QUEUE_TIM_TIMER 0x0c5 /* 0x314 */ + +#define FBNIC_QUEUE_TIM_COUNTS 0x0c6 /* 0x318 */ +#define FBNIC_QUEUE_TIM_COUNTS_CNT1_MASK CSR_GENMASK(30, 16) +#define FBNIC_QUEUE_TIM_COUNTS_CNT0_MASK CSR_GENMASK(14, 0) + +/* Rx Completion Queue Registers */ +#define FBNIC_QUEUE_RCQ_CTL 0x200 /* 0x800 */ +#define FBNIC_QUEUE_RCQ_CTL_RESET CSR_BIT(0) +#define FBNIC_QUEUE_RCQ_CTL_ENABLE CSR_BIT(1) + +#define FBNIC_QUEUE_RCQ_HEAD 0x201 /* 0x804 */ + +#define FBNIC_QUEUE_RCQ_SIZE 0x204 /* 0x810 */ +#define FBNIC_QUEUE_RCQ_SIZE_MASK CSR_GENMASK(3, 0) + +#define FBNIC_QUEUE_RCQ_BAL 0x220 /* 0x880 */ +#define FBNIC_QUEUE_RCQ_BAH 0x221 /* 0x884 */ + +/* Rx Buffer Descriptor Queue Registers */ +#define FBNIC_QUEUE_BDQ_CTL 0x240 /* 0x900 */ +#define FBNIC_QUEUE_BDQ_CTL_RESET CSR_BIT(0) +#define FBNIC_QUEUE_BDQ_CTL_ENABLE CSR_BIT(1) +#define FBNIC_QUEUE_BDQ_CTL_PPQ_ENABLE CSR_BIT(30) + +#define FBNIC_QUEUE_BDQ_HPQ_TAIL 0x241 /* 0x904 */ +#define 
FBNIC_QUEUE_BDQ_PPQ_TAIL 0x242 /* 0x908 */ + +#define FBNIC_QUEUE_BDQ_HPQ_SIZE 0x247 /* 0x91c */ +#define FBNIC_QUEUE_BDQ_PPQ_SIZE 0x248 /* 0x920 */ +#define FBNIC_QUEUE_BDQ_SIZE_MASK CSR_GENMASK(3, 0) + +#define FBNIC_QUEUE_BDQ_HPQ_BAL 0x260 /* 0x980 */ +#define FBNIC_QUEUE_BDQ_HPQ_BAH 0x261 /* 0x984 */ +#define FBNIC_QUEUE_BDQ_PPQ_BAL 0x262 /* 0x988 */ +#define FBNIC_QUEUE_BDQ_PPQ_BAH 0x263 /* 0x98c */ + +/* Rx DMA Engine Configuration */ +#define FBNIC_QUEUE_RDE_CTL0 0x2a0 /* 0xa80 */ +#define FBNIC_QUEUE_RDE_CTL0_EN_HDR_SPLIT CSR_BIT(31) +#define FBNIC_QUEUE_RDE_CTL0_DROP_MODE_MASK CSR_GENMASK(30, 29) +enum { + FBNIC_QUEUE_RDE_CTL0_DROP_IMMEDIATE = 0, + FBNIC_QUEUE_RDE_CTL0_DROP_WAIT = 1, + FBNIC_QUEUE_RDE_CTL0_DROP_NEVER = 2, +}; + +#define FBNIC_QUEUE_RDE_CTL0_MIN_HROOM_MASK CSR_GENMASK(28, 20) +#define FBNIC_QUEUE_RDE_CTL0_MIN_TROOM_MASK CSR_GENMASK(19, 11) + +#define FBNIC_QUEUE_RDE_CTL1 0x2a1 /* 0xa84 */ +#define FBNIC_QUEUE_RDE_CTL1_MAX_HDR_MASK CSR_GENMASK(24, 12) +#define FBNIC_QUEUE_RDE_CTL1_PAYLD_OFF_MASK CSR_GENMASK(11, 9) +#define FBNIC_QUEUE_RDE_CTL1_PAYLD_PG_CL_MASK CSR_GENMASK(8, 6) +#define FBNIC_QUEUE_RDE_CTL1_PADLEN_MASK CSR_GENMASK(5, 2) +#define FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_MASK CSR_GENMASK(1, 0) +enum { + FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_NONE = 0, + FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_ALL = 1, + FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_RSS = 2, +}; + +/* Rx Interrupt Manager Registers */ +#define FBNIC_QUEUE_RIM_CTL 0x2c0 /* 0xb00 */ +#define FBNIC_QUEUE_RIM_CTL_MSIX_MASK CSR_GENMASK(7, 0) + +#define FBNIC_QUEUE_RIM_THRESHOLD 0x2c1 /* 0xb04 */ +#define FBNIC_QUEUE_RIM_THRESHOLD_RCD_MASK CSR_GENMASK(14, 0) + +#define FBNIC_QUEUE_RIM_CLEAR 0x2c2 /* 0xb08 */ +#define FBNIC_QUEUE_RIM_CLEAR_MASK CSR_BIT(0) +#define FBNIC_QUEUE_RIM_SET 0x2c3 /* 0xb0c */ +#define FBNIC_QUEUE_RIM_SET_MASK CSR_BIT(0) +#define FBNIC_QUEUE_RIM_MASK 0x2c4 /* 0xb10 */ +#define FBNIC_QUEUE_RIM_MASK_MASK CSR_BIT(0) + +#define FBNIC_QUEUE_RIM_COAL_STATUS 0x2c5 /* 0xb14 */ +#define FBNIC_QUEUE_RIM_RCD_COUNT_MASK CSR_GENMASK(30, 16) +#define FBNIC_QUEUE_RIM_TIMER_MASK CSR_GENMASK(13, 0) +#define FBNIC_MAX_QUEUES 128 +#define FBNIC_CSR_END_QUEUE (0x40000 + 0x400 * FBNIC_MAX_QUEUES - 1) + +/* BAR 4 CSRs */ + +/* The IPC mailbox consists of 32 mailboxes, with each mailbox consisting + * of 32 4 byte registers. We will use 2 registers per descriptor so the + * length of the mailbox is reduced to 16. + * + * Currently we use an offset of 0x6000 on BAR4 for the mailbox so we just + * have to do the math and determine the offset based on the mailbox + * direction and index inside that mailbox. 
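+ *
+ * For example, FBNIC_IPC_MBX(1, 3) (descriptor 3 of the Tx mailbox)
+ * resolves to ((1 * 16 + 3) * 2) + 0x6000 = 0x6026.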
+ */ +#define FBNIC_IPC_MBX_DESC_LEN 16 +#define FBNIC_IPC_MBX(mbx_idx, desc_idx) \ + ((((mbx_idx) * FBNIC_IPC_MBX_DESC_LEN + (desc_idx)) * 2) + 0x6000) + +/* Use first register in mailbox to flush writes */ +#define FBNIC_FW_ZERO_REG FBNIC_IPC_MBX(0, 0) + +enum { + FBNIC_IPC_MBX_RX_IDX, + FBNIC_IPC_MBX_TX_IDX, + FBNIC_IPC_MBX_INDICES, +}; + +#define FBNIC_IPC_MBX_DESC_LEN_MASK DESC_GENMASK(63, 48) +#define FBNIC_IPC_MBX_DESC_EOM DESC_BIT(46) +#define FBNIC_IPC_MBX_DESC_ADDR_MASK DESC_GENMASK(45, 3) +#define FBNIC_IPC_MBX_DESC_FW_CMPL DESC_BIT(1) +#define FBNIC_IPC_MBX_DESC_HOST_CMPL DESC_BIT(0) + +#endif /* _FBNIC_CSR_H_ */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c new file mode 100644 index 000000000000..e87049dfd223 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#include <asm/unaligned.h> +#include <linux/pci.h> +#include <linux/types.h> +#include <net/devlink.h> + +#include "fbnic.h" + +#define FBNIC_SN_STR_LEN 24 + +static int fbnic_devlink_info_get(struct devlink *devlink, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct fbnic_dev *fbd = devlink_priv(devlink); + int err; + + if (fbd->dsn) { + unsigned char serial[FBNIC_SN_STR_LEN]; + u8 dsn[8]; + + put_unaligned_be64(fbd->dsn, dsn); + err = snprintf(serial, FBNIC_SN_STR_LEN, "%8phD", dsn); + if (err < 0) + return err; + + err = devlink_info_serial_number_put(req, serial); + if (err) + return err; + } + + return 0; +} + +static const struct devlink_ops fbnic_devlink_ops = { + .info_get = fbnic_devlink_info_get, +}; + +void fbnic_devlink_free(struct fbnic_dev *fbd) +{ + struct devlink *devlink = priv_to_devlink(fbd); + + devlink_free(devlink); +} + +struct fbnic_dev *fbnic_devlink_alloc(struct pci_dev *pdev) +{ + void __iomem * const *iomap_table; + struct devlink *devlink; + struct fbnic_dev *fbd; + + devlink = devlink_alloc(&fbnic_devlink_ops, sizeof(struct fbnic_dev), + &pdev->dev); + if (!devlink) + return NULL; + + fbd = devlink_priv(devlink); + pci_set_drvdata(pdev, fbd); + fbd->dev = &pdev->dev; + + iomap_table = pcim_iomap_table(pdev); + fbd->uc_addr0 = iomap_table[0]; + fbd->uc_addr4 = iomap_table[4]; + + fbd->dsn = pci_get_dsn(pdev); + fbd->mps = pcie_get_mps(pdev); + fbd->readrq = pcie_get_readrq(pdev); + + fbd->mac_addr_boundary = FBNIC_RPC_TCAM_MACDA_DEFAULT_BOUNDARY; + + return fbd; +} + +void fbnic_devlink_register(struct fbnic_dev *fbd) +{ + struct devlink *devlink = priv_to_devlink(fbd); + + devlink_register(devlink); +} + +void fbnic_devlink_unregister(struct fbnic_dev *fbd) +{ + struct devlink *devlink = priv_to_devlink(fbd); + + devlink_unregister(devlink); +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_drvinfo.h b/drivers/net/ethernet/meta/fbnic/fbnic_drvinfo.h new file mode 100644 index 000000000000..809ba6729442 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_drvinfo.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Meta Platforms, Inc. and affiliates. 
*/ + +#define DRV_NAME "fbnic" +#define DRV_SUMMARY "Meta(R) Host Network Interface Driver" diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c new file mode 100644 index 000000000000..0c6e1b4c119b --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c @@ -0,0 +1,791 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#include <linux/bitfield.h> +#include <linux/etherdevice.h> +#include <linux/delay.h> +#include <linux/dev_printk.h> +#include <linux/dma-mapping.h> +#include <linux/gfp.h> +#include <linux/types.h> + +#include "fbnic.h" +#include "fbnic_tlv.h" + +static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx, + int desc_idx, u64 desc) +{ + u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx); + + fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc)); + fw_wrfl(fbd); + fw_wr32(fbd, desc_offset, lower_32_bits(desc)); +} + +static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx) +{ + u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx); + u64 desc; + + desc = fw_rd32(fbd, desc_offset); + desc |= (u64)fw_rd32(fbd, desc_offset + 1) << 32; + + return desc; +} + +static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx) +{ + int desc_idx; + + /* Initialize first descriptor to all 0s. Doing this gives us a + * solid stop for the firmware to hit when it is done looping + * through the ring. + */ + __fbnic_mbx_wr_desc(fbd, mbx_idx, 0, 0); + + fw_wrfl(fbd); + + /* We then fill the rest of the ring starting at the end and moving + * back toward descriptor 0 with skip descriptors that have no + * length nor address, and tell the firmware that they can skip + * them and just move past them to the one we initialized to 0. + */ + for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;) { + __fbnic_mbx_wr_desc(fbd, mbx_idx, desc_idx, + FBNIC_IPC_MBX_DESC_FW_CMPL | + FBNIC_IPC_MBX_DESC_HOST_CMPL); + fw_wrfl(fbd); + } +} + +void fbnic_mbx_init(struct fbnic_dev *fbd) +{ + int i; + + /* Initialize lock to protect Tx ring */ + spin_lock_init(&fbd->fw_tx_lock); + + /* Reinitialize mailbox memory */ + for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++) + memset(&fbd->mbx[i], 0, sizeof(struct fbnic_fw_mbx)); + + /* Do not auto-clear the FW mailbox interrupt, let SW clear it */ + wr32(fbd, FBNIC_INTR_SW_AC_MODE(0), ~(1u << FBNIC_FW_MSIX_ENTRY)); + + /* Clear any stale causes in vector 0 as that is used for doorbell */ + wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY); + + for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++) + fbnic_mbx_init_desc_ring(fbd, i); +} + +static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx, + struct fbnic_tlv_msg *msg, u16 length, u8 eom) +{ + struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx]; + u8 tail = mbx->tail; + dma_addr_t addr; + int direction; + + if (!mbx->ready || !fbnic_fw_present(fbd)) + return -ENODEV; + + direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? 
DMA_FROM_DEVICE : + DMA_TO_DEVICE; + + if (mbx->head == ((tail + 1) % FBNIC_IPC_MBX_DESC_LEN)) + return -EBUSY; + + addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, direction); + if (dma_mapping_error(fbd->dev, addr)) { + free_page((unsigned long)msg); + + return -ENOSPC; + } + + mbx->buf_info[tail].msg = msg; + mbx->buf_info[tail].addr = addr; + + mbx->tail = (tail + 1) % FBNIC_IPC_MBX_DESC_LEN; + + fw_wr32(fbd, FBNIC_IPC_MBX(mbx_idx, mbx->tail), 0); + + __fbnic_mbx_wr_desc(fbd, mbx_idx, tail, + FIELD_PREP(FBNIC_IPC_MBX_DESC_LEN_MASK, length) | + (addr & FBNIC_IPC_MBX_DESC_ADDR_MASK) | + (eom ? FBNIC_IPC_MBX_DESC_EOM : 0) | + FBNIC_IPC_MBX_DESC_HOST_CMPL); + + return 0; +} + +static void fbnic_mbx_unmap_and_free_msg(struct fbnic_dev *fbd, int mbx_idx, + int desc_idx) +{ + struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx]; + int direction; + + if (!mbx->buf_info[desc_idx].msg) + return; + + direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE : + DMA_TO_DEVICE; + dma_unmap_single(fbd->dev, mbx->buf_info[desc_idx].addr, + PAGE_SIZE, direction); + + free_page((unsigned long)mbx->buf_info[desc_idx].msg); + mbx->buf_info[desc_idx].msg = NULL; +} + +static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx) +{ + int i; + + fbnic_mbx_init_desc_ring(fbd, mbx_idx); + + for (i = FBNIC_IPC_MBX_DESC_LEN; i--;) + fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i); +} + +void fbnic_mbx_clean(struct fbnic_dev *fbd) +{ + int i; + + for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++) + fbnic_mbx_clean_desc_ring(fbd, i); +} + +#define FBNIC_MBX_MAX_PAGE_SIZE FIELD_MAX(FBNIC_IPC_MBX_DESC_LEN_MASK) +#define FBNIC_RX_PAGE_SIZE min_t(int, PAGE_SIZE, FBNIC_MBX_MAX_PAGE_SIZE) + +static int fbnic_mbx_alloc_rx_msgs(struct fbnic_dev *fbd) +{ + struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX]; + u8 tail = rx_mbx->tail, head = rx_mbx->head, count; + int err = 0; + + /* Do nothing if mailbox is not ready, or we already have pages on + * the ring that can be used by the firmware + */ + if (!rx_mbx->ready) + return -ENODEV; + + /* Fill all but 1 unused descriptors in the Rx queue. 
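+ * Keeping one slot unused lets the full-ring check in fbnic_mbx_map_msg()
+ * (head == (tail + 1) % ring length) distinguish a full ring from an
+ * empty one.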
*/ + count = (head - tail - 1) % FBNIC_IPC_MBX_DESC_LEN; + while (!err && count--) { + struct fbnic_tlv_msg *msg; + + msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_ATOMIC | + __GFP_NOWARN); + if (!msg) { + err = -ENOMEM; + break; + } + + err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_RX_IDX, msg, + FBNIC_RX_PAGE_SIZE, 0); + if (err) + free_page((unsigned long)msg); + } + + return err; +} + +static int fbnic_mbx_map_tlv_msg(struct fbnic_dev *fbd, + struct fbnic_tlv_msg *msg) +{ + unsigned long flags; + int err; + + spin_lock_irqsave(&fbd->fw_tx_lock, flags); + + err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg, + le16_to_cpu(msg->hdr.len) * sizeof(u32), 1); + + spin_unlock_irqrestore(&fbd->fw_tx_lock, flags); + + return err; +} + +static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd) +{ + struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX]; + u8 head = tx_mbx->head; + u64 desc; + + while (head != tx_mbx->tail) { + desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_TX_IDX, head); + if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL)) + break; + + fbnic_mbx_unmap_and_free_msg(fbd, FBNIC_IPC_MBX_TX_IDX, head); + + head++; + head %= FBNIC_IPC_MBX_DESC_LEN; + } + + /* Record head for next interrupt */ + tx_mbx->head = head; +} + +/** + * fbnic_fw_xmit_simple_msg - Transmit a simple single TLV message w/o data + * @fbd: FBNIC device structure + * @msg_type: ENUM value indicating message type to send + * + * Return: + * One the following values: + * -EOPNOTSUPP: Is not ASIC so mailbox is not supported + * -ENODEV: Device I/O error + * -ENOMEM: Failed to allocate message + * -EBUSY: No space in mailbox + * -ENOSPC: DMA mapping failed + * + * This function sends a single TLV header indicating the host wants to take + * some action. However there are no other side effects which means that any + * response will need to be caught via a completion if this action is + * expected to kick off a resultant action. + */ +static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type) +{ + struct fbnic_tlv_msg *msg; + int err = 0; + + if (!fbnic_fw_present(fbd)) + return -ENODEV; + + msg = fbnic_tlv_msg_alloc(msg_type); + if (!msg) + return -ENOMEM; + + err = fbnic_mbx_map_tlv_msg(fbd, msg); + if (err) + free_page((unsigned long)msg); + + return err; +} + +/** + * fbnic_fw_xmit_cap_msg - Allocate and populate a FW capabilities message + * @fbd: FBNIC device structure + * + * Return: NULL on failure to allocate, error pointer on error, or pointer + * to new TLV test message. + * + * Sends a single TLV header indicating the host wants the firmware to + * confirm the capabilities and version. + **/ +static int fbnic_fw_xmit_cap_msg(struct fbnic_dev *fbd) +{ + int err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ); + + /* Return 0 if we are not calling this on ASIC */ + return (err == -EOPNOTSUPP) ? 0 : err; +} + +static void fbnic_mbx_postinit_desc_ring(struct fbnic_dev *fbd, int mbx_idx) +{ + struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx]; + + /* This is a one time init, so just exit if it is completed */ + if (mbx->ready) + return; + + mbx->ready = true; + + switch (mbx_idx) { + case FBNIC_IPC_MBX_RX_IDX: + /* Make sure we have a page for the FW to write to */ + fbnic_mbx_alloc_rx_msgs(fbd); + break; + case FBNIC_IPC_MBX_TX_IDX: + /* Force version to 1 if we successfully requested an update + * from the firmware. This should be overwritten once we get + * the actual version from the firmware in the capabilities + * request message. 
+ */ + if (!fbnic_fw_xmit_cap_msg(fbd) && + !fbd->fw_cap.running.mgmt.version) + fbd->fw_cap.running.mgmt.version = 1; + break; + } +} + +static void fbnic_mbx_postinit(struct fbnic_dev *fbd) +{ + int i; + + /* We only need to do this on the first interrupt following init. + * this primes the mailbox so that we will have cleared all the + * skip descriptors. + */ + if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY))) + return; + + wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY); + + for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++) + fbnic_mbx_postinit_desc_ring(fbd, i); +} + +/** + * fbnic_fw_xmit_ownership_msg - Create and transmit a host ownership message + * to FW mailbox + * + * @fbd: FBNIC device structure + * @take_ownership: take/release the ownership + * + * Return: zero on success, negative value on failure + * + * Notifies the firmware that the driver either takes ownership of the NIC + * (when @take_ownership is true) or releases it. + */ +int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership) +{ + unsigned long req_time = jiffies; + struct fbnic_tlv_msg *msg; + int err = 0; + + if (!fbnic_fw_present(fbd)) + return -ENODEV; + + msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_OWNERSHIP_REQ); + if (!msg) + return -ENOMEM; + + if (take_ownership) { + err = fbnic_tlv_attr_put_flag(msg, FBNIC_FW_OWNERSHIP_FLAG); + if (err) + goto free_message; + } + + err = fbnic_mbx_map_tlv_msg(fbd, msg); + if (err) + goto free_message; + + /* Initialize heartbeat, set last response to 1 second in the past + * so that we will trigger a timeout if the firmware doesn't respond + */ + fbd->last_heartbeat_response = req_time - HZ; + + fbd->last_heartbeat_request = req_time; + + /* Set heartbeat detection based on if we are taking ownership */ + fbd->fw_heartbeat_enabled = take_ownership; + + return err; + +free_message: + free_page((unsigned long)msg); + return err; +} + +static const struct fbnic_tlv_index fbnic_fw_cap_resp_index[] = { + FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_VERSION), + FBNIC_TLV_ATTR_FLAG(FBNIC_FW_CAP_RESP_BMC_PRESENT), + FBNIC_TLV_ATTR_MAC_ADDR(FBNIC_FW_CAP_RESP_BMC_MAC_ADDR), + FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY), + FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_VERSION), + FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT), + FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE), + FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI), + FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_SPEED), + FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_FEC), + FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE), + FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_CMRT_VERSION), + FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION), + FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE), + FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE), + FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_UEFI_VERSION), + FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE), + FBNIC_TLV_ATTR_LAST +}; + +static int fbnic_fw_parse_bmc_addrs(u8 bmc_mac_addr[][ETH_ALEN], + struct fbnic_tlv_msg *attr, int len) +{ + int attr_len = le16_to_cpu(attr->hdr.len) / sizeof(u32) - 1; + struct fbnic_tlv_msg *mac_results[8]; + int err, i = 0; + + /* Make sure we have enough room to process all the MAC addresses */ + if (len > 8) + return -ENOSPC; + + /* Parse the array */ + err = 
fbnic_tlv_attr_parse_array(&attr[1], attr_len, mac_results, + fbnic_fw_cap_resp_index, + FBNIC_FW_CAP_RESP_BMC_MAC_ADDR, len); + if (err) + return err; + + /* Copy results into MAC addr array */ + for (i = 0; i < len && mac_results[i]; i++) + fbnic_tlv_attr_addr_copy(bmc_mac_addr[i], mac_results[i]); + + /* Zero remaining unused addresses */ + while (i < len) + eth_zero_addr(bmc_mac_addr[i++]); + + return 0; +} + +static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results) +{ + u32 active_slot = 0, all_multi = 0; + struct fbnic_dev *fbd = opaque; + u32 speed = 0, fec = 0; + size_t commit_size = 0; + bool bmc_present; + int err; + + get_unsigned_result(FBNIC_FW_CAP_RESP_VERSION, + fbd->fw_cap.running.mgmt.version); + + if (!fbd->fw_cap.running.mgmt.version) + return -EINVAL; + + if (fbd->fw_cap.running.mgmt.version < MIN_FW_VERSION_CODE) { + char running_ver[FBNIC_FW_VER_MAX_SIZE]; + + fbnic_mk_fw_ver_str(fbd->fw_cap.running.mgmt.version, + running_ver); + dev_err(fbd->dev, "Device firmware version(%s) is older than minimum required version(%02d.%02d.%02d)\n", + running_ver, + MIN_FW_MAJOR_VERSION, + MIN_FW_MINOR_VERSION, + MIN_FW_BUILD_VERSION); + /* Disable TX mailbox to prevent card use until firmware is + * updated. + */ + fbd->mbx[FBNIC_IPC_MBX_TX_IDX].ready = false; + return -EINVAL; + } + + get_string_result(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR, commit_size, + fbd->fw_cap.running.mgmt.commit, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE); + if (!commit_size) + dev_warn(fbd->dev, "Firmware did not send mgmt commit!\n"); + + get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_VERSION, + fbd->fw_cap.stored.mgmt.version); + get_string_result(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR, commit_size, + fbd->fw_cap.stored.mgmt.commit, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE); + + get_unsigned_result(FBNIC_FW_CAP_RESP_CMRT_VERSION, + fbd->fw_cap.running.bootloader.version); + get_string_result(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR, commit_size, + fbd->fw_cap.running.bootloader.commit, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE); + + get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION, + fbd->fw_cap.stored.bootloader.version); + get_string_result(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR, commit_size, + fbd->fw_cap.stored.bootloader.commit, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE); + + get_unsigned_result(FBNIC_FW_CAP_RESP_UEFI_VERSION, + fbd->fw_cap.stored.undi.version); + get_string_result(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR, commit_size, + fbd->fw_cap.stored.undi.commit, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE); + + get_unsigned_result(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT, active_slot); + fbd->fw_cap.active_slot = active_slot; + + get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_SPEED, speed); + get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_FEC, fec); + fbd->fw_cap.link_speed = speed; + fbd->fw_cap.link_fec = fec; + + bmc_present = !!results[FBNIC_FW_CAP_RESP_BMC_PRESENT]; + if (bmc_present) { + struct fbnic_tlv_msg *attr; + + attr = results[FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY]; + if (!attr) + return -EINVAL; + + err = fbnic_fw_parse_bmc_addrs(fbd->fw_cap.bmc_mac_addr, + attr, 4); + if (err) + return err; + + get_unsigned_result(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI, all_multi); + } else { + memset(fbd->fw_cap.bmc_mac_addr, 0, + sizeof(fbd->fw_cap.bmc_mac_addr)); + } + + fbd->fw_cap.bmc_present = bmc_present; + + if (results[FBNIC_FW_CAP_RESP_BMC_ALL_MULTI] || !bmc_present) + fbd->fw_cap.all_multi = all_multi; + + return 0; +} + +static const struct fbnic_tlv_index fbnic_ownership_resp_index[] = { + FBNIC_TLV_ATTR_LAST +}; + 
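A minimal sketch of the version formatting used in fbnic_fw_parse_cap_resp() above: the firmware reports a single packed u32, which fbnic_mk_fw_ver_str() (defined in fbnic_fw.h later in this patch) renders as a printable string. The version value below is hypothetical and the snippet is illustrative only:

	char running_ver[FBNIC_FW_VER_MAX_SIZE];

	/* 0x01020304 packs major 1, minor 2, patch 3, build 4 per the
	 * FBNIC_FW_CAP_RESP_VERSION_* masks, so this formats "01.02.03-004".
	 */
	fbnic_mk_fw_ver_str(0x01020304, running_ver);
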
+static int fbnic_fw_parse_ownership_resp(void *opaque, + struct fbnic_tlv_msg **results) +{ + struct fbnic_dev *fbd = (struct fbnic_dev *)opaque; + + /* Count the ownership response as a heartbeat reply */ + fbd->last_heartbeat_response = jiffies; + + return 0; +} + +static const struct fbnic_tlv_index fbnic_heartbeat_resp_index[] = { + FBNIC_TLV_ATTR_LAST +}; + +static int fbnic_fw_parse_heartbeat_resp(void *opaque, + struct fbnic_tlv_msg **results) +{ + struct fbnic_dev *fbd = (struct fbnic_dev *)opaque; + + fbd->last_heartbeat_response = jiffies; + + return 0; +} + +static int fbnic_fw_xmit_heartbeat_message(struct fbnic_dev *fbd) +{ + unsigned long req_time = jiffies; + struct fbnic_tlv_msg *msg; + int err = 0; + + if (!fbnic_fw_present(fbd)) + return -ENODEV; + + msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_HEARTBEAT_REQ); + if (!msg) + return -ENOMEM; + + err = fbnic_mbx_map_tlv_msg(fbd, msg); + if (err) + goto free_message; + + fbd->last_heartbeat_request = req_time; + + return err; + +free_message: + free_page((unsigned long)msg); + return err; +} + +static bool fbnic_fw_heartbeat_current(struct fbnic_dev *fbd) +{ + unsigned long last_response = fbd->last_heartbeat_response; + unsigned long last_request = fbd->last_heartbeat_request; + + return !time_before(last_response, last_request); +} + +int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll) +{ + int err = -ETIMEDOUT; + int attempts = 50; + + if (!fbnic_fw_present(fbd)) + return -ENODEV; + + while (attempts--) { + msleep(200); + if (poll) + fbnic_mbx_poll(fbd); + + if (!fbnic_fw_heartbeat_current(fbd)) + continue; + + /* Place new message on mailbox to elicit a response */ + err = fbnic_fw_xmit_heartbeat_message(fbd); + if (err) + dev_warn(fbd->dev, + "Failed to send heartbeat message: %d\n", + err); + break; + } + + return err; +} + +void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd) +{ + unsigned long last_request = fbd->last_heartbeat_request; + int err; + + /* Do not check heartbeat or send another request until current + * period has expired. Otherwise we might start spamming requests. + */ + if (time_is_after_jiffies(last_request + FW_HEARTBEAT_PERIOD)) + return; + + /* We already reported no mailbox. Wait for it to come back */ + if (!fbd->fw_heartbeat_enabled) + return; + + /* Was the last heartbeat response long time ago? 
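+ * I.e. the firmware never answered the previous request, which (per the
+ * check above) was sent at least FW_HEARTBEAT_PERIOD ago.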
*/ + if (!fbnic_fw_heartbeat_current(fbd)) { + dev_warn(fbd->dev, + "Firmware did not respond to heartbeat message\n"); + fbd->fw_heartbeat_enabled = false; + } + + /* Place new message on mailbox to elicit a response */ + err = fbnic_fw_xmit_heartbeat_message(fbd); + if (err) + dev_warn(fbd->dev, "Failed to send heartbeat message\n"); +} + +static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = { + FBNIC_TLV_PARSER(FW_CAP_RESP, fbnic_fw_cap_resp_index, + fbnic_fw_parse_cap_resp), + FBNIC_TLV_PARSER(OWNERSHIP_RESP, fbnic_ownership_resp_index, + fbnic_fw_parse_ownership_resp), + FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index, + fbnic_fw_parse_heartbeat_resp), + FBNIC_TLV_MSG_ERROR +}; + +static void fbnic_mbx_process_rx_msgs(struct fbnic_dev *fbd) +{ + struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX]; + u8 head = rx_mbx->head; + u64 desc, length; + + while (head != rx_mbx->tail) { + struct fbnic_tlv_msg *msg; + int err; + + desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_RX_IDX, head); + if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL)) + break; + + dma_unmap_single(fbd->dev, rx_mbx->buf_info[head].addr, + PAGE_SIZE, DMA_FROM_DEVICE); + + msg = rx_mbx->buf_info[head].msg; + + length = FIELD_GET(FBNIC_IPC_MBX_DESC_LEN_MASK, desc); + + /* Ignore NULL mailbox descriptors */ + if (!length) + goto next_page; + + /* Report descriptors with length greater than page size */ + if (length > PAGE_SIZE) { + dev_warn(fbd->dev, + "Invalid mailbox descriptor length: %lld\n", + length); + goto next_page; + } + + if (le16_to_cpu(msg->hdr.len) * sizeof(u32) > length) + dev_warn(fbd->dev, "Mailbox message length mismatch\n"); + + /* If parsing fails dump contents of message to dmesg */ + err = fbnic_tlv_msg_parse(fbd, msg, fbnic_fw_tlv_parser); + if (err) { + dev_warn(fbd->dev, "Unable to process message: %d\n", + err); + print_hex_dump(KERN_WARNING, "fbnic:", + DUMP_PREFIX_OFFSET, 16, 2, + msg, length, true); + } + + dev_dbg(fbd->dev, "Parsed msg type %d\n", msg->hdr.type); +next_page: + + free_page((unsigned long)rx_mbx->buf_info[head].msg); + rx_mbx->buf_info[head].msg = NULL; + + head++; + head %= FBNIC_IPC_MBX_DESC_LEN; + } + + /* Record head for next interrupt */ + rx_mbx->head = head; + + /* Make sure we have at least one page for the FW to write to */ + fbnic_mbx_alloc_rx_msgs(fbd); +} + +void fbnic_mbx_poll(struct fbnic_dev *fbd) +{ + fbnic_mbx_postinit(fbd); + + fbnic_mbx_process_tx_msgs(fbd); + fbnic_mbx_process_rx_msgs(fbd); +} + +int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd) +{ + struct fbnic_fw_mbx *tx_mbx; + int attempts = 50; + + /* Immediate fail if BAR4 isn't there */ + if (!fbnic_fw_present(fbd)) + return -ENODEV; + + tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX]; + while (!tx_mbx->ready && --attempts) { + /* Force the firmware to trigger an interrupt response to + * avoid the mailbox getting stuck closed if the interrupt + * is reset. + */ + fbnic_mbx_init_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX); + + msleep(200); + + fbnic_mbx_poll(fbd); + } + + return attempts ? 0 : -ETIMEDOUT; +} + +void fbnic_mbx_flush_tx(struct fbnic_dev *fbd) +{ + struct fbnic_fw_mbx *tx_mbx; + int attempts = 50; + u8 count = 0; + + /* Nothing to do if there is no mailbox */ + if (!fbnic_fw_present(fbd)) + return; + + /* Record current Rx stats */ + tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX]; + + /* Nothing to do if mailbox never got to ready */ + if (!tx_mbx->ready) + return; + + /* Give firmware time to process packet, + * we will wait up to 10 seconds which is 50 waits of 200ms. 
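+ * The loop below exits early once the firmware has consumed every
+ * outstanding Tx descriptor (head catches up to tail).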
+ */ + do { + u8 head = tx_mbx->head; + + if (head == tx_mbx->tail) + break; + + msleep(200); + fbnic_mbx_process_tx_msgs(fbd); + + count += (tx_mbx->head - head) % FBNIC_IPC_MBX_DESC_LEN; + } while (count < FBNIC_IPC_MBX_DESC_LEN && --attempts); +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h new file mode 100644 index 000000000000..c65bca613665 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#ifndef _FBNIC_FW_H_ +#define _FBNIC_FW_H_ + +#include <linux/if_ether.h> +#include <linux/types.h> + +struct fbnic_dev; +struct fbnic_tlv_msg; + +struct fbnic_fw_mbx { + u8 ready, head, tail; + struct { + struct fbnic_tlv_msg *msg; + dma_addr_t addr; + } buf_info[FBNIC_IPC_MBX_DESC_LEN]; +}; + +// FW_VER_MAX_SIZE must match ETHTOOL_FWVERS_LEN +#define FBNIC_FW_VER_MAX_SIZE 32 +// Formatted version is in the format XX.YY.ZZ_RRR_COMMIT +#define FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE (FBNIC_FW_VER_MAX_SIZE - 13) +#define FBNIC_FW_LOG_MAX_SIZE 256 + +struct fbnic_fw_ver { + u32 version; + char commit[FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE]; +}; + +struct fbnic_fw_cap { + struct { + struct fbnic_fw_ver mgmt, bootloader; + } running; + struct { + struct fbnic_fw_ver mgmt, bootloader, undi; + } stored; + u8 active_slot; + u8 bmc_mac_addr[4][ETH_ALEN]; + u8 bmc_present : 1; + u8 all_multi : 1; + u8 link_speed; + u8 link_fec; +}; + +void fbnic_mbx_init(struct fbnic_dev *fbd); +void fbnic_mbx_clean(struct fbnic_dev *fbd); +void fbnic_mbx_poll(struct fbnic_dev *fbd); +int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd); +void fbnic_mbx_flush_tx(struct fbnic_dev *fbd); +int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership); +int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll); +void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd); + +#define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str) \ +do { \ + const u32 __rev_id = _rev_id; \ + snprintf(_str, sizeof(_str), "%02lu.%02lu.%02lu-%03lu%s%s", \ + FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_MAJOR, __rev_id), \ + FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_MINOR, __rev_id), \ + FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_PATCH, __rev_id), \ + FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_BUILD, __rev_id), \ + _delim, _commit); \ +} while (0) + +#define fbnic_mk_fw_ver_str(_rev_id, _str) \ + fbnic_mk_full_fw_ver_str(_rev_id, "", "", _str) + +#define FW_HEARTBEAT_PERIOD (10 * HZ) + +enum { + FBNIC_TLV_MSG_ID_HOST_CAP_REQ = 0x10, + FBNIC_TLV_MSG_ID_FW_CAP_RESP = 0x11, + FBNIC_TLV_MSG_ID_OWNERSHIP_REQ = 0x12, + FBNIC_TLV_MSG_ID_OWNERSHIP_RESP = 0x13, + FBNIC_TLV_MSG_ID_HEARTBEAT_REQ = 0x14, + FBNIC_TLV_MSG_ID_HEARTBEAT_RESP = 0x15, +}; + +#define FBNIC_FW_CAP_RESP_VERSION_MAJOR CSR_GENMASK(31, 24) +#define FBNIC_FW_CAP_RESP_VERSION_MINOR CSR_GENMASK(23, 16) +#define FBNIC_FW_CAP_RESP_VERSION_PATCH CSR_GENMASK(15, 8) +#define FBNIC_FW_CAP_RESP_VERSION_BUILD CSR_GENMASK(7, 0) +enum { + FBNIC_FW_CAP_RESP_VERSION = 0x0, + FBNIC_FW_CAP_RESP_BMC_PRESENT = 0x1, + FBNIC_FW_CAP_RESP_BMC_MAC_ADDR = 0x2, + FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY = 0x3, + FBNIC_FW_CAP_RESP_STORED_VERSION = 0x4, + FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT = 0x5, + FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR = 0x6, + FBNIC_FW_CAP_RESP_BMC_ALL_MULTI = 0x8, + FBNIC_FW_CAP_RESP_FW_STATE = 0x9, + FBNIC_FW_CAP_RESP_FW_LINK_SPEED = 0xa, + FBNIC_FW_CAP_RESP_FW_LINK_FEC = 0xb, + FBNIC_FW_CAP_RESP_STORED_COMMIT_STR = 0xc, + 
FBNIC_FW_CAP_RESP_CMRT_VERSION = 0xd, + FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION = 0xe, + FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR = 0xf, + FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR = 0x10, + FBNIC_FW_CAP_RESP_UEFI_VERSION = 0x11, + FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR = 0x12, + FBNIC_FW_CAP_RESP_MSG_MAX +}; + +enum { + FBNIC_FW_LINK_SPEED_25R1 = 1, + FBNIC_FW_LINK_SPEED_50R2 = 2, + FBNIC_FW_LINK_SPEED_50R1 = 3, + FBNIC_FW_LINK_SPEED_100R2 = 4, +}; + +enum { + FBNIC_FW_LINK_FEC_NONE = 1, + FBNIC_FW_LINK_FEC_RS = 2, + FBNIC_FW_LINK_FEC_BASER = 3, +}; + +enum { + FBNIC_FW_OWNERSHIP_FLAG = 0x0, + FBNIC_FW_OWNERSHIP_MSG_MAX +}; +#endif /* _FBNIC_FW_H_ */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c new file mode 100644 index 000000000000..914362195920 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#include <linux/pci.h> +#include <linux/types.h> + +#include "fbnic.h" +#include "fbnic_netdev.h" +#include "fbnic_txrx.h" + +static irqreturn_t fbnic_fw_msix_intr(int __always_unused irq, void *data) +{ + struct fbnic_dev *fbd = (struct fbnic_dev *)data; + + fbnic_mbx_poll(fbd); + + fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY); + + return IRQ_HANDLED; +} + +/** + * fbnic_fw_enable_mbx - Configure and initialize Firmware Mailbox + * @fbd: Pointer to device to initialize + * + * This function will initialize the firmware mailbox rings, enable the IRQ + * and initialize the communication between the Firmware and the host. The + * firmware is expected to respond to the initialization by sending an + * interrupt essentially notifying the host that it has seen the + * initialization and is now synced up. + * + * Return: non-zero on failure. + **/ +int fbnic_fw_enable_mbx(struct fbnic_dev *fbd) +{ + u32 vector = fbd->fw_msix_vector; + int err; + + /* Request the IRQ for FW Mailbox vector. */ + err = request_threaded_irq(vector, NULL, &fbnic_fw_msix_intr, + IRQF_ONESHOT, dev_name(fbd->dev), fbd); + if (err) + return err; + + /* Initialize mailbox and attempt to poll it into ready state */ + fbnic_mbx_init(fbd); + err = fbnic_mbx_poll_tx_ready(fbd); + if (err) { + dev_warn(fbd->dev, "FW mailbox did not enter ready state\n"); + free_irq(vector, fbd); + return err; + } + + /* Enable interrupts */ + fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY); + + return 0; +} + +/** + * fbnic_fw_disable_mbx - Disable mailbox and place it in standby state + * @fbd: Pointer to device to disable + * + * This function will disable the mailbox interrupt, free any messages still + * in the mailbox and place it into a standby state. The firmware is + * expected to see the update and assume that the host is in the reset state. + **/ +void fbnic_fw_disable_mbx(struct fbnic_dev *fbd) +{ + /* Disable interrupt and free vector */ + fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_FW_MSIX_ENTRY); + + /* Free the vector */ + free_irq(fbd->fw_msix_vector, fbd); + + /* Make sure disabling logs message is sent, must be done here to + * avoid risk of completing without a running interrupt. 
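+ * With the interrupt already masked, fbnic_mbx_flush_tx() polls for the
+ * remaining Tx completions itself.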
+ */ + fbnic_mbx_flush_tx(fbd); + + /* Reset the mailboxes to the initialized state */ + fbnic_mbx_clean(fbd); +} + +static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data) +{ + struct fbnic_dev *fbd = data; + struct fbnic_net *fbn; + + if (fbd->mac->pcs_get_link_event(fbd) == FBNIC_LINK_EVENT_NONE) { + fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), + 1u << FBNIC_PCS_MSIX_ENTRY); + return IRQ_HANDLED; + } + + fbn = netdev_priv(fbd->netdev); + + phylink_pcs_change(&fbn->phylink_pcs, false); + + return IRQ_HANDLED; +} + +/** + * fbnic_pcs_irq_enable - Configure the MAC to enable it to advertise link + * @fbd: Pointer to device to initialize + * + * This function provides basic bringup for the MAC/PCS IRQ. For now the IRQ + * will remain disabled until we start the MAC/PCS/PHY logic via phylink. + * + * Return: non-zero on failure. + **/ +int fbnic_pcs_irq_enable(struct fbnic_dev *fbd) +{ + u32 vector = fbd->pcs_msix_vector; + int err; + + /* Request the IRQ for MAC link vector. + * Map MAC cause to it, and unmask it + */ + err = request_irq(vector, &fbnic_pcs_msix_intr, 0, + fbd->netdev->name, fbd); + if (err) + return err; + + fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX), + FBNIC_PCS_MSIX_ENTRY | FBNIC_INTR_MSIX_CTRL_ENABLE); + + return 0; +} + +/** + * fbnic_pcs_irq_disable - Teardown the MAC IRQ to prepare for stopping + * @fbd: Pointer to device that is stopping + * + * This function undoes the work done in fbnic_pcs_irq_enable and prepares + * the device to no longer receive traffic on the host interface. + **/ +void fbnic_pcs_irq_disable(struct fbnic_dev *fbd) +{ + /* Disable interrupt */ + fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX), + FBNIC_PCS_MSIX_ENTRY); + fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_PCS_MSIX_ENTRY); + + /* Free the vector */ + free_irq(fbd->pcs_msix_vector, fbd); +} + +int fbnic_request_irq(struct fbnic_dev *fbd, int nr, irq_handler_t handler, + unsigned long flags, const char *name, void *data) +{ + struct pci_dev *pdev = to_pci_dev(fbd->dev); + int irq = pci_irq_vector(pdev, nr); + + if (irq < 0) + return irq; + + return request_irq(irq, handler, flags, name, data); +} + +void fbnic_free_irq(struct fbnic_dev *fbd, int nr, void *data) +{ + struct pci_dev *pdev = to_pci_dev(fbd->dev); + int irq = pci_irq_vector(pdev, nr); + + if (irq < 0) + return; + + free_irq(irq, data); +} + +void fbnic_free_irqs(struct fbnic_dev *fbd) +{ + struct pci_dev *pdev = to_pci_dev(fbd->dev); + + fbd->pcs_msix_vector = 0; + fbd->fw_msix_vector = 0; + + fbd->num_irqs = 0; + + pci_free_irq_vectors(pdev); +} + +int fbnic_alloc_irqs(struct fbnic_dev *fbd) +{ + unsigned int wanted_irqs = FBNIC_NON_NAPI_VECTORS; + struct pci_dev *pdev = to_pci_dev(fbd->dev); + int num_irqs; + + wanted_irqs += min_t(unsigned int, num_online_cpus(), FBNIC_MAX_RXQS); + num_irqs = pci_alloc_irq_vectors(pdev, FBNIC_NON_NAPI_VECTORS + 1, + wanted_irqs, PCI_IRQ_MSIX); + if (num_irqs < 0) { + dev_err(fbd->dev, "Failed to allocate MSI-X entries\n"); + return num_irqs; + } + + if (num_irqs < wanted_irqs) + dev_warn(fbd->dev, "Allocated %d IRQs, expected %d\n", + num_irqs, wanted_irqs); + + fbd->num_irqs = num_irqs; + + fbd->pcs_msix_vector = pci_irq_vector(pdev, FBNIC_PCS_MSIX_ENTRY); + fbd->fw_msix_vector = pci_irq_vector(pdev, FBNIC_FW_MSIX_ENTRY); + + return 0; +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c new file mode 100644 index 000000000000..7920e7af82d9 --- /dev/null +++ 
b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c @@ -0,0 +1,666 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#include <linux/bitfield.h> +#include <net/tcp.h> + +#include "fbnic.h" +#include "fbnic_mac.h" +#include "fbnic_netdev.h" + +static void fbnic_init_readrq(struct fbnic_dev *fbd, unsigned int offset, + unsigned int cls, unsigned int readrq) +{ + u32 val = rd32(fbd, offset); + + /* The TDF_CTL masks are a superset of the RNI_RBP ones. So we can + * use them when setting either the TDE_CTF or RNI_RBP registers. + */ + val &= FBNIC_QM_TNI_TDF_CTL_MAX_OT | FBNIC_QM_TNI_TDF_CTL_MAX_OB; + + val |= FIELD_PREP(FBNIC_QM_TNI_TDF_CTL_MRRS, readrq) | + FIELD_PREP(FBNIC_QM_TNI_TDF_CTL_CLS, cls); + + wr32(fbd, offset, val); +} + +static void fbnic_init_mps(struct fbnic_dev *fbd, unsigned int offset, + unsigned int cls, unsigned int mps) +{ + u32 val = rd32(fbd, offset); + + /* Currently all MPS masks are identical so just use the first one */ + val &= ~(FBNIC_QM_TNI_TCM_CTL_MPS | FBNIC_QM_TNI_TCM_CTL_CLS); + + val |= FIELD_PREP(FBNIC_QM_TNI_TCM_CTL_MPS, mps) | + FIELD_PREP(FBNIC_QM_TNI_TCM_CTL_CLS, cls); + + wr32(fbd, offset, val); +} + +static void fbnic_mac_init_axi(struct fbnic_dev *fbd) +{ + bool override_1k = false; + int readrq, mps, cls; + + /* All of the values are based on being a power of 2 starting + * with 64 == 0. Therefore we can either divide by 64 in the + * case of constants, or just subtract 6 from the log2 of the value + * in order to get the value we will be programming into the + * registers. + */ + readrq = ilog2(fbd->readrq) - 6; + if (readrq > 3) + override_1k = true; + readrq = clamp(readrq, 0, 3); + + mps = ilog2(fbd->mps) - 6; + mps = clamp(mps, 0, 3); + + cls = ilog2(L1_CACHE_BYTES) - 6; + cls = clamp(cls, 0, 3); + + /* Configure Tx/Rx AXI Paths w/ Read Request and Max Payload sizes */ + fbnic_init_readrq(fbd, FBNIC_QM_TNI_TDF_CTL, cls, readrq); + fbnic_init_mps(fbd, FBNIC_QM_TNI_TCM_CTL, cls, mps); + + /* Configure QM TNI TDE: + * - Max outstanding AXI beats to 704(768 - 64) - guaranetees 8% of + * buffer capacity to descriptors. + * - Max outstanding transactions to 128 + */ + wr32(fbd, FBNIC_QM_TNI_TDE_CTL, + FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MRRS_1K, override_1k ? 1 : 0) | + FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MAX_OB, 704) | + FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MAX_OT, 128) | + FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MRRS, readrq) | + FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_CLS, cls)); + + fbnic_init_readrq(fbd, FBNIC_QM_RNI_RBP_CTL, cls, readrq); + fbnic_init_mps(fbd, FBNIC_QM_RNI_RDE_CTL, cls, mps); + fbnic_init_mps(fbd, FBNIC_QM_RNI_RCM_CTL, cls, mps); + + /* Enable XALI AR/AW outbound */ + wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG, + FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME); + wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG, + FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME); +} + +static void fbnic_mac_init_qm(struct fbnic_dev *fbd) +{ + u32 clock_freq; + + /* Configure TSO behavior */ + wr32(fbd, FBNIC_QM_TQS_CTL0, + FIELD_PREP(FBNIC_QM_TQS_CTL0_LSO_TS_MASK, + FBNIC_QM_TQS_CTL0_LSO_TS_LAST) | + FIELD_PREP(FBNIC_QM_TQS_CTL0_PREFETCH_THRESH, + FBNIC_QM_TQS_CTL0_PREFETCH_THRESH_MIN)); + + /* Limit EDT to INT_MAX as this is the limit of the EDT Qdisc */ + wr32(fbd, FBNIC_QM_TQS_EDT_TS_RANGE, INT_MAX); + + /* Configure MTU + * Due to known HW issue we cannot set the MTU to within 16 octets + * of a 64 octet aligned boundary. So we will set the TQS_MTU(s) to + * MTU + 1. 
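+ * Both writes below (TQS_MTU_CTL0 and the BULK field of TQS_MTU_CTL1) are programmed with the same FBNIC_MAX_JUMBO_FRAME_SIZE + 1 value.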
+ */ + wr32(fbd, FBNIC_QM_TQS_MTU_CTL0, FBNIC_MAX_JUMBO_FRAME_SIZE + 1); + wr32(fbd, FBNIC_QM_TQS_MTU_CTL1, + FIELD_PREP(FBNIC_QM_TQS_MTU_CTL1_BULK, + FBNIC_MAX_JUMBO_FRAME_SIZE + 1)); + + clock_freq = FBNIC_CLOCK_FREQ; + + /* Be aggressive on the timings. We will have the interrupt + * threshold timer tick once every 1 usec and coalesce writes for + * up to 80 usecs. + */ + wr32(fbd, FBNIC_QM_TCQ_CTL0, + FIELD_PREP(FBNIC_QM_TCQ_CTL0_TICK_CYCLES, + clock_freq / 1000000) | + FIELD_PREP(FBNIC_QM_TCQ_CTL0_COAL_WAIT, + clock_freq / 12500)); + + /* We will have the interrupt threshold timer tick once every + * 1 usec and coalesce writes for up to 2 usecs. + */ + wr32(fbd, FBNIC_QM_RCQ_CTL0, + FIELD_PREP(FBNIC_QM_RCQ_CTL0_TICK_CYCLES, + clock_freq / 1000000) | + FIELD_PREP(FBNIC_QM_RCQ_CTL0_COAL_WAIT, + clock_freq / 500000)); + + /* Configure spacer control to 64 beats. */ + wr32(fbd, FBNIC_FAB_AXI4_AR_SPACER_2_CFG, + FBNIC_FAB_AXI4_AR_SPACER_MASK | + FIELD_PREP(FBNIC_FAB_AXI4_AR_SPACER_THREADSHOLD, 2)); +} + +#define FBNIC_DROP_EN_MASK 0x7d +#define FBNIC_PAUSE_EN_MASK 0x14 +#define FBNIC_ECN_EN_MASK 0x10 + +struct fbnic_fifo_config { + unsigned int addr; + unsigned int size; +}; + +/* Rx FIFO Configuration + * The table consists of 8 entries, of which only 4 are currently used + * The starting addr is in units of 64B and the size is in 2KB units + * Below is the human readable version of the table defined below: + * Function Addr Size + * ---------------------------------- + * Network to Host/BMC 384K 64K + * Unused + * Unused + * Network to BMC 448K 32K + * Network to Host 0 384K + * Unused + * BMC to Host 480K 32K + * Unused + */ +static const struct fbnic_fifo_config fifo_config[] = { + { .addr = 0x1800, .size = 0x20 }, /* Network to Host/BMC */ + { }, /* Unused */ + { }, /* Unused */ + { .addr = 0x1c00, .size = 0x10 }, /* Network to BMC */ + { .addr = 0x0000, .size = 0xc0 }, /* Network to Host */ + { }, /* Unused */ + { .addr = 0x1e00, .size = 0x10 }, /* BMC to Host */ + { } /* Unused */ +}; + +static void fbnic_mac_init_rxb(struct fbnic_dev *fbd) +{ + bool rx_enable; + int i; + + rx_enable = !!(rd32(fbd, FBNIC_RPC_RMI_CONFIG) & + FBNIC_RPC_RMI_CONFIG_ENABLE); + + for (i = 0; i < 8; i++) { + unsigned int size = fifo_config[i].size; + + /* If we are coming up on a system that already has the + * Rx data path enabled we don't need to reconfigure the + * FIFOs. Instead we can check to verify the values are + * large enough to meet our needs, and use the values to + * populate the flow control, ECN, and drop thresholds. + */ + if (rx_enable) { + size = FIELD_GET(FBNIC_RXB_PBUF_SIZE, + rd32(fbd, FBNIC_RXB_PBUF_CFG(i))); + if (size < fifo_config[i].size) + dev_warn(fbd->dev, + "fifo%d size of %d smaller than expected value of %d\n", + i, size << 11, + fifo_config[i].size << 11); + } else { + /* Program RXB Cuthrough */ + wr32(fbd, FBNIC_RXB_CT_SIZE(i), + FIELD_PREP(FBNIC_RXB_CT_SIZE_HEADER, 4) | + FIELD_PREP(FBNIC_RXB_CT_SIZE_PAYLOAD, 2)); + + /* The granularity for the packet buffer size is 2KB + * granularity while the packet buffer base address is + * only 64B granularity + */ + wr32(fbd, FBNIC_RXB_PBUF_CFG(i), + FIELD_PREP(FBNIC_RXB_PBUF_BASE_ADDR, + fifo_config[i].addr) | + FIELD_PREP(FBNIC_RXB_PBUF_SIZE, size)); + + /* The granularity for the credits is 64B. This is + * based on RXB_PBUF_SIZE * 32 + 4. + */ + wr32(fbd, FBNIC_RXB_PBUF_CREDIT(i), + FIELD_PREP(FBNIC_RXB_PBUF_CREDIT_MASK, + size ? 
size * 32 + 4 : 0)); + } + + if (!size) + continue; + + /* Pause is size of FIFO with 56KB skid to start/stop */ + wr32(fbd, FBNIC_RXB_PAUSE_THLD(i), + !(FBNIC_PAUSE_EN_MASK & (1u << i)) ? 0x1fff : + FIELD_PREP(FBNIC_RXB_PAUSE_THLD_ON, + size * 32 - 0x380) | + FIELD_PREP(FBNIC_RXB_PAUSE_THLD_OFF, 0x380)); + + /* Enable Drop when only one packet is left in the FIFO */ + wr32(fbd, FBNIC_RXB_DROP_THLD(i), + !(FBNIC_DROP_EN_MASK & (1u << i)) ? 0x1fff : + FIELD_PREP(FBNIC_RXB_DROP_THLD_ON, + size * 32 - + FBNIC_MAX_JUMBO_FRAME_SIZE / 64) | + FIELD_PREP(FBNIC_RXB_DROP_THLD_OFF, + size * 32 - + FBNIC_MAX_JUMBO_FRAME_SIZE / 64)); + + /* Enable ECN bit when 1/4 of RXB is filled with at least + * 1 room for one full jumbo frame before setting ECN + */ + wr32(fbd, FBNIC_RXB_ECN_THLD(i), + !(FBNIC_ECN_EN_MASK & (1u << i)) ? 0x1fff : + FIELD_PREP(FBNIC_RXB_ECN_THLD_ON, + max_t(unsigned int, + size * 32 / 4, + FBNIC_MAX_JUMBO_FRAME_SIZE / 64)) | + FIELD_PREP(FBNIC_RXB_ECN_THLD_OFF, + max_t(unsigned int, + size * 32 / 4, + FBNIC_MAX_JUMBO_FRAME_SIZE / 64))); + } + + /* For now only enable drop and ECN. We need to add driver/kernel + * interfaces for configuring pause. + */ + wr32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL, + FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_DROP_ENABLE, + FBNIC_DROP_EN_MASK) | + FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_ECN_ENABLE, + FBNIC_ECN_EN_MASK)); + + /* Program INTF credits */ + wr32(fbd, FBNIC_RXB_INTF_CREDIT, + FBNIC_RXB_INTF_CREDIT_MASK0 | + FBNIC_RXB_INTF_CREDIT_MASK1 | + FBNIC_RXB_INTF_CREDIT_MASK2 | + FIELD_PREP(FBNIC_RXB_INTF_CREDIT_MASK3, 8)); + + /* Configure calendar slots. + * Rx: 0 - 62 RDE 1st, BMC 2nd + * 63 BMC 1st, RDE 2nd + */ + for (i = 0; i < 16; i++) { + u32 calendar_val = (i == 15) ? 0x1e1b1b1b : 0x1b1b1b1b; + + wr32(fbd, FBNIC_RXB_CLDR_PRIO_CFG(i), calendar_val); + } + + /* Split the credits for the DRR up as follows: + * Quantum0: 8000 Network to Host + * Quantum1: 0 Not used + * Quantum2: 80 BMC to Host + * Quantum3: 0 Not used + * Quantum4: 8000 Multicast to Host and BMC + */ + wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT0, + FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM0, 0x40) | + FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM2, 0x50)); + wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT0_EXT, + FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM0, 0x1f)); + wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT1, + FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT1_QUANTUM4, 0x40)); + wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT1_EXT, + FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT1_QUANTUM4, 0x1f)); + + /* Program RXB FCS Endian register */ + wr32(fbd, FBNIC_RXB_ENDIAN_FCS, 0x0aaaaaa0); +} + +static void fbnic_mac_init_txb(struct fbnic_dev *fbd) +{ + int i; + + wr32(fbd, FBNIC_TCE_TXB_CTRL, 0); + + /* Configure Tx QM Credits */ + wr32(fbd, FBNIC_QM_TQS_CTL1, + FIELD_PREP(FBNIC_QM_TQS_CTL1_MC_MAX_CREDITS, 0x40) | + FIELD_PREP(FBNIC_QM_TQS_CTL1_BULK_MAX_CREDITS, 0x20)); + + /* Initialize internal Tx queues */ + wr32(fbd, FBNIC_TCE_TXB_TEI_Q0_CTRL, 0); + wr32(fbd, FBNIC_TCE_TXB_TEI_Q1_CTRL, 0); + wr32(fbd, FBNIC_TCE_TXB_MC_Q_CTRL, + FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x400) | + FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x000)); + wr32(fbd, FBNIC_TCE_TXB_RX_TEI_Q_CTRL, 0); + wr32(fbd, FBNIC_TCE_TXB_TX_BMC_Q_CTRL, + FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x200) | + FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x400)); + wr32(fbd, FBNIC_TCE_TXB_RX_BMC_Q_CTRL, + FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x200) | + FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x600)); + + wr32(fbd, FBNIC_TCE_LSO_CTRL, + FBNIC_TCE_LSO_CTRL_IPID_MODE_INC | + 
FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_1ST, TCPHDR_PSH | + TCPHDR_FIN) | + FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_MID, TCPHDR_PSH | + TCPHDR_CWR | + TCPHDR_FIN) | + FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_END, TCPHDR_CWR)); + wr32(fbd, FBNIC_TCE_CSO_CTRL, 0); + + wr32(fbd, FBNIC_TCE_BMC_MAX_PKTSZ, + FIELD_PREP(FBNIC_TCE_BMC_MAX_PKTSZ_TX, + FBNIC_MAX_JUMBO_FRAME_SIZE) | + FIELD_PREP(FBNIC_TCE_BMC_MAX_PKTSZ_RX, + FBNIC_MAX_JUMBO_FRAME_SIZE)); + wr32(fbd, FBNIC_TCE_MC_MAX_PKTSZ, + FIELD_PREP(FBNIC_TCE_MC_MAX_PKTSZ_TMI, + FBNIC_MAX_JUMBO_FRAME_SIZE)); + + /* Configure calendar slots. + * Tx: 0 - 62 TMI 1st, BMC 2nd + * 63 BMC 1st, TMI 2nd + */ + for (i = 0; i < 16; i++) { + u32 calendar_val = (i == 15) ? 0x1e1b1b1b : 0x1b1b1b1b; + + wr32(fbd, FBNIC_TCE_TXB_CLDR_SLOT_CFG(i), calendar_val); + } + + /* Configure DWRR */ + wr32(fbd, FBNIC_TCE_TXB_ENQ_WRR_CTRL, + FIELD_PREP(FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT0, 0x64) | + FIELD_PREP(FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT2, 0x04)); + wr32(fbd, FBNIC_TCE_TXB_TEI_DWRR_CTRL, 0); + wr32(fbd, FBNIC_TCE_TXB_TEI_DWRR_CTRL_EXT, 0); + wr32(fbd, FBNIC_TCE_TXB_BMC_DWRR_CTRL, + FIELD_PREP(FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM0, 0x50) | + FIELD_PREP(FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM1, 0x82)); + wr32(fbd, FBNIC_TCE_TXB_BMC_DWRR_CTRL_EXT, 0); + wr32(fbd, FBNIC_TCE_TXB_NTWRK_DWRR_CTRL, + FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM1, 0x50) | + FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM2, 0x20)); + wr32(fbd, FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_EXT, + FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM2, 0x03)); + + /* Configure SOP protocol protection */ + wr32(fbd, FBNIC_TCE_SOP_PROT_CTRL, + FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TBI, 0x78) | + FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TTI_FRM, 0x40) | + FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TTI_CM, 0x0c)); + + /* Conservative configuration on MAC interface Start of Packet + * protection FIFO. This sets the minimum depth of the FIFO before + * we start sending packets to the MAC measured in 64B units and + * up to 160 entries deep. + * + * For the ASIC the clock is fast enough that we will likely fill + * the SOP FIFO before the MAC can drain it. So just use a minimum + * value of 8. + */ + wr32(fbd, FBNIC_TMI_SOP_PROT_CTRL, 8); + + wrfl(fbd); + wr32(fbd, FBNIC_TCE_TXB_CTRL, FBNIC_TCE_TXB_CTRL_TCAM_ENABLE | + FBNIC_TCE_TXB_CTRL_LOAD); +} + +static void fbnic_mac_init_regs(struct fbnic_dev *fbd) +{ + fbnic_mac_init_axi(fbd); + fbnic_mac_init_qm(fbd); + fbnic_mac_init_rxb(fbd); + fbnic_mac_init_txb(fbd); +} + +static void fbnic_mac_tx_pause_config(struct fbnic_dev *fbd, bool tx_pause) +{ + u32 rxb_pause_ctrl; + + /* Enable generation of pause frames if enabled */ + rxb_pause_ctrl = rd32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL); + rxb_pause_ctrl &= ~FBNIC_RXB_PAUSE_DROP_CTRL_PAUSE_ENABLE; + if (tx_pause) + rxb_pause_ctrl |= + FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_PAUSE_ENABLE, + FBNIC_PAUSE_EN_MASK); + wr32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL, rxb_pause_ctrl); +} + +static int fbnic_pcs_get_link_event_asic(struct fbnic_dev *fbd) +{ + u32 pcs_intr_mask = rd32(fbd, FBNIC_SIG_PCS_INTR_STS); + + if (pcs_intr_mask & FBNIC_SIG_PCS_INTR_LINK_DOWN) + return FBNIC_LINK_EVENT_DOWN; + + return (pcs_intr_mask & FBNIC_SIG_PCS_INTR_LINK_UP) ? 
+ FBNIC_LINK_EVENT_UP : FBNIC_LINK_EVENT_NONE; +} + +static u32 __fbnic_mac_cmd_config_asic(struct fbnic_dev *fbd, + bool tx_pause, bool rx_pause) +{ + /* Enable MAC Promiscuous mode and Tx padding */ + u32 command_config = FBNIC_MAC_COMMAND_CONFIG_TX_PAD_EN | + FBNIC_MAC_COMMAND_CONFIG_PROMISC_EN; + struct fbnic_net *fbn = netdev_priv(fbd->netdev); + + /* Disable pause frames if not enabled */ + if (!tx_pause) + command_config |= FBNIC_MAC_COMMAND_CONFIG_TX_PAUSE_DIS; + if (!rx_pause) + command_config |= FBNIC_MAC_COMMAND_CONFIG_RX_PAUSE_DIS; + + /* Disable fault handling if no FEC is requested */ + if ((fbn->fec & FBNIC_FEC_MODE_MASK) == FBNIC_FEC_OFF) + command_config |= FBNIC_MAC_COMMAND_CONFIG_FLT_HDL_DIS; + + return command_config; +} + +static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd) +{ + struct fbnic_net *fbn = netdev_priv(fbd->netdev); + u32 pcs_status, lane_mask = ~0; + + pcs_status = rd32(fbd, FBNIC_SIG_PCS_OUT0); + if (!(pcs_status & FBNIC_SIG_PCS_OUT0_LINK)) + return false; + + /* Define the expected lane mask for the status bits we need to check */ + switch (fbn->link_mode & FBNIC_LINK_MODE_MASK) { + case FBNIC_LINK_100R2: + lane_mask = 0xf; + break; + case FBNIC_LINK_50R1: + lane_mask = 3; + break; + case FBNIC_LINK_50R2: + switch (fbn->fec & FBNIC_FEC_MODE_MASK) { + case FBNIC_FEC_OFF: + lane_mask = 0x63; + break; + case FBNIC_FEC_RS: + lane_mask = 5; + break; + case FBNIC_FEC_BASER: + lane_mask = 0xf; + break; + } + break; + case FBNIC_LINK_25R1: + lane_mask = 1; + break; + } + + /* Use an XOR to remove the bits we expect to see set */ + switch (fbn->fec & FBNIC_FEC_MODE_MASK) { + case FBNIC_FEC_OFF: + lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT0_BLOCK_LOCK, + pcs_status); + break; + case FBNIC_FEC_RS: + lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT0_AMPS_LOCK, + pcs_status); + break; + case FBNIC_FEC_BASER: + lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT1_FCFEC_LOCK, + rd32(fbd, FBNIC_SIG_PCS_OUT1)); + break; + } + + /* If all lanes cancelled then we have a lock on all lanes */ + return !lane_mask; +} + +static bool fbnic_pcs_get_link_asic(struct fbnic_dev *fbd) +{ + bool link; + + /* Flush status bits to clear possible stale data, + * bits should reset themselves back to 1 if link is truly up + */ + wr32(fbd, FBNIC_SIG_PCS_OUT0, FBNIC_SIG_PCS_OUT0_LINK | + FBNIC_SIG_PCS_OUT0_BLOCK_LOCK | + FBNIC_SIG_PCS_OUT0_AMPS_LOCK); + wr32(fbd, FBNIC_SIG_PCS_OUT1, FBNIC_SIG_PCS_OUT1_FCFEC_LOCK); + wrfl(fbd); + + /* Clear interrupt state due to recent changes. 
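+ * Both the link up and link down latched bits are written back below so that only transitions occurring after this point are reported.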
*/ + wr32(fbd, FBNIC_SIG_PCS_INTR_STS, + FBNIC_SIG_PCS_INTR_LINK_DOWN | FBNIC_SIG_PCS_INTR_LINK_UP); + + link = fbnic_mac_get_pcs_link_status(fbd); + + /* Enable interrupt to only capture changes in link state */ + wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, + ~FBNIC_SIG_PCS_INTR_LINK_DOWN & ~FBNIC_SIG_PCS_INTR_LINK_UP); + wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_PCS_MSIX_ENTRY); + + return link; +} + +static void fbnic_pcs_get_fw_settings(struct fbnic_dev *fbd) +{ + struct fbnic_net *fbn = netdev_priv(fbd->netdev); + u8 link_mode = fbn->link_mode; + u8 fec = fbn->fec; + + /* Update FEC first to reflect FW current mode */ + if (fbn->fec & FBNIC_FEC_AUTO) { + switch (fbd->fw_cap.link_fec) { + case FBNIC_FW_LINK_FEC_NONE: + fec = FBNIC_FEC_OFF; + break; + case FBNIC_FW_LINK_FEC_RS: + fec = FBNIC_FEC_RS; + break; + case FBNIC_FW_LINK_FEC_BASER: + fec = FBNIC_FEC_BASER; + break; + default: + return; + } + + fbn->fec = fec; + } + + /* Do nothing if AUTO mode is not engaged */ + if (fbn->link_mode & FBNIC_LINK_AUTO) { + switch (fbd->fw_cap.link_speed) { + case FBNIC_FW_LINK_SPEED_25R1: + link_mode = FBNIC_LINK_25R1; + break; + case FBNIC_FW_LINK_SPEED_50R2: + link_mode = FBNIC_LINK_50R2; + break; + case FBNIC_FW_LINK_SPEED_50R1: + link_mode = FBNIC_LINK_50R1; + fec = FBNIC_FEC_RS; + break; + case FBNIC_FW_LINK_SPEED_100R2: + link_mode = FBNIC_LINK_100R2; + fec = FBNIC_FEC_RS; + break; + default: + return; + } + + fbn->link_mode = link_mode; + } +} + +static int fbnic_pcs_enable_asic(struct fbnic_dev *fbd) +{ + /* Mask and clear the PCS interrupt, will be enabled by link handler */ + wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0); + wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0); + + /* Pull in settings from FW */ + fbnic_pcs_get_fw_settings(fbd); + + return 0; +} + +static void fbnic_pcs_disable_asic(struct fbnic_dev *fbd) +{ + /* Mask and clear the PCS interrupt */ + wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0); + wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0); +} + +static void fbnic_mac_link_down_asic(struct fbnic_dev *fbd) +{ + u32 cmd_cfg, mac_ctrl; + + cmd_cfg = __fbnic_mac_cmd_config_asic(fbd, false, false); + mac_ctrl = rd32(fbd, FBNIC_SIG_MAC_IN0); + + mac_ctrl |= FBNIC_SIG_MAC_IN0_RESET_FF_TX_CLK | + FBNIC_SIG_MAC_IN0_RESET_TX_CLK | + FBNIC_SIG_MAC_IN0_RESET_FF_RX_CLK | + FBNIC_SIG_MAC_IN0_RESET_RX_CLK; + + wr32(fbd, FBNIC_SIG_MAC_IN0, mac_ctrl); + wr32(fbd, FBNIC_MAC_COMMAND_CONFIG, cmd_cfg); +} + +static void fbnic_mac_link_up_asic(struct fbnic_dev *fbd, + bool tx_pause, bool rx_pause) +{ + u32 cmd_cfg, mac_ctrl; + + fbnic_mac_tx_pause_config(fbd, tx_pause); + + cmd_cfg = __fbnic_mac_cmd_config_asic(fbd, tx_pause, rx_pause); + mac_ctrl = rd32(fbd, FBNIC_SIG_MAC_IN0); + + mac_ctrl &= ~(FBNIC_SIG_MAC_IN0_RESET_FF_TX_CLK | + FBNIC_SIG_MAC_IN0_RESET_TX_CLK | + FBNIC_SIG_MAC_IN0_RESET_FF_RX_CLK | + FBNIC_SIG_MAC_IN0_RESET_RX_CLK); + cmd_cfg |= FBNIC_MAC_COMMAND_CONFIG_RX_ENA | + FBNIC_MAC_COMMAND_CONFIG_TX_ENA; + + wr32(fbd, FBNIC_SIG_MAC_IN0, mac_ctrl); + wr32(fbd, FBNIC_MAC_COMMAND_CONFIG, cmd_cfg); +} + +static const struct fbnic_mac fbnic_mac_asic = { + .init_regs = fbnic_mac_init_regs, + .pcs_enable = fbnic_pcs_enable_asic, + .pcs_disable = fbnic_pcs_disable_asic, + .pcs_get_link = fbnic_pcs_get_link_asic, + .pcs_get_link_event = fbnic_pcs_get_link_event_asic, + .link_down = fbnic_mac_link_down_asic, + .link_up = fbnic_mac_link_up_asic, +}; + +/** + * fbnic_mac_init - Assign a MAC type and initialize the fbnic device + * @fbd: Device pointer to device to initialize + * + * Return: zero on success, negative on failure 
+ * + * Initialize the MAC function pointers and initializes the MAC of + * the device. + **/ +int fbnic_mac_init(struct fbnic_dev *fbd) +{ + fbd->mac = &fbnic_mac_asic; + + fbd->mac->init_regs(fbd); + + return 0; +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h new file mode 100644 index 000000000000..f53be6e6aef9 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#ifndef _FBNIC_MAC_H_ +#define _FBNIC_MAC_H_ + +#include <linux/types.h> + +struct fbnic_dev; + +#define FBNIC_MAX_JUMBO_FRAME_SIZE 9742 + +enum { + FBNIC_LINK_EVENT_NONE = 0, + FBNIC_LINK_EVENT_UP = 1, + FBNIC_LINK_EVENT_DOWN = 2, +}; + +/* Treat the FEC bits as a bitmask laid out as follows: + * Bit 0: RS Enabled + * Bit 1: BASER(Firecode) Enabled + * Bit 2: Retrieve FEC from FW + */ +enum { + FBNIC_FEC_OFF = 0, + FBNIC_FEC_RS = 1, + FBNIC_FEC_BASER = 2, + FBNIC_FEC_AUTO = 4, +}; + +#define FBNIC_FEC_MODE_MASK (FBNIC_FEC_AUTO - 1) + +/* Treat the link modes as a set of modulation/lanes bitmask: + * Bit 0: Lane Count, 0 = R1, 1 = R2 + * Bit 1: Modulation, 0 = NRZ, 1 = PAM4 + * Bit 2: Retrieve link mode from FW + */ +enum { + FBNIC_LINK_25R1 = 0, + FBNIC_LINK_50R2 = 1, + FBNIC_LINK_50R1 = 2, + FBNIC_LINK_100R2 = 3, + FBNIC_LINK_AUTO = 4, +}; + +#define FBNIC_LINK_MODE_R2 (FBNIC_LINK_50R2) +#define FBNIC_LINK_MODE_PAM4 (FBNIC_LINK_50R1) +#define FBNIC_LINK_MODE_MASK (FBNIC_LINK_AUTO - 1) + +/* This structure defines the interface hooks for the MAC. The MAC hooks + * will be configured as a const struct provided with a set of function + * pointers. + * + * void (*init_regs)(struct fbnic_dev *fbd); + * Initialize MAC registers to enable Tx/Rx paths and FIFOs. + * + * void (*pcs_enable)(struct fbnic_dev *fbd); + * Configure and enable PCS to enable link if not already enabled + * void (*pcs_disable)(struct fbnic_dev *fbd); + * Shutdown the link if we are the only consumer of it. + * bool (*pcs_get_link)(struct fbnic_dev *fbd); + * Check PCS link status + * int (*pcs_get_link_event)(struct fbnic_dev *fbd) + * Get the current link event status, reports true if link has + * changed to either FBNIC_LINK_EVENT_DOWN or FBNIC_LINK_EVENT_UP + * + * void (*link_down)(struct fbnic_dev *fbd); + * Configure MAC for link down event + * void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause); + * Configure MAC for link up event; + * + */ +struct fbnic_mac { + void (*init_regs)(struct fbnic_dev *fbd); + + int (*pcs_enable)(struct fbnic_dev *fbd); + void (*pcs_disable)(struct fbnic_dev *fbd); + bool (*pcs_get_link)(struct fbnic_dev *fbd); + int (*pcs_get_link_event)(struct fbnic_dev *fbd); + + void (*link_down)(struct fbnic_dev *fbd); + void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause); +}; + +int fbnic_mac_init(struct fbnic_dev *fbd); +#endif /* _FBNIC_MAC_H_ */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c new file mode 100644 index 000000000000..b7ce6da68543 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c @@ -0,0 +1,488 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. 
*/ + +#include <linux/etherdevice.h> +#include <linux/ipv6.h> +#include <linux/types.h> + +#include "fbnic.h" +#include "fbnic_netdev.h" +#include "fbnic_txrx.h" + +int __fbnic_open(struct fbnic_net *fbn) +{ + struct fbnic_dev *fbd = fbn->fbd; + int err; + + err = fbnic_alloc_napi_vectors(fbn); + if (err) + return err; + + err = fbnic_alloc_resources(fbn); + if (err) + goto free_napi_vectors; + + err = netif_set_real_num_tx_queues(fbn->netdev, + fbn->num_tx_queues); + if (err) + goto free_resources; + + err = netif_set_real_num_rx_queues(fbn->netdev, + fbn->num_rx_queues); + if (err) + goto free_resources; + + /* Send ownership message and flush to verify FW has seen it */ + err = fbnic_fw_xmit_ownership_msg(fbd, true); + if (err) { + dev_warn(fbd->dev, + "Error %d sending host ownership message to the firmware\n", + err); + goto free_resources; + } + + err = fbnic_fw_init_heartbeat(fbd, false); + if (err) + goto release_ownership; + + err = fbnic_pcs_irq_enable(fbd); + if (err) + goto release_ownership; + /* Pull the BMC config and initialize the RPC */ + fbnic_bmc_rpc_init(fbd); + fbnic_rss_reinit(fbd, fbn); + + return 0; +release_ownership: + fbnic_fw_xmit_ownership_msg(fbn->fbd, false); +free_resources: + fbnic_free_resources(fbn); +free_napi_vectors: + fbnic_free_napi_vectors(fbn); + return err; +} + +static int fbnic_open(struct net_device *netdev) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + int err; + + err = __fbnic_open(fbn); + if (!err) + fbnic_up(fbn); + + return err; +} + +static int fbnic_stop(struct net_device *netdev) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + + fbnic_down(fbn); + fbnic_pcs_irq_disable(fbn->fbd); + + fbnic_fw_xmit_ownership_msg(fbn->fbd, false); + + fbnic_free_resources(fbn); + fbnic_free_napi_vectors(fbn); + + return 0; +} + +static int fbnic_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + struct fbnic_mac_addr *avail_addr; + + if (WARN_ON(!is_valid_ether_addr(addr))) + return -EADDRNOTAVAIL; + + avail_addr = __fbnic_uc_sync(fbn->fbd, addr); + if (!avail_addr) + return -ENOSPC; + + /* Add type flag indicating this address is in use by the host */ + set_bit(FBNIC_MAC_ADDR_T_UNICAST, avail_addr->act_tcam); + + return 0; +} + +static int fbnic_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + struct fbnic_dev *fbd = fbn->fbd; + int i, ret; + + /* Scan from middle of list to bottom, filling bottom up. + * Skip the first entry which is reserved for dev_addr and + * leave the last entry to use for promiscuous filtering. 
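+ * The loop condition below also checks ret, so the scan stops as soon as a matching entry has been unsynced.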
+ */ + for (i = fbd->mac_addr_boundary, ret = -ENOENT; + i < FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX && ret; i++) { + struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i]; + + if (!ether_addr_equal(mac_addr->value.addr8, addr)) + continue; + + ret = __fbnic_uc_unsync(mac_addr); + } + + return ret; +} + +static int fbnic_mc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + struct fbnic_mac_addr *avail_addr; + + if (WARN_ON(!is_multicast_ether_addr(addr))) + return -EADDRNOTAVAIL; + + avail_addr = __fbnic_mc_sync(fbn->fbd, addr); + if (!avail_addr) + return -ENOSPC; + + /* Add type flag indicating this address is in use by the host */ + set_bit(FBNIC_MAC_ADDR_T_MULTICAST, avail_addr->act_tcam); + + return 0; +} + +static int fbnic_mc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + struct fbnic_dev *fbd = fbn->fbd; + int i, ret; + + /* Scan from middle of list to top, filling top down. + * Skip over the address reserved for the BMC MAC and + * exclude index 0 as that belongs to the broadcast address + */ + for (i = fbd->mac_addr_boundary, ret = -ENOENT; + --i > FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX && ret;) { + struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i]; + + if (!ether_addr_equal(mac_addr->value.addr8, addr)) + continue; + + ret = __fbnic_mc_unsync(mac_addr); + } + + return ret; +} + +void __fbnic_set_rx_mode(struct net_device *netdev) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + bool uc_promisc = false, mc_promisc = false; + struct fbnic_dev *fbd = fbn->fbd; + struct fbnic_mac_addr *mac_addr; + int err; + + /* Populate host address from dev_addr */ + mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX]; + if (!ether_addr_equal(mac_addr->value.addr8, netdev->dev_addr) || + mac_addr->state != FBNIC_TCAM_S_VALID) { + ether_addr_copy(mac_addr->value.addr8, netdev->dev_addr); + mac_addr->state = FBNIC_TCAM_S_UPDATE; + set_bit(FBNIC_MAC_ADDR_T_UNICAST, mac_addr->act_tcam); + } + + /* Populate broadcast address if broadcast is enabled */ + mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX]; + if (netdev->flags & IFF_BROADCAST) { + if (!is_broadcast_ether_addr(mac_addr->value.addr8) || + mac_addr->state != FBNIC_TCAM_S_VALID) { + eth_broadcast_addr(mac_addr->value.addr8); + mac_addr->state = FBNIC_TCAM_S_ADD; + } + set_bit(FBNIC_MAC_ADDR_T_BROADCAST, mac_addr->act_tcam); + } else if (mac_addr->state == FBNIC_TCAM_S_VALID) { + __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_BROADCAST); + } + + /* Synchronize unicast and multicast address lists */ + err = __dev_uc_sync(netdev, fbnic_uc_sync, fbnic_uc_unsync); + if (err == -ENOSPC) + uc_promisc = true; + err = __dev_mc_sync(netdev, fbnic_mc_sync, fbnic_mc_unsync); + if (err == -ENOSPC) + mc_promisc = true; + + uc_promisc |= !!(netdev->flags & IFF_PROMISC); + mc_promisc |= !!(netdev->flags & IFF_ALLMULTI) || uc_promisc; + + /* Populate last TCAM entry with promiscuous entry and 0/1 bit mask */ + mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_PROMISC_IDX]; + if (uc_promisc) { + if (!is_zero_ether_addr(mac_addr->value.addr8) || + mac_addr->state != FBNIC_TCAM_S_VALID) { + eth_zero_addr(mac_addr->value.addr8); + eth_broadcast_addr(mac_addr->mask.addr8); + clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI, + mac_addr->act_tcam); + set_bit(FBNIC_MAC_ADDR_T_PROMISC, + mac_addr->act_tcam); + mac_addr->state = FBNIC_TCAM_S_ADD; + } + } else if (mc_promisc && + (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi)) { + /* We have 
to add a special handler for multicast as the + * BMC may have an all-multi rule already in place. As such + * adding a rule ourselves won't do any good so we will have + * to modify the rules for the ALL MULTI below if the BMC + * already has the rule in place. + */ + if (!is_multicast_ether_addr(mac_addr->value.addr8) || + mac_addr->state != FBNIC_TCAM_S_VALID) { + eth_zero_addr(mac_addr->value.addr8); + eth_broadcast_addr(mac_addr->mask.addr8); + mac_addr->value.addr8[0] ^= 1; + mac_addr->mask.addr8[0] ^= 1; + set_bit(FBNIC_MAC_ADDR_T_ALLMULTI, + mac_addr->act_tcam); + clear_bit(FBNIC_MAC_ADDR_T_PROMISC, + mac_addr->act_tcam); + mac_addr->state = FBNIC_TCAM_S_ADD; + } + } else if (mac_addr->state == FBNIC_TCAM_S_VALID) { + if (test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) { + clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI, + mac_addr->act_tcam); + clear_bit(FBNIC_MAC_ADDR_T_PROMISC, + mac_addr->act_tcam); + } else { + mac_addr->state = FBNIC_TCAM_S_DELETE; + } + } + + /* Add rules for BMC all multicast if it is enabled */ + fbnic_bmc_rpc_all_multi_config(fbd, mc_promisc); + + /* Sift out any unshared BMC rules and place them in BMC only section */ + fbnic_sift_macda(fbd); + + /* Write updates to hardware */ + fbnic_write_rules(fbd); + fbnic_write_macda(fbd); +} + +static void fbnic_set_rx_mode(struct net_device *netdev) +{ + /* No need to update the hardware if we are not running */ + if (netif_running(netdev)) + __fbnic_set_rx_mode(netdev); +} + +static int fbnic_set_mac(struct net_device *netdev, void *p) +{ + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + eth_hw_addr_set(netdev, addr->sa_data); + + fbnic_set_rx_mode(netdev); + + return 0; +} + +void fbnic_clear_rx_mode(struct net_device *netdev) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + struct fbnic_dev *fbd = fbn->fbd; + int idx; + + for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) { + struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx]; + + if (mac_addr->state != FBNIC_TCAM_S_VALID) + continue; + + bitmap_clear(mac_addr->act_tcam, + FBNIC_MAC_ADDR_T_HOST_START, + FBNIC_MAC_ADDR_T_HOST_LEN); + + if (bitmap_empty(mac_addr->act_tcam, + FBNIC_RPC_TCAM_ACT_NUM_ENTRIES)) + mac_addr->state = FBNIC_TCAM_S_DELETE; + } + + /* Write updates to hardware */ + fbnic_write_macda(fbd); + + __dev_uc_unsync(netdev, NULL); + __dev_mc_unsync(netdev, NULL); +} + +static const struct net_device_ops fbnic_netdev_ops = { + .ndo_open = fbnic_open, + .ndo_stop = fbnic_stop, + .ndo_validate_addr = eth_validate_addr, + .ndo_start_xmit = fbnic_xmit_frame, + .ndo_features_check = fbnic_features_check, + .ndo_set_mac_address = fbnic_set_mac, + .ndo_set_rx_mode = fbnic_set_rx_mode, +}; + +void fbnic_reset_queues(struct fbnic_net *fbn, + unsigned int tx, unsigned int rx) +{ + struct fbnic_dev *fbd = fbn->fbd; + unsigned int max_napis; + + max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS; + + tx = min(tx, max_napis); + fbn->num_tx_queues = tx; + + rx = min(rx, max_napis); + fbn->num_rx_queues = rx; + + fbn->num_napi = max(tx, rx); +} + +/** + * fbnic_netdev_free - Free the netdev associate with fbnic + * @fbd: Driver specific structure to free netdev from + * + * Allocate and initialize the netdev and netdev private structure. Bind + * together the hardware, netdev, and pci data structures. 
+ **/ +void fbnic_netdev_free(struct fbnic_dev *fbd) +{ + struct fbnic_net *fbn = netdev_priv(fbd->netdev); + + if (fbn->phylink) + phylink_destroy(fbn->phylink); + + free_netdev(fbd->netdev); + fbd->netdev = NULL; +} + +/** + * fbnic_netdev_alloc - Allocate a netdev and associate with fbnic + * @fbd: Driver specific structure to associate netdev with + * + * Allocate and initialize the netdev and netdev private structure. Bind + * together the hardware, netdev, and pci data structures. + * + * Return: 0 on success, negative on failure + **/ +struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd) +{ + struct net_device *netdev; + struct fbnic_net *fbn; + int default_queues; + + netdev = alloc_etherdev_mq(sizeof(*fbn), FBNIC_MAX_RXQS); + if (!netdev) + return NULL; + + SET_NETDEV_DEV(netdev, fbd->dev); + fbd->netdev = netdev; + + netdev->netdev_ops = &fbnic_netdev_ops; + + fbn = netdev_priv(netdev); + + fbn->netdev = netdev; + fbn->fbd = fbd; + INIT_LIST_HEAD(&fbn->napis); + + fbn->txq_size = FBNIC_TXQ_SIZE_DEFAULT; + fbn->hpq_size = FBNIC_HPQ_SIZE_DEFAULT; + fbn->ppq_size = FBNIC_PPQ_SIZE_DEFAULT; + fbn->rcq_size = FBNIC_RCQ_SIZE_DEFAULT; + + default_queues = netif_get_num_default_rss_queues(); + if (default_queues > fbd->max_num_queues) + default_queues = fbd->max_num_queues; + + fbnic_reset_queues(fbn, default_queues, default_queues); + + fbnic_reset_indir_tbl(fbn); + fbnic_rss_key_fill(fbn->rss_key); + fbnic_rss_init_en_mask(fbn); + + netdev->features |= + NETIF_F_RXHASH | + NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_RXCSUM; + + netdev->hw_features |= netdev->features; + netdev->vlan_features |= netdev->features; + netdev->hw_enc_features |= netdev->features; + + netdev->min_mtu = IPV6_MIN_MTU; + netdev->max_mtu = FBNIC_MAX_JUMBO_FRAME_SIZE - ETH_HLEN; + + /* TBD: This is workaround for BMC as phylink doesn't have support + * for leavling the link enabled if a BMC is present. + */ + netdev->ethtool->wol_enabled = true; + + fbn->fec = FBNIC_FEC_AUTO | FBNIC_FEC_RS; + fbn->link_mode = FBNIC_LINK_AUTO | FBNIC_LINK_50R2; + netif_carrier_off(netdev); + + netif_tx_stop_all_queues(netdev); + + if (fbnic_phylink_init(netdev)) { + fbnic_netdev_free(fbd); + return NULL; + } + + return netdev; +} + +static int fbnic_dsn_to_mac_addr(u64 dsn, char *addr) +{ + addr[0] = (dsn >> 56) & 0xFF; + addr[1] = (dsn >> 48) & 0xFF; + addr[2] = (dsn >> 40) & 0xFF; + addr[3] = (dsn >> 16) & 0xFF; + addr[4] = (dsn >> 8) & 0xFF; + addr[5] = dsn & 0xFF; + + return is_valid_ether_addr(addr) ? 0 : -EINVAL; +} + +/** + * fbnic_netdev_register - Initialize general software structures + * @netdev: Netdev containing structure to initialize and register + * + * Initialize the MAC address for the netdev and register it. + * + * Return: 0 on success, negative on failure + **/ +int fbnic_netdev_register(struct net_device *netdev) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + struct fbnic_dev *fbd = fbn->fbd; + u64 dsn = fbd->dsn; + u8 addr[ETH_ALEN]; + int err; + + err = fbnic_dsn_to_mac_addr(dsn, addr); + if (!err) { + ether_addr_copy(netdev->perm_addr, addr); + eth_hw_addr_set(netdev, addr); + } else { + /* A randomly assigned MAC address will cause provisioning + * issues so instead just fail to spawn the netdev and + * avoid any confusion. 
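+ * For reference, fbnic_dsn_to_mac_addr() above derives the address from DSN bytes 7..5 and 2..0, e.g. a DSN of 0x1122334455667788 maps to 11:22:33:66:77:88.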
+ */ + dev_err(fbd->dev, "MAC addr %pM invalid\n", addr); + return err; + } + + return register_netdev(netdev); +} + +void fbnic_netdev_unregister(struct net_device *netdev) +{ + unregister_netdev(netdev); +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h new file mode 100644 index 000000000000..6bc0ebeb8182 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#ifndef _FBNIC_NETDEV_H_ +#define _FBNIC_NETDEV_H_ + +#include <linux/types.h> +#include <linux/phylink.h> + +#include "fbnic_csr.h" +#include "fbnic_rpc.h" +#include "fbnic_txrx.h" + +struct fbnic_net { + struct fbnic_ring *tx[FBNIC_MAX_TXQS]; + struct fbnic_ring *rx[FBNIC_MAX_RXQS]; + + struct net_device *netdev; + struct fbnic_dev *fbd; + + u32 txq_size; + u32 hpq_size; + u32 ppq_size; + u32 rcq_size; + + u16 num_napi; + + struct phylink *phylink; + struct phylink_config phylink_config; + struct phylink_pcs phylink_pcs; + + /* TBD: Remove these when phylink supports FEC and lane config */ + u8 fec; + u8 link_mode; + + u16 num_tx_queues; + u16 num_rx_queues; + + u8 indir_tbl[FBNIC_RPC_RSS_TBL_COUNT][FBNIC_RPC_RSS_TBL_SIZE]; + u32 rss_key[FBNIC_RPC_RSS_KEY_DWORD_LEN]; + u32 rss_flow_hash[FBNIC_NUM_HASH_OPT]; + + u64 link_down_events; + + struct list_head napis; +}; + +int __fbnic_open(struct fbnic_net *fbn); +void fbnic_up(struct fbnic_net *fbn); +void fbnic_down(struct fbnic_net *fbn); + +struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd); +void fbnic_netdev_free(struct fbnic_dev *fbd); +int fbnic_netdev_register(struct net_device *netdev); +void fbnic_netdev_unregister(struct net_device *netdev); +void fbnic_reset_queues(struct fbnic_net *fbn, + unsigned int tx, unsigned int rx); + +void __fbnic_set_rx_mode(struct net_device *netdev); +void fbnic_clear_rx_mode(struct net_device *netdev); + +int fbnic_phylink_init(struct net_device *netdev); +#endif /* _FBNIC_NETDEV_H_ */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c new file mode 100644 index 000000000000..a4809fe0fc24 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c @@ -0,0 +1,564 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. 
*/ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/rtnetlink.h> +#include <linux/types.h> + +#include "fbnic.h" +#include "fbnic_drvinfo.h" +#include "fbnic_netdev.h" + +char fbnic_driver_name[] = DRV_NAME; + +MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_LICENSE("GPL"); + +static const struct fbnic_info fbnic_asic_info = { + .max_num_queues = FBNIC_MAX_QUEUES, + .bar_mask = BIT(0) | BIT(4) +}; + +static const struct fbnic_info *fbnic_info_tbl[] = { + [fbnic_board_asic] = &fbnic_asic_info, +}; + +static const struct pci_device_id fbnic_pci_tbl[] = { + { PCI_DEVICE_DATA(META, FBNIC_ASIC, fbnic_board_asic) }, + /* Required last entry */ + {0, } +}; +MODULE_DEVICE_TABLE(pci, fbnic_pci_tbl); + +u32 fbnic_rd32(struct fbnic_dev *fbd, u32 reg) +{ + u32 __iomem *csr = READ_ONCE(fbd->uc_addr0); + u32 value; + + if (!csr) + return ~0U; + + value = readl(csr + reg); + + /* If any bits are 0 value should be valid */ + if (~value) + return value; + + /* All 1's may be valid if ZEROs register still works */ + if (reg != FBNIC_MASTER_SPARE_0 && ~readl(csr + FBNIC_MASTER_SPARE_0)) + return value; + + /* Hardware is giving us all 1's reads, assume it is gone */ + WRITE_ONCE(fbd->uc_addr0, NULL); + WRITE_ONCE(fbd->uc_addr4, NULL); + + dev_err(fbd->dev, + "Failed read (idx 0x%x AKA addr 0x%x), disabled CSR access, awaiting reset\n", + reg, reg << 2); + + /* Notify stack that device has lost (PCIe) link */ + if (!fbnic_init_failure(fbd)) + netif_device_detach(fbd->netdev); + + return ~0U; +} + +bool fbnic_fw_present(struct fbnic_dev *fbd) +{ + return !!READ_ONCE(fbd->uc_addr4); +} + +void fbnic_fw_wr32(struct fbnic_dev *fbd, u32 reg, u32 val) +{ + u32 __iomem *csr = READ_ONCE(fbd->uc_addr4); + + if (csr) + writel(val, csr + reg); +} + +u32 fbnic_fw_rd32(struct fbnic_dev *fbd, u32 reg) +{ + u32 __iomem *csr = READ_ONCE(fbd->uc_addr4); + u32 value; + + if (!csr) + return ~0U; + + value = readl(csr + reg); + + /* If any bits are 0 value should be valid */ + if (~value) + return value; + + /* All 1's may be valid if ZEROs register still works */ + if (reg != FBNIC_FW_ZERO_REG && ~readl(csr + FBNIC_FW_ZERO_REG)) + return value; + + /* Hardware is giving us all 1's reads, assume it is gone */ + WRITE_ONCE(fbd->uc_addr0, NULL); + WRITE_ONCE(fbd->uc_addr4, NULL); + + dev_err(fbd->dev, + "Failed read (idx 0x%x AKA addr 0x%x), disabled CSR access, awaiting reset\n", + reg, reg << 2); + + /* Notify stack that device has lost (PCIe) link */ + if (!fbnic_init_failure(fbd)) + netif_device_detach(fbd->netdev); + + return ~0U; +} + +static void fbnic_service_task_start(struct fbnic_net *fbn) +{ + struct fbnic_dev *fbd = fbn->fbd; + + schedule_delayed_work(&fbd->service_task, HZ); + phylink_resume(fbn->phylink); +} + +static void fbnic_service_task_stop(struct fbnic_net *fbn) +{ + struct fbnic_dev *fbd = fbn->fbd; + + phylink_suspend(fbn->phylink, fbnic_bmc_present(fbd)); + cancel_delayed_work(&fbd->service_task); +} + +void fbnic_up(struct fbnic_net *fbn) +{ + fbnic_enable(fbn); + + fbnic_fill(fbn); + + fbnic_rss_reinit_hw(fbn->fbd, fbn); + + __fbnic_set_rx_mode(fbn->netdev); + + /* Enable Tx/Rx processing */ + fbnic_napi_enable(fbn); + netif_tx_start_all_queues(fbn->netdev); + + fbnic_service_task_start(fbn); +} + +static void fbnic_down_noidle(struct fbnic_net *fbn) +{ + fbnic_service_task_stop(fbn); + + /* Disable Tx/Rx Processing */ + fbnic_napi_disable(fbn); + netif_tx_disable(fbn->netdev); + + fbnic_clear_rx_mode(fbn->netdev); + fbnic_clear_rules(fbn->fbd); + 
fbnic_rss_disable_hw(fbn->fbd); + fbnic_disable(fbn); +} + +void fbnic_down(struct fbnic_net *fbn) +{ + fbnic_down_noidle(fbn); + + fbnic_wait_all_queues_idle(fbn->fbd, false); + + fbnic_flush(fbn); +} + +static void fbnic_health_check(struct fbnic_dev *fbd) +{ + struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX]; + + /* As long as the heart is beating the FW is healty */ + if (fbd->fw_heartbeat_enabled) + return; + + /* If the Tx mailbox still has messages sitting in it then there likely + * isn't anything we can do. We will wait until the mailbox is empty to + * report the fault so we can collect the crashlog. + */ + if (tx_mbx->head != tx_mbx->tail) + return; + + /* TBD: Need to add a more thorough recovery here. + * Specifically I need to verify what all the firmware will have + * changed since we had setup and it rebooted. May just need to + * perform a down/up. For now we will just reclaim ownership so + * the heartbeat can catch the next fault. + */ + fbnic_fw_xmit_ownership_msg(fbd, true); +} + +static void fbnic_service_task(struct work_struct *work) +{ + struct fbnic_dev *fbd = container_of(to_delayed_work(work), + struct fbnic_dev, service_task); + + rtnl_lock(); + + fbnic_fw_check_heartbeat(fbd); + + fbnic_health_check(fbd); + + if (netif_carrier_ok(fbd->netdev)) + fbnic_napi_depletion_check(fbd->netdev); + + if (netif_running(fbd->netdev)) + schedule_delayed_work(&fbd->service_task, HZ); + + rtnl_unlock(); +} + +/** + * fbnic_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in fbnic_pci_tbl + * + * Initializes a PCI device identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + * + * Return: 0 on success, negative on failure + **/ +static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + const struct fbnic_info *info = fbnic_info_tbl[ent->driver_data]; + struct net_device *netdev; + struct fbnic_dev *fbd; + int err; + + if (pdev->error_state != pci_channel_io_normal) { + dev_err(&pdev->dev, + "PCI device still in an error state. 
Unable to load...\n"); + return -EIO; + } + + err = pcim_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "PCI enable device failed: %d\n", err); + return err; + } + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(46)); + if (err) + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "DMA configuration failed: %d\n", err); + return err; + } + + err = pcim_iomap_regions(pdev, info->bar_mask, fbnic_driver_name); + if (err) { + dev_err(&pdev->dev, + "pci_request_selected_regions failed: %d\n", err); + return err; + } + + fbd = fbnic_devlink_alloc(pdev); + if (!fbd) { + dev_err(&pdev->dev, "Devlink allocation failed\n"); + return -ENOMEM; + } + + /* Populate driver with hardware-specific info and handlers */ + fbd->max_num_queues = info->max_num_queues; + + pci_set_master(pdev); + pci_save_state(pdev); + + INIT_DELAYED_WORK(&fbd->service_task, fbnic_service_task); + + err = fbnic_alloc_irqs(fbd); + if (err) + goto free_fbd; + + err = fbnic_mac_init(fbd); + if (err) { + dev_err(&pdev->dev, "Failed to initialize MAC: %d\n", err); + goto free_irqs; + } + + err = fbnic_fw_enable_mbx(fbd); + if (err) { + dev_err(&pdev->dev, + "Firmware mailbox initialization failure\n"); + goto free_irqs; + } + + fbnic_devlink_register(fbd); + + if (!fbd->dsn) { + dev_warn(&pdev->dev, "Reading serial number failed\n"); + goto init_failure_mode; + } + + netdev = fbnic_netdev_alloc(fbd); + if (!netdev) { + dev_err(&pdev->dev, "Netdev allocation failed\n"); + goto init_failure_mode; + } + + err = fbnic_netdev_register(netdev); + if (err) { + dev_err(&pdev->dev, "Netdev registration failed: %d\n", err); + goto ifm_free_netdev; + } + + return 0; + +ifm_free_netdev: + fbnic_netdev_free(fbd); +init_failure_mode: + dev_warn(&pdev->dev, "Probe error encountered, entering init failure mode. Normal networking functionality will not be available.\n"); + /* Always return 0 even on error so devlink is registered to allow + * firmware updates for fixes. + */ + return 0; +free_irqs: + fbnic_free_irqs(fbd); +free_fbd: + pci_disable_device(pdev); + fbnic_devlink_free(fbd); + + return err; +} + +/** + * fbnic_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * Called by the PCI subsystem to alert the driver that it should release + * a PCI device. The could be caused by a Hot-Plug event, or because the + * driver is going to be removed from memory. 
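+ * Devlink, firmware mailbox, and IRQ teardown below run even when the netdev was never registered (init failure mode); only the netdev-specific cleanup is skipped in that case.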
+ **/ +static void fbnic_remove(struct pci_dev *pdev) +{ + struct fbnic_dev *fbd = pci_get_drvdata(pdev); + + if (!fbnic_init_failure(fbd)) { + struct net_device *netdev = fbd->netdev; + + fbnic_netdev_unregister(netdev); + cancel_delayed_work_sync(&fbd->service_task); + fbnic_netdev_free(fbd); + } + + fbnic_devlink_unregister(fbd); + fbnic_fw_disable_mbx(fbd); + fbnic_free_irqs(fbd); + + pci_disable_device(pdev); + fbnic_devlink_free(fbd); +} + +static int fbnic_pm_suspend(struct device *dev) +{ + struct fbnic_dev *fbd = dev_get_drvdata(dev); + struct net_device *netdev = fbd->netdev; + + if (fbnic_init_failure(fbd)) + goto null_uc_addr; + + rtnl_lock(); + + netif_device_detach(netdev); + + if (netif_running(netdev)) + netdev->netdev_ops->ndo_stop(netdev); + + rtnl_unlock(); + +null_uc_addr: + fbnic_fw_disable_mbx(fbd); + + /* Free the IRQs so they aren't trying to occupy sleeping CPUs */ + fbnic_free_irqs(fbd); + + /* Hardware is about to go away, so switch off MMIO access internally */ + WRITE_ONCE(fbd->uc_addr0, NULL); + WRITE_ONCE(fbd->uc_addr4, NULL); + + return 0; +} + +static int __fbnic_pm_resume(struct device *dev) +{ + struct fbnic_dev *fbd = dev_get_drvdata(dev); + struct net_device *netdev = fbd->netdev; + void __iomem * const *iomap_table; + struct fbnic_net *fbn; + int err; + + /* Restore MMIO access */ + iomap_table = pcim_iomap_table(to_pci_dev(dev)); + fbd->uc_addr0 = iomap_table[0]; + fbd->uc_addr4 = iomap_table[4]; + + /* Rerequest the IRQs */ + err = fbnic_alloc_irqs(fbd); + if (err) + goto err_invalidate_uc_addr; + + fbd->mac->init_regs(fbd); + + /* Re-enable mailbox */ + err = fbnic_fw_enable_mbx(fbd); + if (err) + goto err_free_irqs; + + /* No netdev means there isn't a network interface to bring up */ + if (fbnic_init_failure(fbd)) + return 0; + + fbn = netdev_priv(netdev); + + /* Reset the queues if needed */ + fbnic_reset_queues(fbn, fbn->num_tx_queues, fbn->num_rx_queues); + + rtnl_lock(); + + if (netif_running(netdev)) { + err = __fbnic_open(fbn); + if (err) + goto err_disable_mbx; + } + + rtnl_unlock(); + + return 0; +err_disable_mbx: + rtnl_unlock(); + fbnic_fw_disable_mbx(fbd); +err_free_irqs: + fbnic_free_irqs(fbd); +err_invalidate_uc_addr: + WRITE_ONCE(fbd->uc_addr0, NULL); + WRITE_ONCE(fbd->uc_addr4, NULL); + return err; +} + +static void __fbnic_pm_attach(struct device *dev) +{ + struct fbnic_dev *fbd = dev_get_drvdata(dev); + struct net_device *netdev = fbd->netdev; + struct fbnic_net *fbn; + + if (fbnic_init_failure(fbd)) + return; + + fbn = netdev_priv(netdev); + + if (netif_running(netdev)) + fbnic_up(fbn); + + netif_device_attach(netdev); +} + +static int __maybe_unused fbnic_pm_resume(struct device *dev) +{ + int err; + + err = __fbnic_pm_resume(dev); + if (!err) + __fbnic_pm_attach(dev); + + return err; +} + +static const struct dev_pm_ops fbnic_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(fbnic_pm_suspend, fbnic_pm_resume) +}; + +static void fbnic_shutdown(struct pci_dev *pdev) +{ + fbnic_pm_suspend(&pdev->dev); +} + +static pci_ers_result_t fbnic_err_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + /* Disconnect device if failure is not recoverable via reset */ + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + fbnic_pm_suspend(&pdev->dev); + + /* Request a slot reset */ + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t fbnic_err_slot_reset(struct pci_dev *pdev) +{ + int err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + if 
(pci_enable_device_mem(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + /* Restore device to previous state */ + err = __fbnic_pm_resume(&pdev->dev); + + return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; +} + +static void fbnic_err_resume(struct pci_dev *pdev) +{ + __fbnic_pm_attach(&pdev->dev); +} + +static const struct pci_error_handlers fbnic_err_handler = { + .error_detected = fbnic_err_error_detected, + .slot_reset = fbnic_err_slot_reset, + .resume = fbnic_err_resume, +}; + +static struct pci_driver fbnic_driver = { + .name = fbnic_driver_name, + .id_table = fbnic_pci_tbl, + .probe = fbnic_probe, + .remove = fbnic_remove, + .driver.pm = &fbnic_pm_ops, + .shutdown = fbnic_shutdown, + .err_handler = &fbnic_err_handler, +}; + +/** + * fbnic_init_module - Driver Registration Routine + * + * The first routine called when the driver is loaded. All it does is + * register with the PCI subsystem. + * + * Return: 0 on success, negative on failure + **/ +static int __init fbnic_init_module(void) +{ + int err; + + err = pci_register_driver(&fbnic_driver); + if (err) + goto out; + + pr_info(DRV_SUMMARY " (%s)", fbnic_driver.name); +out: + return err; +} +module_init(fbnic_init_module); + +/** + * fbnic_exit_module - Driver Exit Cleanup Routine + * + * Called just before the driver is removed from memory. + **/ +static void __exit fbnic_exit_module(void) +{ + pci_unregister_driver(&fbnic_driver); +} +module_exit(fbnic_exit_module); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c new file mode 100644 index 000000000000..1a5e1e719b30 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#include <linux/phy.h> +#include <linux/phylink.h> + +#include "fbnic.h" +#include "fbnic_mac.h" +#include "fbnic_netdev.h" + +static struct fbnic_net * +fbnic_pcs_to_net(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct fbnic_net, phylink_pcs); +} + +static void +fbnic_phylink_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) +{ + struct fbnic_net *fbn = fbnic_pcs_to_net(pcs); + struct fbnic_dev *fbd = fbn->fbd; + + /* For now we use hard-coded defaults and FW config to determine + * the current values. In future patches we will add support for + * reconfiguring these values and changing link settings. 
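+ * Note that FBNIC_FW_LINK_SPEED_50R1 is not mapped in the switch below and is therefore reported as SPEED_UNKNOWN.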
+ */ + switch (fbd->fw_cap.link_speed) { + case FBNIC_FW_LINK_SPEED_25R1: + state->speed = SPEED_25000; + break; + case FBNIC_FW_LINK_SPEED_50R2: + state->speed = SPEED_50000; + break; + case FBNIC_FW_LINK_SPEED_100R2: + state->speed = SPEED_100000; + break; + default: + state->speed = SPEED_UNKNOWN; + break; + } + + state->duplex = DUPLEX_FULL; + + state->link = fbd->mac->pcs_get_link(fbd); +} + +static int +fbnic_phylink_pcs_enable(struct phylink_pcs *pcs) +{ + struct fbnic_net *fbn = fbnic_pcs_to_net(pcs); + struct fbnic_dev *fbd = fbn->fbd; + + return fbd->mac->pcs_enable(fbd); +} + +static void +fbnic_phylink_pcs_disable(struct phylink_pcs *pcs) +{ + struct fbnic_net *fbn = fbnic_pcs_to_net(pcs); + struct fbnic_dev *fbd = fbn->fbd; + + return fbd->mac->pcs_disable(fbd); +} + +static int +fbnic_phylink_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + return 0; +} + +static const struct phylink_pcs_ops fbnic_phylink_pcs_ops = { + .pcs_config = fbnic_phylink_pcs_config, + .pcs_enable = fbnic_phylink_pcs_enable, + .pcs_disable = fbnic_phylink_pcs_disable, + .pcs_get_state = fbnic_phylink_pcs_get_state, +}; + +static struct phylink_pcs * +fbnic_phylink_mac_select_pcs(struct phylink_config *config, + phy_interface_t interface) +{ + struct net_device *netdev = to_net_dev(config->dev); + struct fbnic_net *fbn = netdev_priv(netdev); + + return &fbn->phylink_pcs; +} + +static void +fbnic_phylink_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ +} + +static void +fbnic_phylink_mac_link_down(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct net_device *netdev = to_net_dev(config->dev); + struct fbnic_net *fbn = netdev_priv(netdev); + struct fbnic_dev *fbd = fbn->fbd; + + fbd->mac->link_down(fbd); + + fbn->link_down_events++; +} + +static void +fbnic_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phy, unsigned int mode, + phy_interface_t interface, int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct net_device *netdev = to_net_dev(config->dev); + struct fbnic_net *fbn = netdev_priv(netdev); + struct fbnic_dev *fbd = fbn->fbd; + + fbd->mac->link_up(fbd, tx_pause, rx_pause); +} + +static const struct phylink_mac_ops fbnic_phylink_mac_ops = { + .mac_select_pcs = fbnic_phylink_mac_select_pcs, + .mac_config = fbnic_phylink_mac_config, + .mac_link_down = fbnic_phylink_mac_link_down, + .mac_link_up = fbnic_phylink_mac_link_up, +}; + +int fbnic_phylink_init(struct net_device *netdev) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + struct phylink *phylink; + + fbn->phylink_pcs.neg_mode = true; + fbn->phylink_pcs.ops = &fbnic_phylink_pcs_ops; + + fbn->phylink_config.dev = &netdev->dev; + fbn->phylink_config.type = PHYLINK_NETDEV; + fbn->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | + MAC_10000FD | MAC_25000FD | + MAC_40000FD | MAC_50000FD | + MAC_100000FD; + fbn->phylink_config.default_an_inband = true; + + __set_bit(PHY_INTERFACE_MODE_XGMII, + fbn->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_XLGMII, + fbn->phylink_config.supported_interfaces); + + phylink = phylink_create(&fbn->phylink_config, NULL, + PHY_INTERFACE_MODE_XLGMII, + &fbnic_phylink_mac_ops); + if (IS_ERR(phylink)) + return PTR_ERR(phylink); + + fbn->phylink = phylink; + + return 0; +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c 
b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c new file mode 100644 index 000000000000..c8aa29fc052b --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c @@ -0,0 +1,651 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#include <linux/etherdevice.h> +#include <linux/ethtool.h> + +#include "fbnic.h" +#include "fbnic_netdev.h" +#include "fbnic_rpc.h" + +void fbnic_reset_indir_tbl(struct fbnic_net *fbn) +{ + unsigned int num_rx = fbn->num_rx_queues; + unsigned int i; + + for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) { + fbn->indir_tbl[0][i] = ethtool_rxfh_indir_default(i, num_rx); + fbn->indir_tbl[1][i] = ethtool_rxfh_indir_default(i, num_rx); + } +} + +void fbnic_rss_key_fill(u32 *buffer) +{ + static u32 rss_key[FBNIC_RPC_RSS_KEY_DWORD_LEN]; + + net_get_random_once(rss_key, sizeof(rss_key)); + rss_key[FBNIC_RPC_RSS_KEY_LAST_IDX] &= FBNIC_RPC_RSS_KEY_LAST_MASK; + + memcpy(buffer, rss_key, sizeof(rss_key)); +} + +#define RX_HASH_OPT_L4 \ + (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3) +#define RX_HASH_OPT_L3 \ + (RXH_IP_SRC | RXH_IP_DST) +#define RX_HASH_OPT_L2 RXH_L2DA + +void fbnic_rss_init_en_mask(struct fbnic_net *fbn) +{ + fbn->rss_flow_hash[FBNIC_TCP4_HASH_OPT] = RX_HASH_OPT_L4; + fbn->rss_flow_hash[FBNIC_TCP6_HASH_OPT] = RX_HASH_OPT_L4; + + fbn->rss_flow_hash[FBNIC_UDP4_HASH_OPT] = RX_HASH_OPT_L3; + fbn->rss_flow_hash[FBNIC_UDP6_HASH_OPT] = RX_HASH_OPT_L3; + fbn->rss_flow_hash[FBNIC_IPV4_HASH_OPT] = RX_HASH_OPT_L3; + fbn->rss_flow_hash[FBNIC_IPV6_HASH_OPT] = RX_HASH_OPT_L3; + + fbn->rss_flow_hash[FBNIC_ETHER_HASH_OPT] = RX_HASH_OPT_L2; +} + +void fbnic_rss_disable_hw(struct fbnic_dev *fbd) +{ + /* Disable RPC by clearing enable bit and configuration */ + if (!fbnic_bmc_present(fbd)) + wr32(fbd, FBNIC_RPC_RMI_CONFIG, + FIELD_PREP(FBNIC_RPC_RMI_CONFIG_OH_BYTES, 20)); +} + +#define FBNIC_FH_2_RSSEM_BIT(_fh, _rssem, _val) \ + FIELD_PREP(FBNIC_RPC_ACT_TBL1_RSS_ENA_##_rssem, \ + FIELD_GET(RXH_##_fh, _val)) +static u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type) +{ + u32 flow_hash = fbn->rss_flow_hash[flow_type]; + u32 rss_en_mask = 0; + + rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L2DA, L2_DA, flow_hash); + rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP_SRC, IP_SRC, flow_hash); + rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP_DST, IP_DST, flow_hash); + rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L4_B_0_1, L4_SRC, flow_hash); + rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L4_B_2_3, L4_DST, flow_hash); + + return rss_en_mask; +} + +void fbnic_rss_reinit_hw(struct fbnic_dev *fbd, struct fbnic_net *fbn) +{ + unsigned int i; + + for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) { + wr32(fbd, FBNIC_RPC_RSS_TBL(0, i), fbn->indir_tbl[0][i]); + wr32(fbd, FBNIC_RPC_RSS_TBL(1, i), fbn->indir_tbl[1][i]); + } + + for (i = 0; i < FBNIC_RPC_RSS_KEY_DWORD_LEN; i++) + wr32(fbd, FBNIC_RPC_RSS_KEY(i), fbn->rss_key[i]); + + /* Default action for this to drop w/ no destination */ + wr32(fbd, FBNIC_RPC_ACT_TBL0_DEFAULT, FBNIC_RPC_ACT_TBL0_DROP); + wrfl(fbd); + + wr32(fbd, FBNIC_RPC_ACT_TBL1_DEFAULT, 0); + + /* If it isn't already enabled set the RMI Config value to enable RPC */ + wr32(fbd, FBNIC_RPC_RMI_CONFIG, + FIELD_PREP(FBNIC_RPC_RMI_CONFIG_MTU, FBNIC_MAX_JUMBO_FRAME_SIZE) | + FIELD_PREP(FBNIC_RPC_RMI_CONFIG_OH_BYTES, 20) | + FBNIC_RPC_RMI_CONFIG_ENABLE); +} + +void fbnic_bmc_rpc_all_multi_config(struct fbnic_dev *fbd, + bool enable_host) +{ + struct fbnic_act_tcam *act_tcam; + struct fbnic_mac_addr *mac_addr; + int j; + + /* We need to add the all multicast 
filter at the end of the + * multicast address list. This way if there are any that are + * shared between the host and the BMC they can be directed to + * both. Otherwise the remainder just get sent directly to the + * BMC. + */ + mac_addr = &fbd->mac_addr[fbd->mac_addr_boundary - 1]; + if (fbnic_bmc_present(fbd) && fbd->fw_cap.all_multi) { + if (mac_addr->state != FBNIC_TCAM_S_VALID) { + eth_zero_addr(mac_addr->value.addr8); + eth_broadcast_addr(mac_addr->mask.addr8); + mac_addr->value.addr8[0] ^= 1; + mac_addr->mask.addr8[0] ^= 1; + set_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam); + mac_addr->state = FBNIC_TCAM_S_ADD; + } + if (enable_host) + set_bit(FBNIC_MAC_ADDR_T_ALLMULTI, + mac_addr->act_tcam); + else + clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI, + mac_addr->act_tcam); + } else if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam) && + !is_zero_ether_addr(mac_addr->mask.addr8) && + mac_addr->state == FBNIC_TCAM_S_VALID) { + clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI, mac_addr->act_tcam); + clear_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam); + mac_addr->state = FBNIC_TCAM_S_DELETE; + } + + /* We have to add a special handler for multicast as the + * BMC may have an all-multi rule already in place. As such + * adding a rule ourselves won't do any good so we will have + * to modify the rules for the ALL MULTI below if the BMC + * already has the rule in place. + */ + act_tcam = &fbd->act_tcam[FBNIC_RPC_ACT_TBL_BMC_ALL_MULTI_OFFSET]; + + /* If we are not enabling the rule just delete it. We will fall + * back to the RSS rules that support the multicast addresses. + */ + if (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi || enable_host) { + if (act_tcam->state == FBNIC_TCAM_S_VALID) + act_tcam->state = FBNIC_TCAM_S_DELETE; + return; + } + + /* Rewrite TCAM rule 23 to handle BMC all-multi traffic */ + act_tcam->dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK, + FBNIC_RPC_ACT_TBL0_DEST_BMC); + act_tcam->mask.tcam[0] = 0xffff; + + /* MACDA 0 - 3 is reserved for the BMC MAC address */ + act_tcam->value.tcam[1] = + FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX, + fbd->mac_addr_boundary - 1) | + FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID; + act_tcam->mask.tcam[1] = 0xffff & + ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX & + ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID; + + for (j = 2; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++) + act_tcam->mask.tcam[j] = 0xffff; + + act_tcam->state = FBNIC_TCAM_S_UPDATE; +} + +void fbnic_bmc_rpc_init(struct fbnic_dev *fbd) +{ + int i = FBNIC_RPC_TCAM_MACDA_BMC_ADDR_IDX; + struct fbnic_act_tcam *act_tcam; + struct fbnic_mac_addr *mac_addr; + int j; + + /* Check if BMC is present */ + if (!fbnic_bmc_present(fbd)) + return; + + /* Fetch BMC MAC addresses from firmware capabilities */ + for (j = 0; j < 4; j++) { + u8 *bmc_mac = fbd->fw_cap.bmc_mac_addr[j]; + + /* Validate BMC MAC addresses */ + if (is_zero_ether_addr(bmc_mac)) + continue; + + if (is_multicast_ether_addr(bmc_mac)) + mac_addr = __fbnic_mc_sync(fbd, bmc_mac); + else + mac_addr = &fbd->mac_addr[i++]; + + if (!mac_addr) { + netdev_err(fbd->netdev, + "No slot for BMC MAC address[%d]\n", j); + continue; + } + + ether_addr_copy(mac_addr->value.addr8, bmc_mac); + eth_zero_addr(mac_addr->mask.addr8); + + set_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam); + mac_addr->state = FBNIC_TCAM_S_ADD; + } + + /* Validate Broadcast is also present, record it and tag it */ + mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX]; + eth_broadcast_addr(mac_addr->value.addr8); + set_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam); + mac_addr->state = 
FBNIC_TCAM_S_ADD; + + /* Rewrite TCAM rule 0 if it isn't present to relocate BMC rules */ + act_tcam = &fbd->act_tcam[FBNIC_RPC_ACT_TBL_BMC_OFFSET]; + act_tcam->dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK, + FBNIC_RPC_ACT_TBL0_DEST_BMC); + act_tcam->mask.tcam[0] = 0xffff; + + /* MACDA 0 - 3 is reserved for the BMC MAC address + * to account for that we have to mask out the lower 2 bits + * of the macda by performing an &= with 0x1c. + */ + act_tcam->value.tcam[1] = FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID; + act_tcam->mask.tcam[1] = 0xffff & + ~FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX, 0x1c) & + ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID; + + for (j = 2; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++) + act_tcam->mask.tcam[j] = 0xffff; + + act_tcam->state = FBNIC_TCAM_S_UPDATE; + + fbnic_bmc_rpc_all_multi_config(fbd, false); +} + +#define FBNIC_ACT1_INIT(_l4, _udp, _ip, _v6) \ + (((_l4) ? FBNIC_RPC_TCAM_ACT1_L4_VALID : 0) | \ + ((_udp) ? FBNIC_RPC_TCAM_ACT1_L4_IS_UDP : 0) | \ + ((_ip) ? FBNIC_RPC_TCAM_ACT1_IP_VALID : 0) | \ + ((_v6) ? FBNIC_RPC_TCAM_ACT1_IP_IS_V6 : 0)) + +void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn) +{ + static const u32 act1_value[FBNIC_NUM_HASH_OPT] = { + FBNIC_ACT1_INIT(1, 1, 1, 1), /* UDP6 */ + FBNIC_ACT1_INIT(1, 1, 1, 0), /* UDP4 */ + FBNIC_ACT1_INIT(1, 0, 1, 1), /* TCP6 */ + FBNIC_ACT1_INIT(1, 0, 1, 0), /* TCP4 */ + FBNIC_ACT1_INIT(0, 0, 1, 1), /* IP6 */ + FBNIC_ACT1_INIT(0, 0, 1, 0), /* IP4 */ + 0 /* Ether */ + }; + unsigned int i; + + /* To support scenarios where a BMC is present we must write the + * rules twice, once for the unicast cases, and once again for + * the broadcast/multicast cases as we have to support 2 destinations. + */ + BUILD_BUG_ON(FBNIC_RSS_EN_NUM_UNICAST * 2 != FBNIC_RSS_EN_NUM_ENTRIES); + BUILD_BUG_ON(ARRAY_SIZE(act1_value) != FBNIC_NUM_HASH_OPT); + + /* Program RSS hash enable mask for host in action TCAM/table. */ + for (i = fbnic_bmc_present(fbd) ? 0 : FBNIC_RSS_EN_NUM_UNICAST; + i < FBNIC_RSS_EN_NUM_ENTRIES; i++) { + unsigned int idx = i + FBNIC_RPC_ACT_TBL_RSS_OFFSET; + struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[idx]; + u32 flow_hash, dest, rss_en_mask; + int flow_type, j; + u16 value = 0; + + flow_type = i % FBNIC_RSS_EN_NUM_UNICAST; + flow_hash = fbn->rss_flow_hash[flow_type]; + + /* Set DEST_HOST based on absence of RXH_DISCARD */ + dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK, + !(RXH_DISCARD & flow_hash) ? + FBNIC_RPC_ACT_TBL0_DEST_HOST : 0); + + if (i >= FBNIC_RSS_EN_NUM_UNICAST && fbnic_bmc_present(fbd)) + dest |= FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK, + FBNIC_RPC_ACT_TBL0_DEST_BMC); + + if (!dest) + dest = FBNIC_RPC_ACT_TBL0_DROP; + + if (act1_value[flow_type] & FBNIC_RPC_TCAM_ACT1_L4_VALID) + dest |= FIELD_PREP(FBNIC_RPC_ACT_TBL0_DMA_HINT, + FBNIC_RCD_HDR_AL_DMA_HINT_L4); + + rss_en_mask = fbnic_flow_hash_2_rss_en_mask(fbn, flow_type); + + act_tcam->dest = dest; + act_tcam->rss_en_mask = rss_en_mask; + act_tcam->state = FBNIC_TCAM_S_UPDATE; + + act_tcam->mask.tcam[0] = 0xffff; + + /* We reserve the upper 8 MACDA TCAM entries for host + * unicast. So we set the value to 24, and the mask the + * lower bits so that the lower entries can be used as + * multicast or BMC addresses. 
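[Editor's note] The all-multicast entry built earlier keeps only the multicast bit in the value (0x01 in the first octet) and clears exactly that bit in an otherwise all-ones mask (0xfe, then 0xff), which suggests set mask bits are treated as don't-care bits. A minimal sketch of that match rule follows, under that assumption; it is not taken from the hardware spec.

/* Standalone sketch: TCAM-style match where a set mask bit means "don't care".
 * Mirrors how the all-multicast MACDA entry above is constructed (assumption). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool tcam_match(const uint8_t *key, const uint8_t *value,
		       const uint8_t *mask, int len)
{
	for (int i = 0; i < len; i++)
		if ((key[i] & ~mask[i]) != (value[i] & ~mask[i]))
			return false;	/* unmasked bits must match exactly */
	return true;
}

int main(void)
{
	/* "Any multicast MAC": only bit 0 of octet 0 is compared */
	uint8_t value[6] = { 0x01, 0, 0, 0, 0, 0 };
	uint8_t mask[6]  = { 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff };
	uint8_t mcast[6] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 };
	uint8_t ucast[6] = { 0x02, 0xab, 0xcd, 0xef, 0x01, 0x02 };

	printf("%d %d\n", tcam_match(mcast, value, mask, 6),
	       tcam_match(ucast, value, mask, 6)); /* 1 0 */
	return 0;
}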
+ */ + if (i < FBNIC_RSS_EN_NUM_UNICAST) + value = FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX, + fbd->mac_addr_boundary); + value |= FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID; + + flow_type = i % FBNIC_RSS_EN_NUM_UNICAST; + value |= act1_value[flow_type]; + + act_tcam->value.tcam[1] = value; + act_tcam->mask.tcam[1] = ~value; + + for (j = 2; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++) + act_tcam->mask.tcam[j] = 0xffff; + + act_tcam->state = FBNIC_TCAM_S_UPDATE; + } +} + +struct fbnic_mac_addr *__fbnic_uc_sync(struct fbnic_dev *fbd, + const unsigned char *addr) +{ + struct fbnic_mac_addr *avail_addr = NULL; + unsigned int i; + + /* Scan from middle of list to bottom, filling bottom up. + * Skip the first entry which is reserved for dev_addr and + * leave the last entry to use for promiscuous filtering. + */ + for (i = fbd->mac_addr_boundary - 1; + i < FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX; i++) { + struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i]; + + if (mac_addr->state == FBNIC_TCAM_S_DISABLED) { + avail_addr = mac_addr; + } else if (ether_addr_equal(mac_addr->value.addr8, addr)) { + avail_addr = mac_addr; + break; + } + } + + if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) { + ether_addr_copy(avail_addr->value.addr8, addr); + eth_zero_addr(avail_addr->mask.addr8); + avail_addr->state = FBNIC_TCAM_S_ADD; + } + + return avail_addr; +} + +struct fbnic_mac_addr *__fbnic_mc_sync(struct fbnic_dev *fbd, + const unsigned char *addr) +{ + struct fbnic_mac_addr *avail_addr = NULL; + unsigned int i; + + /* Scan from middle of list to top, filling top down. + * Skip over the address reserved for the BMC MAC and + * exclude index 0 as that belongs to the broadcast address + */ + for (i = fbd->mac_addr_boundary; + --i > FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX;) { + struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i]; + + if (mac_addr->state == FBNIC_TCAM_S_DISABLED) { + avail_addr = mac_addr; + } else if (ether_addr_equal(mac_addr->value.addr8, addr)) { + avail_addr = mac_addr; + break; + } + } + + /* Scan the BMC addresses to see if it may have already + * reserved the address. 
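[Editor's note] __fbnic_uc_sync() above uses a common filter-table idiom: prefer an entry that already holds the address, otherwise remember a free slot (the scan keeps the last free slot seen, which fills the region from its far end, matching the "filling bottom up" comment). A self-contained sketch with a simplified entry type standing in for struct fbnic_mac_addr:

/* Standalone sketch of "reuse an existing entry, else take a free slot"
 * as done by __fbnic_uc_sync(). Entry layout is simplified. */
#include <stdio.h>
#include <string.h>

struct entry { int in_use; unsigned char addr[6]; };

static struct entry *find_slot(struct entry *tbl, int n, const unsigned char *addr)
{
	struct entry *avail = NULL;

	for (int i = 0; i < n; i++) {
		if (!tbl[i].in_use)
			avail = &tbl[i];	/* remember the latest free slot */
		else if (!memcmp(tbl[i].addr, addr, 6))
			return &tbl[i];		/* an existing entry always wins */
	}
	return avail;				/* NULL if the region is full */
}

int main(void)
{
	struct entry tbl[4] = {
		{ 0 }, { 1, { 0x02, 0, 0, 0, 0, 1 } }, { 0 }, { 0 },
	};
	unsigned char want[6] = { 0x02, 0, 0, 0, 0, 1 };

	printf("%ld\n", (long)(find_slot(tbl, 4, want) - tbl)); /* 1: existing entry reused */
	return 0;
}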
+ */ + while (--i) { + struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i]; + + if (!is_zero_ether_addr(mac_addr->mask.addr8)) + continue; + + /* Only move on if we find a match */ + if (!ether_addr_equal(mac_addr->value.addr8, addr)) + continue; + + /* We need to pull this address to the shared area */ + if (avail_addr) { + memcpy(avail_addr, mac_addr, sizeof(*mac_addr)); + mac_addr->state = FBNIC_TCAM_S_DELETE; + avail_addr->state = FBNIC_TCAM_S_ADD; + } + + break; + } + + if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) { + ether_addr_copy(avail_addr->value.addr8, addr); + eth_zero_addr(avail_addr->mask.addr8); + avail_addr->state = FBNIC_TCAM_S_ADD; + } + + return avail_addr; +} + +int __fbnic_xc_unsync(struct fbnic_mac_addr *mac_addr, unsigned int tcam_idx) +{ + if (!test_and_clear_bit(tcam_idx, mac_addr->act_tcam)) + return -ENOENT; + + if (bitmap_empty(mac_addr->act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES)) + mac_addr->state = FBNIC_TCAM_S_DELETE; + + return 0; +} + +void fbnic_sift_macda(struct fbnic_dev *fbd) +{ + int dest, src; + + /* Move BMC only addresses back into BMC region */ + for (dest = FBNIC_RPC_TCAM_MACDA_BMC_ADDR_IDX, + src = FBNIC_RPC_TCAM_MACDA_MULTICAST_IDX; + ++dest < FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX && + src < fbd->mac_addr_boundary;) { + struct fbnic_mac_addr *dest_addr = &fbd->mac_addr[dest]; + + if (dest_addr->state != FBNIC_TCAM_S_DISABLED) + continue; + + while (src < fbd->mac_addr_boundary) { + struct fbnic_mac_addr *src_addr = &fbd->mac_addr[src++]; + + /* Verify BMC bit is set */ + if (!test_bit(FBNIC_MAC_ADDR_T_BMC, src_addr->act_tcam)) + continue; + + /* Verify filter isn't already disabled */ + if (src_addr->state == FBNIC_TCAM_S_DISABLED || + src_addr->state == FBNIC_TCAM_S_DELETE) + continue; + + /* Verify only BMC bit is set */ + if (bitmap_weight(src_addr->act_tcam, + FBNIC_RPC_TCAM_ACT_NUM_ENTRIES) != 1) + continue; + + /* Verify we are not moving wildcard address */ + if (!is_zero_ether_addr(src_addr->mask.addr8)) + continue; + + memcpy(dest_addr, src_addr, sizeof(*src_addr)); + src_addr->state = FBNIC_TCAM_S_DELETE; + dest_addr->state = FBNIC_TCAM_S_ADD; + } + } +} + +static void fbnic_clear_macda_entry(struct fbnic_dev *fbd, unsigned int idx) +{ + int i; + + /* Invalidate entry and clear addr state info */ + for (i = 0; i <= FBNIC_RPC_TCAM_MACDA_WORD_LEN; i++) + wr32(fbd, FBNIC_RPC_TCAM_MACDA(idx, i), 0); +} + +static void fbnic_clear_macda(struct fbnic_dev *fbd) +{ + int idx; + + for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) { + struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx]; + + if (mac_addr->state == FBNIC_TCAM_S_DISABLED) + continue; + + if (test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) { + if (fbnic_bmc_present(fbd)) + continue; + dev_warn_once(fbd->dev, + "Found BMC MAC address w/ BMC not present\n"); + } + + fbnic_clear_macda_entry(fbd, idx); + + /* If rule was already destined for deletion just wipe it now */ + if (mac_addr->state == FBNIC_TCAM_S_DELETE) { + memset(mac_addr, 0, sizeof(*mac_addr)); + continue; + } + + /* Change state to update so that we will rewrite + * this tcam the next time fbnic_write_macda is called. 
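[Editor's note] __fbnic_xc_unsync() above tracks which consumers reference a MAC entry with a per-entry bitmap and only schedules deletion once the last owner clears its bit. A standalone sketch of that ownership pattern, using a plain unsigned long in place of the kernel bitmap helpers:

/* Standalone sketch of bitmap-based ownership as in __fbnic_xc_unsync():
 * clear the caller's bit; mark the entry for deletion when no owners remain. */
#include <errno.h>
#include <stdio.h>

enum { S_VALID = 1, S_DELETE = 3 };

struct entry { unsigned long owners; int state; };

static int entry_unsync(struct entry *e, unsigned int owner_bit)
{
	unsigned long bit = 1UL << owner_bit;

	if (!(e->owners & bit))
		return -ENOENT;		/* caller never owned this entry */

	e->owners &= ~bit;
	if (!e->owners)
		e->state = S_DELETE;	/* last owner gone, schedule delete */
	return 0;
}

int main(void)
{
	struct entry e = { .owners = 0x3, .state = S_VALID };

	entry_unsync(&e, 0);
	entry_unsync(&e, 1);
	printf("%d\n", e.state == S_DELETE); /* 1 */
	return 0;
}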
+ */ + mac_addr->state = FBNIC_TCAM_S_UPDATE; + } +} + +static void fbnic_write_macda_entry(struct fbnic_dev *fbd, unsigned int idx, + struct fbnic_mac_addr *mac_addr) +{ + __be16 *mask, *value; + int i; + + mask = &mac_addr->mask.addr16[FBNIC_RPC_TCAM_MACDA_WORD_LEN - 1]; + value = &mac_addr->value.addr16[FBNIC_RPC_TCAM_MACDA_WORD_LEN - 1]; + + for (i = 0; i < FBNIC_RPC_TCAM_MACDA_WORD_LEN; i++) + wr32(fbd, FBNIC_RPC_TCAM_MACDA(idx, i), + FIELD_PREP(FBNIC_RPC_TCAM_MACDA_MASK, ntohs(*mask--)) | + FIELD_PREP(FBNIC_RPC_TCAM_MACDA_VALUE, ntohs(*value--))); + + wrfl(fbd); + + wr32(fbd, FBNIC_RPC_TCAM_MACDA(idx, i), FBNIC_RPC_TCAM_VALIDATE); +} + +void fbnic_write_macda(struct fbnic_dev *fbd) +{ + int idx; + + for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) { + struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx]; + + /* Check if update flag is set else exit. */ + if (!(mac_addr->state & FBNIC_TCAM_S_UPDATE)) + continue; + + /* Clear by writing 0s. */ + if (mac_addr->state == FBNIC_TCAM_S_DELETE) { + /* Invalidate entry and clear addr state info */ + fbnic_clear_macda_entry(fbd, idx); + memset(mac_addr, 0, sizeof(*mac_addr)); + + continue; + } + + fbnic_write_macda_entry(fbd, idx, mac_addr); + + mac_addr->state = FBNIC_TCAM_S_VALID; + } +} + +static void fbnic_clear_act_tcam(struct fbnic_dev *fbd, unsigned int idx) +{ + int i; + + /* Invalidate entry and clear addr state info */ + for (i = 0; i <= FBNIC_RPC_TCAM_ACT_WORD_LEN; i++) + wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i), 0); +} + +void fbnic_clear_rules(struct fbnic_dev *fbd) +{ + u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK, + FBNIC_RPC_ACT_TBL0_DEST_BMC); + int i = FBNIC_RPC_TCAM_ACT_NUM_ENTRIES - 1; + struct fbnic_act_tcam *act_tcam; + + /* Clear MAC rules */ + fbnic_clear_macda(fbd); + + /* If BMC is present we need to preserve the last rule which + * will be used to route traffic to the BMC if it is received. + * + * At this point it should be the only MAC address in the MACDA + * so any unicast or multicast traffic received should be routed + * to it. So leave the last rule in place. + * + * It will be rewritten to add the host again when we bring + * the interface back up. 
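[Editor's note] fbnic_write_macda_entry() above walks the six-byte address as three big-endian 16-bit words from the last word back to the first before packing each into a register. A hedged, host-only sketch of that word split; the FIELD_PREP packing and register offsets are intentionally left out.

/* Standalone sketch: split a MAC address into three 16-bit big-endian words
 * and visit them last-to-first, as fbnic_write_macda_entry() does. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

	for (int word = 2; word >= 0; word--) {
		uint16_t v = (uint16_t)((mac[2 * word] << 8) | mac[2 * word + 1]);

		printf("word %d = 0x%04x\n", word, v);	/* 0xbbcc, 0x21aa, 0x021b */
	}
	return 0;
}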
+ */ + if (fbnic_bmc_present(fbd)) { + act_tcam = &fbd->act_tcam[i]; + + if (act_tcam->state == FBNIC_TCAM_S_VALID && + (act_tcam->dest & dest)) { + wr32(fbd, FBNIC_RPC_ACT_TBL0(i), dest); + wr32(fbd, FBNIC_RPC_ACT_TBL1(i), 0); + + act_tcam->state = FBNIC_TCAM_S_UPDATE; + + i--; + } + } + + /* Work from the bottom up deleting all other rules from hardware */ + do { + act_tcam = &fbd->act_tcam[i]; + + if (act_tcam->state != FBNIC_TCAM_S_VALID) + continue; + + fbnic_clear_act_tcam(fbd, i); + act_tcam->state = FBNIC_TCAM_S_UPDATE; + } while (i--); +} + +static void fbnic_delete_act_tcam(struct fbnic_dev *fbd, unsigned int idx) +{ + fbnic_clear_act_tcam(fbd, idx); + memset(&fbd->act_tcam[idx], 0, sizeof(struct fbnic_act_tcam)); +} + +static void fbnic_update_act_tcam(struct fbnic_dev *fbd, unsigned int idx) +{ + struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[idx]; + int i; + + /* Update entry by writing the destination and RSS mask */ + wr32(fbd, FBNIC_RPC_ACT_TBL0(idx), act_tcam->dest); + wr32(fbd, FBNIC_RPC_ACT_TBL1(idx), act_tcam->rss_en_mask); + + /* Write new TCAM rule to hardware */ + for (i = 0; i < FBNIC_RPC_TCAM_ACT_WORD_LEN; i++) + wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i), + FIELD_PREP(FBNIC_RPC_TCAM_ACT_MASK, + act_tcam->mask.tcam[i]) | + FIELD_PREP(FBNIC_RPC_TCAM_ACT_VALUE, + act_tcam->value.tcam[i])); + + wrfl(fbd); + + wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i), FBNIC_RPC_TCAM_VALIDATE); + act_tcam->state = FBNIC_TCAM_S_VALID; +} + +void fbnic_write_rules(struct fbnic_dev *fbd) +{ + int i; + + /* Flush any pending action table rules */ + for (i = 0; i < FBNIC_RPC_ACT_TBL_NUM_ENTRIES; i++) { + struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[i]; + + /* Check if update flag is set else exit. */ + if (!(act_tcam->state & FBNIC_TCAM_S_UPDATE)) + continue; + + if (act_tcam->state == FBNIC_TCAM_S_DELETE) + fbnic_delete_act_tcam(fbd, i); + else + fbnic_update_act_tcam(fbd, i); + } +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h new file mode 100644 index 000000000000..d62935f722a2 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h @@ -0,0 +1,189 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#ifndef _FBNIC_RPC_H_ +#define _FBNIC_RPC_H_ + +#include <uapi/linux/in6.h> +#include <linux/bitfield.h> + +/* The TCAM state definitions follow an expected ordering. 
+ * They start out disabled, then move through the following states: + * Disabled 0 -> Add 2 + * Add 2 -> Valid 1 + * + * Valid 1 -> Add/Update 2 + * Add 2 -> Valid 1 + * + * Valid 1 -> Delete 3 + * Delete 3 -> Disabled 0 + */ +enum { + FBNIC_TCAM_S_DISABLED = 0, + FBNIC_TCAM_S_VALID = 1, + FBNIC_TCAM_S_ADD = 2, + FBNIC_TCAM_S_UPDATE = FBNIC_TCAM_S_ADD, + FBNIC_TCAM_S_DELETE = 3, +}; + +/* 32 MAC Destination Address TCAM Entries + * 4 registers DA[1:0], DA[3:2], DA[5:4], Validate + */ +#define FBNIC_RPC_TCAM_MACDA_WORD_LEN 3 +#define FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES 32 + +#define FBNIC_RPC_TCAM_ACT_WORD_LEN 11 +#define FBNIC_RPC_TCAM_ACT_NUM_ENTRIES 64 + +struct fbnic_mac_addr { + union { + unsigned char addr8[ETH_ALEN]; + __be16 addr16[FBNIC_RPC_TCAM_MACDA_WORD_LEN]; + } mask, value; + unsigned char state; + DECLARE_BITMAP(act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES); +}; + +struct fbnic_act_tcam { + struct { + u16 tcam[FBNIC_RPC_TCAM_ACT_WORD_LEN]; + } mask, value; + unsigned char state; + u16 rss_en_mask; + u32 dest; +}; + +enum { + FBNIC_RSS_EN_HOST_UDP6, + FBNIC_RSS_EN_HOST_UDP4, + FBNIC_RSS_EN_HOST_TCP6, + FBNIC_RSS_EN_HOST_TCP4, + FBNIC_RSS_EN_HOST_IP6, + FBNIC_RSS_EN_HOST_IP4, + FBNIC_RSS_EN_HOST_ETHER, + FBNIC_RSS_EN_XCAST_UDP6, +#define FBNIC_RSS_EN_NUM_UNICAST FBNIC_RSS_EN_XCAST_UDP6 + FBNIC_RSS_EN_XCAST_UDP4, + FBNIC_RSS_EN_XCAST_TCP6, + FBNIC_RSS_EN_XCAST_TCP4, + FBNIC_RSS_EN_XCAST_IP6, + FBNIC_RSS_EN_XCAST_IP4, + FBNIC_RSS_EN_XCAST_ETHER, + FBNIC_RSS_EN_NUM_ENTRIES +}; + +/* Reserve the first 2 entries for the use by the BMC so that we can + * avoid allowing rules to get in the way of BMC unicast traffic. + */ +#define FBNIC_RPC_ACT_TBL_BMC_OFFSET 0 +#define FBNIC_RPC_ACT_TBL_BMC_ALL_MULTI_OFFSET 1 + +/* We reserve the last 14 entries for RSS rules on the host. The BMC + * unicast rule will need to be populated above these and is expected to + * use MACDA TCAM entry 23 to store the BMC MAC address. + */ +#define FBNIC_RPC_ACT_TBL_RSS_OFFSET \ + (FBNIC_RPC_ACT_TBL_NUM_ENTRIES - FBNIC_RSS_EN_NUM_ENTRIES) + +/* Flags used to identify the owner for this MAC filter. Note that any + * flags set for Broadcast thru Promisc indicate that the rule belongs + * to the RSS filters for the host. + */ +enum { + FBNIC_MAC_ADDR_T_BMC = 0, + FBNIC_MAC_ADDR_T_BROADCAST = FBNIC_RPC_ACT_TBL_RSS_OFFSET, +#define FBNIC_MAC_ADDR_T_HOST_START FBNIC_MAC_ADDR_T_BROADCAST + FBNIC_MAC_ADDR_T_MULTICAST, + FBNIC_MAC_ADDR_T_UNICAST, + FBNIC_MAC_ADDR_T_ALLMULTI, /* BROADCAST ... MULTICAST*/ + FBNIC_MAC_ADDR_T_PROMISC, /* BROADCAST ... 
UNICAST */ + FBNIC_MAC_ADDR_T_HOST_LAST +}; + +#define FBNIC_MAC_ADDR_T_HOST_LEN \ + (FBNIC_MAC_ADDR_T_HOST_LAST - FBNIC_MAC_ADDR_T_HOST_START) + +#define FBNIC_RPC_TCAM_ACT0_IPSRC_IDX CSR_GENMASK(2, 0) +#define FBNIC_RPC_TCAM_ACT0_IPSRC_VALID CSR_BIT(3) +#define FBNIC_RPC_TCAM_ACT0_IPDST_IDX CSR_GENMASK(6, 4) +#define FBNIC_RPC_TCAM_ACT0_IPDST_VALID CSR_BIT(7) +#define FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX CSR_GENMASK(10, 8) +#define FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID CSR_BIT(11) +#define FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX CSR_GENMASK(14, 12) +#define FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID CSR_BIT(15) + +#define FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX CSR_GENMASK(9, 5) +#define FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID CSR_BIT(10) +#define FBNIC_RPC_TCAM_ACT1_IP_IS_V6 CSR_BIT(11) +#define FBNIC_RPC_TCAM_ACT1_IP_VALID CSR_BIT(12) +#define FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID CSR_BIT(13) +#define FBNIC_RPC_TCAM_ACT1_L4_IS_UDP CSR_BIT(14) +#define FBNIC_RPC_TCAM_ACT1_L4_VALID CSR_BIT(15) + +/* TCAM 0 - 3 reserved for BMC MAC addresses */ +#define FBNIC_RPC_TCAM_MACDA_BMC_ADDR_IDX 0 +/* TCAM 4 reserved for broadcast MAC address */ +#define FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX 4 +/* TCAMs 5 - 30 will be used for multicast and unicast addresses. The + * boundary between the two can be variable it is currently set to 24 + * on which the unicast addresses start. The general idea is that we will + * always go top-down with unicast, and bottom-up with multicast so that + * there should be free-space in the middle between the two. + * + * The entry at MADCA_DEFAULT_BOUNDARY is a special case as it can be used + * for the ALL MULTI address if the list is full, or the BMC has requested + * it. + */ +#define FBNIC_RPC_TCAM_MACDA_MULTICAST_IDX 5 +#define FBNIC_RPC_TCAM_MACDA_DEFAULT_BOUNDARY 24 +#define FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX 30 +/* Reserved for use to record Multicast promisc, or Promiscuous */ +#define FBNIC_RPC_TCAM_MACDA_PROMISC_IDX 31 + +enum { + FBNIC_UDP6_HASH_OPT, + FBNIC_UDP4_HASH_OPT, + FBNIC_TCP6_HASH_OPT, + FBNIC_TCP4_HASH_OPT, +#define FBNIC_L4_HASH_OPT FBNIC_TCP4_HASH_OPT + FBNIC_IPV6_HASH_OPT, + FBNIC_IPV4_HASH_OPT, +#define FBNIC_IP_HASH_OPT FBNIC_IPV4_HASH_OPT + FBNIC_ETHER_HASH_OPT, + FBNIC_NUM_HASH_OPT, +}; + +struct fbnic_dev; +struct fbnic_net; + +void fbnic_bmc_rpc_init(struct fbnic_dev *fbd); +void fbnic_bmc_rpc_all_multi_config(struct fbnic_dev *fbd, bool enable_host); + +void fbnic_reset_indir_tbl(struct fbnic_net *fbn); +void fbnic_rss_key_fill(u32 *buffer); +void fbnic_rss_init_en_mask(struct fbnic_net *fbn); +void fbnic_rss_disable_hw(struct fbnic_dev *fbd); +void fbnic_rss_reinit_hw(struct fbnic_dev *fbd, struct fbnic_net *fbn); +void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn); + +int __fbnic_xc_unsync(struct fbnic_mac_addr *mac_addr, unsigned int tcam_idx); +struct fbnic_mac_addr *__fbnic_uc_sync(struct fbnic_dev *fbd, + const unsigned char *addr); +struct fbnic_mac_addr *__fbnic_mc_sync(struct fbnic_dev *fbd, + const unsigned char *addr); +void fbnic_sift_macda(struct fbnic_dev *fbd); +void fbnic_write_macda(struct fbnic_dev *fbd); + +static inline int __fbnic_uc_unsync(struct fbnic_mac_addr *mac_addr) +{ + return __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_UNICAST); +} + +static inline int __fbnic_mc_unsync(struct fbnic_mac_addr *mac_addr) +{ + return __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_MULTICAST); +} + +void fbnic_clear_rules(struct fbnic_dev *fbd); +void fbnic_write_rules(struct fbnic_dev *fbd); +#endif /* _FBNIC_RPC_H_ */ diff --git 
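[Editor's note] The state ordering documented at the top of fbnic_rpc.h (Disabled -> Add -> Valid, Valid -> Update -> Valid, Valid -> Delete -> Disabled) can be summarized in a small transition helper. This is a reading aid only; the driver drives these transitions from the sync and write paths rather than through a single function.

/* Standalone sketch of the TCAM entry lifecycle described in fbnic_rpc.h.
 * Values mirror the enum above (ADD and UPDATE share a value). */
#include <stdio.h>

enum tcam_state {
	TCAM_S_DISABLED = 0,
	TCAM_S_VALID    = 1,
	TCAM_S_ADD      = 2,	/* also used for UPDATE */
	TCAM_S_DELETE   = 3,
};

/* What the write path turns each pending state into once flushed to HW */
static enum tcam_state tcam_commit(enum tcam_state s)
{
	switch (s) {
	case TCAM_S_ADD:	/* ADD/UPDATE -> entry now live */
		return TCAM_S_VALID;
	case TCAM_S_DELETE:	/* DELETE -> slot free again */
		return TCAM_S_DISABLED;
	default:		/* DISABLED/VALID are stable until changed */
		return s;
	}
}

int main(void)
{
	printf("%d %d\n", tcam_commit(TCAM_S_ADD), tcam_commit(TCAM_S_DELETE)); /* 1 0 */
	return 0;
}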
a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c new file mode 100644 index 000000000000..2a174ab062a3 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c @@ -0,0 +1,529 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#include <linux/gfp.h> +#include <linux/mm.h> +#include <linux/once.h> +#include <linux/random.h> +#include <linux/string.h> +#include <uapi/linux/if_ether.h> + +#include "fbnic_tlv.h" + +/** + * fbnic_tlv_msg_alloc - Allocate page and initialize FW message header + * @msg_id: Identifier for new message we are starting + * + * Return: pointer to start of message, or NULL on failure. + * + * Allocates a page and initializes message header at start of page. + * Initial message size is 1 DWORD which is just the header. + **/ +struct fbnic_tlv_msg *fbnic_tlv_msg_alloc(u16 msg_id) +{ + struct fbnic_tlv_hdr hdr = { 0 }; + struct fbnic_tlv_msg *msg; + + msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_KERNEL); + if (!msg) + return NULL; + + /* Start with zero filled header and then back fill with data */ + hdr.type = msg_id; + hdr.is_msg = 1; + hdr.len = cpu_to_le16(1); + + /* Copy header into start of message */ + msg->hdr = hdr; + + return msg; +} + +/** + * fbnic_tlv_attr_put_flag - Add flag value to message + * @msg: Message header we are adding flag attribute to + * @attr_id: ID of flag attribute we are adding to message + * + * Return: -ENOSPC if there is no room for the attribute. Otherwise 0. + * + * Adds a 1 DWORD flag attribute to the message. The presence of this + * attribute can be used as a boolean value indicating true, otherwise the + * value is considered false. + **/ +int fbnic_tlv_attr_put_flag(struct fbnic_tlv_msg *msg, const u16 attr_id) +{ + int attr_max_len = PAGE_SIZE - offset_in_page(msg) - sizeof(*msg); + struct fbnic_tlv_hdr hdr = { 0 }; + struct fbnic_tlv_msg *attr; + + attr_max_len -= le16_to_cpu(msg->hdr.len) * sizeof(u32); + if (attr_max_len < sizeof(*attr)) + return -ENOSPC; + + /* Get header pointer and bump attr to start of data */ + attr = &msg[le16_to_cpu(msg->hdr.len)]; + + /* Record attribute type and size */ + hdr.type = attr_id; + hdr.len = cpu_to_le16(sizeof(hdr)); + + attr->hdr = hdr; + le16_add_cpu(&msg->hdr.len, + FBNIC_TLV_MSG_SIZE(le16_to_cpu(hdr.len))); + + return 0; +} + +/** + * fbnic_tlv_attr_put_value - Add data to message + * @msg: Message header we are adding flag attribute to + * @attr_id: ID of flag attribute we are adding to message + * @value: Pointer to data to be stored + * @len: Size of data to be stored. + * + * Return: -ENOSPC if there is no room for the attribute. Otherwise 0. + * + * Adds header and copies data pointed to by value into the message. The + * result is rounded up to the nearest DWORD for sizing so that the + * headers remain aligned. + * + * The assumption is that the value field is in a format where byte + * ordering can be guaranteed such as a byte array or a little endian + * format. 
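[Editor's note] fbnic_tlv_attr_put_flag() above, and the other put helpers that follow, begin with the same space check: the remaining budget is the page minus the bytes already consumed by the message (length is tracked in DWORDs), and the new attribute must fit header plus payload. A simplified standalone sketch of just that bookkeeping, assuming a page-aligned 4 KiB message buffer and folding the small header reservations into one check:

/* Standalone sketch of the TLV space check used by the put helpers:
 * msg_len_dw is the running message length in DWORDs (header included). */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SZ		4096u
#define TLV_HDR_BYTES	4u	/* 16-bit type/flags + 16-bit length */

static bool tlv_attr_fits(unsigned int msg_len_dw, size_t payload_len)
{
	size_t used = (size_t)msg_len_dw * 4;	/* bytes already written */

	if (used > PAGE_SZ)
		return false;
	return PAGE_SZ - used >= TLV_HDR_BYTES + payload_len;
}

int main(void)
{
	printf("%d %d\n", tlv_attr_fits(1, 16), tlv_attr_fits(1023, 16)); /* 1 0 */
	return 0;
}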
+ **/ +int fbnic_tlv_attr_put_value(struct fbnic_tlv_msg *msg, const u16 attr_id, + const void *value, const int len) +{ + int attr_max_len = PAGE_SIZE - offset_in_page(msg) - sizeof(*msg); + struct fbnic_tlv_hdr hdr = { 0 }; + struct fbnic_tlv_msg *attr; + + attr_max_len -= le16_to_cpu(msg->hdr.len) * sizeof(u32); + if (attr_max_len < sizeof(*attr) + len) + return -ENOSPC; + + /* Get header pointer and bump attr to start of data */ + attr = &msg[le16_to_cpu(msg->hdr.len)]; + + /* Record attribute type and size */ + hdr.type = attr_id; + hdr.len = cpu_to_le16(sizeof(hdr) + len); + + /* Zero pad end of region to be written if we aren't aligned */ + if (len % sizeof(hdr)) + attr->value[len / sizeof(hdr)] = 0; + + /* Copy data over */ + memcpy(attr->value, value, len); + + attr->hdr = hdr; + le16_add_cpu(&msg->hdr.len, + FBNIC_TLV_MSG_SIZE(le16_to_cpu(hdr.len))); + + return 0; +} + +/** + * __fbnic_tlv_attr_put_int - Add integer to message + * @msg: Message header we are adding flag attribute to + * @attr_id: ID of flag attribute we are adding to message + * @value: Data to be stored + * @len: Size of data to be stored, either 4 or 8 bytes. + * + * Return: -ENOSPC if there is no room for the attribute. Otherwise 0. + * + * Adds header and copies data pointed to by value into the message. Will + * format the data as little endian. + **/ +int __fbnic_tlv_attr_put_int(struct fbnic_tlv_msg *msg, const u16 attr_id, + s64 value, const int len) +{ + __le64 le64_value = cpu_to_le64(value); + + return fbnic_tlv_attr_put_value(msg, attr_id, &le64_value, len); +} + +/** + * fbnic_tlv_attr_put_mac_addr - Add mac_addr to message + * @msg: Message header we are adding flag attribute to + * @attr_id: ID of flag attribute we are adding to message + * @mac_addr: Byte pointer to MAC address to be stored + * + * Return: -ENOSPC if there is no room for the attribute. Otherwise 0. + * + * Adds header and copies data pointed to by mac_addr into the message. Will + * copy the address raw so it will be in big endian with start of MAC + * address at start of attribute. + **/ +int fbnic_tlv_attr_put_mac_addr(struct fbnic_tlv_msg *msg, const u16 attr_id, + const u8 *mac_addr) +{ + return fbnic_tlv_attr_put_value(msg, attr_id, mac_addr, ETH_ALEN); +} + +/** + * fbnic_tlv_attr_put_string - Add string to message + * @msg: Message header we are adding flag attribute to + * @attr_id: ID of flag attribute we are adding to message + * @string: Byte pointer to null terminated string to be stored + * + * Return: -ENOSPC if there is no room for the attribute. Otherwise 0. + * + * Adds header and copies data pointed to by string into the message. Will + * copy the address raw so it will be in byte order. + **/ +int fbnic_tlv_attr_put_string(struct fbnic_tlv_msg *msg, u16 attr_id, + const char *string) +{ + int attr_max_len = PAGE_SIZE - sizeof(*msg); + int str_len = 1; + + /* The max length will be message minus existing message and new + * attribute header. Since the message is measured in DWORDs we have + * to multiply the size by 4. + * + * The string length doesn't include the \0 so we have to add one to + * the final value, so start with that as our initial value. 
+ * + * We will verify if the string will fit in fbnic_tlv_attr_put_value() + */ + attr_max_len -= le16_to_cpu(msg->hdr.len) * sizeof(u32); + str_len += strnlen(string, attr_max_len); + + return fbnic_tlv_attr_put_value(msg, attr_id, string, str_len); +} + +/** + * fbnic_tlv_attr_get_unsigned - Retrieve unsigned value from result + * @attr: Attribute to retrieve data from + * + * Return: unsigned 64b value containing integer value + **/ +u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr) +{ + __le64 le64_value = 0; + + memcpy(&le64_value, &attr->value[0], + le16_to_cpu(attr->hdr.len) - sizeof(*attr)); + + return le64_to_cpu(le64_value); +} + +/** + * fbnic_tlv_attr_get_signed - Retrieve signed value from result + * @attr: Attribute to retrieve data from + * + * Return: signed 64b value containing integer value + **/ +s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr) +{ + int shift = (8 + sizeof(*attr) - le16_to_cpu(attr->hdr.len)) * 8; + __le64 le64_value = 0; + s64 value; + + /* Copy the value and adjust for byte ordering */ + memcpy(&le64_value, &attr->value[0], + le16_to_cpu(attr->hdr.len) - sizeof(*attr)); + value = le64_to_cpu(le64_value); + + /* Sign extend the return value by using a pair of shifts */ + return (value << shift) >> shift; +} + +/** + * fbnic_tlv_attr_get_string - Retrieve string value from result + * @attr: Attribute to retrieve data from + * @str: Pointer to an allocated string to store the data + * @max_size: The maximum size which can be in str + * + * Return: the size of the string read from firmware + **/ +size_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *str, + size_t max_size) +{ + max_size = min_t(size_t, max_size, + (le16_to_cpu(attr->hdr.len) * 4) - sizeof(*attr)); + memcpy(str, &attr->value, max_size); + + return max_size; +} + +/** + * fbnic_tlv_attr_nest_start - Add nested attribute header to message + * @msg: Message header we are adding flag attribute to + * @attr_id: ID of flag attribute we are adding to message + * + * Return: NULL if there is no room for the attribute. Otherwise a pointer + * to the new attribute header. + * + * New header length is stored initially in DWORDs. + **/ +struct fbnic_tlv_msg *fbnic_tlv_attr_nest_start(struct fbnic_tlv_msg *msg, + u16 attr_id) +{ + int attr_max_len = PAGE_SIZE - offset_in_page(msg) - sizeof(*msg); + struct fbnic_tlv_msg *attr = &msg[le16_to_cpu(msg->hdr.len)]; + struct fbnic_tlv_hdr hdr = { 0 }; + + /* Make sure we have space for at least the nest header plus one more */ + attr_max_len -= le16_to_cpu(msg->hdr.len) * sizeof(u32); + if (attr_max_len < sizeof(*attr) * 2) + return NULL; + + /* Record attribute type and size */ + hdr.type = attr_id; + + /* Add current message length to account for consumption within the + * page and leave it as a multiple of DWORDs, we will shift to + * bytes when we close it out. + */ + hdr.len = cpu_to_le16(1); + + attr->hdr = hdr; + + return attr; +} + +/** + * fbnic_tlv_attr_nest_stop - Close out nested attribute and add it to message + * @msg: Message header we are adding flag attribute to + * + * Closes out nested attribute, adds length to message, and then bumps + * length from DWORDs to bytes to match other attributes. 
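[Editor's note] fbnic_tlv_attr_get_signed() above recovers a signed integer that may have been stored in fewer than eight bytes by shifting it up to the top of a 64-bit word and back down, relying on arithmetic right shift of signed values (implementation-defined in C, but the behavior the kernel assumes). A standalone sketch:

/* Standalone sketch of sign-extending an n-byte little-endian value with a
 * shift pair, as fbnic_tlv_attr_get_signed() does. Assumes 1 <= nbytes <= 8
 * and arithmetic right shift for signed types. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static int64_t sext_le(const uint8_t *buf, unsigned int nbytes)
{
	uint64_t v = 0;
	unsigned int shift = (8 - nbytes) * 8;

	for (unsigned int i = 0; i < nbytes; i++)
		v |= (uint64_t)buf[i] << (8 * i);	/* little-endian load */

	return ((int64_t)(v << shift)) >> shift;	/* replicate the sign bit */
}

int main(void)
{
	uint8_t neg_one[4] = { 0xff, 0xff, 0xff, 0xff };	/* -1 stored in 4 bytes */

	printf("%" PRId64 "\n", sext_le(neg_one, 4));		/* -1 */
	return 0;
}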
+ **/ +void fbnic_tlv_attr_nest_stop(struct fbnic_tlv_msg *msg) +{ + struct fbnic_tlv_msg *attr = &msg[le16_to_cpu(msg->hdr.len)]; + u16 len = le16_to_cpu(attr->hdr.len); + + /* Add attribute to message if there is more than just a header */ + if (len <= 1) + return; + + le16_add_cpu(&msg->hdr.len, len); + + /* Convert from DWORDs to bytes */ + attr->hdr.len = cpu_to_le16(len * sizeof(u32)); +} + +static int +fbnic_tlv_attr_validate(struct fbnic_tlv_msg *attr, + const struct fbnic_tlv_index *tlv_index) +{ + u16 len = le16_to_cpu(attr->hdr.len) - sizeof(*attr); + u16 attr_id = attr->hdr.type; + __le32 *value = &attr->value[0]; + + if (attr->hdr.is_msg) + return -EINVAL; + + if (attr_id >= FBNIC_TLV_RESULTS_MAX) + return -EINVAL; + + while (tlv_index->id != attr_id) { + if (tlv_index->id == FBNIC_TLV_ATTR_ID_UNKNOWN) { + if (attr->hdr.cannot_ignore) + return -ENOENT; + return le16_to_cpu(attr->hdr.len); + } + + tlv_index++; + } + + if (offset_in_page(attr) + len > PAGE_SIZE - sizeof(*attr)) + return -E2BIG; + + switch (tlv_index->type) { + case FBNIC_TLV_STRING: + if (!len || len > tlv_index->len) + return -EINVAL; + if (((char *)value)[len - 1]) + return -EINVAL; + break; + case FBNIC_TLV_FLAG: + if (len) + return -EINVAL; + break; + case FBNIC_TLV_UNSIGNED: + case FBNIC_TLV_SIGNED: + if (tlv_index->len > sizeof(__le64)) + return -EINVAL; + fallthrough; + case FBNIC_TLV_BINARY: + if (!len || len > tlv_index->len) + return -EINVAL; + break; + case FBNIC_TLV_NESTED: + case FBNIC_TLV_ARRAY: + if (len % 4) + return -EINVAL; + break; + default: + return -EINVAL; + } + + return 0; +} + +/** + * fbnic_tlv_attr_parse_array - Parse array of attributes into results array + * @attr: Start of attributes in the message + * @len: Length of attributes in the message + * @results: Array of pointers to store the results of parsing + * @tlv_index: List of TLV attributes to be parsed from message + * @tlv_attr_id: Specific ID that is repeated in array + * @array_len: Number of results to store in results array + * + * Return: zero on success, or negative value on error. + * + * Will take a list of attributes and a parser definition and will capture + * the results in the results array to have the data extracted later. + **/ +int fbnic_tlv_attr_parse_array(struct fbnic_tlv_msg *attr, int len, + struct fbnic_tlv_msg **results, + const struct fbnic_tlv_index *tlv_index, + u16 tlv_attr_id, size_t array_len) +{ + int i = 0; + + /* Initialize results table to NULL. */ + memset(results, 0, array_len * sizeof(results[0])); + + /* Nothing to parse if header was only thing there */ + if (!len) + return 0; + + /* Work through list of attributes, parsing them as necessary */ + while (len > 0) { + u16 attr_id = attr->hdr.type; + u16 attr_len; + int err; + + if (tlv_attr_id != attr_id) + return -EINVAL; + + /* Stop parsing on full error */ + err = fbnic_tlv_attr_validate(attr, tlv_index); + if (err < 0) + return err; + + if (i >= array_len) + return -ENOSPC; + + results[i++] = attr; + + attr_len = FBNIC_TLV_MSG_SIZE(le16_to_cpu(attr->hdr.len)); + len -= attr_len; + attr += attr_len; + } + + return len == 0 ? 0 : -EINVAL; +} + +/** + * fbnic_tlv_attr_parse - Parse attributes into a list of attribute results + * @attr: Start of attributes in the message + * @len: Length of attributes in the message + * @results: Array of pointers to store the results of parsing + * @tlv_index: List of TLV attributes to be parsed from message + * + * Return: zero on success, or negative value on error. 
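[Editor's note] fbnic_tlv_attr_parse_array() above (and fbnic_tlv_attr_parse() that follows) advance through the attribute stream the same way: validate the attribute at the cursor, then step forward by its byte length rounded up to whole DWORDs until the remaining length is exhausted. A minimal standalone walk over a synthetic buffer, with a simplified 4-byte header in place of struct fbnic_tlv_hdr and a little-endian host assumed:

/* Standalone sketch of the DWORD-granular TLV walk:
 * hdr = 16-bit type, 16-bit byte length (header included). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static unsigned int tlv_count_attrs(const uint8_t *buf, size_t len)
{
	unsigned int count = 0;

	while (len >= 4) {
		uint16_t attr_len;

		memcpy(&attr_len, buf + 2, sizeof(attr_len));	/* LE host assumed */
		if (attr_len < 4)
			break;					/* malformed length */

		size_t step = ((size_t)attr_len + 3) & ~(size_t)3; /* round to DWORD */
		if (step > len)
			break;					/* truncated attribute */

		count++;
		buf += step;
		len -= step;
	}
	return count;
}

int main(void)
{
	/* two attrs: type 1 with 2 payload bytes (len 6 -> step 8), type 2 flag (len 4) */
	uint8_t buf[12] = { 0x01, 0x00, 0x06, 0x00, 0xaa, 0xbb, 0x00, 0x00,
			    0x02, 0x00, 0x04, 0x00 };

	printf("%u\n", tlv_count_attrs(buf, sizeof(buf))); /* 2 */
	return 0;
}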
+ * + * Will take a list of attributes and a parser definition and will capture + * the results in the results array to have the data extracted later. + **/ +int fbnic_tlv_attr_parse(struct fbnic_tlv_msg *attr, int len, + struct fbnic_tlv_msg **results, + const struct fbnic_tlv_index *tlv_index) +{ + /* Initialize results table to NULL. */ + memset(results, 0, sizeof(results[0]) * FBNIC_TLV_RESULTS_MAX); + + /* Nothing to parse if header was only thing there */ + if (!len) + return 0; + + /* Work through list of attributes, parsing them as necessary */ + while (len > 0) { + int err = fbnic_tlv_attr_validate(attr, tlv_index); + u16 attr_id = attr->hdr.type; + u16 attr_len; + + /* Stop parsing on full error */ + if (err < 0) + return err; + + /* Ignore results for unsupported values */ + if (!err) { + /* Do not overwrite existing entries */ + if (results[attr_id]) + return -EADDRINUSE; + + results[attr_id] = attr; + } + + attr_len = FBNIC_TLV_MSG_SIZE(le16_to_cpu(attr->hdr.len)); + len -= attr_len; + attr += attr_len; + } + + return len == 0 ? 0 : -EINVAL; +} + +/** + * fbnic_tlv_msg_parse - Parse message and process via predetermined functions + * @opaque: Value passed to parser function to enable driver access + * @msg: Message to be parsed. + * @parser: TLV message parser definition. + * + * Return: zero on success, or negative value on error. + * + * Will take a message a number of message types via the attribute parsing + * definitions and function provided for the parser array. + **/ +int fbnic_tlv_msg_parse(void *opaque, struct fbnic_tlv_msg *msg, + const struct fbnic_tlv_parser *parser) +{ + struct fbnic_tlv_msg *results[FBNIC_TLV_RESULTS_MAX]; + u16 msg_id = msg->hdr.type; + int err; + + if (!msg->hdr.is_msg) + return -EINVAL; + + if (le16_to_cpu(msg->hdr.len) > PAGE_SIZE / sizeof(u32)) + return -E2BIG; + + while (parser->id != msg_id) { + if (parser->id == FBNIC_TLV_MSG_ID_UNKNOWN) + return -ENOENT; + parser++; + } + + err = fbnic_tlv_attr_parse(&msg[1], le16_to_cpu(msg->hdr.len) - 1, + results, parser->attr); + if (err) + return err; + + return parser->func(opaque, results); +} + +/** + * fbnic_tlv_parser_error - called if message doesn't match known type + * @opaque: (unused) + * @results: (unused) + * + * Return: -EBADMSG to indicate the message is an unsupported type + **/ +int fbnic_tlv_parser_error(void *opaque, struct fbnic_tlv_msg **results) +{ + return -EBADMSG; +} + +void fbnic_tlv_attr_addr_copy(u8 *dest, struct fbnic_tlv_msg *src) +{ + u8 *mac_addr; + + mac_addr = fbnic_tlv_attr_get_value_ptr(src); + memcpy(dest, mac_addr, ETH_ALEN); +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h new file mode 100644 index 000000000000..67300ab44353 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Meta Platforms, Inc. and affiliates. 
*/ + +#ifndef _FBNIC_TLV_H_ +#define _FBNIC_TLV_H_ + +#include <asm/byteorder.h> +#include <linux/bits.h> +#include <linux/const.h> +#include <linux/types.h> + +#define FBNIC_TLV_MSG_ALIGN(len) ALIGN(len, sizeof(u32)) +#define FBNIC_TLV_MSG_SIZE(len) \ + (FBNIC_TLV_MSG_ALIGN(len) / sizeof(u32)) + +/* TLV Header Format + * 3 2 1 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Length |M|I|RSV| Type / ID | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * The TLV header format described above will be used for transferring + * messages between the host and the firmware. To ensure byte ordering + * we have defined all fields as being little endian. + * Type/ID: Identifier for message and/or attribute + * RSV: Reserved field for future use, likely as additional flags + * I: cannot_ignore flag, identifies if unrecognized attribute can be ignored + * M: is_msg, indicates that this is the start of a new message + * Length: Total length of message in dwords including header + * or + * Total length of attribute in bytes including header + */ +struct fbnic_tlv_hdr { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u16 type : 12; /* 0 .. 11 Type / ID */ + u16 rsvd : 2; /* 12 .. 13 Reserved for future use */ + u16 cannot_ignore : 1; /* 14 Attribute can be ignored */ + u16 is_msg : 1; /* 15 Header belongs to message */ +#elif defined(__BIG_ENDIAN_BITFIELD) + u16 is_msg : 1; /* 15 Header belongs to message */ + u16 cannot_ignore : 1; /* 14 Attribute can be ignored */ + u16 rsvd : 2; /* 13 .. 12 Reserved for future use */ + u16 type : 12; /* 11 .. 0 Type / ID */ +#else +#error "Missing defines from byteorder.h" +#endif + __le16 len; /* 16 .. 32 length including TLV header */ +}; + +#define FBNIC_TLV_RESULTS_MAX 32 + +struct fbnic_tlv_msg { + struct fbnic_tlv_hdr hdr; + __le32 value[]; +}; + +#define FBNIC_TLV_MSG_ID_UNKNOWN USHRT_MAX + +enum fbnic_tlv_type { + FBNIC_TLV_STRING, + FBNIC_TLV_FLAG, + FBNIC_TLV_UNSIGNED, + FBNIC_TLV_SIGNED, + FBNIC_TLV_BINARY, + FBNIC_TLV_NESTED, + FBNIC_TLV_ARRAY, + __FBNIC_TLV_MAX_TYPE +}; + +/* TLV Index + * Defines the relationship between the attribute IDs and their types. + * For each entry in the index there will be a size and type associated + * with it so that we can use this to parse the data and verify it matches + * the expected layout. 
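[Editor's note] Because the header layout above is defined down to the bit (12-bit type, two reserved bits, cannot_ignore, is_msg, then a 16-bit little-endian length), it can be handy to pack and unpack it with explicit shifts rather than bitfields when experimenting outside the kernel. A standalone sketch using the bit positions from the comment, viewing the header as one little-endian 32-bit word:

/* Standalone sketch of the TLV header word from the diagram above:
 * bits 0-11 type, 12-13 reserved, 14 cannot_ignore, 15 is_msg,
 * bits 16-31 length. */
#include <stdint.h>
#include <stdio.h>

static uint32_t tlv_hdr_pack(uint16_t type, int cannot_ignore, int is_msg,
			     uint16_t len)
{
	return (uint32_t)(type & 0x0fff) |
	       ((uint32_t)!!cannot_ignore << 14) |
	       ((uint32_t)!!is_msg << 15) |
	       ((uint32_t)len << 16);
}

int main(void)
{
	uint32_t hdr = tlv_hdr_pack(0x123, 0, 1, 8);

	printf("type=0x%03x is_msg=%u len=%u\n",
	       (unsigned int)(hdr & 0x0fff),
	       (unsigned int)((hdr >> 15) & 1),
	       (unsigned int)(hdr >> 16));	/* type=0x123 is_msg=1 len=8 */
	return 0;
}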
+ */ +struct fbnic_tlv_index { + u16 id; + u16 len; + enum fbnic_tlv_type type; +}; + +#define TLV_MAX_DATA (PAGE_SIZE - 512) +#define FBNIC_TLV_ATTR_ID_UNKNOWN USHRT_MAX +#define FBNIC_TLV_ATTR_STRING(id, len) { id, len, FBNIC_TLV_STRING } +#define FBNIC_TLV_ATTR_FLAG(id) { id, 0, FBNIC_TLV_FLAG } +#define FBNIC_TLV_ATTR_U32(id) { id, sizeof(u32), FBNIC_TLV_UNSIGNED } +#define FBNIC_TLV_ATTR_U64(id) { id, sizeof(u64), FBNIC_TLV_UNSIGNED } +#define FBNIC_TLV_ATTR_S32(id) { id, sizeof(s32), FBNIC_TLV_SIGNED } +#define FBNIC_TLV_ATTR_S64(id) { id, sizeof(s64), FBNIC_TLV_SIGNED } +#define FBNIC_TLV_ATTR_MAC_ADDR(id) { id, ETH_ALEN, FBNIC_TLV_BINARY } +#define FBNIC_TLV_ATTR_NESTED(id) { id, 0, FBNIC_TLV_NESTED } +#define FBNIC_TLV_ATTR_ARRAY(id) { id, 0, FBNIC_TLV_ARRAY } +#define FBNIC_TLV_ATTR_RAW_DATA(id) { id, TLV_MAX_DATA, FBNIC_TLV_BINARY } +#define FBNIC_TLV_ATTR_LAST { FBNIC_TLV_ATTR_ID_UNKNOWN, 0, 0 } + +struct fbnic_tlv_parser { + u16 id; + const struct fbnic_tlv_index *attr; + int (*func)(void *opaque, + struct fbnic_tlv_msg **results); +}; + +#define FBNIC_TLV_PARSER(id, attr, func) { FBNIC_TLV_MSG_ID_##id, attr, func } + +static inline void * +fbnic_tlv_attr_get_value_ptr(struct fbnic_tlv_msg *attr) +{ + return (void *)&attr->value[0]; +} + +static inline bool fbnic_tlv_attr_get_bool(struct fbnic_tlv_msg *attr) +{ + return !!attr; +} + +u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr); +s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr); +size_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *str, + size_t max_size); + +#define get_unsigned_result(id, location) \ +do { \ + struct fbnic_tlv_msg *result = results[id]; \ + if (result) \ + location = fbnic_tlv_attr_get_unsigned(result); \ +} while (0) + +#define get_signed_result(id, location) \ +do { \ + struct fbnic_tlv_msg *result = results[id]; \ + if (result) \ + location = fbnic_tlv_attr_get_signed(result); \ +} while (0) + +#define get_string_result(id, size, str, max_size) \ +do { \ + struct fbnic_tlv_msg *result = results[id]; \ + if (result) \ + size = fbnic_tlv_attr_get_string(result, str, max_size); \ +} while (0) + +#define get_bool(id) (!!(results[id])) + +struct fbnic_tlv_msg *fbnic_tlv_msg_alloc(u16 msg_id); +int fbnic_tlv_attr_put_flag(struct fbnic_tlv_msg *msg, const u16 attr_id); +int fbnic_tlv_attr_put_value(struct fbnic_tlv_msg *msg, const u16 attr_id, + const void *value, const int len); +int __fbnic_tlv_attr_put_int(struct fbnic_tlv_msg *msg, const u16 attr_id, + s64 value, const int len); +#define fbnic_tlv_attr_put_int(msg, attr_id, value) \ + __fbnic_tlv_attr_put_int(msg, attr_id, value, \ + FBNIC_TLV_MSG_ALIGN(sizeof(value))) +int fbnic_tlv_attr_put_mac_addr(struct fbnic_tlv_msg *msg, const u16 attr_id, + const u8 *mac_addr); +int fbnic_tlv_attr_put_string(struct fbnic_tlv_msg *msg, u16 attr_id, + const char *string); +struct fbnic_tlv_msg *fbnic_tlv_attr_nest_start(struct fbnic_tlv_msg *msg, + u16 attr_id); +void fbnic_tlv_attr_nest_stop(struct fbnic_tlv_msg *msg); +void fbnic_tlv_attr_addr_copy(u8 *dest, struct fbnic_tlv_msg *src); +int fbnic_tlv_attr_parse_array(struct fbnic_tlv_msg *attr, int len, + struct fbnic_tlv_msg **results, + const struct fbnic_tlv_index *tlv_index, + u16 tlv_attr_id, size_t array_len); +int fbnic_tlv_attr_parse(struct fbnic_tlv_msg *attr, int len, + struct fbnic_tlv_msg **results, + const struct fbnic_tlv_index *tlv_index); +int fbnic_tlv_msg_parse(void *opaque, struct fbnic_tlv_msg *msg, + const struct fbnic_tlv_parser *parser); +int 
fbnic_tlv_parser_error(void *opaque, struct fbnic_tlv_msg **results); + +#define FBNIC_TLV_MSG_ERROR \ + FBNIC_TLV_PARSER(UNKNOWN, NULL, fbnic_tlv_parser_error) +#endif /* _FBNIC_TLV_H_ */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c new file mode 100644 index 000000000000..0ed4c9fff5d8 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c @@ -0,0 +1,1913 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#include <linux/bitfield.h> +#include <linux/iopoll.h> +#include <linux/pci.h> +#include <net/netdev_queues.h> +#include <net/page_pool/helpers.h> + +#include "fbnic.h" +#include "fbnic_csr.h" +#include "fbnic_netdev.h" +#include "fbnic_txrx.h" + +struct fbnic_xmit_cb { + u32 bytecount; + u8 desc_count; + int hw_head; +}; + +#define FBNIC_XMIT_CB(__skb) ((struct fbnic_xmit_cb *)((__skb)->cb)) + +static u32 __iomem *fbnic_ring_csr_base(const struct fbnic_ring *ring) +{ + unsigned long csr_base = (unsigned long)ring->doorbell; + + csr_base &= ~(FBNIC_QUEUE_STRIDE * sizeof(u32) - 1); + + return (u32 __iomem *)csr_base; +} + +static u32 fbnic_ring_rd32(struct fbnic_ring *ring, unsigned int csr) +{ + u32 __iomem *csr_base = fbnic_ring_csr_base(ring); + + return readl(csr_base + csr); +} + +static void fbnic_ring_wr32(struct fbnic_ring *ring, unsigned int csr, u32 val) +{ + u32 __iomem *csr_base = fbnic_ring_csr_base(ring); + + writel(val, csr_base + csr); +} + +static unsigned int fbnic_desc_unused(struct fbnic_ring *ring) +{ + return (ring->head - ring->tail - 1) & ring->size_mask; +} + +static unsigned int fbnic_desc_used(struct fbnic_ring *ring) +{ + return (ring->tail - ring->head) & ring->size_mask; +} + +static struct netdev_queue *txring_txq(const struct net_device *dev, + const struct fbnic_ring *ring) +{ + return netdev_get_tx_queue(dev, ring->q_idx); +} + +static int fbnic_maybe_stop_tx(const struct net_device *dev, + struct fbnic_ring *ring, + const unsigned int size) +{ + struct netdev_queue *txq = txring_txq(dev, ring); + int res; + + res = netif_txq_maybe_stop(txq, fbnic_desc_unused(ring), size, + FBNIC_TX_DESC_WAKEUP); + + return !res; +} + +static bool fbnic_tx_sent_queue(struct sk_buff *skb, struct fbnic_ring *ring) +{ + struct netdev_queue *dev_queue = txring_txq(skb->dev, ring); + unsigned int bytecount = FBNIC_XMIT_CB(skb)->bytecount; + bool xmit_more = netdev_xmit_more(); + + /* TBD: Request completion more often if xmit_more becomes large */ + + return __netdev_tx_sent_queue(dev_queue, bytecount, xmit_more); +} + +static void fbnic_unmap_single_twd(struct device *dev, __le64 *twd) +{ + u64 raw_twd = le64_to_cpu(*twd); + unsigned int len; + dma_addr_t dma; + + dma = FIELD_GET(FBNIC_TWD_ADDR_MASK, raw_twd); + len = FIELD_GET(FBNIC_TWD_LEN_MASK, raw_twd); + + dma_unmap_single(dev, dma, len, DMA_TO_DEVICE); +} + +static void fbnic_unmap_page_twd(struct device *dev, __le64 *twd) +{ + u64 raw_twd = le64_to_cpu(*twd); + unsigned int len; + dma_addr_t dma; + + dma = FIELD_GET(FBNIC_TWD_ADDR_MASK, raw_twd); + len = FIELD_GET(FBNIC_TWD_LEN_MASK, raw_twd); + + dma_unmap_page(dev, dma, len, DMA_TO_DEVICE); +} + +#define FBNIC_TWD_TYPE(_type) \ + cpu_to_le64(FIELD_PREP(FBNIC_TWD_TYPE_MASK, FBNIC_TWD_TYPE_##_type)) + +static bool +fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta) +{ + unsigned int l2len, i3len; + + if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) + return false; + + l2len = skb_mac_header_len(skb); + i3len = 
skb_checksum_start(skb) - skb_network_header(skb); + + *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_CSUM_OFFSET_MASK, + skb->csum_offset / 2)); + + *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_CSO); + + *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) | + FIELD_PREP(FBNIC_TWD_L3_IHLEN_MASK, i3len / 2)); + return false; +} + +static void +fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq) +{ + skb_checksum_none_assert(skb); + + if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) + return; + + if (FIELD_GET(FBNIC_RCD_META_L4_CSUM_UNNECESSARY, rcd)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + u16 csum = FIELD_GET(FBNIC_RCD_META_L2_CSUM_MASK, rcd); + + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = (__force __wsum)csum; + } +} + +static bool +fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta) +{ + struct device *dev = skb->dev->dev.parent; + unsigned int tail = ring->tail, first; + unsigned int size, data_len; + skb_frag_t *frag; + dma_addr_t dma; + __le64 *twd; + + ring->tx_buf[tail] = skb; + + tail++; + tail &= ring->size_mask; + first = tail; + + size = skb_headlen(skb); + data_len = skb->data_len; + + if (size > FIELD_MAX(FBNIC_TWD_LEN_MASK)) + goto dma_error; + + dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + twd = &ring->desc[tail]; + + if (dma_mapping_error(dev, dma)) + goto dma_error; + + *twd = cpu_to_le64(FIELD_PREP(FBNIC_TWD_ADDR_MASK, dma) | + FIELD_PREP(FBNIC_TWD_LEN_MASK, size) | + FIELD_PREP(FBNIC_TWD_TYPE_MASK, + FBNIC_TWD_TYPE_AL)); + + tail++; + tail &= ring->size_mask; + + if (!data_len) + break; + + size = skb_frag_size(frag); + data_len -= size; + + if (size > FIELD_MAX(FBNIC_TWD_LEN_MASK)) + goto dma_error; + + dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); + } + + *twd |= FBNIC_TWD_TYPE(LAST_AL); + + FBNIC_XMIT_CB(skb)->desc_count = ((twd - meta) + 1) & ring->size_mask; + + ring->tail = tail; + + /* Verify there is room for another packet */ + fbnic_maybe_stop_tx(skb->dev, ring, FBNIC_MAX_SKB_DESC); + + if (fbnic_tx_sent_queue(skb, ring)) { + *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_COMPLETION); + + /* Force DMA writes to flush before writing to tail */ + dma_wmb(); + + writel(tail, ring->doorbell); + } + + return false; +dma_error: + if (net_ratelimit()) + netdev_err(skb->dev, "TX DMA map failed\n"); + + while (tail != first) { + tail--; + tail &= ring->size_mask; + twd = &ring->desc[tail]; + if (tail == first) + fbnic_unmap_single_twd(dev, twd); + else + fbnic_unmap_page_twd(dev, twd); + } + + return true; +} + +#define FBNIC_MIN_FRAME_LEN 60 + +static netdev_tx_t +fbnic_xmit_frame_ring(struct sk_buff *skb, struct fbnic_ring *ring) +{ + __le64 *meta = &ring->desc[ring->tail]; + u16 desc_needed; + + if (skb_put_padto(skb, FBNIC_MIN_FRAME_LEN)) + goto err_count; + + /* Need: 1 descriptor per page, + * + 1 desc for skb_head, + * + 2 desc for metadata and timestamp metadata + * + 7 desc gap to keep tail from touching head + * otherwise try next time + */ + desc_needed = skb_shinfo(skb)->nr_frags + 10; + if (fbnic_maybe_stop_tx(skb->dev, ring, desc_needed)) + return NETDEV_TX_BUSY; + + *meta = cpu_to_le64(FBNIC_TWD_FLAG_DEST_MAC); + + /* Write all members within DWORD to condense this into 2 4B writes */ + FBNIC_XMIT_CB(skb)->bytecount = skb->len; + FBNIC_XMIT_CB(skb)->desc_count = 0; + + if (fbnic_tx_offloads(ring, skb, meta)) + goto err_free; + + if (fbnic_tx_map(ring, skb, meta)) + goto err_free; + + return NETDEV_TX_OK; + 
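[Editor's note] The transmit path here leans on the usual power-of-two ring arithmetic: with size_mask = size - 1, the free count is (head - tail - 1) & mask, so one slot is always sacrificed to tell full from empty, and the per-skb budget adds metadata and gap descriptors on top of one descriptor per fragment. A standalone sketch of the index math only:

/* Standalone sketch of the power-of-two ring accounting used by
 * fbnic_desc_unused()/fbnic_desc_used(). size must be a power of two. */
#include <stdio.h>

struct ring { unsigned int head, tail, size_mask; };

static unsigned int ring_unused(const struct ring *r)
{
	return (r->head - r->tail - 1) & r->size_mask;	/* one slot kept empty */
}

static unsigned int ring_used(const struct ring *r)
{
	return (r->tail - r->head) & r->size_mask;
}

int main(void)
{
	struct ring r = { .head = 5, .tail = 12, .size_mask = 255 };

	printf("used=%u unused=%u\n", ring_used(&r), ring_unused(&r)); /* used=7 unused=248 */
	return 0;
}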
+err_free: + dev_kfree_skb_any(skb); +err_count: + return NETDEV_TX_OK; +} + +netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct net_device *dev) +{ + struct fbnic_net *fbn = netdev_priv(dev); + unsigned int q_map = skb->queue_mapping; + + return fbnic_xmit_frame_ring(skb, fbn->tx[q_map]); +} + +netdev_features_t +fbnic_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int l2len, l3len; + + if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) + return features; + + l2len = skb_mac_header_len(skb); + l3len = skb_checksum_start(skb) - skb_network_header(skb); + + /* Check header lengths are multiple of 2. + * In case of 6in6 we support longer headers (IHLEN + OHLEN) + * but keep things simple for now, 512B is plenty. + */ + if ((l2len | l3len | skb->csum_offset) % 2 || + !FIELD_FIT(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) || + !FIELD_FIT(FBNIC_TWD_L3_IHLEN_MASK, l3len / 2) || + !FIELD_FIT(FBNIC_TWD_CSUM_OFFSET_MASK, skb->csum_offset / 2)) + return features & ~NETIF_F_CSUM_MASK; + + return features; +} + +static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget, + struct fbnic_ring *ring, bool discard, + unsigned int hw_head) +{ + u64 total_bytes = 0, total_packets = 0; + unsigned int head = ring->head; + struct netdev_queue *txq; + unsigned int clean_desc; + + clean_desc = (hw_head - head) & ring->size_mask; + + while (clean_desc) { + struct sk_buff *skb = ring->tx_buf[head]; + unsigned int desc_cnt; + + desc_cnt = FBNIC_XMIT_CB(skb)->desc_count; + if (desc_cnt > clean_desc) + break; + + ring->tx_buf[head] = NULL; + + clean_desc -= desc_cnt; + + while (!(ring->desc[head] & FBNIC_TWD_TYPE(AL))) { + head++; + head &= ring->size_mask; + desc_cnt--; + } + + fbnic_unmap_single_twd(nv->dev, &ring->desc[head]); + head++; + head &= ring->size_mask; + desc_cnt--; + + while (desc_cnt--) { + fbnic_unmap_page_twd(nv->dev, &ring->desc[head]); + head++; + head &= ring->size_mask; + } + + total_bytes += FBNIC_XMIT_CB(skb)->bytecount; + total_packets += 1; + + napi_consume_skb(skb, napi_budget); + } + + if (!total_bytes) + return; + + ring->head = head; + + txq = txring_txq(nv->napi.dev, ring); + + if (unlikely(discard)) { + netdev_tx_completed_queue(txq, total_packets, total_bytes); + return; + } + + netif_txq_completed_wake(txq, total_packets, total_bytes, + fbnic_desc_unused(ring), + FBNIC_TX_DESC_WAKEUP); +} + +static void fbnic_page_pool_init(struct fbnic_ring *ring, unsigned int idx, + struct page *page) +{ + struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx]; + + page_pool_fragment_page(page, PAGECNT_BIAS_MAX); + rx_buf->pagecnt_bias = PAGECNT_BIAS_MAX; + rx_buf->page = page; +} + +static struct page *fbnic_page_pool_get(struct fbnic_ring *ring, + unsigned int idx) +{ + struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx]; + + rx_buf->pagecnt_bias--; + + return rx_buf->page; +} + +static void fbnic_page_pool_drain(struct fbnic_ring *ring, unsigned int idx, + struct fbnic_napi_vector *nv, int budget) +{ + struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx]; + struct page *page = rx_buf->page; + + if (!page_pool_unref_page(page, rx_buf->pagecnt_bias)) + page_pool_put_unrefed_page(nv->page_pool, page, -1, !!budget); + + rx_buf->page = NULL; +} + +static void fbnic_clean_twq(struct fbnic_napi_vector *nv, int napi_budget, + struct fbnic_q_triad *qt, s32 head0) +{ + if (head0 >= 0) + fbnic_clean_twq0(nv, napi_budget, &qt->sub0, false, head0); +} + +static void +fbnic_clean_tcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt, + int 
napi_budget) +{ + struct fbnic_ring *cmpl = &qt->cmpl; + __le64 *raw_tcd, done; + u32 head = cmpl->head; + s32 head0 = -1; + + done = (head & (cmpl->size_mask + 1)) ? 0 : cpu_to_le64(FBNIC_TCD_DONE); + raw_tcd = &cmpl->desc[head & cmpl->size_mask]; + + /* Walk the completion queue collecting the heads reported by NIC */ + while ((*raw_tcd & cpu_to_le64(FBNIC_TCD_DONE)) == done) { + u64 tcd; + + dma_rmb(); + + tcd = le64_to_cpu(*raw_tcd); + + switch (FIELD_GET(FBNIC_TCD_TYPE_MASK, tcd)) { + case FBNIC_TCD_TYPE_0: + if (!(tcd & FBNIC_TCD_TWQ1)) + head0 = FIELD_GET(FBNIC_TCD_TYPE0_HEAD0_MASK, + tcd); + /* Currently all err status bits are related to + * timestamps and as those have yet to be added + * they are skipped for now. + */ + break; + default: + break; + } + + raw_tcd++; + head++; + if (!(head & cmpl->size_mask)) { + done ^= cpu_to_le64(FBNIC_TCD_DONE); + raw_tcd = &cmpl->desc[0]; + } + } + + /* Record the current head/tail of the queue */ + if (cmpl->head != head) { + cmpl->head = head; + writel(head & cmpl->size_mask, cmpl->doorbell); + } + + /* Unmap and free processed buffers */ + fbnic_clean_twq(nv, napi_budget, qt, head0); +} + +static void fbnic_clean_bdq(struct fbnic_napi_vector *nv, int napi_budget, + struct fbnic_ring *ring, unsigned int hw_head) +{ + unsigned int head = ring->head; + + if (head == hw_head) + return; + + do { + fbnic_page_pool_drain(ring, head, nv, napi_budget); + + head++; + head &= ring->size_mask; + } while (head != hw_head); + + ring->head = head; +} + +static void fbnic_bd_prep(struct fbnic_ring *bdq, u16 id, struct page *page) +{ + __le64 *bdq_desc = &bdq->desc[id * FBNIC_BD_FRAG_COUNT]; + dma_addr_t dma = page_pool_get_dma_addr(page); + u64 bd, i = FBNIC_BD_FRAG_COUNT; + + bd = (FBNIC_BD_PAGE_ADDR_MASK & dma) | + FIELD_PREP(FBNIC_BD_PAGE_ID_MASK, id); + + /* In the case that a page size is larger than 4K we will map a + * single page to multiple fragments. The fragments will be + * FBNIC_BD_FRAG_COUNT in size and the lower n bits will be use + * to indicate the individual fragment IDs. + */ + do { + *bdq_desc = cpu_to_le64(bd); + bd += FIELD_PREP(FBNIC_BD_DESC_ADDR_MASK, 1) | + FIELD_PREP(FBNIC_BD_DESC_ID_MASK, 1); + } while (--i); +} + +static void fbnic_fill_bdq(struct fbnic_napi_vector *nv, struct fbnic_ring *bdq) +{ + unsigned int count = fbnic_desc_unused(bdq); + unsigned int i = bdq->tail; + + if (!count) + return; + + do { + struct page *page; + + page = page_pool_dev_alloc_pages(nv->page_pool); + if (!page) + break; + + fbnic_page_pool_init(bdq, i, page); + fbnic_bd_prep(bdq, i, page); + + i++; + i &= bdq->size_mask; + + count--; + } while (count); + + if (bdq->tail != i) { + bdq->tail = i; + + /* Force DMA writes to flush before writing to tail */ + dma_wmb(); + + writel(i, bdq->doorbell); + } +} + +static unsigned int fbnic_hdr_pg_start(unsigned int pg_off) +{ + /* The headroom of the first header may be larger than FBNIC_RX_HROOM + * due to alignment. So account for that by just making the page + * offset 0 if we are starting at the first header. + */ + if (ALIGN(FBNIC_RX_HROOM, 128) > FBNIC_RX_HROOM && + pg_off == ALIGN(FBNIC_RX_HROOM, 128)) + return 0; + + return pg_off - FBNIC_RX_HROOM; +} + +static unsigned int fbnic_hdr_pg_end(unsigned int pg_off, unsigned int len) +{ + /* Determine the end of the buffer by finding the start of the next + * and then subtracting the headroom from that frame. 
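[Editor's note] fbnic_clean_tcq() above never zeroes consumed completion descriptors; instead it keeps a head counter one bit wider than the ring and flips the expected DONE polarity each time the index wraps, so a stale descriptor from the previous lap is never mistaken for a new one. A standalone sketch of that generation-bit test; DESC_DONE is a placeholder bit, not the real FBNIC_TCD_DONE value.

/* Standalone sketch of the wrap-tracking DONE bit used when walking the
 * completion ring: the counter bit just above size_mask selects which DONE
 * polarity counts as "new" on the current lap. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE	8u			/* power of two */
#define DESC_DONE	(1ull << 63)		/* placeholder for FBNIC_TCD_DONE */

static bool desc_is_new(uint64_t desc, unsigned int head)
{
	uint64_t expected = (head & RING_SIZE) ? 0 : DESC_DONE;

	return (desc & DESC_DONE) == expected;
}

int main(void)
{
	/* Descriptor written with DONE set is new on lap 0 ... */
	printf("%d\n", desc_is_new(DESC_DONE, 3));		/* 1 */
	/* ... and stale once head has wrapped into lap 1 */
	printf("%d\n", desc_is_new(DESC_DONE, RING_SIZE + 3));	/* 0 */
	return 0;
}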
+ */ + pg_off += len + FBNIC_RX_TROOM + FBNIC_RX_HROOM; + + return ALIGN(pg_off, 128) - FBNIC_RX_HROOM; +} + +static void fbnic_pkt_prepare(struct fbnic_napi_vector *nv, u64 rcd, + struct fbnic_pkt_buff *pkt, + struct fbnic_q_triad *qt) +{ + unsigned int hdr_pg_idx = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd); + unsigned int hdr_pg_off = FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd); + struct page *page = fbnic_page_pool_get(&qt->sub0, hdr_pg_idx); + unsigned int len = FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd); + unsigned int frame_sz, hdr_pg_start, hdr_pg_end, headroom; + unsigned char *hdr_start; + + /* data_hard_start should always be NULL when this is called */ + WARN_ON_ONCE(pkt->buff.data_hard_start); + + /* Short-cut the end calculation if we know page is fully consumed */ + hdr_pg_end = FIELD_GET(FBNIC_RCD_AL_PAGE_FIN, rcd) ? + FBNIC_BD_FRAG_SIZE : fbnic_hdr_pg_end(hdr_pg_off, len); + hdr_pg_start = fbnic_hdr_pg_start(hdr_pg_off); + + headroom = hdr_pg_off - hdr_pg_start + FBNIC_RX_PAD; + frame_sz = hdr_pg_end - hdr_pg_start; + xdp_init_buff(&pkt->buff, frame_sz, NULL); + hdr_pg_start += (FBNIC_RCD_AL_BUFF_FRAG_MASK & rcd) * + FBNIC_BD_FRAG_SIZE; + + /* Sync DMA buffer */ + dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page), + hdr_pg_start, frame_sz, + DMA_BIDIRECTIONAL); + + /* Build frame around buffer */ + hdr_start = page_address(page) + hdr_pg_start; + + xdp_prepare_buff(&pkt->buff, hdr_start, headroom, + len - FBNIC_RX_PAD, true); + + pkt->data_truesize = 0; + pkt->data_len = 0; + pkt->nr_frags = 0; +} + +static void fbnic_add_rx_frag(struct fbnic_napi_vector *nv, u64 rcd, + struct fbnic_pkt_buff *pkt, + struct fbnic_q_triad *qt) +{ + unsigned int pg_idx = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd); + unsigned int pg_off = FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd); + unsigned int len = FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd); + struct page *page = fbnic_page_pool_get(&qt->sub1, pg_idx); + struct skb_shared_info *shinfo; + unsigned int truesize; + + truesize = FIELD_GET(FBNIC_RCD_AL_PAGE_FIN, rcd) ? 
+ FBNIC_BD_FRAG_SIZE - pg_off : ALIGN(len, 128); + + pg_off += (FBNIC_RCD_AL_BUFF_FRAG_MASK & rcd) * + FBNIC_BD_FRAG_SIZE; + + /* Sync DMA buffer */ + dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page), + pg_off, truesize, DMA_BIDIRECTIONAL); + + /* Add page to xdp shared info */ + shinfo = xdp_get_shared_info_from_buff(&pkt->buff); + + /* We use gso_segs to store truesize */ + pkt->data_truesize += truesize; + + __skb_fill_page_desc_noacc(shinfo, pkt->nr_frags++, page, pg_off, len); + + /* Store data_len in gso_size */ + pkt->data_len += len; +} + +static void fbnic_put_pkt_buff(struct fbnic_napi_vector *nv, + struct fbnic_pkt_buff *pkt, int budget) +{ + struct skb_shared_info *shinfo; + struct page *page; + int nr_frags; + + if (!pkt->buff.data_hard_start) + return; + + shinfo = xdp_get_shared_info_from_buff(&pkt->buff); + nr_frags = pkt->nr_frags; + + while (nr_frags--) { + page = skb_frag_page(&shinfo->frags[nr_frags]); + page_pool_put_full_page(nv->page_pool, page, !!budget); + } + + page = virt_to_page(pkt->buff.data_hard_start); + page_pool_put_full_page(nv->page_pool, page, !!budget); +} + +static struct sk_buff *fbnic_build_skb(struct fbnic_napi_vector *nv, + struct fbnic_pkt_buff *pkt) +{ + unsigned int nr_frags = pkt->nr_frags; + struct skb_shared_info *shinfo; + unsigned int truesize; + struct sk_buff *skb; + + truesize = xdp_data_hard_end(&pkt->buff) + FBNIC_RX_TROOM - + pkt->buff.data_hard_start; + + /* Build frame around buffer */ + skb = napi_build_skb(pkt->buff.data_hard_start, truesize); + if (unlikely(!skb)) + return NULL; + + /* Push data pointer to start of data, put tail to end of data */ + skb_reserve(skb, pkt->buff.data - pkt->buff.data_hard_start); + __skb_put(skb, pkt->buff.data_end - pkt->buff.data); + + /* Add tracking for metadata at the start of the frame */ + skb_metadata_set(skb, pkt->buff.data - pkt->buff.data_meta); + + /* Add Rx frags */ + if (nr_frags) { + /* Verify that shared info didn't move */ + shinfo = xdp_get_shared_info_from_buff(&pkt->buff); + WARN_ON(skb_shinfo(skb) != shinfo); + + skb->truesize += pkt->data_truesize; + skb->data_len += pkt->data_len; + shinfo->nr_frags = nr_frags; + skb->len += pkt->data_len; + } + + skb_mark_for_recycle(skb); + + /* Set MAC header specific fields */ + skb->protocol = eth_type_trans(skb, nv->napi.dev); + + return skb; +} + +static enum pkt_hash_types fbnic_skb_hash_type(u64 rcd) +{ + return (FBNIC_RCD_META_L4_TYPE_MASK & rcd) ? PKT_HASH_TYPE_L4 : + (FBNIC_RCD_META_L3_TYPE_MASK & rcd) ? PKT_HASH_TYPE_L3 : + PKT_HASH_TYPE_L2; +} + +static void fbnic_populate_skb_fields(struct fbnic_napi_vector *nv, + u64 rcd, struct sk_buff *skb, + struct fbnic_q_triad *qt) +{ + struct net_device *netdev = nv->napi.dev; + struct fbnic_ring *rcq = &qt->cmpl; + + fbnic_rx_csum(rcd, skb, rcq); + + if (netdev->features & NETIF_F_RXHASH) + skb_set_hash(skb, + FIELD_GET(FBNIC_RCD_META_RSS_HASH_MASK, rcd), + fbnic_skb_hash_type(rcd)); + + skb_record_rx_queue(skb, rcq->q_idx); +} + +static bool fbnic_rcd_metadata_err(u64 rcd) +{ + return !!(FBNIC_RCD_META_UNCORRECTABLE_ERR_MASK & rcd); +} + +static int fbnic_clean_rcq(struct fbnic_napi_vector *nv, + struct fbnic_q_triad *qt, int budget) +{ + struct fbnic_ring *rcq = &qt->cmpl; + struct fbnic_pkt_buff *pkt; + s32 head0 = -1, head1 = -1; + __le64 *raw_rcd, done; + u32 head = rcq->head; + u64 packets = 0; + + done = (head & (rcq->size_mask + 1)) ? 
cpu_to_le64(FBNIC_RCD_DONE) : 0; + raw_rcd = &rcq->desc[head & rcq->size_mask]; + pkt = rcq->pkt; + + /* Walk the completion queue collecting the heads reported by NIC */ + while (likely(packets < budget)) { + struct sk_buff *skb = ERR_PTR(-EINVAL); + u64 rcd; + + if ((*raw_rcd & cpu_to_le64(FBNIC_RCD_DONE)) == done) + break; + + dma_rmb(); + + rcd = le64_to_cpu(*raw_rcd); + + switch (FIELD_GET(FBNIC_RCD_TYPE_MASK, rcd)) { + case FBNIC_RCD_TYPE_HDR_AL: + head0 = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd); + fbnic_pkt_prepare(nv, rcd, pkt, qt); + + break; + case FBNIC_RCD_TYPE_PAY_AL: + head1 = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd); + fbnic_add_rx_frag(nv, rcd, pkt, qt); + + break; + case FBNIC_RCD_TYPE_OPT_META: + /* Only type 0 is currently supported */ + if (FIELD_GET(FBNIC_RCD_OPT_META_TYPE_MASK, rcd)) + break; + + /* We currently ignore the action table index */ + break; + case FBNIC_RCD_TYPE_META: + if (likely(!fbnic_rcd_metadata_err(rcd))) + skb = fbnic_build_skb(nv, pkt); + + /* Populate skb and invalidate XDP */ + if (!IS_ERR_OR_NULL(skb)) { + fbnic_populate_skb_fields(nv, rcd, skb, qt); + + packets++; + + napi_gro_receive(&nv->napi, skb); + } else { + fbnic_put_pkt_buff(nv, pkt, 1); + } + + pkt->buff.data_hard_start = NULL; + + break; + } + + raw_rcd++; + head++; + if (!(head & rcq->size_mask)) { + done ^= cpu_to_le64(FBNIC_RCD_DONE); + raw_rcd = &rcq->desc[0]; + } + } + + /* Unmap and free processed buffers */ + if (head0 >= 0) + fbnic_clean_bdq(nv, budget, &qt->sub0, head0); + fbnic_fill_bdq(nv, &qt->sub0); + + if (head1 >= 0) + fbnic_clean_bdq(nv, budget, &qt->sub1, head1); + fbnic_fill_bdq(nv, &qt->sub1); + + /* Record the current head/tail of the queue */ + if (rcq->head != head) { + rcq->head = head; + writel(head & rcq->size_mask, rcq->doorbell); + } + + return packets; +} + +static void fbnic_nv_irq_disable(struct fbnic_napi_vector *nv) +{ + struct fbnic_dev *fbd = nv->fbd; + u32 v_idx = nv->v_idx; + + fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(v_idx / 32), 1 << (v_idx % 32)); +} + +static void fbnic_nv_irq_rearm(struct fbnic_napi_vector *nv) +{ + struct fbnic_dev *fbd = nv->fbd; + u32 v_idx = nv->v_idx; + + fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(v_idx), + FBNIC_INTR_CQ_REARM_INTR_UNMASK); +} + +static int fbnic_poll(struct napi_struct *napi, int budget) +{ + struct fbnic_napi_vector *nv = container_of(napi, + struct fbnic_napi_vector, + napi); + int i, j, work_done = 0; + + for (i = 0; i < nv->txt_count; i++) + fbnic_clean_tcq(nv, &nv->qt[i], budget); + + for (j = 0; j < nv->rxt_count; j++, i++) + work_done += fbnic_clean_rcq(nv, &nv->qt[i], budget); + + if (work_done >= budget) + return budget; + + if (likely(napi_complete_done(napi, work_done))) + fbnic_nv_irq_rearm(nv); + + return 0; +} + +static irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data) +{ + struct fbnic_napi_vector *nv = data; + + napi_schedule_irqoff(&nv->napi); + + return IRQ_HANDLED; +} + +static void fbnic_remove_tx_ring(struct fbnic_net *fbn, + struct fbnic_ring *txr) +{ + if (!(txr->flags & FBNIC_RING_F_STATS)) + return; + + /* Remove pointer to the Tx ring */ + WARN_ON(fbn->tx[txr->q_idx] && fbn->tx[txr->q_idx] != txr); + fbn->tx[txr->q_idx] = NULL; +} + +static void fbnic_remove_rx_ring(struct fbnic_net *fbn, + struct fbnic_ring *rxr) +{ + if (!(rxr->flags & FBNIC_RING_F_STATS)) + return; + + /* Remove pointer to the Rx ring */ + WARN_ON(fbn->rx[rxr->q_idx] && fbn->rx[rxr->q_idx] != rxr); + fbn->rx[rxr->q_idx] = NULL; +} + +static void fbnic_free_napi_vector(struct fbnic_net *fbn, + 
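/* A minimal sketch of the completion-ring walk used by fbnic_clean_tcq() and
 * fbnic_clean_rcq() above: rather than reading a hardware head pointer, the
 * driver polls a DONE bit whose expected polarity flips on every lap of the
 * power-of-two ring, and (head & (size_mask + 1)) tracks which lap software
 * is on.  The descriptor layout here is hypothetical and kept in CPU byte
 * order; the real rings are little-endian and the loops are bounded by the
 * NAPI budget.
 */
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_DONE	BIT_ULL(63)	/* assumed DONE bit position */

static unsigned int example_count_done(const u64 *ring, u32 size_mask, u32 head)
{
	u64 done = (head & (size_mask + 1)) ? 0 : EXAMPLE_DONE;
	unsigned int completed = 0;

	while (completed <= size_mask &&
	       (ring[head & size_mask] & EXAMPLE_DONE) == done) {
		completed++;
		head++;
		if (!(head & size_mask))
			done ^= EXAMPLE_DONE;	/* wrapped, flip polarity */
	}

	return completed;
}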
struct fbnic_napi_vector *nv) +{ + struct fbnic_dev *fbd = nv->fbd; + u32 v_idx = nv->v_idx; + int i, j; + + for (i = 0; i < nv->txt_count; i++) { + fbnic_remove_tx_ring(fbn, &nv->qt[i].sub0); + fbnic_remove_tx_ring(fbn, &nv->qt[i].cmpl); + } + + for (j = 0; j < nv->rxt_count; j++, i++) { + fbnic_remove_rx_ring(fbn, &nv->qt[i].sub0); + fbnic_remove_rx_ring(fbn, &nv->qt[i].sub1); + fbnic_remove_rx_ring(fbn, &nv->qt[i].cmpl); + } + + fbnic_free_irq(fbd, v_idx, nv); + page_pool_destroy(nv->page_pool); + netif_napi_del(&nv->napi); + list_del(&nv->napis); + kfree(nv); +} + +void fbnic_free_napi_vectors(struct fbnic_net *fbn) +{ + struct fbnic_napi_vector *nv, *temp; + + list_for_each_entry_safe(nv, temp, &fbn->napis, napis) + fbnic_free_napi_vector(fbn, nv); +} + +static void fbnic_name_napi_vector(struct fbnic_napi_vector *nv) +{ + unsigned char *dev_name = nv->napi.dev->name; + + if (!nv->rxt_count) + snprintf(nv->name, sizeof(nv->name), "%s-Tx-%u", dev_name, + nv->v_idx - FBNIC_NON_NAPI_VECTORS); + else + snprintf(nv->name, sizeof(nv->name), "%s-TxRx-%u", dev_name, + nv->v_idx - FBNIC_NON_NAPI_VECTORS); +} + +#define FBNIC_PAGE_POOL_FLAGS \ + (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV) + +static int fbnic_alloc_nv_page_pool(struct fbnic_net *fbn, + struct fbnic_napi_vector *nv) +{ + struct page_pool_params pp_params = { + .order = 0, + .flags = FBNIC_PAGE_POOL_FLAGS, + .pool_size = (fbn->hpq_size + fbn->ppq_size) * nv->rxt_count, + .nid = NUMA_NO_NODE, + .dev = nv->dev, + .dma_dir = DMA_BIDIRECTIONAL, + .offset = 0, + .max_len = PAGE_SIZE + }; + struct page_pool *pp; + + /* Page pool cannot exceed a size of 32768. This doesn't limit the + * pages on the ring but the number we can have cached waiting on + * the next use. + * + * TBD: Can this be reduced further? Would a multiple of + * NAPI_POLL_WEIGHT possibly make more sense? The question is how + * many pages do we need to hold in reserve to get the best return + * without hogging too much system memory. 
+ */ + if (pp_params.pool_size > 32768) + pp_params.pool_size = 32768; + + pp = page_pool_create(&pp_params); + if (IS_ERR(pp)) + return PTR_ERR(pp); + + nv->page_pool = pp; + + return 0; +} + +static void fbnic_ring_init(struct fbnic_ring *ring, u32 __iomem *doorbell, + int q_idx, u8 flags) +{ + ring->doorbell = doorbell; + ring->q_idx = q_idx; + ring->flags = flags; +} + +static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn, + unsigned int v_count, unsigned int v_idx, + unsigned int txq_count, unsigned int txq_idx, + unsigned int rxq_count, unsigned int rxq_idx) +{ + int txt_count = txq_count, rxt_count = rxq_count; + u32 __iomem *uc_addr = fbd->uc_addr0; + struct fbnic_napi_vector *nv; + struct fbnic_q_triad *qt; + int qt_count, err; + u32 __iomem *db; + + qt_count = txt_count + rxq_count; + if (!qt_count) + return -EINVAL; + + /* If MMIO has already failed there are no rings to initialize */ + if (!uc_addr) + return -EIO; + + /* Allocate NAPI vector and queue triads */ + nv = kzalloc(struct_size(nv, qt, qt_count), GFP_KERNEL); + if (!nv) + return -ENOMEM; + + /* Record queue triad counts */ + nv->txt_count = txt_count; + nv->rxt_count = rxt_count; + + /* Provide pointer back to fbnic and MSI-X vectors */ + nv->fbd = fbd; + nv->v_idx = v_idx; + + /* Record IRQ to NAPI struct */ + netif_napi_set_irq(&nv->napi, + pci_irq_vector(to_pci_dev(fbd->dev), nv->v_idx)); + + /* Tie napi to netdev */ + list_add(&nv->napis, &fbn->napis); + netif_napi_add(fbn->netdev, &nv->napi, fbnic_poll); + + /* Tie nv back to PCIe dev */ + nv->dev = fbd->dev; + + /* Allocate page pool */ + if (rxq_count) { + err = fbnic_alloc_nv_page_pool(fbn, nv); + if (err) + goto napi_del; + } + + /* Initialize vector name */ + fbnic_name_napi_vector(nv); + + /* Request the IRQ for napi vector */ + err = fbnic_request_irq(fbd, v_idx, &fbnic_msix_clean_rings, + IRQF_SHARED, nv->name, nv); + if (err) + goto pp_destroy; + + /* Initialize queue triads */ + qt = nv->qt; + + while (txt_count) { + /* Configure Tx queue */ + db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TWQ0_TAIL]; + + /* Assign Tx queue to netdev if applicable */ + if (txq_count > 0) { + u8 flags = FBNIC_RING_F_CTX | FBNIC_RING_F_STATS; + + fbnic_ring_init(&qt->sub0, db, txq_idx, flags); + fbn->tx[txq_idx] = &qt->sub0; + txq_count--; + } else { + fbnic_ring_init(&qt->sub0, db, 0, + FBNIC_RING_F_DISABLED); + } + + /* Configure Tx completion queue */ + db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TCQ_HEAD]; + fbnic_ring_init(&qt->cmpl, db, 0, 0); + + /* Update Tx queue index */ + txt_count--; + txq_idx += v_count; + + /* Move to next queue triad */ + qt++; + } + + while (rxt_count) { + /* Configure header queue */ + db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_HPQ_TAIL]; + fbnic_ring_init(&qt->sub0, db, 0, FBNIC_RING_F_CTX); + + /* Configure payload queue */ + db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_PPQ_TAIL]; + fbnic_ring_init(&qt->sub1, db, 0, FBNIC_RING_F_CTX); + + /* Configure Rx completion queue */ + db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_RCQ_HEAD]; + fbnic_ring_init(&qt->cmpl, db, rxq_idx, FBNIC_RING_F_STATS); + fbn->rx[rxq_idx] = &qt->cmpl; + + /* Update Rx queue index */ + rxt_count--; + rxq_idx += v_count; + + /* Move to next queue triad */ + qt++; + } + + return 0; + +pp_destroy: + page_pool_destroy(nv->page_pool); +napi_del: + netif_napi_del(&nv->napi); + list_del(&nv->napis); + kfree(nv); + return err; +} + +int fbnic_alloc_napi_vectors(struct fbnic_net *fbn) +{ + unsigned int txq_idx = 0, 
rxq_idx = 0, v_idx = FBNIC_NON_NAPI_VECTORS; + unsigned int num_tx = fbn->num_tx_queues; + unsigned int num_rx = fbn->num_rx_queues; + unsigned int num_napi = fbn->num_napi; + struct fbnic_dev *fbd = fbn->fbd; + int err; + + /* Allocate 1 Tx queue per napi vector */ + if (num_napi < FBNIC_MAX_TXQS && num_napi == num_tx + num_rx) { + while (num_tx) { + err = fbnic_alloc_napi_vector(fbd, fbn, + num_napi, v_idx, + 1, txq_idx, 0, 0); + if (err) + goto free_vectors; + + /* Update counts and index */ + num_tx--; + txq_idx++; + + v_idx++; + } + } + + /* Allocate Tx/Rx queue pairs per vector, or allocate remaining Rx */ + while (num_rx | num_tx) { + int tqpv = DIV_ROUND_UP(num_tx, num_napi - txq_idx); + int rqpv = DIV_ROUND_UP(num_rx, num_napi - rxq_idx); + + err = fbnic_alloc_napi_vector(fbd, fbn, num_napi, v_idx, + tqpv, txq_idx, rqpv, rxq_idx); + if (err) + goto free_vectors; + + /* Update counts and index */ + num_tx -= tqpv; + txq_idx++; + + num_rx -= rqpv; + rxq_idx++; + + v_idx++; + } + + return 0; + +free_vectors: + fbnic_free_napi_vectors(fbn); + + return -ENOMEM; +} + +static void fbnic_free_ring_resources(struct device *dev, + struct fbnic_ring *ring) +{ + kvfree(ring->buffer); + ring->buffer = NULL; + + /* If size is not set there are no descriptors present */ + if (!ring->size) + return; + + dma_free_coherent(dev, ring->size, ring->desc, ring->dma); + ring->size_mask = 0; + ring->size = 0; +} + +static int fbnic_alloc_tx_ring_desc(struct fbnic_net *fbn, + struct fbnic_ring *txr) +{ + struct device *dev = fbn->netdev->dev.parent; + size_t size; + + /* Round size up to nearest 4K */ + size = ALIGN(array_size(sizeof(*txr->desc), fbn->txq_size), 4096); + + txr->desc = dma_alloc_coherent(dev, size, &txr->dma, + GFP_KERNEL | __GFP_NOWARN); + if (!txr->desc) + return -ENOMEM; + + /* txq_size should be a power of 2, so mask is just that -1 */ + txr->size_mask = fbn->txq_size - 1; + txr->size = size; + + return 0; +} + +static int fbnic_alloc_tx_ring_buffer(struct fbnic_ring *txr) +{ + size_t size = array_size(sizeof(*txr->tx_buf), txr->size_mask + 1); + + txr->tx_buf = kvzalloc(size, GFP_KERNEL | __GFP_NOWARN); + + return txr->tx_buf ? 
0 : -ENOMEM; +} + +static int fbnic_alloc_tx_ring_resources(struct fbnic_net *fbn, + struct fbnic_ring *txr) +{ + struct device *dev = fbn->netdev->dev.parent; + int err; + + if (txr->flags & FBNIC_RING_F_DISABLED) + return 0; + + err = fbnic_alloc_tx_ring_desc(fbn, txr); + if (err) + return err; + + if (!(txr->flags & FBNIC_RING_F_CTX)) + return 0; + + err = fbnic_alloc_tx_ring_buffer(txr); + if (err) + goto free_desc; + + return 0; + +free_desc: + fbnic_free_ring_resources(dev, txr); + return err; +} + +static int fbnic_alloc_rx_ring_desc(struct fbnic_net *fbn, + struct fbnic_ring *rxr) +{ + struct device *dev = fbn->netdev->dev.parent; + size_t desc_size = sizeof(*rxr->desc); + u32 rxq_size; + size_t size; + + switch (rxr->doorbell - fbnic_ring_csr_base(rxr)) { + case FBNIC_QUEUE_BDQ_HPQ_TAIL: + rxq_size = fbn->hpq_size / FBNIC_BD_FRAG_COUNT; + desc_size *= FBNIC_BD_FRAG_COUNT; + break; + case FBNIC_QUEUE_BDQ_PPQ_TAIL: + rxq_size = fbn->ppq_size / FBNIC_BD_FRAG_COUNT; + desc_size *= FBNIC_BD_FRAG_COUNT; + break; + case FBNIC_QUEUE_RCQ_HEAD: + rxq_size = fbn->rcq_size; + break; + default: + return -EINVAL; + } + + /* Round size up to nearest 4K */ + size = ALIGN(array_size(desc_size, rxq_size), 4096); + + rxr->desc = dma_alloc_coherent(dev, size, &rxr->dma, + GFP_KERNEL | __GFP_NOWARN); + if (!rxr->desc) + return -ENOMEM; + + /* rxq_size should be a power of 2, so mask is just that -1 */ + rxr->size_mask = rxq_size - 1; + rxr->size = size; + + return 0; +} + +static int fbnic_alloc_rx_ring_buffer(struct fbnic_ring *rxr) +{ + size_t size = array_size(sizeof(*rxr->rx_buf), rxr->size_mask + 1); + + if (rxr->flags & FBNIC_RING_F_CTX) + size = sizeof(*rxr->rx_buf) * (rxr->size_mask + 1); + else + size = sizeof(*rxr->pkt); + + rxr->rx_buf = kvzalloc(size, GFP_KERNEL | __GFP_NOWARN); + + return rxr->rx_buf ? 
0 : -ENOMEM; +} + +static int fbnic_alloc_rx_ring_resources(struct fbnic_net *fbn, + struct fbnic_ring *rxr) +{ + struct device *dev = fbn->netdev->dev.parent; + int err; + + err = fbnic_alloc_rx_ring_desc(fbn, rxr); + if (err) + return err; + + err = fbnic_alloc_rx_ring_buffer(rxr); + if (err) + goto free_desc; + + return 0; + +free_desc: + fbnic_free_ring_resources(dev, rxr); + return err; +} + +static void fbnic_free_qt_resources(struct fbnic_net *fbn, + struct fbnic_q_triad *qt) +{ + struct device *dev = fbn->netdev->dev.parent; + + fbnic_free_ring_resources(dev, &qt->cmpl); + fbnic_free_ring_resources(dev, &qt->sub1); + fbnic_free_ring_resources(dev, &qt->sub0); +} + +static int fbnic_alloc_tx_qt_resources(struct fbnic_net *fbn, + struct fbnic_q_triad *qt) +{ + struct device *dev = fbn->netdev->dev.parent; + int err; + + err = fbnic_alloc_tx_ring_resources(fbn, &qt->sub0); + if (err) + return err; + + err = fbnic_alloc_tx_ring_resources(fbn, &qt->cmpl); + if (err) + goto free_sub1; + + return 0; + +free_sub1: + fbnic_free_ring_resources(dev, &qt->sub0); + return err; +} + +static int fbnic_alloc_rx_qt_resources(struct fbnic_net *fbn, + struct fbnic_q_triad *qt) +{ + struct device *dev = fbn->netdev->dev.parent; + int err; + + err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub0); + if (err) + return err; + + err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub1); + if (err) + goto free_sub0; + + err = fbnic_alloc_rx_ring_resources(fbn, &qt->cmpl); + if (err) + goto free_sub1; + + return 0; + +free_sub1: + fbnic_free_ring_resources(dev, &qt->sub1); +free_sub0: + fbnic_free_ring_resources(dev, &qt->sub0); + return err; +} + +static void fbnic_free_nv_resources(struct fbnic_net *fbn, + struct fbnic_napi_vector *nv) +{ + int i, j; + + /* Free Tx Resources */ + for (i = 0; i < nv->txt_count; i++) + fbnic_free_qt_resources(fbn, &nv->qt[i]); + + for (j = 0; j < nv->rxt_count; j++, i++) + fbnic_free_qt_resources(fbn, &nv->qt[i]); +} + +static int fbnic_alloc_nv_resources(struct fbnic_net *fbn, + struct fbnic_napi_vector *nv) +{ + int i, j, err; + + /* Allocate Tx Resources */ + for (i = 0; i < nv->txt_count; i++) { + err = fbnic_alloc_tx_qt_resources(fbn, &nv->qt[i]); + if (err) + goto free_resources; + } + + /* Allocate Rx Resources */ + for (j = 0; j < nv->rxt_count; j++, i++) { + err = fbnic_alloc_rx_qt_resources(fbn, &nv->qt[i]); + if (err) + goto free_resources; + } + + return 0; + +free_resources: + while (i--) + fbnic_free_qt_resources(fbn, &nv->qt[i]); + return err; +} + +void fbnic_free_resources(struct fbnic_net *fbn) +{ + struct fbnic_napi_vector *nv; + + list_for_each_entry(nv, &fbn->napis, napis) + fbnic_free_nv_resources(fbn, nv); +} + +int fbnic_alloc_resources(struct fbnic_net *fbn) +{ + struct fbnic_napi_vector *nv; + int err = -ENODEV; + + list_for_each_entry(nv, &fbn->napis, napis) { + err = fbnic_alloc_nv_resources(fbn, nv); + if (err) + goto free_resources; + } + + return 0; + +free_resources: + list_for_each_entry_continue_reverse(nv, &fbn->napis, napis) + fbnic_free_nv_resources(fbn, nv); + + return err; +} + +static void fbnic_disable_twq0(struct fbnic_ring *txr) +{ + u32 twq_ctl = fbnic_ring_rd32(txr, FBNIC_QUEUE_TWQ0_CTL); + + twq_ctl &= ~FBNIC_QUEUE_TWQ_CTL_ENABLE; + + fbnic_ring_wr32(txr, FBNIC_QUEUE_TWQ0_CTL, twq_ctl); +} + +static void fbnic_disable_tcq(struct fbnic_ring *txr) +{ + fbnic_ring_wr32(txr, FBNIC_QUEUE_TCQ_CTL, 0); + fbnic_ring_wr32(txr, FBNIC_QUEUE_TIM_MASK, FBNIC_QUEUE_TIM_MASK_MASK); +} + +static void fbnic_disable_bdq(struct fbnic_ring *hpq, struct 
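/* A minimal sketch of the unwind pattern used by fbnic_alloc_resources()
 * above: on failure, list_for_each_entry_continue_reverse() walks backwards
 * from the entry that failed (skipping it), so only the entries that were
 * successfully set up get torn down.  The item type and helpers below are
 * hypothetical.
 */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>

struct example_item {
	struct list_head node;
	void *buf;
};

static int example_setup(struct example_item *it)
{
	it->buf = kzalloc(512, GFP_KERNEL);
	return it->buf ? 0 : -ENOMEM;
}

static void example_teardown(struct example_item *it)
{
	kfree(it->buf);
	it->buf = NULL;
}

static int example_setup_all(struct list_head *items)
{
	struct example_item *it;
	int err;

	list_for_each_entry(it, items, node) {
		err = example_setup(it);
		if (err)
			goto unwind;
	}

	return 0;

unwind:
	list_for_each_entry_continue_reverse(it, items, node)
		example_teardown(it);
	return err;
}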
fbnic_ring *ppq) +{ + u32 bdq_ctl = fbnic_ring_rd32(hpq, FBNIC_QUEUE_BDQ_CTL); + + bdq_ctl &= ~FBNIC_QUEUE_BDQ_CTL_ENABLE; + + fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_CTL, bdq_ctl); +} + +static void fbnic_disable_rcq(struct fbnic_ring *rxr) +{ + fbnic_ring_wr32(rxr, FBNIC_QUEUE_RCQ_CTL, 0); + fbnic_ring_wr32(rxr, FBNIC_QUEUE_RIM_MASK, FBNIC_QUEUE_RIM_MASK_MASK); +} + +void fbnic_napi_disable(struct fbnic_net *fbn) +{ + struct fbnic_napi_vector *nv; + + list_for_each_entry(nv, &fbn->napis, napis) { + napi_disable(&nv->napi); + + fbnic_nv_irq_disable(nv); + } +} + +void fbnic_disable(struct fbnic_net *fbn) +{ + struct fbnic_dev *fbd = fbn->fbd; + struct fbnic_napi_vector *nv; + int i, j; + + list_for_each_entry(nv, &fbn->napis, napis) { + /* Disable Tx queue triads */ + for (i = 0; i < nv->txt_count; i++) { + struct fbnic_q_triad *qt = &nv->qt[i]; + + fbnic_disable_twq0(&qt->sub0); + fbnic_disable_tcq(&qt->cmpl); + } + + /* Disable Rx queue triads */ + for (j = 0; j < nv->rxt_count; j++, i++) { + struct fbnic_q_triad *qt = &nv->qt[i]; + + fbnic_disable_bdq(&qt->sub0, &qt->sub1); + fbnic_disable_rcq(&qt->cmpl); + } + } + + fbnic_wrfl(fbd); +} + +static void fbnic_tx_flush(struct fbnic_dev *fbd) +{ + netdev_warn(fbd->netdev, "triggering Tx flush\n"); + + fbnic_rmw32(fbd, FBNIC_TMI_DROP_CTRL, FBNIC_TMI_DROP_CTRL_EN, + FBNIC_TMI_DROP_CTRL_EN); +} + +static void fbnic_tx_flush_off(struct fbnic_dev *fbd) +{ + fbnic_rmw32(fbd, FBNIC_TMI_DROP_CTRL, FBNIC_TMI_DROP_CTRL_EN, 0); +} + +struct fbnic_idle_regs { + u32 reg_base; + u8 reg_cnt; +}; + +static bool fbnic_all_idle(struct fbnic_dev *fbd, + const struct fbnic_idle_regs *regs, + unsigned int nregs) +{ + unsigned int i, j; + + for (i = 0; i < nregs; i++) { + for (j = 0; j < regs[i].reg_cnt; j++) { + if (fbnic_rd32(fbd, regs[i].reg_base + j) != ~0U) + return false; + } + } + return true; +} + +static void fbnic_idle_dump(struct fbnic_dev *fbd, + const struct fbnic_idle_regs *regs, + unsigned int nregs, const char *dir, int err) +{ + unsigned int i, j; + + netdev_err(fbd->netdev, "error waiting for %s idle %d\n", dir, err); + for (i = 0; i < nregs; i++) + for (j = 0; j < regs[i].reg_cnt; j++) + netdev_err(fbd->netdev, "0x%04x: %08x\n", + regs[i].reg_base + j, + fbnic_rd32(fbd, regs[i].reg_base + j)); +} + +int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail) +{ + static const struct fbnic_idle_regs tx[] = { + { FBNIC_QM_TWQ_IDLE(0), FBNIC_QM_TWQ_IDLE_CNT, }, + { FBNIC_QM_TQS_IDLE(0), FBNIC_QM_TQS_IDLE_CNT, }, + { FBNIC_QM_TDE_IDLE(0), FBNIC_QM_TDE_IDLE_CNT, }, + { FBNIC_QM_TCQ_IDLE(0), FBNIC_QM_TCQ_IDLE_CNT, }, + }, rx[] = { + { FBNIC_QM_HPQ_IDLE(0), FBNIC_QM_HPQ_IDLE_CNT, }, + { FBNIC_QM_PPQ_IDLE(0), FBNIC_QM_PPQ_IDLE_CNT, }, + { FBNIC_QM_RCQ_IDLE(0), FBNIC_QM_RCQ_IDLE_CNT, }, + }; + bool idle; + int err; + + err = read_poll_timeout_atomic(fbnic_all_idle, idle, idle, 2, 500000, + false, fbd, tx, ARRAY_SIZE(tx)); + if (err == -ETIMEDOUT) { + fbnic_tx_flush(fbd); + err = read_poll_timeout_atomic(fbnic_all_idle, idle, idle, + 2, 500000, false, + fbd, tx, ARRAY_SIZE(tx)); + fbnic_tx_flush_off(fbd); + } + if (err) { + fbnic_idle_dump(fbd, tx, ARRAY_SIZE(tx), "Tx", err); + if (may_fail) + return err; + } + + err = read_poll_timeout_atomic(fbnic_all_idle, idle, idle, 2, 500000, + false, fbd, rx, ARRAY_SIZE(rx)); + if (err) + fbnic_idle_dump(fbd, rx, ARRAY_SIZE(rx), "Rx", err); + return err; +} + +void fbnic_flush(struct fbnic_net *fbn) +{ + struct fbnic_napi_vector *nv; + + list_for_each_entry(nv, &fbn->napis, napis) { + int i, j; + + /* 
Flush any processed Tx Queue Triads and drop the rest */ + for (i = 0; i < nv->txt_count; i++) { + struct fbnic_q_triad *qt = &nv->qt[i]; + struct netdev_queue *tx_queue; + + /* Clean the work queues of unprocessed work */ + fbnic_clean_twq0(nv, 0, &qt->sub0, true, qt->sub0.tail); + + /* Reset completion queue descriptor ring */ + memset(qt->cmpl.desc, 0, qt->cmpl.size); + + /* Nothing else to do if Tx queue is disabled */ + if (qt->sub0.flags & FBNIC_RING_F_DISABLED) + continue; + + /* Reset BQL associated with Tx queue */ + tx_queue = netdev_get_tx_queue(nv->napi.dev, + qt->sub0.q_idx); + netdev_tx_reset_queue(tx_queue); + + /* Disassociate Tx queue from NAPI */ + netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx, + NETDEV_QUEUE_TYPE_TX, NULL); + } + + /* Flush any processed Rx Queue Triads and drop the rest */ + for (j = 0; j < nv->rxt_count; j++, i++) { + struct fbnic_q_triad *qt = &nv->qt[i]; + + /* Clean the work queues of unprocessed work */ + fbnic_clean_bdq(nv, 0, &qt->sub0, qt->sub0.tail); + fbnic_clean_bdq(nv, 0, &qt->sub1, qt->sub1.tail); + + /* Reset completion queue descriptor ring */ + memset(qt->cmpl.desc, 0, qt->cmpl.size); + + fbnic_put_pkt_buff(nv, qt->cmpl.pkt, 0); + qt->cmpl.pkt->buff.data_hard_start = NULL; + + /* Disassociate Rx queue from NAPI */ + netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx, + NETDEV_QUEUE_TYPE_RX, NULL); + } + } +} + +void fbnic_fill(struct fbnic_net *fbn) +{ + struct fbnic_napi_vector *nv; + + list_for_each_entry(nv, &fbn->napis, napis) { + int i, j; + + /* Configure NAPI mapping for Tx */ + for (i = 0; i < nv->txt_count; i++) { + struct fbnic_q_triad *qt = &nv->qt[i]; + + /* Nothing to do if Tx queue is disabled */ + if (qt->sub0.flags & FBNIC_RING_F_DISABLED) + continue; + + /* Associate Tx queue with NAPI */ + netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx, + NETDEV_QUEUE_TYPE_TX, &nv->napi); + } + + /* Configure NAPI mapping and populate pages + * in the BDQ rings to use for Rx + */ + for (j = 0; j < nv->rxt_count; j++, i++) { + struct fbnic_q_triad *qt = &nv->qt[i]; + + /* Associate Rx queue with NAPI */ + netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx, + NETDEV_QUEUE_TYPE_RX, &nv->napi); + + /* Populate the header and payload BDQs */ + fbnic_fill_bdq(nv, &qt->sub0); + fbnic_fill_bdq(nv, &qt->sub1); + } + } +} + +static void fbnic_enable_twq0(struct fbnic_ring *twq) +{ + u32 log_size = fls(twq->size_mask); + + if (!twq->size_mask) + return; + + /* Reset head/tail */ + fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_CTL, FBNIC_QUEUE_TWQ_CTL_RESET); + twq->tail = 0; + twq->head = 0; + + /* Store descriptor ring address and size */ + fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_BAL, lower_32_bits(twq->dma)); + fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_BAH, upper_32_bits(twq->dma)); + + /* Write lower 4 bits of log size as 64K ring size is 0 */ + fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_SIZE, log_size & 0xf); + + fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_CTL, FBNIC_QUEUE_TWQ_CTL_ENABLE); +} + +static void fbnic_enable_tcq(struct fbnic_napi_vector *nv, + struct fbnic_ring *tcq) +{ + u32 log_size = fls(tcq->size_mask); + + if (!tcq->size_mask) + return; + + /* Reset head/tail */ + fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_CTL, FBNIC_QUEUE_TCQ_CTL_RESET); + tcq->tail = 0; + tcq->head = 0; + + /* Store descriptor ring address and size */ + fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_BAL, lower_32_bits(tcq->dma)); + fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_BAH, upper_32_bits(tcq->dma)); + + /* Write lower 4 bits of log size as 64K ring size is 0 */ + fbnic_ring_wr32(tcq, 
FBNIC_QUEUE_TCQ_SIZE, log_size & 0xf); + + /* Store interrupt information for the completion queue */ + fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_CTL, nv->v_idx); + fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_THRESHOLD, tcq->size_mask / 2); + fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_MASK, 0); + + /* Enable queue */ + fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_CTL, FBNIC_QUEUE_TCQ_CTL_ENABLE); +} + +static void fbnic_enable_bdq(struct fbnic_ring *hpq, struct fbnic_ring *ppq) +{ + u32 bdq_ctl = FBNIC_QUEUE_BDQ_CTL_ENABLE; + u32 log_size; + + /* Reset head/tail */ + fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_CTL, FBNIC_QUEUE_BDQ_CTL_RESET); + ppq->tail = 0; + ppq->head = 0; + hpq->tail = 0; + hpq->head = 0; + + log_size = fls(hpq->size_mask); + + /* Store descriptor ring address and size */ + fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_BAL, lower_32_bits(hpq->dma)); + fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_BAH, upper_32_bits(hpq->dma)); + + /* Write lower 4 bits of log size as 64K ring size is 0 */ + fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_SIZE, log_size & 0xf); + + if (!ppq->size_mask) + goto write_ctl; + + log_size = fls(ppq->size_mask); + + /* Add enabling of PPQ to BDQ control */ + bdq_ctl |= FBNIC_QUEUE_BDQ_CTL_PPQ_ENABLE; + + /* Store descriptor ring address and size */ + fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_BAL, lower_32_bits(ppq->dma)); + fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_BAH, upper_32_bits(ppq->dma)); + fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_SIZE, log_size & 0xf); + +write_ctl: + fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_CTL, bdq_ctl); +} + +static void fbnic_config_drop_mode_rcq(struct fbnic_napi_vector *nv, + struct fbnic_ring *rcq) +{ + u32 drop_mode, rcq_ctl; + + drop_mode = FBNIC_QUEUE_RDE_CTL0_DROP_IMMEDIATE; + + /* Specify packet layout */ + rcq_ctl = FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_DROP_MODE_MASK, drop_mode) | + FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_MIN_HROOM_MASK, FBNIC_RX_HROOM) | + FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_MIN_TROOM_MASK, FBNIC_RX_TROOM); + + fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL0, rcq_ctl); +} + +static void fbnic_enable_rcq(struct fbnic_napi_vector *nv, + struct fbnic_ring *rcq) +{ + u32 log_size = fls(rcq->size_mask); + u32 rcq_ctl; + + fbnic_config_drop_mode_rcq(nv, rcq); + + rcq_ctl = FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PADLEN_MASK, FBNIC_RX_PAD) | + FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_MAX_HDR_MASK, + FBNIC_RX_MAX_HDR) | + FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PAYLD_OFF_MASK, + FBNIC_RX_PAYLD_OFFSET) | + FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PAYLD_PG_CL_MASK, + FBNIC_RX_PAYLD_PG_CL); + fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL1, rcq_ctl); + + /* Reset head/tail */ + fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_CTL, FBNIC_QUEUE_RCQ_CTL_RESET); + rcq->head = 0; + rcq->tail = 0; + + /* Store descriptor ring address and size */ + fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_BAL, lower_32_bits(rcq->dma)); + fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_BAH, upper_32_bits(rcq->dma)); + + /* Write lower 4 bits of log size as 64K ring size is 0 */ + fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_SIZE, log_size & 0xf); + + /* Store interrupt information for the completion queue */ + fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_CTL, nv->v_idx); + fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_THRESHOLD, rcq->size_mask / 2); + fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_MASK, 0); + + /* Enable queue */ + fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_CTL, FBNIC_QUEUE_RCQ_CTL_ENABLE); +} + +void fbnic_enable(struct fbnic_net *fbn) +{ + struct fbnic_dev *fbd = fbn->fbd; + struct fbnic_napi_vector *nv; + int i, j; + + list_for_each_entry(nv, &fbn->napis, napis) { + /* Setup Tx Queue 
Triads */ + for (i = 0; i < nv->txt_count; i++) { + struct fbnic_q_triad *qt = &nv->qt[i]; + + fbnic_enable_twq0(&qt->sub0); + fbnic_enable_tcq(nv, &qt->cmpl); + } + + /* Setup Rx Queue Triads */ + for (j = 0; j < nv->rxt_count; j++, i++) { + struct fbnic_q_triad *qt = &nv->qt[i]; + + fbnic_enable_bdq(&qt->sub0, &qt->sub1); + fbnic_config_drop_mode_rcq(nv, &qt->cmpl); + fbnic_enable_rcq(nv, &qt->cmpl); + } + } + + fbnic_wrfl(fbd); +} + +static void fbnic_nv_irq_enable(struct fbnic_napi_vector *nv) +{ + struct fbnic_dev *fbd = nv->fbd; + u32 val; + + val = FBNIC_INTR_CQ_REARM_INTR_UNMASK; + + fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(nv->v_idx), val); +} + +void fbnic_napi_enable(struct fbnic_net *fbn) +{ + u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {}; + struct fbnic_dev *fbd = fbn->fbd; + struct fbnic_napi_vector *nv; + int i; + + list_for_each_entry(nv, &fbn->napis, napis) { + napi_enable(&nv->napi); + + fbnic_nv_irq_enable(nv); + + /* Record bit used for NAPI IRQs so we can + * set the mask appropriately + */ + irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32); + } + + /* Force the first interrupt on the device to guarantee + * that any packets that may have been enqueued during the + * bringup are processed. + */ + for (i = 0; i < ARRAY_SIZE(irqs); i++) { + if (!irqs[i]) + continue; + fbnic_wr32(fbd, FBNIC_INTR_SET(i), irqs[i]); + } + + fbnic_wrfl(fbd); +} + +void fbnic_napi_depletion_check(struct net_device *netdev) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {}; + struct fbnic_dev *fbd = fbn->fbd; + struct fbnic_napi_vector *nv; + int i, j; + + list_for_each_entry(nv, &fbn->napis, napis) { + /* Find RQs which are completely out of pages */ + for (i = nv->txt_count, j = 0; j < nv->rxt_count; j++, i++) { + /* Assume 4 pages is always enough to fit a packet + * and therefore generate a completion and an IRQ. + */ + if (fbnic_desc_used(&nv->qt[i].sub0) < 4 || + fbnic_desc_used(&nv->qt[i].sub1) < 4) + irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32); + } + } + + for (i = 0; i < ARRAY_SIZE(irqs); i++) { + if (!irqs[i]) + continue; + fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(i), irqs[i]); + fbnic_wr32(fbd, FBNIC_INTR_SET(i), irqs[i]); + } + + fbnic_wrfl(fbd); +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h new file mode 100644 index 000000000000..4a206c0e7192 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Meta Platforms, Inc. and affiliates. 
*/ + +#ifndef _FBNIC_TXRX_H_ +#define _FBNIC_TXRX_H_ + +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/types.h> +#include <net/xdp.h> + +struct fbnic_net; + +/* Guarantee we have space needed for storing the buffer + * To store the buffer we need: + * 1 descriptor per page + * + 1 descriptor for skb head + * + 2 descriptors for metadata and optional metadata + * + 7 descriptors to keep tail out of the same cacheline as head + * If we cannot guarantee that then we should return TX_BUSY + */ +#define FBNIC_MAX_SKB_DESC (MAX_SKB_FRAGS + 10) +#define FBNIC_TX_DESC_WAKEUP (FBNIC_MAX_SKB_DESC * 2) +#define FBNIC_TX_DESC_MIN roundup_pow_of_two(FBNIC_TX_DESC_WAKEUP) + +#define FBNIC_MAX_TXQS 128u +#define FBNIC_MAX_RXQS 128u + +#define FBNIC_TXQ_SIZE_DEFAULT 1024 +#define FBNIC_HPQ_SIZE_DEFAULT 256 +#define FBNIC_PPQ_SIZE_DEFAULT 256 +#define FBNIC_RCQ_SIZE_DEFAULT 1024 + +#define FBNIC_RX_TROOM \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +#define FBNIC_RX_HROOM \ + (ALIGN(FBNIC_RX_TROOM + NET_SKB_PAD, 128) - FBNIC_RX_TROOM) +#define FBNIC_RX_PAD 0 +#define FBNIC_RX_MAX_HDR (1536 - FBNIC_RX_PAD) +#define FBNIC_RX_PAYLD_OFFSET 0 +#define FBNIC_RX_PAYLD_PG_CL 0 + +#define FBNIC_RING_F_DISABLED BIT(0) +#define FBNIC_RING_F_CTX BIT(1) +#define FBNIC_RING_F_STATS BIT(2) /* Ring's stats may be used */ + +struct fbnic_pkt_buff { + struct xdp_buff buff; + u32 data_truesize; + u16 data_len; + u16 nr_frags; +}; + +/* Pagecnt bias is long max to reserve the last bit to catch overflow + * cases where if we overcharge the bias it will flip over to be negative. + */ +#define PAGECNT_BIAS_MAX LONG_MAX +struct fbnic_rx_buf { + struct page *page; + long pagecnt_bias; +}; + +struct fbnic_ring { + /* Pointer to buffer specific info */ + union { + struct fbnic_pkt_buff *pkt; /* RCQ */ + struct fbnic_rx_buf *rx_buf; /* BDQ */ + void **tx_buf; /* TWQ */ + void *buffer; /* Generic pointer */ + }; + + u32 __iomem *doorbell; /* Pointer to CSR space for ring */ + __le64 *desc; /* Descriptor ring memory */ + u16 size_mask; /* Size of ring in descriptors - 1 */ + u8 q_idx; /* Logical netdev ring index */ + u8 flags; /* Ring flags (FBNIC_RING_F_*) */ + + u32 head, tail; /* Head/Tail of ring */ + + /* Slow path fields follow */ + dma_addr_t dma; /* Phys addr of descriptor memory */ + size_t size; /* Size of descriptor ring in memory */ +}; + +struct fbnic_q_triad { + struct fbnic_ring sub0, sub1, cmpl; +}; + +struct fbnic_napi_vector { + struct napi_struct napi; + struct device *dev; /* Device for DMA unmapping */ + struct page_pool *page_pool; + struct fbnic_dev *fbd; + char name[IFNAMSIZ + 9]; + + u16 v_idx; + u8 txt_count; + u8 rxt_count; + + struct list_head napis; + + struct fbnic_q_triad qt[]; +}; + +#define FBNIC_MAX_TXQS 128u +#define FBNIC_MAX_RXQS 128u + +netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct net_device *dev); +netdev_features_t +fbnic_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features); + +int fbnic_alloc_napi_vectors(struct fbnic_net *fbn); +void fbnic_free_napi_vectors(struct fbnic_net *fbn); +int fbnic_alloc_resources(struct fbnic_net *fbn); +void fbnic_free_resources(struct fbnic_net *fbn); +void fbnic_napi_enable(struct fbnic_net *fbn); +void fbnic_napi_disable(struct fbnic_net *fbn); +void fbnic_enable(struct fbnic_net *fbn); +void fbnic_disable(struct fbnic_net *fbn); +void fbnic_flush(struct fbnic_net *fbn); +void fbnic_fill(struct fbnic_net *fbn); + +void fbnic_napi_depletion_check(struct net_device *netdev); +int 
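/* A minimal sketch of how the trailing flexible array in struct
 * fbnic_napi_vector above is sized: fbnic_alloc_napi_vector() earlier in
 * this patch allocates it with kzalloc(struct_size(nv, qt, qt_count), ...),
 * which computes the header plus qt_count queue triads with overflow
 * checking.  The container and element types below are hypothetical.
 */
#include <linux/overflow.h>
#include <linux/slab.h>

struct example_triad {
	int sub0, sub1, cmpl;
};

struct example_vector {
	unsigned int qt_count;
	struct example_triad qt[];	/* flexible array member */
};

static struct example_vector *example_vector_alloc(unsigned int qt_count)
{
	struct example_vector *v;

	v = kzalloc(struct_size(v, qt, qt_count), GFP_KERNEL);
	if (v)
		v->qt_count = qt_count;

	return v;
}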
fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail); + +#endif /* _FBNIC_TXRX_H_ */ diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c index 3885d6fbace1..26b00e66d912 100644 --- a/drivers/net/ethernet/microchip/encx24j600-regmap.c +++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c @@ -474,13 +474,13 @@ static struct regmap_config regcfg = { .unlock = regmap_unlock_mutex, }; -static struct regmap_bus regmap_encx24j600 = { +static const struct regmap_bus regmap_encx24j600 = { .write = regmap_encx24j600_write, .read = regmap_encx24j600_read, .reg_update_bits = regmap_encx24j600_reg_update_bits, }; -static struct regmap_config phycfg = { +static const struct regmap_config phycfg = { .name = "phy", .reg_bits = 8, .val_bits = 16, @@ -492,7 +492,7 @@ static struct regmap_config phycfg = { .volatile_reg = encx24j600_phymap_volatile, }; -static struct regmap_bus phymap_encx24j600 = { +static const struct regmap_bus phymap_encx24j600 = { .reg_write = regmap_encx24j600_phy_reg_write, .reg_read = regmap_encx24j600_phy_reg_read, }; diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index 0d1740d64676..3a63ec091413 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -1029,7 +1029,7 @@ static int lan743x_ethtool_set_rxfh(struct net_device *netdev, } static int lan743x_ethtool_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *ts_info) + struct kernel_ethtool_ts_info *ts_info) { struct lan743x_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c index 06811c60d598..aec7066d83b3 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c @@ -376,7 +376,6 @@ static void lan966x_get_eth_mac_stats(struct net_device *dev, lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC]; mac_stats->SingleCollisionFrames = lan966x->stats[idx + SYS_COUNT_TX_COL]; - mac_stats->MultipleCollisionFrames = 0; mac_stats->FramesReceivedOK = lan966x->stats[idx + SYS_COUNT_RX_UC] + lan966x->stats[idx + SYS_COUNT_RX_MC] + @@ -384,26 +383,19 @@ static void lan966x_get_eth_mac_stats(struct net_device *dev, mac_stats->FrameCheckSequenceErrors = lan966x->stats[idx + SYS_COUNT_RX_CRC] + lan966x->stats[idx + SYS_COUNT_RX_CRC]; - mac_stats->AlignmentErrors = 0; mac_stats->OctetsTransmittedOK = lan966x->stats[idx + SYS_COUNT_TX_OCT] + lan966x->stats[idx + SYS_COUNT_TX_PMAC_OCT]; mac_stats->FramesWithDeferredXmissions = lan966x->stats[idx + SYS_COUNT_TX_MM_HOLD]; - mac_stats->LateCollisions = 0; - mac_stats->FramesAbortedDueToXSColls = 0; - mac_stats->FramesLostDueToIntMACXmitError = 0; - mac_stats->CarrierSenseErrors = 0; mac_stats->OctetsReceivedOK = lan966x->stats[idx + SYS_COUNT_RX_OCT]; - mac_stats->FramesLostDueToIntMACRcvError = 0; mac_stats->MulticastFramesXmittedOK = lan966x->stats[idx + SYS_COUNT_TX_MC] + lan966x->stats[idx + SYS_COUNT_TX_PMAC_MC]; mac_stats->BroadcastFramesXmittedOK = lan966x->stats[idx + SYS_COUNT_TX_BC] + lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC]; - mac_stats->FramesWithExcessiveDeferral = 0; mac_stats->MulticastFramesReceivedOK = lan966x->stats[idx + SYS_COUNT_RX_MC]; mac_stats->BroadcastFramesReceivedOK = @@ -546,7 +538,7 @@ static int lan966x_set_pauseparam(struct net_device *dev, } static int 
lan966x_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct lan966x_port *port = netdev_priv(dev); struct lan966x *lan966x = port->lan966x; diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c index a4414f63c9b1..a1471e38d118 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c @@ -581,7 +581,7 @@ static void lan966x_vcap_move(struct net_device *dev, lan966x_vcap_wait_update(lan966x, admin->tgt_inst); } -static struct vcap_operations lan966x_vcap_ops = { +static const struct vcap_operations lan966x_vcap_ops = { .validate_keyset = lan966x_vcap_validate_keyset, .add_default_fields = lan966x_vcap_add_default_fields, .cache_erase = lan966x_vcap_cache_erase, diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c index a06dc5a9b355..4f800c1a435d 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c @@ -1183,7 +1183,7 @@ static void sparx5_config_port_stats(struct sparx5 *sparx5, int portno) } static int sparx5_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct sparx5_port *port = netdev_priv(dev); struct sparx5 *sparx5 = port->sparx5; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c index 187efa1fc904..967c8621c250 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c @@ -1507,7 +1507,7 @@ static void sparx5_vcap_move(struct net_device *ndev, struct vcap_admin *admin, } } -static struct vcap_operations sparx5_vcap_ops = { +static const struct vcap_operations sparx5_vcap_ops = { .validate_keyset = sparx5_vcap_validate_keyset, .add_default_fields = sparx5_vcap_add_default_fields, .cache_erase = sparx5_vcap_cache_erase, diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.h b/drivers/net/ethernet/microchip/vcap/vcap_api.h index 9eccfa633c1a..6069ad95c27e 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api.h +++ b/drivers/net/ethernet/microchip/vcap/vcap_api.h @@ -271,7 +271,7 @@ struct vcap_operations { /* VCAP API Client control interface */ struct vcap_control { - struct vcap_operations *ops; /* client supplied operations */ + const struct vcap_operations *ops; /* client supplied operations */ const struct vcap_info *vcaps; /* client supplied vcap models */ const struct vcap_statistics *stats; /* client supplied vcap stats */ struct list_head list; /* list of vcap instances */ diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c index b23c11b0647c..9c9d38042125 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c +++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c @@ -221,7 +221,7 @@ static int vcap_test_port_info(struct net_device *ndev, return 0; } -static struct vcap_operations test_callbacks = { +static const struct vcap_operations test_callbacks = { .validate_keyset = test_val_keyset, .add_default_fields = test_add_def_fields, .cache_erase = test_cache_erase, diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c 
index fe4e166de8a0..51d9423b08a6 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c +++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c @@ -211,7 +211,7 @@ static int vcap_test_port_info(struct net_device *ndev, return 0; } -static struct vcap_operations test_callbacks = { +static const struct vcap_operations test_callbacks = { .validate_keyset = test_val_keyset, .add_default_fields = test_add_def_fields, .cache_erase = test_cache_erase, diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig index 286f0d5697a1..901fbffbf718 100644 --- a/drivers/net/ethernet/microsoft/Kconfig +++ b/drivers/net/ethernet/microsoft/Kconfig @@ -18,7 +18,7 @@ if NET_VENDOR_MICROSOFT config MICROSOFT_MANA tristate "Microsoft Azure Network Adapter (MANA) support" depends on PCI_MSI - depends on X86_64 || (ARM64 && !CPU_BIG_ENDIAN && ARM64_4K_PAGES) + depends on X86_64 || (ARM64 && !CPU_BIG_ENDIAN) depends on PCI_HYPERV select AUXILIARY_BUS select PAGE_POOL diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index 1332db9a08eb..e1d70d21e207 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -182,7 +182,7 @@ int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length, dma_addr_t dma_handle; void *buf; - if (length < PAGE_SIZE || !is_power_of_2(length)) + if (length < MANA_PAGE_SIZE || !is_power_of_2(length)) return -EINVAL; gmi->dev = gc->dev; @@ -717,7 +717,7 @@ EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, NET_MANA); static int mana_gd_create_dma_region(struct gdma_dev *gd, struct gdma_mem_info *gmi) { - unsigned int num_page = gmi->length / PAGE_SIZE; + unsigned int num_page = gmi->length / MANA_PAGE_SIZE; struct gdma_create_dma_region_req *req = NULL; struct gdma_create_dma_region_resp resp = {}; struct gdma_context *gc = gd->gdma_context; @@ -727,10 +727,10 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd, int err; int i; - if (length < PAGE_SIZE || !is_power_of_2(length)) + if (length < MANA_PAGE_SIZE || !is_power_of_2(length)) return -EINVAL; - if (offset_in_page(gmi->virt_addr) != 0) + if (!MANA_PAGE_ALIGNED(gmi->virt_addr)) return -EINVAL; hwc = gc->hwc.driver_data; @@ -751,7 +751,7 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd, req->page_addr_list_len = num_page; for (i = 0; i < num_page; i++) - req->page_addr_list[i] = gmi->dma_handle + i * PAGE_SIZE; + req->page_addr_list[i] = gmi->dma_handle + i * MANA_PAGE_SIZE; err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp); if (err) diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index bbc4f9e16c98..cafded2f9382 100644 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -362,12 +362,12 @@ static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth, int err; eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth); - if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE) - eq_size = MINIMUM_SUPPORTED_PAGE_SIZE; + if (eq_size < MANA_MIN_QSIZE) + eq_size = MANA_MIN_QSIZE; cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth); - if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE) - cq_size = MINIMUM_SUPPORTED_PAGE_SIZE; + if (cq_size < MANA_MIN_QSIZE) + cq_size = MANA_MIN_QSIZE; hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL); if (!hwc_cq) @@ -429,7 +429,7 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context 
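/* The MANA hunks above and below replace PAGE_SIZE arithmetic with
 * MANA_PAGE_* helpers so queue and DMA-region sizing follows the device's
 * fixed page granularity rather than the kernel's, which is what lets the
 * Kconfig drop its ARM64_4K_PAGES dependency.  The helpers are defined in
 * the MANA headers, not in this hunk; a sketch of the presumed shape, with
 * a 4K device page assumed for illustration:
 */
#include <linux/align.h>
#include <linux/bits.h>

#define EXAMPLE_HW_PAGE_SHIFT		12	/* assumed device page size */
#define EXAMPLE_HW_PAGE_SIZE		BIT(EXAMPLE_HW_PAGE_SHIFT)
#define EXAMPLE_HW_PAGE_ALIGN(x)	ALIGN((x), EXAMPLE_HW_PAGE_SIZE)
#define EXAMPLE_HW_PAGE_ALIGNED(x)	IS_ALIGNED((unsigned long)(x), EXAMPLE_HW_PAGE_SIZE)
#define EXAMPLE_HW_PFN(x)		((x) >> EXAMPLE_HW_PAGE_SHIFT)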
*hwc, u16 q_depth, dma_buf->num_reqs = q_depth; - buf_size = PAGE_ALIGN(q_depth * max_msg_size); + buf_size = MANA_PAGE_ALIGN(q_depth * max_msg_size); gmi = &dma_buf->mem_info; err = mana_gd_alloc_memory(gc, buf_size, gmi); @@ -497,8 +497,8 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc, else queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth); - if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE) - queue_size = MINIMUM_SUPPORTED_PAGE_SIZE; + if (queue_size < MANA_MIN_QSIZE) + queue_size = MANA_MIN_QSIZE; hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL); if (!hwc_wq) diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 608ad31a9702..91f10910ea44 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -481,7 +481,7 @@ static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb, struct sock *sk = skb->sk; int txq; - txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK]; + txq = apc->indir_table[hash & (apc->indir_table_sz - 1)]; if (txq != old_q && sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache)) @@ -721,6 +721,13 @@ static void mana_cleanup_port_context(struct mana_port_context *apc) apc->rxqs = NULL; } +static void mana_cleanup_indir_table(struct mana_port_context *apc) +{ + apc->indir_table_sz = 0; + kfree(apc->indir_table); + kfree(apc->rxobj_table); +} + static int mana_init_port_context(struct mana_port_context *apc) { apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *), @@ -962,7 +969,16 @@ static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index, *max_sq = resp.max_num_sq; *max_rq = resp.max_num_rq; - *num_indir_entry = resp.num_indirection_ent; + if (resp.num_indirection_ent > 0 && + resp.num_indirection_ent <= MANA_INDIRECT_TABLE_MAX_SIZE && + is_power_of_2(resp.num_indirection_ent)) { + *num_indir_entry = resp.num_indirection_ent; + } else { + netdev_warn(apc->ndev, + "Setting indirection table size to default %d for vPort %d\n", + MANA_INDIRECT_TABLE_DEF_SIZE, apc->port_idx); + *num_indir_entry = MANA_INDIRECT_TABLE_DEF_SIZE; + } apc->port_handle = resp.vport; ether_addr_copy(apc->mac_addr, resp.mac_addr); @@ -1054,14 +1070,13 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc, bool update_default_rxobj, bool update_key, bool update_tab) { - u16 num_entries = MANA_INDIRECT_TABLE_SIZE; struct mana_cfg_rx_steer_req_v2 *req; struct mana_cfg_rx_steer_resp resp = {}; struct net_device *ndev = apc->ndev; u32 req_buf_size; int err; - req_buf_size = struct_size(req, indir_tab, num_entries); + req_buf_size = struct_size(req, indir_tab, apc->indir_table_sz); req = kzalloc(req_buf_size, GFP_KERNEL); if (!req) return -ENOMEM; @@ -1072,7 +1087,7 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc, req->hdr.req.msg_version = GDMA_MESSAGE_V2; req->vport = apc->port_handle; - req->num_indir_entries = num_entries; + req->num_indir_entries = apc->indir_table_sz; req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2, indir_tab); req->rx_enable = rx; @@ -1111,7 +1126,7 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc, } netdev_info(ndev, "Configured steering vPort %llu entries %u\n", - apc->port_handle, num_entries); + apc->port_handle, apc->indir_table_sz); out: kfree(req); return err; @@ -1889,10 +1904,10 @@ static int mana_create_txq(struct mana_port_context *apc, * to prevent overflow. 
*/ txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32; - BUILD_BUG_ON(!PAGE_ALIGNED(txq_size)); + BUILD_BUG_ON(!MANA_PAGE_ALIGNED(txq_size)); cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE; - cq_size = PAGE_ALIGN(cq_size); + cq_size = MANA_PAGE_ALIGN(cq_size); gc = gd->gdma_context; @@ -2189,8 +2204,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, if (err) goto out; - rq_size = PAGE_ALIGN(rq_size); - cq_size = PAGE_ALIGN(cq_size); + rq_size = MANA_PAGE_ALIGN(rq_size); + cq_size = MANA_PAGE_ALIGN(cq_size); /* Create RQ */ memset(&spec, 0, sizeof(spec)); @@ -2344,11 +2359,33 @@ static int mana_create_vport(struct mana_port_context *apc, return mana_create_txq(apc, net); } +static int mana_rss_table_alloc(struct mana_port_context *apc) +{ + if (!apc->indir_table_sz) { + netdev_err(apc->ndev, + "Indirection table size not set for vPort %d\n", + apc->port_idx); + return -EINVAL; + } + + apc->indir_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL); + if (!apc->indir_table) + return -ENOMEM; + + apc->rxobj_table = kcalloc(apc->indir_table_sz, sizeof(mana_handle_t), GFP_KERNEL); + if (!apc->rxobj_table) { + kfree(apc->indir_table); + return -ENOMEM; + } + + return 0; +} + static void mana_rss_table_init(struct mana_port_context *apc) { int i; - for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) + for (i = 0; i < apc->indir_table_sz; i++) apc->indir_table[i] = ethtool_rxfh_indir_default(i, apc->num_queues); } @@ -2361,7 +2398,7 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx, int i; if (update_tab) { - for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) { + for (i = 0; i < apc->indir_table_sz; i++) { queue_idx = apc->indir_table[i]; apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj; } @@ -2466,7 +2503,6 @@ static int mana_init_port(struct net_device *ndev) struct mana_port_context *apc = netdev_priv(ndev); u32 max_txq, max_rxq, max_queues; int port_idx = apc->port_idx; - u32 num_indirect_entries; int err; err = mana_init_port_context(apc); @@ -2474,7 +2510,7 @@ static int mana_init_port(struct net_device *ndev) return err; err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq, - &num_indirect_entries); + &apc->indir_table_sz); if (err) { netdev_err(ndev, "Failed to query info for vPort %d\n", port_idx); @@ -2493,8 +2529,7 @@ static int mana_init_port(struct net_device *ndev) return 0; reset_apc: - kfree(apc->rxqs); - apc->rxqs = NULL; + mana_cleanup_port_context(apc); return err; } @@ -2723,6 +2758,10 @@ static int mana_probe_port(struct mana_context *ac, int port_idx, if (err) goto free_net; + err = mana_rss_table_alloc(apc); + if (err) + goto reset_apc; + netdev_lockdep_set_classes(ndev); ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; @@ -2739,14 +2778,15 @@ static int mana_probe_port(struct mana_context *ac, int port_idx, err = register_netdev(ndev); if (err) { netdev_err(ndev, "Unable to register netdev.\n"); - goto reset_apc; + goto free_indir; } return 0; +free_indir: + mana_cleanup_indir_table(apc); reset_apc: - kfree(apc->rxqs); - apc->rxqs = NULL; + mana_cleanup_port_context(apc); free_net: *ndev_storage = NULL; netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err); @@ -2874,16 +2914,30 @@ int mana_probe(struct gdma_dev *gd, bool resuming) if (!resuming) { for (i = 0; i < ac->num_ports; i++) { err = mana_probe_port(ac, i, &ac->ports[i]); - if (err) + /* we log the port for which the probe failed and stop + * probes for subsequent ports. 
+ * Note that we keep running ports, for which the probes + * were successful, unless add_adev fails too + */ + if (err) { + dev_err(dev, "Probe Failed for port %d\n", i); break; + } } } else { for (i = 0; i < ac->num_ports; i++) { rtnl_lock(); err = mana_attach(ac->ports[i]); rtnl_unlock(); - if (err) + /* we log the port for which the attach failed and stop + * attach for subsequent ports + * Note that we keep running ports, for which the attach + * were successful, unless add_adev fails too + */ + if (err) { + dev_err(dev, "Attach Failed for port %d\n", i); break; + } } } @@ -2899,6 +2953,7 @@ void mana_remove(struct gdma_dev *gd, bool suspending) { struct gdma_context *gc = gd->gdma_context; struct mana_context *ac = gd->driver_data; + struct mana_port_context *apc; struct device *dev = gc->dev; struct net_device *ndev; int err; @@ -2910,6 +2965,7 @@ void mana_remove(struct gdma_dev *gd, bool suspending) for (i = 0; i < ac->num_ports; i++) { ndev = ac->ports[i]; + apc = netdev_priv(ndev); if (!ndev) { if (i == 0) dev_err(dev, "No net device to remove\n"); @@ -2933,6 +2989,7 @@ void mana_remove(struct gdma_dev *gd, bool suspending) } unregister_netdevice(ndev); + mana_cleanup_indir_table(apc); rtnl_unlock(); diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c index ab2413d71f6c..146d5db1792f 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c +++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c @@ -245,7 +245,9 @@ static u32 mana_get_rxfh_key_size(struct net_device *ndev) static u32 mana_rss_indir_size(struct net_device *ndev) { - return MANA_INDIRECT_TABLE_SIZE; + struct mana_port_context *apc = netdev_priv(ndev); + + return apc->indir_table_sz; } static int mana_get_rxfh(struct net_device *ndev, @@ -257,7 +259,7 @@ static int mana_get_rxfh(struct net_device *ndev, rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */ if (rxfh->indir) { - for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) + for (i = 0; i < apc->indir_table_sz; i++) rxfh->indir[i] = apc->indir_table[i]; } @@ -273,8 +275,8 @@ static int mana_set_rxfh(struct net_device *ndev, { struct mana_port_context *apc = netdev_priv(ndev); bool update_hash = false, update_table = false; - u32 save_table[MANA_INDIRECT_TABLE_SIZE]; u8 save_key[MANA_HASH_KEY_SIZE]; + u32 *save_table; int i, err; if (!apc->port_is_up) @@ -284,13 +286,19 @@ static int mana_set_rxfh(struct net_device *ndev, rxfh->hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; + save_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL); + if (!save_table) + return -ENOMEM; + if (rxfh->indir) { - for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) - if (rxfh->indir[i] >= apc->num_queues) - return -EINVAL; + for (i = 0; i < apc->indir_table_sz; i++) + if (rxfh->indir[i] >= apc->num_queues) { + err = -EINVAL; + goto cleanup; + } update_table = true; - for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) { + for (i = 0; i < apc->indir_table_sz; i++) { save_table[i] = apc->indir_table[i]; apc->indir_table[i] = rxfh->indir[i]; } @@ -306,7 +314,7 @@ static int mana_set_rxfh(struct net_device *ndev, if (err) { /* recover to original values */ if (update_table) { - for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) + for (i = 0; i < apc->indir_table_sz; i++) apc->indir_table[i] = save_table[i]; } @@ -316,6 +324,9 @@ static int mana_set_rxfh(struct net_device *ndev, mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table); } +cleanup: + kfree(save_table); + return err; } diff --git 
a/drivers/net/ethernet/microsoft/mana/shm_channel.c b/drivers/net/ethernet/microsoft/mana/shm_channel.c index 5553af9c8085..0f1679ebad96 100644 --- a/drivers/net/ethernet/microsoft/mana/shm_channel.c +++ b/drivers/net/ethernet/microsoft/mana/shm_channel.c @@ -6,6 +6,7 @@ #include <linux/io.h> #include <linux/mm.h> +#include <net/mana/gdma.h> #include <net/mana/shm_channel.h> #define PAGE_FRAME_L48_WIDTH_BYTES 6 @@ -155,8 +156,8 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr, return err; } - if (!PAGE_ALIGNED(eq_addr) || !PAGE_ALIGNED(cq_addr) || - !PAGE_ALIGNED(rq_addr) || !PAGE_ALIGNED(sq_addr)) + if (!MANA_PAGE_ALIGNED(eq_addr) || !MANA_PAGE_ALIGNED(cq_addr) || + !MANA_PAGE_ALIGNED(rq_addr) || !MANA_PAGE_ALIGNED(sq_addr)) return -EINVAL; if ((eq_msix_index & VECTOR_MASK) != eq_msix_index) @@ -183,7 +184,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr, /* EQ addr: low 48 bits of frame address */ shmem = (u64 *)ptr; - frame_addr = PHYS_PFN(eq_addr); + frame_addr = MANA_PFN(eq_addr); *shmem = frame_addr & PAGE_FRAME_L48_MASK; all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) << (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS); @@ -191,7 +192,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr, /* CQ addr: low 48 bits of frame address */ shmem = (u64 *)ptr; - frame_addr = PHYS_PFN(cq_addr); + frame_addr = MANA_PFN(cq_addr); *shmem = frame_addr & PAGE_FRAME_L48_MASK; all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) << (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS); @@ -199,7 +200,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr, /* RQ addr: low 48 bits of frame address */ shmem = (u64 *)ptr; - frame_addr = PHYS_PFN(rq_addr); + frame_addr = MANA_PFN(rq_addr); *shmem = frame_addr & PAGE_FRAME_L48_MASK; all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) << (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS); @@ -207,7 +208,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr, /* SQ addr: low 48 bits of frame address */ shmem = (u64 *)ptr; - frame_addr = PHYS_PFN(sq_addr); + frame_addr = MANA_PFN(sq_addr); *shmem = frame_addr & PAGE_FRAME_L48_MASK; all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) << (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS); diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 21a87a3fc556..7c9540a71725 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -980,7 +980,7 @@ static int ocelot_port_get_sset_count(struct net_device *dev, int sset) } static int ocelot_port_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot *ocelot = priv->port.ocelot; diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c index cb32234a5bf1..b3c28260adf8 100644 --- a/drivers/net/ethernet/mscc/ocelot_ptp.c +++ b/drivers/net/ethernet/mscc/ocelot_ptp.c @@ -580,7 +580,7 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr) EXPORT_SYMBOL(ocelot_hwstamp_set); int ocelot_get_ts_info(struct ocelot *ocelot, int port, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { info->phc_index = ocelot->ptp_clock ? 
ptp_clock_index(ocelot->ptp_clock) : -1; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 8e0a890381b6..46ffc2c20893 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -321,6 +321,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, flow_rule_match_enc_control(rule, &enc_ctl); + if (flow_rule_has_enc_control_flags(enc_ctl.mask->flags, + extack)) + return -EOPNOTSUPP; + if (enc_ctl.mask->addr_type != 0xffff) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h index 2ccc2c2a06e3..1c61390677f7 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic.h +++ b/drivers/net/ethernet/pensando/ionic/ionic.h @@ -18,6 +18,8 @@ struct ionic_lif; #define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002 #define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003 +#define IONIC_ASIC_TYPE_ELBA 2 + #define DEVCMD_TIMEOUT 5 #define IONIC_ADMINQ_TIME_SLICE msecs_to_jiffies(100) @@ -47,6 +49,7 @@ struct ionic { struct ionic_dev_bar bars[IONIC_BARS_MAX]; unsigned int num_bars; struct ionic_identity ident; + struct workqueue_struct *wq; struct ionic_lif *lif; unsigned int nnqs_per_lif; unsigned int neqs_per_lif; @@ -54,6 +57,8 @@ struct ionic { unsigned int nrxqs_per_lif; unsigned int nintrs; DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX); + cpumask_var_t *affinity_masks; + struct delayed_work doorbell_check_dwork; struct work_struct nb_work; struct notifier_block nb; struct rw_semaphore vf_op_lock; /* lock for VF operations */ @@ -93,4 +98,6 @@ int ionic_port_identify(struct ionic *ionic); int ionic_port_init(struct ionic *ionic); int ionic_port_reset(struct ionic *ionic); +bool ionic_doorbell_wa(struct ionic *ionic); + #endif /* _IONIC_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c index 6ba8d4aca0a0..b93791d6b593 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c @@ -326,6 +326,11 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out; } +#ifdef CONFIG_PPC64 + /* Ensure MSI/MSI-X interrupts lie within addressable physical memory */ + pdev->no_64bit_msi = 1; +#endif + err = ionic_setup_one(ionic); if (err) goto err_out; @@ -372,6 +377,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) mod_timer(&ionic->watchdog_timer, round_jiffies(jiffies + ionic->watchdog_period)); + ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE); return 0; @@ -406,6 +412,8 @@ static void ionic_remove(struct pci_dev *pdev) if (test_and_clear_bit(IONIC_LIF_F_FW_RESET, ionic->lif->state)) set_bit(IONIC_LIF_F_FW_STOPPING, ionic->lif->state); + if (ionic->lif->doorbell_wa) + cancel_delayed_work_sync(&ionic->doorbell_check_dwork); ionic_lif_unregister(ionic->lif); ionic_devlink_unregister(ionic); ionic_lif_deinit(ionic->lif); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c index c3ae11a48024..59e5a9f21105 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c @@ -220,7 +220,7 @@ static int netdev_show(struct seq_file *seq, void *v) { struct net_device *netdev = seq->private; 
- seq_printf(seq, "%s\n", netdev->name); + seq_printf(seq, "%s\n", netdev_name(netdev)); return 0; } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c index 874499337132..9e42d599840d 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c @@ -43,11 +43,99 @@ static void ionic_watchdog_cb(struct timer_list *t) work->type = IONIC_DW_TYPE_RX_MODE; netdev_dbg(lif->netdev, "deferred: rx_mode\n"); - ionic_lif_deferred_enqueue(&lif->deferred, work); + ionic_lif_deferred_enqueue(lif, work); } } -static void ionic_watchdog_init(struct ionic *ionic) +static void ionic_napi_schedule_do_softirq(struct napi_struct *napi) +{ + local_bh_disable(); + napi_schedule(napi); + local_bh_enable(); +} + +void ionic_doorbell_napi_work(struct work_struct *work) +{ + struct ionic_qcq *qcq = container_of(work, struct ionic_qcq, + doorbell_napi_work); + unsigned long now, then, dif; + + now = READ_ONCE(jiffies); + then = qcq->q.dbell_jiffies; + dif = now - then; + + if (dif > qcq->q.dbell_deadline) + ionic_napi_schedule_do_softirq(&qcq->napi); +} + +static int ionic_get_preferred_cpu(struct ionic *ionic, + struct ionic_intr_info *intr) +{ + int cpu; + + cpu = cpumask_first_and(*intr->affinity_mask, cpu_online_mask); + if (cpu >= nr_cpu_ids) + cpu = cpumask_local_spread(0, dev_to_node(ionic->dev)); + + return cpu; +} + +static void ionic_queue_dbell_napi_work(struct ionic *ionic, + struct ionic_qcq *qcq) +{ + int cpu; + + if (!(qcq->flags & IONIC_QCQ_F_INTR)) + return; + + cpu = ionic_get_preferred_cpu(ionic, &qcq->intr); + queue_work_on(cpu, ionic->wq, &qcq->doorbell_napi_work); +} + +static void ionic_doorbell_check_dwork(struct work_struct *work) +{ + struct ionic *ionic = container_of(work, struct ionic, + doorbell_check_dwork.work); + struct ionic_lif *lif = ionic->lif; + + mutex_lock(&lif->queue_lock); + + if (test_bit(IONIC_LIF_F_FW_STOPPING, lif->state) || + test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { + mutex_unlock(&lif->queue_lock); + return; + } + + ionic_napi_schedule_do_softirq(&lif->adminqcq->napi); + + if (test_bit(IONIC_LIF_F_UP, lif->state)) { + int i; + + for (i = 0; i < lif->nxqs; i++) { + ionic_queue_dbell_napi_work(ionic, lif->txqcqs[i]); + ionic_queue_dbell_napi_work(ionic, lif->rxqcqs[i]); + } + + if (lif->hwstamp_txq && + lif->hwstamp_txq->flags & IONIC_QCQ_F_INTR) + ionic_napi_schedule_do_softirq(&lif->hwstamp_txq->napi); + if (lif->hwstamp_rxq && + lif->hwstamp_rxq->flags & IONIC_QCQ_F_INTR) + ionic_napi_schedule_do_softirq(&lif->hwstamp_rxq->napi); + } + mutex_unlock(&lif->queue_lock); + + ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE); +} + +bool ionic_doorbell_wa(struct ionic *ionic) +{ + u8 asic_type = ionic->idev.dev_info.asic_type; + + return !asic_type || asic_type == IONIC_ASIC_TYPE_ELBA; +} + +static int ionic_watchdog_init(struct ionic *ionic) { struct ionic_dev *idev = &ionic->idev; @@ -63,6 +151,31 @@ static void ionic_watchdog_init(struct ionic *ionic) idev->fw_status_ready = true; idev->fw_generation = IONIC_FW_STS_F_GENERATION & ioread8(&idev->dev_info_regs->fw_status); + + ionic->wq = alloc_workqueue("%s-wq", WQ_UNBOUND, 0, + dev_name(ionic->dev)); + if (!ionic->wq) { + dev_err(ionic->dev, "alloc_workqueue failed"); + return -ENOMEM; + } + + if (ionic_doorbell_wa(ionic)) + INIT_DELAYED_WORK(&ionic->doorbell_check_dwork, + ionic_doorbell_check_dwork); + + return 0; +} + +void ionic_queue_doorbell_check(struct ionic *ionic, int delay) +{ + int cpu; + 
+ if (!ionic->lif->doorbell_wa) + return; + + cpu = ionic_get_preferred_cpu(ionic, &ionic->lif->adminqcq->intr); + queue_delayed_work_on(cpu, ionic->wq, &ionic->doorbell_check_dwork, + delay); } void ionic_init_devinfo(struct ionic *ionic) @@ -94,6 +207,7 @@ int ionic_dev_setup(struct ionic *ionic) struct device *dev = ionic->dev; int size; u32 sig; + int err; /* BAR0: dev_cmd and interrupts */ if (num_bars < 1) { @@ -129,7 +243,9 @@ int ionic_dev_setup(struct ionic *ionic) return -EFAULT; } - ionic_watchdog_init(ionic); + err = ionic_watchdog_init(ionic); + if (err) + return err; idev->db_pages = bar->vaddr; idev->phy_db_pages = bar->bus_addr; @@ -161,6 +277,7 @@ void ionic_dev_teardown(struct ionic *ionic) idev->phy_cmb_pages = 0; idev->cmb_npages = 0; + destroy_workqueue(ionic->wq); mutex_destroy(&idev->cmb_inuse_lock); } @@ -273,7 +390,7 @@ do_check_time: if (work) { work->type = IONIC_DW_TYPE_LIF_RESET; work->fw_status = fw_status_ready; - ionic_lif_deferred_enqueue(&lif->deferred, work); + ionic_lif_deferred_enqueue(lif, work); } } } @@ -703,10 +820,6 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell) q->dbval | q->head_idx); q->dbell_jiffies = jiffies; - - if (q_to_qcq(q)->napi_qcq) - mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline, - jiffies + IONIC_NAPI_DEADLINE); } } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h index b6c01a88098d..c647033f3ad2 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h @@ -28,7 +28,7 @@ #define IONIC_DEV_INFO_REG_COUNT 32 #define IONIC_DEV_CMD_REG_COUNT 32 -#define IONIC_NAPI_DEADLINE (HZ / 200) /* 5ms */ +#define IONIC_NAPI_DEADLINE (HZ) /* 1 sec */ #define IONIC_ADMIN_DOORBELL_DEADLINE (HZ / 2) /* 500ms */ #define IONIC_TX_DOORBELL_DEADLINE (HZ / 100) /* 10ms */ #define IONIC_RX_MIN_DOORBELL_DEADLINE (HZ / 100) /* 10ms */ @@ -280,9 +280,9 @@ struct ionic_intr_info { u64 rearm_count; unsigned int index; unsigned int vector; - unsigned int cpu; u32 dim_coal_hw; - cpumask_t affinity_mask; + cpumask_var_t *affinity_mask; + struct irq_affinity_notify aff_notify; }; struct ionic_cq { @@ -388,6 +388,8 @@ bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos); int ionic_heartbeat_check(struct ionic *ionic); bool ionic_is_fw_running(struct ionic_dev *idev); +void ionic_doorbell_napi_work(struct work_struct *work); +void ionic_queue_doorbell_check(struct ionic *ionic, int delay); bool ionic_adminq_poke_doorbell(struct ionic_queue *q); bool ionic_txq_poke_doorbell(struct ionic_queue *q); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index 91183965a6b7..4619fd74f3e3 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -11,6 +11,8 @@ #include "ionic_ethtool.h" #include "ionic_stats.h" +#define IONIC_MAX_RX_COPYBREAK min(U16_MAX, IONIC_MAX_BUF_LEN) + static void ionic_get_stats_strings(struct ionic_lif *lif, u8 *buf) { u32 i; @@ -872,10 +874,17 @@ static int ionic_set_tunable(struct net_device *dev, const void *data) { struct ionic_lif *lif = netdev_priv(dev); + u32 rx_copybreak; switch (tuna->id) { case ETHTOOL_RX_COPYBREAK: - lif->rx_copybreak = *(u32 *)data; + rx_copybreak = *(u32 *)data; + if (rx_copybreak > IONIC_MAX_RX_COPYBREAK) { + netdev_err(dev, "Max supported rx_copybreak size: %u\n", + IONIC_MAX_RX_COPYBREAK); + return -EINVAL; + } + lif->rx_copybreak = 
(u16)rx_copybreak; break; default: return -EOPNOTSUPP; @@ -968,7 +977,7 @@ static int ionic_get_module_eeprom(struct net_device *netdev, } static int ionic_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct ionic_lif *lif = netdev_priv(netdev); struct ionic *ionic = lif->ionic; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h index 9a1825edf0d0..9c85c0706c6e 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_if.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h @@ -71,7 +71,7 @@ enum ionic_cmd_opcode { IONIC_CMD_FW_CONTROL_V1 = 255, }; -/** +/* * enum ionic_status_code - Device command return codes */ enum ionic_status_code { @@ -112,6 +112,7 @@ enum ionic_notifyq_opcode { /** * struct ionic_admin_cmd - General admin command format * @opcode: Opcode for the command + * @rsvd: reserved byte(s) * @lif_index: LIF index * @cmd_data: Opcode-specific command bytes */ @@ -125,6 +126,7 @@ struct ionic_admin_cmd { /** * struct ionic_admin_comp - General admin command completion format * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @comp_index: Index in the descriptor ring for which this is the completion * @cmd_data: Command-specific bytes * @color: Color bit (Always 0 for commands issued to the @@ -147,6 +149,7 @@ static inline u8 color_match(u8 color, u8 done_color) /** * struct ionic_nop_cmd - NOP command * @opcode: opcode + * @rsvd: reserved byte(s) */ struct ionic_nop_cmd { u8 opcode; @@ -156,6 +159,7 @@ struct ionic_nop_cmd { /** * struct ionic_nop_comp - NOP command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) */ struct ionic_nop_comp { u8 status; @@ -166,6 +170,7 @@ struct ionic_nop_comp { * struct ionic_dev_init_cmd - Device init command * @opcode: opcode * @type: Device type + * @rsvd: reserved byte(s) */ struct ionic_dev_init_cmd { u8 opcode; @@ -176,6 +181,7 @@ struct ionic_dev_init_cmd { /** * struct ionic_dev_init_comp - Device init command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) */ struct ionic_dev_init_comp { u8 status; @@ -185,6 +191,7 @@ struct ionic_dev_init_comp { /** * struct ionic_dev_reset_cmd - Device reset command * @opcode: opcode + * @rsvd: reserved byte(s) */ struct ionic_dev_reset_cmd { u8 opcode; @@ -194,6 +201,7 @@ struct ionic_dev_reset_cmd { /** * struct ionic_dev_reset_comp - Reset command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) */ struct ionic_dev_reset_comp { u8 status; @@ -207,6 +215,7 @@ struct ionic_dev_reset_comp { * struct ionic_dev_identify_cmd - Driver/device identify command * @opcode: opcode * @ver: Highest version of identify supported by driver + * @rsvd: reserved byte(s) */ struct ionic_dev_identify_cmd { u8 opcode; @@ -218,6 +227,7 @@ struct ionic_dev_identify_cmd { * struct ionic_dev_identify_comp - Driver/device identify command completion * @status: Status of the command (enum ionic_status_code) * @ver: Version of identify returned by device + * @rsvd: reserved byte(s) */ struct ionic_dev_identify_comp { u8 status; @@ -242,6 +252,7 @@ enum ionic_os_type { * @kernel_ver: Kernel version, numeric format * @kernel_ver_str: Kernel version, string format * @driver_ver_str: Driver version, string format + * @words: word access to struct contents */ union ionic_drv_identity { struct { @@ -267,7 +278,9 @@ enum 
ionic_dev_capability { * union ionic_dev_identity - device identity information * @version: Version of device identify * @type: Identify type (0 for now) + * @rsvd: reserved byte(s) * @nports: Number of ports provisioned + * @rsvd2: reserved byte(s) * @nlifs: Number of LIFs provisioned * @nintrs: Number of interrupts provisioned * @ndbpgs_per_lif: Number of doorbell pages per LIF @@ -284,6 +297,7 @@ enum ionic_dev_capability { * @hwstamp_mult: Hardware tick to nanosecond multiplier. * @hwstamp_shift: Hardware tick to nanosecond divisor (power of two). * @capabilities: Device capabilities + * @words: word access to struct contents */ union ionic_dev_identity { struct { @@ -317,6 +331,7 @@ enum ionic_lif_type { * @opcode: opcode * @type: LIF type (enum ionic_lif_type) * @ver: Version of identify returned by device + * @rsvd: reserved byte(s) */ struct ionic_lif_identify_cmd { u8 opcode; @@ -329,6 +344,7 @@ struct ionic_lif_identify_cmd { * struct ionic_lif_identify_comp - LIF identify command completion * @status: Status of the command (enum ionic_status_code) * @ver: Version of identify returned by device + * @rsvd2: reserved byte(s) */ struct ionic_lif_identify_comp { u8 status; @@ -416,7 +432,7 @@ enum ionic_txq_feature { }; /** - * struct ionic_hwstamp_bits - Hardware timestamp decoding bits + * enum ionic_hwstamp_bits - Hardware timestamp decoding bits * @IONIC_HWSTAMP_INVALID: Invalid hardware timestamp value * @IONIC_HWSTAMP_CQ_NEGOFFSET: Timestamp field negative offset * from the base cq descriptor. @@ -429,6 +445,7 @@ enum ionic_hwstamp_bits { /** * struct ionic_lif_logical_qtype - Descriptor of logical to HW queue type * @qtype: Hardware Queue Type + * @rsvd: reserved byte(s) * @qid_count: Number of Queue IDs of the logical type * @qid_base: Minimum Queue ID of the logical type */ @@ -454,12 +471,14 @@ enum ionic_lif_state { /** * union ionic_lif_config - LIF configuration * @state: LIF state (enum ionic_lif_state) + * @rsvd: reserved byte(s) * @name: LIF name * @mtu: MTU * @mac: Station MAC address * @vlan: Default Vlan ID * @features: Features (enum ionic_eth_hw_features) * @queue_count: Queue counts per queue-type + * @words: word access to struct contents */ union ionic_lif_config { struct { @@ -481,33 +500,39 @@ union ionic_lif_config { * @capabilities: LIF capabilities * * @eth: Ethernet identify structure - * @version: Ethernet identify structure version - * @max_ucast_filters: Number of perfect unicast addresses supported - * @max_mcast_filters: Number of perfect multicast addresses supported - * @min_frame_size: Minimum size of frames to be sent - * @max_frame_size: Maximum size of frames to be sent - * @hwstamp_tx_modes: Bitmask of BIT_ULL(enum ionic_txstamp_mode) - * @hwstamp_rx_filters: Bitmask of enum ionic_pkt_class - * @config: LIF config struct with features, mtu, mac, q counts + * @eth.version: Ethernet identify structure version + * @eth.rsvd: reserved byte(s) + * @eth.max_ucast_filters: Number of perfect unicast addresses supported + * @eth.max_mcast_filters: Number of perfect multicast addresses supported + * @eth.min_frame_size: Minimum size of frames to be sent + * @eth.max_frame_size: Maximum size of frames to be sent + * @eth.rsvd2: reserved byte(s) + * @eth.hwstamp_tx_modes: Bitmask of BIT_ULL(enum ionic_txstamp_mode) + * @eth.hwstamp_rx_filters: Bitmask of enum ionic_pkt_class + * @eth.rsvd3: reserved byte(s) + * @eth.config: LIF config struct with features, mtu, mac, q counts * * @rdma: RDMA identify structure - * @version: RDMA version of opcodes and 
queue descriptors - * @qp_opcodes: Number of RDMA queue pair opcodes supported - * @admin_opcodes: Number of RDMA admin opcodes supported - * @npts_per_lif: Page table size per LIF - * @nmrs_per_lif: Number of memory regions per LIF - * @nahs_per_lif: Number of address handles per LIF - * @max_stride: Max work request stride - * @cl_stride: Cache line stride - * @pte_stride: Page table entry stride - * @rrq_stride: Remote RQ work request stride - * @rsq_stride: Remote SQ work request stride - * @dcqcn_profiles: Number of DCQCN profiles - * @aq_qtype: RDMA Admin Qtype - * @sq_qtype: RDMA Send Qtype - * @rq_qtype: RDMA Receive Qtype - * @cq_qtype: RDMA Completion Qtype - * @eq_qtype: RDMA Event Qtype + * @rdma.version: RDMA version of opcodes and queue descriptors + * @rdma.qp_opcodes: Number of RDMA queue pair opcodes supported + * @rdma.admin_opcodes: Number of RDMA admin opcodes supported + * @rdma.rsvd: reserved byte(s) + * @rdma.npts_per_lif: Page table size per LIF + * @rdma.nmrs_per_lif: Number of memory regions per LIF + * @rdma.nahs_per_lif: Number of address handles per LIF + * @rdma.max_stride: Max work request stride + * @rdma.cl_stride: Cache line stride + * @rdma.pte_stride: Page table entry stride + * @rdma.rrq_stride: Remote RQ work request stride + * @rdma.rsq_stride: Remote SQ work request stride + * @rdma.dcqcn_profiles: Number of DCQCN profiles + * @rdma.rsvd_dimensions: reserved byte(s) + * @rdma.aq_qtype: RDMA Admin Qtype + * @rdma.sq_qtype: RDMA Send Qtype + * @rdma.rq_qtype: RDMA Receive Qtype + * @rdma.cq_qtype: RDMA Completion Qtype + * @rdma.eq_qtype: RDMA Event Qtype + * @words: word access to struct contents */ union ionic_lif_identity { struct { @@ -558,7 +583,9 @@ union ionic_lif_identity { * @opcode: Opcode * @type: LIF type (enum ionic_lif_type) * @index: LIF index + * @rsvd: reserved byte(s) * @info_pa: Destination address for LIF info (struct ionic_lif_info) + * @rsvd2: reserved byte(s) */ struct ionic_lif_init_cmd { u8 opcode; @@ -572,7 +599,9 @@ struct ionic_lif_init_cmd { /** * struct ionic_lif_init_comp - LIF init command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @hw_index: Hardware index of the initialized LIF + * @rsvd2: reserved byte(s) */ struct ionic_lif_init_comp { u8 status; @@ -584,9 +613,11 @@ struct ionic_lif_init_comp { /** * struct ionic_q_identify_cmd - queue identify command * @opcode: opcode + * @rsvd: reserved byte(s) * @lif_type: LIF type (enum ionic_lif_type) * @type: Logical queue type (enum ionic_logical_qtype) * @ver: Highest queue type version that the driver supports + * @rsvd2: reserved byte(s) */ struct ionic_q_identify_cmd { u8 opcode; @@ -600,8 +631,10 @@ struct ionic_q_identify_cmd { /** * struct ionic_q_identify_comp - queue identify command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @comp_index: Index in the descriptor ring for which this is the completion * @ver: Queue type version that can be used with FW + * @rsvd2: reserved byte(s) */ struct ionic_q_identify_comp { u8 status; @@ -615,12 +648,14 @@ struct ionic_q_identify_comp { * union ionic_q_identity - queue identity information * @version: Queue type version that can be used with FW * @supported: Bitfield of queue versions, first bit = ver 0 + * @rsvd: reserved byte(s) * @features: Queue features (enum ionic_q_feature, etc) * @desc_sz: Descriptor size * @comp_sz: Completion descriptor size * @sg_desc_sz: Scatter/Gather descriptor size * @max_sg_elems: 
Maximum number of Scatter/Gather elements * @sg_desc_stride: Number of Scatter/Gather elements per descriptor + * @words: word access to struct contents */ union ionic_q_identity { struct { @@ -640,8 +675,10 @@ union ionic_q_identity { /** * struct ionic_q_init_cmd - Queue init command * @opcode: opcode + * @rsvd: reserved byte(s) * @type: Logical queue type * @ver: Queue type version + * @rsvd1: reserved byte(s) * @lif_index: LIF index * @index: (LIF, qtype) relative admin queue index * @intr_index: Interrupt control register index, or Event queue index @@ -667,6 +704,7 @@ union ionic_q_identity { * @ring_base: Queue ring base address * @cq_ring_base: Completion queue ring base address * @sg_ring_base: Scatter/Gather ring base address + * @rsvd2: reserved byte(s) * @features: Mask of queue features to enable, if not in the flags above. */ struct ionic_q_init_cmd { @@ -698,9 +736,11 @@ struct ionic_q_init_cmd { /** * struct ionic_q_init_comp - Queue init command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @comp_index: Index in the descriptor ring for which this is the completion * @hw_index: Hardware Queue ID * @hw_type: Hardware Queue type + * @rsvd2: reserved byte(s) * @color: Color */ struct ionic_q_init_comp { @@ -800,7 +840,7 @@ enum ionic_txq_desc_opcode { * will set CWR flag in the first segment if * CWR is set in the template header, and * clear CWR in remaining segments. - * @flags: + * flags: * vlan: * Insert an L2 VLAN header using @vlan_tci * encap: @@ -813,13 +853,14 @@ enum ionic_txq_desc_opcode { * TSO start * tso_eot: * TSO end - * @num_sg_elems: Number of scatter-gather elements in SG + * num_sg_elems: Number of scatter-gather elements in SG * descriptor - * @addr: First data buffer's DMA address + * addr: First data buffer's DMA address * (Subsequent data buffers are on txq_sg_desc) * @len: First data buffer's length, in bytes * @vlan_tci: VLAN tag to insert in the packet (if requested * by @V-bit). Includes .1p and .1q tags + * @hword0: half word padding * @hdr_len: Length of packet headers, including * encapsulating outer header, if applicable * Valid for opcodes IONIC_TXQ_DESC_OPCODE_CALC_CSUM and @@ -830,10 +871,12 @@ enum ionic_txq_desc_opcode { * IONIC_TXQ_DESC_OPCODE_TSO, @hdr_len is up to * inner-most L4 payload, so inclusive of * inner-most L4 header. + * @hword1: half word padding * @mss: Desired MSS value for TSO; only applicable for * IONIC_TXQ_DESC_OPCODE_TSO * @csum_start: Offset from packet to first byte checked in L4 checksum * @csum_offset: Offset from csum_start to L4 checksum field + * @hword2: half word padding */ struct ionic_txq_desc { __le64 cmd; @@ -901,6 +944,7 @@ static inline void decode_txq_desc_cmd(u64 cmd, u8 *opcode, u8 *flags, * struct ionic_txq_sg_elem - Transmit scatter-gather (SG) descriptor element * @addr: DMA address of SG element data buffer * @len: Length of SG element data buffer, in bytes + * @rsvd: reserved byte(s) */ struct ionic_txq_sg_elem { __le64 addr; @@ -927,7 +971,9 @@ struct ionic_txq_sg_desc_v1 { /** * struct ionic_txq_comp - Ethernet transmit queue completion descriptor * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @comp_index: Index in the descriptor ring for which this is the completion + * @rsvd2: reserved byte(s) * @color: Color bit */ struct ionic_txq_comp { @@ -953,6 +999,7 @@ enum ionic_rxq_desc_opcode { * receive, including actual bytes received, * are recorded in Rx completion descriptor. 
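As a rough, standalone illustration of the cmd-word packing that the encode_txq_desc_cmd()/decode_txq_desc_cmd() helpers referenced above perform, the following sketch packs an opcode, flags, an SG-element count and a buffer address into one 64-bit word and unpacks it again. The shifts and widths here are illustrative placeholders, not the device's real layout; ionic_if.h holds the authoritative definitions.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field layout for demonstration only. */
#define DEMO_OPCODE_SHIFT 0
#define DEMO_OPCODE_MASK  0xfull
#define DEMO_FLAGS_SHIFT  4
#define DEMO_FLAGS_MASK   0xfull
#define DEMO_NSGE_SHIFT   8
#define DEMO_NSGE_MASK    0xfull
#define DEMO_ADDR_SHIFT   12
#define DEMO_ADDR_MASK    ((1ull << 52) - 1)

static uint64_t demo_encode_cmd(uint8_t opcode, uint8_t flags,
                                uint8_t nsge, uint64_t addr)
{
        return ((uint64_t)(opcode & DEMO_OPCODE_MASK) << DEMO_OPCODE_SHIFT) |
               ((uint64_t)(flags & DEMO_FLAGS_MASK) << DEMO_FLAGS_SHIFT) |
               ((uint64_t)(nsge & DEMO_NSGE_MASK) << DEMO_NSGE_SHIFT) |
               ((addr & DEMO_ADDR_MASK) << DEMO_ADDR_SHIFT);
}

static void demo_decode_cmd(uint64_t cmd, uint8_t *opcode, uint8_t *flags,
                            uint8_t *nsge, uint64_t *addr)
{
        *opcode = (cmd >> DEMO_OPCODE_SHIFT) & DEMO_OPCODE_MASK;
        *flags = (cmd >> DEMO_FLAGS_SHIFT) & DEMO_FLAGS_MASK;
        *nsge = (cmd >> DEMO_NSGE_SHIFT) & DEMO_NSGE_MASK;
        *addr = (cmd >> DEMO_ADDR_SHIFT) & DEMO_ADDR_MASK;
}

int main(void)
{
        uint8_t op, fl, nsge;
        uint64_t addr;
        uint64_t cmd = demo_encode_cmd(3, 0x1, 2, 0x1234000);

        demo_decode_cmd(cmd, &op, &fl, &nsge, &addr);
        printf("cmd=%#llx opcode=%u flags=%#x nsge=%u addr=%#llx\n",
               (unsigned long long)cmd, op, fl, nsge,
               (unsigned long long)addr);
        return 0;
}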
* + * @rsvd: reserved byte(s) * @len: Data buffer's length, in bytes * @addr: Data buffer's DMA address */ @@ -967,6 +1014,7 @@ struct ionic_rxq_desc { * struct ionic_rxq_sg_elem - Receive scatter-gather (SG) descriptor element * @addr: DMA address of SG element data buffer * @len: Length of SG element data buffer, in bytes + * @rsvd: reserved byte(s) */ struct ionic_rxq_sg_elem { __le64 addr; @@ -1170,6 +1218,7 @@ enum ionic_pkt_class { * @lif_index: LIF index * @index: Queue index * @oper: Operation (enum ionic_q_control_oper) + * @rsvd: reserved byte(s) */ struct ionic_q_control_cmd { u8 opcode; @@ -1182,7 +1231,7 @@ struct ionic_q_control_cmd { typedef struct ionic_admin_comp ionic_q_control_comp; -enum q_control_oper { +enum ionic_q_control_oper { IONIC_Q_DISABLE = 0, IONIC_Q_ENABLE = 1, IONIC_Q_HANG_RESET = 2, @@ -1216,7 +1265,7 @@ enum ionic_xcvr_state { IONIC_XCVR_STATE_SPROM_READ_ERR = 4, }; -/** +/* * enum ionic_xcvr_pid - Supported link modes */ enum ionic_xcvr_pid { @@ -1351,6 +1400,7 @@ struct ionic_xcvr_status { * @fec_type: fec type (enum ionic_port_fec_type) * @pause_type: pause type (enum ionic_port_pause_type) * @loopback_mode: loopback mode (enum ionic_port_loopback_mode) + * @words: word access to struct contents */ union ionic_port_config { struct { @@ -1382,6 +1432,7 @@ union ionic_port_config { * @speed: link speed (in Mbps) * @link_down_count: number of times link went from up to down * @fec_type: fec type (enum ionic_port_fec_type) + * @rsvd: reserved byte(s) * @xcvr: transceiver status */ struct ionic_port_status { @@ -1399,6 +1450,7 @@ struct ionic_port_status { * @opcode: opcode * @index: port index * @ver: Highest version of identify supported by driver + * @rsvd: reserved byte(s) */ struct ionic_port_identify_cmd { u8 opcode; @@ -1411,6 +1463,7 @@ struct ionic_port_identify_cmd { * struct ionic_port_identify_comp - Port identify command completion * @status: Status of the command (enum ionic_status_code) * @ver: Version of identify returned by device + * @rsvd: reserved byte(s) */ struct ionic_port_identify_comp { u8 status; @@ -1422,7 +1475,9 @@ struct ionic_port_identify_comp { * struct ionic_port_init_cmd - Port initialization command * @opcode: opcode * @index: port index + * @rsvd: reserved byte(s) * @info_pa: destination address for port info (struct ionic_port_info) + * @rsvd2: reserved byte(s) */ struct ionic_port_init_cmd { u8 opcode; @@ -1435,6 +1490,7 @@ struct ionic_port_init_cmd { /** * struct ionic_port_init_comp - Port initialization command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) */ struct ionic_port_init_comp { u8 status; @@ -1445,6 +1501,7 @@ struct ionic_port_init_comp { * struct ionic_port_reset_cmd - Port reset command * @opcode: opcode * @index: port index + * @rsvd: reserved byte(s) */ struct ionic_port_reset_cmd { u8 opcode; @@ -1455,6 +1512,7 @@ struct ionic_port_reset_cmd { /** * struct ionic_port_reset_comp - Port reset command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) */ struct ionic_port_reset_comp { u8 status; @@ -1510,6 +1568,7 @@ enum ionic_port_attr { * @opcode: Opcode * @index: Port index * @attr: Attribute type (enum ionic_port_attr) + * @rsvd: reserved byte(s) * @state: Port state * @speed: Port speed * @mtu: Port MTU @@ -1518,6 +1577,7 @@ enum ionic_port_attr { * @pause_type: Port pause type setting * @loopback_mode: Port loopback mode * @stats_ctl: Port stats setting + * @rsvd2: reserved byte(s) */ struct 
ionic_port_setattr_cmd { u8 opcode; @@ -1540,6 +1600,7 @@ struct ionic_port_setattr_cmd { /** * struct ionic_port_setattr_comp - Port set attr command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @color: Color bit */ struct ionic_port_setattr_comp { @@ -1553,6 +1614,7 @@ struct ionic_port_setattr_comp { * @opcode: Opcode * @index: port index * @attr: Attribute type (enum ionic_port_attr) + * @rsvd: reserved byte(s) */ struct ionic_port_getattr_cmd { u8 opcode; @@ -1564,6 +1626,7 @@ struct ionic_port_getattr_cmd { /** * struct ionic_port_getattr_comp - Port get attr command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @state: Port state * @speed: Port speed * @mtu: Port MTU @@ -1571,6 +1634,7 @@ struct ionic_port_getattr_cmd { * @fec_type: Port FEC type setting * @pause_type: Port pause type setting * @loopback_mode: Port loopback mode + * @rsvd2: reserved byte(s) * @color: Color bit */ struct ionic_port_getattr_comp { @@ -1593,9 +1657,11 @@ struct ionic_port_getattr_comp { * struct ionic_lif_status - LIF status register * @eid: most recent NotifyQ event id * @port_num: port the LIF is connected to + * @rsvd: reserved byte(s) * @link_status: port status (enum ionic_port_oper_status) * @link_speed: speed of link in Mbps * @link_down_count: number of times link went from up to down + * @rsvd2: reserved byte(s) */ struct ionic_lif_status { __le64 eid; @@ -1610,7 +1676,9 @@ struct ionic_lif_status { /** * struct ionic_lif_reset_cmd - LIF reset command * @opcode: opcode + * @rsvd: reserved byte(s) * @index: LIF index + * @rsvd2: reserved byte(s) */ struct ionic_lif_reset_cmd { u8 opcode; @@ -1643,9 +1711,11 @@ enum ionic_dev_attr { * struct ionic_dev_setattr_cmd - Set Device attributes on the NIC * @opcode: Opcode * @attr: Attribute type (enum ionic_dev_attr) + * @rsvd: reserved byte(s) * @state: Device state (enum ionic_dev_state) * @name: The bus info, e.g. 
PCI slot-device-function, 0 terminated * @features: Device features + * @rsvd2: reserved byte(s) */ struct ionic_dev_setattr_cmd { u8 opcode; @@ -1662,7 +1732,9 @@ struct ionic_dev_setattr_cmd { /** * struct ionic_dev_setattr_comp - Device set attr command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @features: Device features + * @rsvd2: reserved byte(s) * @color: Color bit */ struct ionic_dev_setattr_comp { @@ -1679,6 +1751,7 @@ struct ionic_dev_setattr_comp { * struct ionic_dev_getattr_cmd - Get Device attributes from the NIC * @opcode: opcode * @attr: Attribute type (enum ionic_dev_attr) + * @rsvd: reserved byte(s) */ struct ionic_dev_getattr_cmd { u8 opcode; @@ -1687,9 +1760,11 @@ struct ionic_dev_getattr_cmd { }; /** - * struct ionic_dev_setattr_comp - Device set attr command completion + * struct ionic_dev_getattr_comp - Device set attr command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @features: Device features + * @rsvd2: reserved byte(s) * @color: Color bit */ struct ionic_dev_getattr_comp { @@ -1702,7 +1777,7 @@ struct ionic_dev_getattr_comp { u8 color; }; -/** +/* * RSS parameters */ #define IONIC_RSS_HASH_KEY_SIZE 40 @@ -1726,6 +1801,7 @@ enum ionic_rss_hash_types { * @IONIC_LIF_ATTR_RSS: LIF RSS attribute * @IONIC_LIF_ATTR_STATS_CTRL: LIF statistics control attribute * @IONIC_LIF_ATTR_TXSTAMP: LIF TX timestamping mode + * @IONIC_LIF_ATTR_MAX: maximum attribute value */ enum ionic_lif_attr { IONIC_LIF_ATTR_STATE = 0, @@ -1736,6 +1812,7 @@ enum ionic_lif_attr { IONIC_LIF_ATTR_RSS = 5, IONIC_LIF_ATTR_STATS_CTRL = 6, IONIC_LIF_ATTR_TXSTAMP = 7, + IONIC_LIF_ATTR_MAX = 255, }; /** @@ -1749,11 +1826,13 @@ enum ionic_lif_attr { * @mac: Station mac * @features: Features (enum ionic_eth_hw_features) * @rss: RSS properties - * @types: The hash types to enable (see rss_hash_types) - * @key: The hash secret key - * @addr: Address for the indirection table shared memory + * @rss.types: The hash types to enable (see rss_hash_types) + * @rss.key: The hash secret key + * @rss.rsvd: reserved byte(s) + * @rss.addr: Address for the indirection table shared memory * @stats_ctl: stats control commands (enum ionic_stats_ctl_cmd) - * @txstamp: TX Timestamping Mode (enum ionic_txstamp_mode) + * @txstamp_mode: TX Timestamping Mode (enum ionic_txstamp_mode) + * @rsvd: reserved byte(s) */ struct ionic_lif_setattr_cmd { u8 opcode; @@ -1772,7 +1851,7 @@ struct ionic_lif_setattr_cmd { __le64 addr; } rss; u8 stats_ctl; - __le16 txstamp_mode; + __le16 txstamp_mode; u8 rsvd[60]; } __packed; }; @@ -1780,8 +1859,10 @@ struct ionic_lif_setattr_cmd { /** * struct ionic_lif_setattr_comp - LIF set attr command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @comp_index: Index in the descriptor ring for which this is the completion * @features: features (enum ionic_eth_hw_features) + * @rsvd2: reserved byte(s) * @color: Color bit */ struct ionic_lif_setattr_comp { @@ -1800,6 +1881,7 @@ struct ionic_lif_setattr_comp { * @opcode: Opcode * @attr: Attribute type (enum ionic_lif_attr) * @index: LIF index + * @rsvd: reserved byte(s) */ struct ionic_lif_getattr_cmd { u8 opcode; @@ -1811,13 +1893,14 @@ struct ionic_lif_getattr_cmd { /** * struct ionic_lif_getattr_comp - LIF get attr command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @comp_index: Index in the descriptor ring for which this is the completion * 
@state: LIF state (enum ionic_lif_state) - * @name: The netdev name string, 0 terminated * @mtu: Mtu * @mac: Station mac * @features: Features (enum ionic_eth_hw_features) - * @txstamp: TX Timestamping Mode (enum ionic_txstamp_mode) + * @txstamp_mode: TX Timestamping Mode (enum ionic_txstamp_mode) + * @rsvd2: reserved byte(s) * @color: Color bit */ struct ionic_lif_getattr_comp { @@ -1838,12 +1921,15 @@ struct ionic_lif_getattr_comp { /** * struct ionic_lif_setphc_cmd - Set LIF PTP Hardware Clock * @opcode: Opcode + * @rsvd1: reserved byte(s) * @lif_index: LIF index + * @rsvd2: reserved byte(s) * @tick: Hardware stamp tick of an instant in time. * @nsec: Nanosecond stamp of the same instant. * @frac: Fractional nanoseconds at the same instant. * @mult: Cycle to nanosecond multiplier. * @shift: Cycle to nanosecond divisor (power of two). + * @rsvd3: reserved byte(s) */ struct ionic_lif_setphc_cmd { u8 opcode; @@ -1870,6 +1956,7 @@ enum ionic_rx_mode { /** * struct ionic_rx_mode_set_cmd - Set LIF's Rx mode command * @opcode: opcode + * @rsvd: reserved byte(s) * @lif_index: LIF index * @rx_mode: Rx mode flags: * IONIC_RX_MODE_F_UNICAST: Accept known unicast packets @@ -1878,6 +1965,7 @@ enum ionic_rx_mode { * IONIC_RX_MODE_F_PROMISC: Accept any packets * IONIC_RX_MODE_F_ALLMULTI: Accept any multicast packets * IONIC_RX_MODE_F_RDMA_SNIFFER: Sniff RDMA packets + * @rsvd2: reserved byte(s) */ struct ionic_rx_mode_set_cmd { u8 opcode; @@ -1904,13 +1992,14 @@ enum ionic_rx_filter_match_type { * @qid: Queue ID * @match: Rx filter match type (see IONIC_RX_FILTER_MATCH_xxx) * @vlan: VLAN filter - * @vlan: VLAN ID + * @vlan.vlan: VLAN ID * @mac: MAC filter - * @addr: MAC address (network-byte order) + * @mac.addr: MAC address (network-byte order) * @mac_vlan: MACVLAN filter - * @vlan: VLAN ID - * @addr: MAC address (network-byte order) + * @mac_vlan.vlan: VLAN ID + * @mac_vlan.addr: MAC address (network-byte order) * @pkt_class: Packet classification filter + * @rsvd: reserved byte(s) */ struct ionic_rx_filter_add_cmd { u8 opcode; @@ -1937,8 +2026,10 @@ struct ionic_rx_filter_add_cmd { /** * struct ionic_rx_filter_add_comp - Add LIF Rx filter command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @comp_index: Index in the descriptor ring for which this is the completion * @filter_id: Filter ID + * @rsvd2: reserved byte(s) * @color: Color bit */ struct ionic_rx_filter_add_comp { @@ -1953,8 +2044,10 @@ struct ionic_rx_filter_add_comp { /** * struct ionic_rx_filter_del_cmd - Delete LIF Rx filter command * @opcode: opcode + * @rsvd: reserved byte(s) * @lif_index: LIF index * @filter_id: Filter ID + * @rsvd2: reserved byte(s) */ struct ionic_rx_filter_del_cmd { u8 opcode; @@ -2000,6 +2093,7 @@ enum ionic_vf_link_status { * @trust: enable VF trust * @linkstate: set link up or down * @stats_pa: set DMA address for VF stats + * @pad: reserved byte(s) */ struct ionic_vf_setattr_cmd { u8 opcode; @@ -2031,6 +2125,7 @@ struct ionic_vf_setattr_comp { * @opcode: Opcode * @attr: Attribute type (enum ionic_vf_attr) * @vf_index: VF index + * @rsvd: reserved byte(s) */ struct ionic_vf_getattr_cmd { u8 opcode; @@ -2064,8 +2159,8 @@ enum ionic_vf_ctrl_opcode { /** * struct ionic_vf_ctrl_cmd - VF control command * @opcode: Opcode for the command - * @vf_index: VF Index. It is unused if op START_ALL is used. * @ctrl_opcode: VF control operation type + * @vf_index: VF Index. It is unused if op START_ALL is used. 
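Most of the ionic_if.h hunks above only add kernel-doc entries for explicit @rsvd padding. A minimal sketch of why that padding is kept visible: named reserved byte arrays hold a wire-format command at a fixed size, and a compile-time assert catches any accidental growth. The structure and the 64-byte figure below are illustrative assumptions, not an actual ionic command.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: a device command that must stay exactly 64 bytes on
 * the wire.  Bytes the device skips are kept as named reserved arrays,
 * which is the same reason the hunks above document every @rsvd pad.
 */
struct demo_dev_cmd {
        uint8_t  opcode;
        uint8_t  rsvd[3];
        uint32_t lif_index;
        uint64_t info_pa;
        uint8_t  rsvd2[48];     /* pad the command out to 64 bytes */
};

_Static_assert(sizeof(struct demo_dev_cmd) == 64,
               "demo_dev_cmd must stay 64 bytes");

int main(void)
{
        printf("demo_dev_cmd is %zu bytes\n", sizeof(struct demo_dev_cmd));
        return 0;
}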
*/ struct ionic_vf_ctrl_cmd { u8 opcode; @@ -2089,7 +2184,7 @@ struct ionic_vf_ctrl_comp { * struct ionic_qos_identify_cmd - QoS identify command * @opcode: opcode * @ver: Highest version of identify supported by driver - * + * @rsvd: reserved byte(s) */ struct ionic_qos_identify_cmd { u8 opcode; @@ -2101,6 +2196,7 @@ struct ionic_qos_identify_cmd { * struct ionic_qos_identify_comp - QoS identify command completion * @status: Status of the command (enum ionic_status_code) * @ver: Version of identify returned by device + * @rsvd: reserved byte(s) */ struct ionic_qos_identify_comp { u8 status; @@ -2118,7 +2214,7 @@ struct ionic_qos_identify_comp { #define IONIC_QOS_ALL_PCP 0xFF #define IONIC_DSCP_BLOCK_SIZE 8 -/** +/* * enum ionic_qos_class */ enum ionic_qos_class { @@ -2174,6 +2270,7 @@ enum ionic_qos_sched_type { * @dot1q_pcp: Dot1q pcp value * @ndscp: Number of valid dscp values in the ip_dscp field * @ip_dscp: IP dscp values + * @words: word access to struct contents */ union ionic_qos_config { struct { @@ -2219,8 +2316,9 @@ union ionic_qos_config { * union ionic_qos_identity - QoS identity structure * @version: Version of the identify structure * @type: QoS system type - * @nclasses: Number of usable QoS classes + * @rsvd: reserved byte(s) * @config: Current configuration of classes + * @words: word access to struct contents */ union ionic_qos_identity { struct { @@ -2236,7 +2334,9 @@ union ionic_qos_identity { * struct ionic_qos_init_cmd - QoS config init command * @opcode: Opcode * @group: QoS class id + * @rsvd: reserved byte(s) * @info_pa: destination address for qos info + * @rsvd1: reserved byte(s) */ struct ionic_qos_init_cmd { u8 opcode; @@ -2252,6 +2352,7 @@ typedef struct ionic_admin_comp ionic_qos_init_comp; * struct ionic_qos_reset_cmd - QoS config reset command * @opcode: Opcode * @group: QoS class id + * @rsvd: reserved byte(s) */ struct ionic_qos_reset_cmd { u8 opcode; @@ -2260,8 +2361,10 @@ struct ionic_qos_reset_cmd { }; /** - * struct ionic_qos_clear_port_stats_cmd - Qos config reset command + * struct ionic_qos_clear_stats_cmd - Qos config reset command * @opcode: Opcode + * @group_bitmap: bitmap of groups to be cleared + * @rsvd: reserved byte(s) */ struct ionic_qos_clear_stats_cmd { u8 opcode; @@ -2274,6 +2377,7 @@ typedef struct ionic_admin_comp ionic_qos_reset_comp; /** * struct ionic_fw_download_cmd - Firmware download command * @opcode: opcode + * @rsvd: reserved byte(s) * @addr: dma address of the firmware buffer * @offset: offset of the firmware buffer within the full image * @length: number of valid bytes in the firmware buffer @@ -2297,6 +2401,7 @@ typedef struct ionic_admin_comp ionic_fw_download_comp; * @IONIC_FW_INSTALL_STATUS: Firmware installation status * @IONIC_FW_ACTIVATE_ASYNC: Activate firmware asynchronously * @IONIC_FW_ACTIVATE_STATUS: Firmware activate status + * @IONIC_FW_UPDATE_CLEANUP: Clean up after an interrupted fw update */ enum ionic_fw_control_oper { IONIC_FW_RESET = 0, @@ -2312,8 +2417,10 @@ enum ionic_fw_control_oper { /** * struct ionic_fw_control_cmd - Firmware control command * @opcode: opcode + * @rsvd: reserved byte(s) * @oper: firmware control operation (enum ionic_fw_control_oper) * @slot: slot to activate + * @rsvd1: reserved byte(s) */ struct ionic_fw_control_cmd { u8 opcode; @@ -2326,8 +2433,10 @@ struct ionic_fw_control_cmd { /** * struct ionic_fw_control_comp - Firmware control copletion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @comp_index: Index in the descriptor ring for 
which this is the completion * @slot: Slot where the firmware was installed + * @rsvd1: reserved byte(s) * @color: Color bit */ struct ionic_fw_control_comp { @@ -2346,7 +2455,9 @@ struct ionic_fw_control_comp { /** * struct ionic_rdma_reset_cmd - Reset RDMA LIF cmd * @opcode: opcode + * @rsvd: reserved byte(s) * @lif_index: LIF index + * @rsvd2: reserved byte(s) * * There is no RDMA specific dev command completion struct. Completion uses * the common struct ionic_admin_comp. Only the status is indicated. @@ -2362,6 +2473,7 @@ struct ionic_rdma_reset_cmd { /** * struct ionic_rdma_queue_cmd - Create RDMA Queue command * @opcode: opcode, 52, 53 + * @rsvd: reserved byte(s) * @lif_index: LIF index * @qid_ver: (qid | (RDMA version << 24)) * @cid: intr, eq_id, or cq_id @@ -2369,6 +2481,7 @@ struct ionic_rdma_reset_cmd { * @depth_log2: log base two of queue depth * @stride_log2: log base two of queue stride * @dma_addr: address of the queue memory + * @rsvd2: reserved byte(s) * * The same command struct is used to create an RDMA event queue, completion * queue, or RDMA admin queue. The cid is an interrupt number for an event @@ -2425,6 +2538,7 @@ struct ionic_notifyq_event { * @ecode: event code = IONIC_EVENT_LINK_CHANGE * @link_status: link up/down, with error bits (enum ionic_port_status) * @link_speed: speed of the network link + * @rsvd: reserved byte(s) * * Sent when the network link state changes between UP and DOWN */ @@ -2442,6 +2556,7 @@ struct ionic_link_change_event { * @ecode: event code = IONIC_EVENT_RESET * @reset_code: reset type * @state: 0=pending, 1=complete, 2=error + * @rsvd: reserved byte(s) * * Sent when the NIC or some subsystem is going to be or * has been reset. @@ -2458,6 +2573,7 @@ struct ionic_reset_event { * struct ionic_heartbeat_event - Sent periodically by NIC to indicate health * @eid: event number * @ecode: event code = IONIC_EVENT_HEARTBEAT + * @rsvd: reserved byte(s) */ struct ionic_heartbeat_event { __le64 eid; @@ -2481,6 +2597,7 @@ struct ionic_log_event { * struct ionic_xcvr_event - Transceiver change event * @eid: event number * @ecode: event code = IONIC_EVENT_XCVR + * @rsvd: reserved byte(s) */ struct ionic_xcvr_event { __le64 eid; @@ -2488,7 +2605,7 @@ struct ionic_xcvr_event { u8 rsvd[54]; }; -/** +/* * struct ionic_port_stats - Port statistics structure */ struct ionic_port_stats { @@ -2646,8 +2763,7 @@ enum ionic_oflow_drop_stats { IONIC_OFLOW_DROP_MAX, }; -/** - * struct port_pb_stats - packet buffers system stats +/* struct ionic_port_pb_stats - packet buffers system stats * uses ionic_pb_buffer_drop_stats for drop_counts[] */ struct ionic_port_pb_stats { @@ -2681,7 +2797,9 @@ struct ionic_port_pb_stats { * @pause_type: supported pause types * @loopback_mode: supported loopback mode * @speeds: supported speeds + * @rsvd2: reserved byte(s) * @config: current port configuration + * @words: word access to struct contents */ union ionic_port_identity { struct { @@ -2707,7 +2825,8 @@ union ionic_port_identity { * @status: Port status data * @stats: Port statistics data * @mgmt_stats: Port management statistics data - * @port_pb_drop_stats: uplink pb drop stats + * @rsvd: reserved byte(s) + * @pb_stats: uplink pb drop stats */ struct ionic_port_info { union ionic_port_config config; @@ -2721,7 +2840,7 @@ struct ionic_port_info { struct ionic_port_pb_stats pb_stats; }; -/** +/* * struct ionic_lif_stats - LIF statistics structure */ struct ionic_lif_stats { @@ -2983,8 +3102,10 @@ struct ionic_hwstamp_regs { * bit 4-7 - 4 bit generation number, changes on fw 
restart * @fw_heartbeat: Firmware heartbeat counter * @serial_num: Serial number + * @rsvd_pad1024: reserved byte(s) * @fw_version: Firmware version - * @hwstamp_regs: Hardware current timestamp registers + * @hwstamp: Hardware current timestamp registers + * @words: word access to struct contents */ union ionic_dev_info_regs { #define IONIC_DEVINFO_FWVERS_BUFLEN 32 @@ -3014,7 +3135,9 @@ union ionic_dev_info_regs { * @done: Done indicator, bit 0 == 1 when command is complete * @cmd: Opcode-specific command bytes * @comp: Opcode-specific response bytes + * @rsvd: reserved byte(s) * @data: Opcode-specific side-data + * @words: word access to struct contents */ union ionic_dev_cmd_regs { struct { @@ -3032,6 +3155,7 @@ union ionic_dev_cmd_regs { * union ionic_dev_regs - Device register format for bar 0 page 0 * @info: Device info registers * @devcmd: Device command registers + * @words: word access to struct contents */ union ionic_dev_regs { struct { @@ -3098,6 +3222,7 @@ union ionic_adminq_comp { * interrupts when armed. * @qid_lo: Queue destination for the producer index and flags (low bits) * @qid_hi: Queue destination for the producer index and flags (high bits) + * @rsvd2: reserved byte(s) */ struct ionic_doorbell { __le16 p_index; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 1837a30ba08a..aa0cc31dfe6e 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -126,13 +126,13 @@ static void ionic_lif_deferred_work(struct work_struct *work) } while (true); } -void ionic_lif_deferred_enqueue(struct ionic_deferred *def, +void ionic_lif_deferred_enqueue(struct ionic_lif *lif, struct ionic_deferred_work *work) { - spin_lock_bh(&def->lock); - list_add_tail(&work->list, &def->list); - spin_unlock_bh(&def->lock); - schedule_work(&def->work); + spin_lock_bh(&lif->deferred.lock); + list_add_tail(&work->list, &lif->deferred.list); + spin_unlock_bh(&lif->deferred.lock); + queue_work(lif->ionic->wq, &lif->deferred.work); } static void ionic_link_status_check(struct ionic_lif *lif) @@ -207,19 +207,12 @@ void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep) } work->type = IONIC_DW_TYPE_LINK_STATUS; - ionic_lif_deferred_enqueue(&lif->deferred, work); + ionic_lif_deferred_enqueue(lif, work); } else { ionic_link_status_check(lif); } } -static void ionic_napi_deadline(struct timer_list *timer) -{ - struct ionic_qcq *qcq = container_of(timer, struct ionic_qcq, napi_deadline); - - napi_schedule(&qcq->napi); -} - static irqreturn_t ionic_isr(int irq, void *data) { struct napi_struct *napi = data; @@ -237,12 +230,12 @@ static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq) const char *name; if (lif->registered) - name = lif->netdev->name; + name = netdev_name(lif->netdev); else name = dev_name(dev); snprintf(intr->name, sizeof(intr->name), - "%s-%s-%s", IONIC_DRV_NAME, name, q->name); + "%.5s-%.16s-%.8s", IONIC_DRV_NAME, name, q->name); return devm_request_irq(dev, intr->vector, ionic_isr, 0, intr->name, &qcq->napi); @@ -272,6 +265,18 @@ static void ionic_intr_free(struct ionic *ionic, int index) clear_bit(index, ionic->intrs); } +static void ionic_irq_aff_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) +{ + struct ionic_intr_info *intr = container_of(notify, struct ionic_intr_info, aff_notify); + + cpumask_copy(*intr->affinity_mask, mask); +} + +static void ionic_irq_aff_release(struct kref __always_unused *ref) +{ +} + 
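The ionic_irq_aff_notify()/ionic_irq_aff_release() helpers just added keep a persistent per-interrupt affinity mask in sync with user changes, and ionic_get_preferred_cpu() earlier in this patch picks the first online CPU from that mask, falling back to a NUMA-local choice. Below is a small userspace model of that selection logic, with a 64-bit word standing in for a cpumask; the names and the 64-CPU limit are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define DEMO_NR_CPUS 64

/* First CPU set in both masks, or DEMO_NR_CPUS if none (like cpu >= nr_cpu_ids). */
static int demo_first_and(uint64_t affinity, uint64_t online)
{
        uint64_t both = affinity & online;
        int cpu;

        for (cpu = 0; cpu < DEMO_NR_CPUS; cpu++)
                if (both & (1ull << cpu))
                        return cpu;

        return DEMO_NR_CPUS;
}

static int demo_preferred_cpu(uint64_t affinity, uint64_t online,
                              int numa_local_cpu)
{
        int cpu = demo_first_and(affinity, online);

        if (cpu >= DEMO_NR_CPUS)
                cpu = numa_local_cpu;   /* cpumask_local_spread() fallback */

        return cpu;
}

int main(void)
{
        uint64_t online = 0xfull;               /* CPUs 0-3 online */
        uint64_t affinity = 1ull << 6;          /* saved mask points at CPU 6 */

        /* CPU 6 is offline, so the fallback CPU (2) is chosen instead */
        printf("preferred cpu = %d\n", demo_preferred_cpu(affinity, online, 2));

        affinity = 1ull << 1;                   /* now the mask hits an online CPU */
        printf("preferred cpu = %d\n", demo_preferred_cpu(affinity, online, 2));
        return 0;
}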
static int ionic_qcq_enable(struct ionic_qcq *qcq) { struct ionic_queue *q = &qcq->q; @@ -306,8 +311,10 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq) if (qcq->flags & IONIC_QCQ_F_INTR) { napi_enable(&qcq->napi); + irq_set_affinity_notifier(qcq->intr.vector, + &qcq->intr.aff_notify); irq_set_affinity_hint(qcq->intr.vector, - &qcq->intr.affinity_mask); + *qcq->intr.affinity_mask); ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, IONIC_INTR_MASK_CLEAR); } @@ -337,13 +344,15 @@ static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int f if (qcq->flags & IONIC_QCQ_F_INTR) { struct ionic_dev *idev = &lif->ionic->idev; + if (lif->doorbell_wa) + cancel_work_sync(&qcq->doorbell_napi_work); cancel_work_sync(&qcq->dim.work); ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, IONIC_INTR_MASK_SET); synchronize_irq(qcq->intr.vector); + irq_set_affinity_notifier(qcq->intr.vector, NULL); irq_set_affinity_hint(qcq->intr.vector, NULL); napi_disable(&qcq->napi); - del_timer_sync(&qcq->napi_deadline); } /* If there was a previous fw communcation error, don't bother with @@ -478,11 +487,11 @@ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq, { n_qcq->intr.vector = src_qcq->intr.vector; n_qcq->intr.index = src_qcq->intr.index; - n_qcq->napi_qcq = src_qcq->napi_qcq; } static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq) { + cpumask_var_t *affinity_mask; int err; if (!(qcq->flags & IONIC_QCQ_F_INTR)) { @@ -514,10 +523,19 @@ static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qc } /* try to get the irq on the local numa node first */ - qcq->intr.cpu = cpumask_local_spread(qcq->intr.index, - dev_to_node(lif->ionic->dev)); - if (qcq->intr.cpu != -1) - cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask); + affinity_mask = &lif->ionic->affinity_masks[qcq->intr.index]; + if (cpumask_empty(*affinity_mask)) { + unsigned int cpu; + + cpu = cpumask_local_spread(qcq->intr.index, + dev_to_node(lif->ionic->dev)); + if (cpu != -1) + cpumask_set_cpu(cpu, *affinity_mask); + } + + qcq->intr.affinity_mask = affinity_mask; + qcq->intr.aff_notify.notify = ionic_irq_aff_notify; + qcq->intr.aff_notify.release = ionic_irq_aff_release; netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index); return 0; @@ -674,6 +692,8 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, INIT_WORK(&new->dim.work, ionic_dim_work); new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE; + if (lif->doorbell_wa) + INIT_WORK(&new->doorbell_napi_work, ionic_doorbell_napi_work); *qcq = new; @@ -832,11 +852,8 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE; q->dbell_jiffies = jiffies; - if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) { + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi); - qcq->napi_qcq = qcq; - timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0); - } qcq->flags |= IONIC_QCQ_F_INITED; @@ -909,9 +926,6 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) else netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi); - qcq->napi_qcq = qcq; - timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0); - qcq->flags |= IONIC_QCQ_F_INITED; return 0; @@ -1166,7 +1180,6 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget) struct ionic_dev *idev = &lif->ionic->idev; unsigned long irqflags; unsigned int flags = 0; - bool resched = 
false; int rx_work = 0; int tx_work = 0; int n_work = 0; @@ -1182,6 +1195,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget) if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED) a_work = ionic_cq_service(&lif->adminqcq->cq, budget, ionic_adminq_service, NULL, NULL); + spin_unlock_irqrestore(&lif->adminq_lock, irqflags); if (lif->hwstamp_rxq) @@ -1203,15 +1217,14 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget) ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags); } - if (!a_work && ionic_adminq_poke_doorbell(&lif->adminqcq->q)) - resched = true; - if (lif->hwstamp_rxq && !rx_work && ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q)) - resched = true; - if (lif->hwstamp_txq && !tx_work && ionic_txq_poke_doorbell(&lif->hwstamp_txq->q)) - resched = true; - if (resched) - mod_timer(&lif->adminqcq->napi_deadline, - jiffies + IONIC_NAPI_DEADLINE); + if (lif->doorbell_wa) { + if (!a_work) + ionic_adminq_poke_doorbell(&lif->adminqcq->q); + if (lif->hwstamp_rxq && !rx_work) + ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q); + if (lif->hwstamp_txq && !tx_work) + ionic_txq_poke_doorbell(&lif->hwstamp_txq->q); + } return work_done; } @@ -1383,7 +1396,7 @@ static void ionic_ndo_set_rx_mode(struct net_device *netdev) } work->type = IONIC_DW_TYPE_RX_MODE; netdev_dbg(lif->netdev, "deferred: rx_mode\n"); - ionic_lif_deferred_enqueue(&lif->deferred, work); + ionic_lif_deferred_enqueue(lif, work); } static __le64 ionic_netdev_features_to_nic(netdev_features_t features) @@ -3139,6 +3152,44 @@ err_out: return err; } +static int ionic_affinity_masks_alloc(struct ionic *ionic) +{ + cpumask_var_t *affinity_masks; + int nintrs = ionic->nintrs; + int i; + + affinity_masks = kcalloc(nintrs, sizeof(cpumask_var_t), GFP_KERNEL); + if (!affinity_masks) + return -ENOMEM; + + for (i = 0; i < nintrs; i++) { + if (!zalloc_cpumask_var_node(&affinity_masks[i], GFP_KERNEL, + dev_to_node(ionic->dev))) + goto err_out; + } + + ionic->affinity_masks = affinity_masks; + + return 0; + +err_out: + for (--i; i >= 0; i--) + free_cpumask_var(affinity_masks[i]); + kfree(affinity_masks); + + return -ENOMEM; +} + +static void ionic_affinity_masks_free(struct ionic *ionic) +{ + int i; + + for (i = 0; i < ionic->nintrs; i++) + free_cpumask_var(ionic->affinity_masks[i]); + kfree(ionic->affinity_masks); + ionic->affinity_masks = NULL; +} + int ionic_lif_alloc(struct ionic *ionic) { struct device *dev = ionic->dev; @@ -3230,11 +3281,15 @@ int ionic_lif_alloc(struct ionic *ionic) ionic_debugfs_add_lif(lif); + err = ionic_affinity_masks_alloc(ionic); + if (err) + goto err_out_free_lif_info; + /* allocate control queues and txrx queue arrays */ ionic_lif_queue_identify(lif); err = ionic_qcqs_alloc(lif); if (err) - goto err_out_free_lif_info; + goto err_out_free_affinity_masks; /* allocate rss indirection table */ tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); @@ -3256,6 +3311,8 @@ int ionic_lif_alloc(struct ionic *ionic) err_out_free_qcqs: ionic_qcqs_free(lif); +err_out_free_affinity_masks: + ionic_affinity_masks_free(lif->ionic); err_out_free_lif_info: dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); lif->info = NULL; @@ -3356,6 +3413,7 @@ int ionic_restart_lif(struct ionic_lif *lif) clear_bit(IONIC_LIF_F_FW_RESET, lif->state); ionic_link_status_check_request(lif, CAN_SLEEP); netif_device_attach(lif->netdev); + ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE); return 0; @@ -3386,6 +3444,7 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif) * 
just need to reanimate it. */ ionic_init_devinfo(ionic); + ionic_reset(ionic); err = ionic_identify(ionic); if (err) goto err_out; @@ -3428,6 +3487,8 @@ void ionic_lif_free(struct ionic_lif *lif) if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) ionic_lif_reset(lif); + ionic_affinity_masks_free(lif->ionic); + /* free lif info */ kfree(lif->identity); dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); @@ -3501,14 +3562,11 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif) netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi); - qcq->napi_qcq = qcq; - timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0); - napi_enable(&qcq->napi); if (qcq->flags & IONIC_QCQ_F_INTR) { irq_set_affinity_hint(qcq->intr.vector, - &qcq->intr.affinity_mask); + *qcq->intr.affinity_mask); ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, IONIC_INTR_MASK_CLEAR); } @@ -3695,6 +3753,7 @@ int ionic_lif_init(struct ionic_lif *lif) goto err_out_notifyq_deinit; lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT; + lif->doorbell_wa = ionic_doorbell_wa(lif->ionic); set_bit(IONIC_LIF_F_INITED, lif->state); @@ -3729,7 +3788,7 @@ static void ionic_lif_set_netdev_info(struct ionic_lif *lif) }, }; - strscpy(ctx.cmd.lif_setattr.name, lif->netdev->name, + strscpy(ctx.cmd.lif_setattr.name, netdev_name(lif->netdev), sizeof(ctx.cmd.lif_setattr.name)); ionic_adminq_post_wait(lif, &ctx); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index 08f4266fe2aa..3e1005293c4a 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -84,12 +84,11 @@ struct ionic_qcq { u32 cmb_pgid; u32 cmb_order; struct dim dim; - struct timer_list napi_deadline; struct ionic_queue q; struct ionic_cq cq; struct napi_struct napi; - struct ionic_qcq *napi_qcq; struct ionic_intr_info intr; + struct work_struct doorbell_napi_work; struct dentry *dentry; }; @@ -207,11 +206,12 @@ struct ionic_lif { unsigned int nxqs; unsigned int ntxq_descs; unsigned int nrxq_descs; - u32 rx_copybreak; u64 rxq_features; - u16 rx_mode; u64 hw_features; + u16 rx_copybreak; + u16 rx_mode; bool registered; + bool doorbell_wa; u16 lif_type; unsigned int link_down_count; unsigned int nmcast; @@ -226,11 +226,11 @@ struct ionic_lif { u32 info_sz; struct ionic_qtype_info qtype_info[IONIC_QTYPE_MAX]; - u16 rss_types; u8 rss_hash_key[IONIC_RSS_HASH_KEY_SIZE]; u8 *rss_ind_tbl; dma_addr_t rss_ind_tbl_pa; u32 rss_ind_tbl_sz; + u16 rss_types; struct ionic_rx_filters rx_filters; u32 rx_coalesce_usecs; /* what the user asked for */ @@ -333,7 +333,7 @@ static inline bool ionic_txq_hwstamp_enabled(struct ionic_queue *q) void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep); void ionic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *ns); -void ionic_lif_deferred_enqueue(struct ionic_deferred *def, +void ionic_lif_deferred_enqueue(struct ionic_lif *lif, struct ionic_deferred_work *work); int ionic_lif_alloc(struct ionic *ionic); int ionic_lif_init(struct ionic_lif *lif); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index c1259324b0be..0f817c3f92d8 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -287,7 +287,7 @@ bool ionic_notifyq_service(struct ionic_cq *cq) clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state); } else { work->type = IONIC_DW_TYPE_LIF_RESET; - 
ionic_lif_deferred_enqueue(&lif->deferred, work); + ionic_lif_deferred_enqueue(lif, work); } } break; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 9fdd7cd3ef19..fc79baad4561 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -518,7 +518,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, XDP_PACKET_HEADROOM, frag_len, false); dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info), - XDP_PACKET_HEADROOM, len, + XDP_PACKET_HEADROOM, frag_len, DMA_FROM_DEVICE); prefetchw(&xdp_buf.data_hard_start); @@ -596,7 +596,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, buf_info->page_offset, true); __netif_tx_unlock(nq); - if (err) { + if (unlikely(err)) { netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err); goto out_xdp_abort; } @@ -608,7 +608,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, case XDP_REDIRECT: err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog); - if (err) { + if (unlikely(err)) { netdev_dbg(netdev, "xdp_do_redirect err %d\n", err); goto out_xdp_abort; } @@ -878,9 +878,6 @@ void ionic_rx_fill(struct ionic_queue *q) q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE; q->dbell_jiffies = jiffies; - - mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline, - jiffies + IONIC_NAPI_DEADLINE); } void ionic_rx_empty(struct ionic_queue *q) @@ -963,8 +960,8 @@ int ionic_tx_napi(struct napi_struct *napi, int budget) work_done, flags); } - if (!work_done && ionic_txq_poke_doorbell(&qcq->q)) - mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE); + if (!work_done && cq->bound_q->lif->doorbell_wa) + ionic_txq_poke_doorbell(&qcq->q); return work_done; } @@ -1006,8 +1003,8 @@ int ionic_rx_napi(struct napi_struct *napi, int budget) work_done, flags); } - if (!work_done && ionic_rxq_poke_doorbell(&qcq->q)) - mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE); + if (!work_done && cq->bound_q->lif->doorbell_wa) + ionic_rxq_poke_doorbell(&qcq->q); return work_done; } @@ -1020,7 +1017,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) struct ionic_qcq *txqcq; struct ionic_lif *lif; struct ionic_cq *txcq; - bool resched = false; u32 rx_work_done = 0; u32 tx_work_done = 0; u32 flags = 0; @@ -1052,12 +1048,12 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) tx_work_done + rx_work_done, flags); } - if (!rx_work_done && ionic_rxq_poke_doorbell(&rxqcq->q)) - resched = true; - if (!tx_work_done && ionic_txq_poke_doorbell(&txqcq->q)) - resched = true; - if (resched) - mod_timer(&rxqcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE); + if (lif->doorbell_wa) { + if (!rx_work_done) + ionic_rxq_poke_doorbell(&rxqcq->q); + if (!tx_work_done) + ionic_txq_poke_doorbell(&txqcq->q); + } return rx_work_done; } @@ -1069,7 +1065,7 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, dma_addr_t dma_addr; dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma_addr)) { + if (unlikely(dma_mapping_error(dev, dma_addr))) { net_warn_ratelimited("%s: DMA single map failed on %s!\n", dev_name(dev), q->name); q_to_tx_stats(q)->dma_map_err++; @@ -1086,7 +1082,7 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, dma_addr_t dma_addr; dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma_addr)) { + if (unlikely(dma_mapping_error(dev, dma_addr))) { net_warn_ratelimited("%s: DMA frag map failed on %s!\n", 
dev_name(dev), q->name); q_to_tx_stats(q)->dma_map_err++; @@ -1332,7 +1328,7 @@ static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb) int err; err = skb_cow_head(skb, 0); - if (err) + if (unlikely(err)) return err; if (skb->protocol == cpu_to_be16(ETH_P_IP)) { @@ -1356,7 +1352,7 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb) int err; err = skb_cow_head(skb, 0); - if (err) + if (unlikely(err)) return err; if (skb->protocol == cpu_to_be16(ETH_P_IP)) { @@ -1373,7 +1369,7 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb) } static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q, - struct ionic_tx_desc_info *desc_info, + struct ionic_txq_desc *desc, struct sk_buff *skb, dma_addr_t addr, u8 nsge, u16 len, unsigned int hdrlen, unsigned int mss, @@ -1381,7 +1377,6 @@ static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q, u16 vlan_tci, bool has_vlan, bool start, bool done) { - struct ionic_txq_desc *desc = &q->txq[q->head_idx]; u8 flags = 0; u64 cmd; @@ -1461,7 +1456,7 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q, err = ionic_tx_tcp_inner_pseudo_csum(skb); else err = ionic_tx_tcp_pseudo_csum(skb); - if (err) { + if (unlikely(err)) { /* clean up mapping from ionic_tx_map_skb */ ionic_tx_desc_unmap_bufs(q, desc_info); return err; @@ -1519,10 +1514,9 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q, seg_rem = min(tso_rem, mss); done = (tso_rem == 0); /* post descriptor */ - ionic_tx_tso_post(netdev, q, desc_info, skb, - desc_addr, desc_nsge, desc_len, - hdrlen, mss, outer_csum, vlan_tci, has_vlan, - start, done); + ionic_tx_tso_post(netdev, q, desc, skb, desc_addr, desc_nsge, + desc_len, hdrlen, mss, outer_csum, vlan_tci, + has_vlan, start, done); start = false; /* Buffer information is stored with the first tso descriptor */ desc_info = &q->tx_info[q->head_idx]; @@ -1747,7 +1741,7 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb) linearize: if (too_many_frags) { err = skb_linearize(skb); - if (err) + if (unlikely(err)) return err; q_to_tx_stats(q)->linearize++; } @@ -1781,7 +1775,7 @@ static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb, else err = ionic_tx(netdev, q, skb); - if (err) + if (unlikely(err)) goto err_out_drop; return NETDEV_TX_OK; @@ -1827,7 +1821,7 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev) else err = ionic_tx(netdev, q, skb); - if (err) + if (unlikely(err)) goto err_out_drop; return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c index 2fcbcecb41d1..fef4b2b0b1f2 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c @@ -571,9 +571,6 @@ static u64 ctx_addr_sig_regs[][3] = { #define CRB_CTX_ADDR_REG_HI(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][2]) #define CRB_CTX_SIGNATURE_REG(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][1]) -#define lower32(x) ((u32)((x) & 0xffffffff)) -#define upper32(x) ((u32)(((u64)(x) >> 32) & 0xffffffff)) - static struct netxen_recv_crb recv_crb_registers[] = { /* Instance 0 */ { @@ -723,9 +720,9 @@ netxen_init_old_ctx(struct netxen_adapter *adapter) NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE; NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port), - lower32(recv_ctx->phys_addr)); + lower_32_bits(recv_ctx->phys_addr)); NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port), - upper32(recv_ctx->phys_addr)); + 
upper_32_bits(recv_ctx->phys_addr)); NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port), signature | port); return 0; diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index f497f6ca1018..97b059be1041 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -1137,7 +1137,7 @@ static int qede_set_channels(struct net_device *dev, } static int qede_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct qede_dev *edev = netdev_priv(dev); diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 747cc5e2bb78..63e3dac4d5f7 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -321,7 +321,7 @@ int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr) sizeof(config)) ? -EFAULT : 0; } -int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info) +int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *info) { struct qede_ptp *ptp = edev->ptp; diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h index 1db0f021c645..adafc894797e 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h @@ -17,7 +17,7 @@ void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb); int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req); void qede_ptp_disable(struct qede_dev *edev); int qede_ptp_enable(struct qede_dev *edev); -int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts); +int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *ts); static inline void qede_ptp_record_rx_ts(struct qede_dev *edev, union eth_rx_cqe *cqe, diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 7b9e04884575..714d2e804694 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -1608,7 +1608,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) if (!tp->dash_enabled) { rtl_set_d3_pll_down(tp, !wolopts); - tp->dev->wol_enabled = wolopts ? 1 : 0; + tp->dev->ethtool->wol_enabled = wolopts ? 1 : 0; } } @@ -2274,7 +2274,9 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) /* 8168B family. */ { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 }, - { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 }, + /* This one is very old and rare, let's see if anybody complains. + * { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 }, + */ /* 8101 family. 
*/ { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39 }, @@ -5086,12 +5088,10 @@ static void rtl_set_irq_mask(struct rtl8169_private *tp) tp->irq_mask = RxOK | RxErr | TxOK | TxErr | LinkChg; if (tp->mac_version <= RTL_GIGA_MAC_VER_06) - tp->irq_mask |= SYSErr | RxOverflow | RxFIFOOver; + tp->irq_mask |= SYSErr | RxFIFOOver; else if (tp->mac_version == RTL_GIGA_MAC_VER_11) /* special workaround needed */ tp->irq_mask |= RxFIFOOver; - else - tp->irq_mask |= RxOverflow; } static int rtl_alloc_irq(struct rtl8169_private *tp) @@ -5478,7 +5478,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rtl_set_d3_pll_down(tp, true); } else { rtl_set_d3_pll_down(tp, false); - dev->wol_enabled = 1; + dev->ethtool->wol_enabled = 1; } jumbo_max = rtl_jumbo_max(tp); diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index b03fae7a0f72..9b7559c88bee 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig @@ -33,6 +33,7 @@ config RAVB select CRC32 select MII select MDIO_BITBANG + select PAGE_POOL select PHYLIB select RESET_CONTROLLER help @@ -58,4 +59,14 @@ config RENESAS_GEN4_PTP help Renesas R-Car Gen4 gPTP device driver. +config RTSN + tristate "Renesas Ethernet-TSN support" + depends on ARCH_RENESAS || COMPILE_TEST + depends on PTP_1588_CLOCK + select CRC32 + select PHYLIB + select RENESAS_GEN4_PTP + help + Renesas Ethernet-TSN device driver. + endif # NET_VENDOR_RENESAS diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile index 9070acfd6aaf..f65fc76f8b4d 100644 --- a/drivers/net/ethernet/renesas/Makefile +++ b/drivers/net/ethernet/renesas/Makefile @@ -11,3 +11,5 @@ obj-$(CONFIG_RAVB) += ravb.o obj-$(CONFIG_RENESAS_ETHER_SWITCH) += rswitch.o obj-$(CONFIG_RENESAS_GEN4_PTP) += rcar_gen4_ptp.o + +obj-$(CONFIG_RTSN) += rtsn.o diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index b48935ec7e28..9893c91af105 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -19,6 +19,7 @@ #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/ptp_clock_kernel.h> +#include <net/page_pool/types.h> #define BE_TX_RING_SIZE 64 /* TX ring size for Best Effort */ #define BE_RX_RING_SIZE 1024 /* RX ring size for Best Effort */ @@ -257,6 +258,7 @@ enum APSR_BIT { APSR_CMSW = 0x00000010, APSR_RDM = 0x00002000, APSR_TDM = 0x00004000, + APSR_MIISELECT = 0x01000000, /* R-Car V4M only */ }; /* RCR */ @@ -1039,7 +1041,7 @@ struct ravb_ptp { }; struct ravb_hw_info { - bool (*receive)(struct net_device *ndev, int *quota, int q); + int (*receive)(struct net_device *ndev, int budget, int q); void (*set_rate)(struct net_device *ndev); int (*set_feature)(struct net_device *ndev, netdev_features_t features); int (*dmac_init)(struct net_device *ndev); @@ -1051,9 +1053,10 @@ struct ravb_hw_info { int stats_len; u32 tccr_mask; u32 rx_max_frame_size; - u32 rx_max_desc_use; + u32 rx_buffer_size; u32 rx_desc_size; unsigned aligned_tx: 1; + unsigned coalesce_irqs:1; /* Needs software IRQ coalescing */ /* hardware features */ unsigned internal_delay:1; /* AVB-DMAC has internal delays */ @@ -1070,6 +1073,11 @@ struct ravb_hw_info { unsigned half_duplex:1; /* E-MAC supports half duplex mode */ }; +struct ravb_rx_buffer { + struct page *page; + unsigned int offset; +}; + struct ravb_private { struct net_device *ndev; struct platform_device *pdev; @@ -1093,7 +1101,8 @@ struct ravb_private { struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE]; 
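/* Minimal standalone sketch of the page_pool-backed RX buffer scheme the
 * ravb changes above move to (one pool per RX ring, a {page, offset} pair
 * tracked per descriptor). Not taken from the patch; the example_* names,
 * the buffer size handling and the GFP flag are assumptions for
 * illustration only.
 */
#include <linux/dma-mapping.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/types.h>

struct example_rx_buffer {
	struct page *page;
	unsigned int offset;
};

static struct page_pool *example_rx_pool_create(struct device *dev,
						unsigned int ring_size)
{
	struct page_pool_params params = {
		.order = 0,
		.flags = PP_FLAG_DMA_MAP,	/* pool maps pages for DMA */
		.pool_size = ring_size,
		.nid = NUMA_NO_NODE,
		.dev = dev,
		.dma_dir = DMA_FROM_DEVICE,
	};

	return page_pool_create(&params);	/* ERR_PTR() on failure */
}

static int example_rx_buffer_fill(struct device *dev, struct page_pool *pool,
				  struct example_rx_buffer *buf,
				  unsigned int buf_size, dma_addr_t *dma_addr)
{
	unsigned int size = buf_size;

	/* Grab a (possibly recycled) page fragment from the pool. */
	buf->page = page_pool_alloc(pool, &buf->offset, &size, GFP_ATOMIC);
	if (!buf->page)
		return -ENOMEM;

	/* The pool already holds the DMA mapping; just sync for the device. */
	*dma_addr = page_pool_get_dma_addr(buf->page) + buf->offset;
	dma_sync_single_for_device(dev, *dma_addr, buf_size, DMA_FROM_DEVICE);

	return 0;
}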
void *tx_align[NUM_TX_QUEUE]; struct sk_buff *rx_1st_skb; - struct sk_buff **rx_skb[NUM_RX_QUEUE]; + struct page_pool *rx_pool[NUM_RX_QUEUE]; + struct ravb_rx_buffer *rx_buffers[NUM_RX_QUEUE]; struct sk_buff **tx_skb[NUM_TX_QUEUE]; u32 rx_over_errors; u32 rx_fifo_errors; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 4d100283c30f..c02fb296bf7d 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -30,6 +30,7 @@ #include <linux/reset.h> #include <linux/math64.h> #include <net/ip.h> +#include <net/page_pool/helpers.h> #include "ravb.h" @@ -113,25 +114,6 @@ static void ravb_set_rate_rcar(struct net_device *ndev) } } -static struct sk_buff * -ravb_alloc_skb(struct net_device *ndev, const struct ravb_hw_info *info, - gfp_t gfp_mask) -{ - struct sk_buff *skb; - u32 reserve; - - skb = __netdev_alloc_skb(ndev, info->rx_max_frame_size + RAVB_ALIGN - 1, - gfp_mask); - if (!skb) - return NULL; - - reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1); - if (reserve) - skb_reserve(skb, RAVB_ALIGN - reserve); - - return skb; -} - /* Get MAC address from the MAC address registers * * Ethernet AVB device doesn't have ROM for MAC address. @@ -257,21 +239,10 @@ static void ravb_rx_ring_free(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); unsigned int ring_size; - unsigned int i; if (!priv->rx_ring[q].raw) return; - for (i = 0; i < priv->num_rx_ring[q]; i++) { - struct ravb_rx_desc *desc = ravb_rx_get_desc(priv, q, i); - - if (!dma_mapping_error(ndev->dev.parent, - le32_to_cpu(desc->dptr))) - dma_unmap_single(ndev->dev.parent, - le32_to_cpu(desc->dptr), - priv->info->rx_max_frame_size, - DMA_FROM_DEVICE); - } ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1); dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].raw, priv->rx_desc_dma[q]); @@ -298,13 +269,16 @@ static void ravb_ring_free(struct net_device *ndev, int q) priv->tx_ring[q] = NULL; } - /* Free RX skb ringbuffer */ - if (priv->rx_skb[q]) { - for (i = 0; i < priv->num_rx_ring[q]; i++) - dev_kfree_skb(priv->rx_skb[q][i]); + /* Free RX buffers */ + for (i = 0; i < priv->num_rx_ring[q]; i++) { + if (priv->rx_buffers[q][i].page) + page_pool_put_page(priv->rx_pool[q], + priv->rx_buffers[q][i].page, + 0, true); } - kfree(priv->rx_skb[q]); - priv->rx_skb[q] = NULL; + kfree(priv->rx_buffers[q]); + priv->rx_buffers[q] = NULL; + page_pool_destroy(priv->rx_pool[q]); /* Free aligned TX buffers */ kfree(priv->tx_align[q]); @@ -317,35 +291,64 @@ static void ravb_ring_free(struct net_device *ndev, int q) priv->tx_skb[q] = NULL; } -static void ravb_rx_ring_format(struct net_device *ndev, int q) +static int +ravb_alloc_rx_buffer(struct net_device *ndev, int q, u32 entry, gfp_t gfp_mask, + struct ravb_rx_desc *rx_desc) { struct ravb_private *priv = netdev_priv(ndev); - struct ravb_rx_desc *rx_desc; - unsigned int rx_ring_size; + const struct ravb_hw_info *info = priv->info; + struct ravb_rx_buffer *rx_buff; dma_addr_t dma_addr; - unsigned int i; + unsigned int size; - rx_ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q]; - memset(priv->rx_ring[q].raw, 0, rx_ring_size); - /* Build RX ring buffer */ - for (i = 0; i < priv->num_rx_ring[q]; i++) { - /* RX descriptor */ - rx_desc = ravb_rx_get_desc(priv, q, i); - rx_desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use); - dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data, - priv->info->rx_max_frame_size, - DMA_FROM_DEVICE); + 
rx_buff = &priv->rx_buffers[q][entry]; + size = info->rx_buffer_size; + rx_buff->page = page_pool_alloc(priv->rx_pool[q], &rx_buff->offset, + &size, gfp_mask); + if (unlikely(!rx_buff->page)) { /* We just set the data size to 0 for a failed mapping which * should prevent DMA from happening... */ - if (dma_mapping_error(ndev->dev.parent, dma_addr)) - rx_desc->ds_cc = cpu_to_le16(0); - rx_desc->dptr = cpu_to_le32(dma_addr); + rx_desc->ds_cc = cpu_to_le16(0); + return -ENOMEM; + } + + dma_addr = page_pool_get_dma_addr(rx_buff->page) + rx_buff->offset; + dma_sync_single_for_device(ndev->dev.parent, dma_addr, + info->rx_buffer_size, DMA_FROM_DEVICE); + rx_desc->dptr = cpu_to_le32(dma_addr); + + /* The end of the RX buffer is used to store skb shared data, so we need + * to ensure that the hardware leaves enough space for this. + */ + rx_desc->ds_cc = cpu_to_le16(info->rx_buffer_size - + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) - + ETH_FCS_LEN + sizeof(__sum16)); + return 0; +} + +static u32 +ravb_rx_ring_refill(struct net_device *ndev, int q, u32 count, gfp_t gfp_mask) +{ + struct ravb_private *priv = netdev_priv(ndev); + struct ravb_rx_desc *rx_desc; + u32 i, entry; + + for (i = 0; i < count; i++) { + entry = (priv->dirty_rx[q] + i) % priv->num_rx_ring[q]; + rx_desc = ravb_rx_get_desc(priv, q, entry); + + if (!priv->rx_buffers[q][entry].page) { + if (unlikely(ravb_alloc_rx_buffer(ndev, q, entry, + gfp_mask, rx_desc))) + break; + } + /* Descriptor type must be set after all the above writes */ + dma_wmb(); rx_desc->die_dt = DT_FEMPTY; } - rx_desc = ravb_rx_get_desc(priv, q, i); - rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); - rx_desc->die_dt = DT_LINKFIX; /* type */ + + return i; } /* Format skb and descriptor buffer for Ethernet AVB */ @@ -353,6 +356,7 @@ static void ravb_ring_format(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); unsigned int num_tx_desc = priv->num_tx_desc; + struct ravb_rx_desc *rx_desc; struct ravb_tx_desc *tx_desc; struct ravb_desc *desc; unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] * @@ -364,7 +368,13 @@ static void ravb_ring_format(struct net_device *ndev, int q) priv->dirty_rx[q] = 0; priv->dirty_tx[q] = 0; - ravb_rx_ring_format(ndev, q); + /* Regular RX descriptors have already been initialized by + * ravb_rx_ring_refill(), we just need to initialize the final link + * descriptor. 
+ */ + rx_desc = ravb_rx_get_desc(priv, q, priv->num_rx_ring[q]); + rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); + rx_desc->die_dt = DT_LINKFIX; /* type */ memset(priv->tx_ring[q], 0, tx_ring_size); /* Build TX ring buffer */ @@ -408,26 +418,47 @@ static void *ravb_alloc_rx_desc(struct net_device *ndev, int q) static int ravb_ring_init(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); - const struct ravb_hw_info *info = priv->info; unsigned int num_tx_desc = priv->num_tx_desc; + struct page_pool_params params = { + .order = 0, + .flags = PP_FLAG_DMA_MAP, + .pool_size = priv->num_rx_ring[q], + .nid = NUMA_NO_NODE, + .dev = ndev->dev.parent, + .dma_dir = DMA_FROM_DEVICE, + }; unsigned int ring_size; - struct sk_buff *skb; - unsigned int i; + u32 num_filled; + + /* Allocate RX page pool and buffers */ + priv->rx_pool[q] = page_pool_create(¶ms); + if (IS_ERR(priv->rx_pool[q])) + goto error; - /* Allocate RX and TX skb rings */ - priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], - sizeof(*priv->rx_skb[q]), GFP_KERNEL); + /* Allocate RX buffers */ + priv->rx_buffers[q] = kcalloc(priv->num_rx_ring[q], + sizeof(*priv->rx_buffers[q]), GFP_KERNEL); + if (!priv->rx_buffers[q]) + goto error; + + /* Allocate TX skb rings */ priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q], sizeof(*priv->tx_skb[q]), GFP_KERNEL); - if (!priv->rx_skb[q] || !priv->tx_skb[q]) + if (!priv->tx_skb[q]) goto error; - for (i = 0; i < priv->num_rx_ring[q]; i++) { - skb = ravb_alloc_skb(ndev, info, GFP_KERNEL); - if (!skb) - goto error; - priv->rx_skb[q][i] = skb; - } + /* Allocate all RX descriptors. */ + if (!ravb_alloc_rx_desc(ndev, q)) + goto error; + + /* Populate RX ring buffer. */ + priv->dirty_rx[q] = 0; + ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q]; + memset(priv->rx_ring[q].raw, 0, ring_size); + num_filled = ravb_rx_ring_refill(ndev, q, priv->num_rx_ring[q], + GFP_KERNEL); + if (num_filled != priv->num_rx_ring[q]) + goto error; if (num_tx_desc > 1) { /* Allocate rings for the aligned buffers */ @@ -437,12 +468,6 @@ static int ravb_ring_init(struct net_device *ndev, int q) goto error; } - /* Allocate all RX descriptors. */ - if (!ravb_alloc_rx_desc(ndev, q)) - goto error; - - priv->dirty_rx[q] = 0; - /* Allocate all TX descriptors. */ ring_size = sizeof(struct ravb_tx_desc) * (priv->num_tx_ring[q] * num_tx_desc + 1); @@ -554,6 +579,16 @@ static void ravb_emac_init_rcar(struct net_device *ndev) ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR); } +static void ravb_emac_init_rcar_gen4(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + bool mii = priv->phy_interface == PHY_INTERFACE_MODE_MII; + + ravb_modify(ndev, APSR, APSR_MIISELECT, mii ? 
APSR_MIISELECT : 0); + + ravb_emac_init_rcar(ndev); +} + /* E-MAC init function */ static void ravb_emac_init(struct net_device *ndev) { @@ -706,7 +741,9 @@ static void ravb_get_tx_tstamp(struct net_device *ndev) static void ravb_rx_csum_gbeth(struct sk_buff *skb) { + struct skb_shared_info *shinfo = skb_shinfo(skb); __wsum csum_ip_hdr, csum_proto; + skb_frag_t *last_frag; u8 *hw_csum; /* The hardware checksum status is contained in sizeof(__sum16) * 2 = 4 @@ -716,12 +753,24 @@ static void ravb_rx_csum_gbeth(struct sk_buff *skb) if (unlikely(skb->len < sizeof(__sum16) * 2)) return; - hw_csum = skb_tail_pointer(skb) - sizeof(__sum16); + if (skb_is_nonlinear(skb)) { + last_frag = &shinfo->frags[shinfo->nr_frags - 1]; + hw_csum = skb_frag_address(last_frag) + + skb_frag_size(last_frag); + } else { + hw_csum = skb_tail_pointer(skb); + } + + hw_csum -= sizeof(__sum16); csum_proto = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); hw_csum -= sizeof(__sum16); csum_ip_hdr = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); - skb_trim(skb, skb->len - 2 * sizeof(__sum16)); + + if (skb_is_nonlinear(skb)) + skb_frag_size_sub(last_frag, 2 * sizeof(__sum16)); + else + skb_trim(skb, skb->len - 2 * sizeof(__sum16)); /* TODO: IPV6 Rx checksum */ if (skb->protocol == htons(ETH_P_IP) && !csum_ip_hdr && !csum_proto) @@ -743,30 +792,14 @@ static void ravb_rx_csum(struct sk_buff *skb) skb_trim(skb, skb->len - sizeof(__sum16)); } -static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry, - struct ravb_rx_desc *desc) -{ - struct ravb_private *priv = netdev_priv(ndev); - struct sk_buff *skb; - - skb = priv->rx_skb[RAVB_BE][entry]; - priv->rx_skb[RAVB_BE][entry] = NULL; - dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), - ALIGN(priv->info->rx_max_frame_size, 16), - DMA_FROM_DEVICE); - - return skb; -} - /* Packet receive function for Gigabit Ethernet */ -static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q) +static int ravb_rx_gbeth(struct net_device *ndev, int budget, int q) { struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; struct net_device_stats *stats; struct ravb_rx_desc *desc; struct sk_buff *skb; - dma_addr_t dma_addr; int rx_packets = 0; u8 desc_status; u16 desc_len; @@ -781,7 +814,7 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q) for (i = 0; i < limit; i++, priv->cur_rx[q]++) { entry = priv->cur_rx[q] % priv->num_rx_ring[q]; desc = &priv->rx_ring[q].desc[entry]; - if (rx_packets == *quota || desc->die_dt == DT_FEMPTY) + if (rx_packets == budget || desc->die_dt == DT_FEMPTY) break; /* Descriptor type must be checked before all other reads */ @@ -807,87 +840,110 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q) if (desc_status & MSC_CEEF) stats->rx_missed_errors++; } else { + struct ravb_rx_buffer *rx_buff; + void *rx_addr; + + rx_buff = &priv->rx_buffers[q][entry]; + rx_addr = page_address(rx_buff->page) + rx_buff->offset; die_dt = desc->die_dt & 0xF0; + dma_sync_single_for_cpu(ndev->dev.parent, + le32_to_cpu(desc->dptr), + desc_len, DMA_FROM_DEVICE); + switch (die_dt) { case DT_FSINGLE: - skb = ravb_get_skb_gbeth(ndev, entry, desc); - skb_put(skb, desc_len); - skb->protocol = eth_type_trans(skb, ndev); - if (ndev->features & NETIF_F_RXCSUM) - ravb_rx_csum_gbeth(skb); - napi_gro_receive(&priv->napi[q], skb); - rx_packets++; - stats->rx_bytes += desc_len; - break; case DT_FSTART: - priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc); - 
skb_put(priv->rx_1st_skb, desc_len); + /* Start of packet: Set initial data length. */ + skb = napi_build_skb(rx_addr, + info->rx_buffer_size); + if (unlikely(!skb)) { + stats->rx_errors++; + page_pool_put_page(priv->rx_pool[q], + rx_buff->page, 0, + true); + goto refill; + } + skb_mark_for_recycle(skb); + skb_put(skb, desc_len); + + /* Save this skb if the packet spans multiple + * descriptors. + */ + if (die_dt == DT_FSTART) + priv->rx_1st_skb = skb; break; + case DT_FMID: - skb = ravb_get_skb_gbeth(ndev, entry, desc); - skb_copy_to_linear_data_offset(priv->rx_1st_skb, - priv->rx_1st_skb->len, - skb->data, - desc_len); - skb_put(priv->rx_1st_skb, desc_len); - dev_kfree_skb(skb); - break; case DT_FEND: - skb = ravb_get_skb_gbeth(ndev, entry, desc); - skb_copy_to_linear_data_offset(priv->rx_1st_skb, - priv->rx_1st_skb->len, - skb->data, - desc_len); - skb_put(priv->rx_1st_skb, desc_len); - dev_kfree_skb(skb); - priv->rx_1st_skb->protocol = - eth_type_trans(priv->rx_1st_skb, ndev); + /* Continuing a packet: Add this buffer as an RX + * frag. + */ + + /* rx_1st_skb will be NULL if napi_build_skb() + * failed for the first descriptor of a + * multi-descriptor packet. + */ + if (unlikely(!priv->rx_1st_skb)) { + stats->rx_errors++; + page_pool_put_page(priv->rx_pool[q], + rx_buff->page, 0, + true); + + /* We may find a DT_FSINGLE or DT_FSTART + * descriptor in the queue which we can + * process, so don't give up yet. + */ + continue; + } + skb_add_rx_frag(priv->rx_1st_skb, + skb_shinfo(priv->rx_1st_skb)->nr_frags, + rx_buff->page, rx_buff->offset, + desc_len, info->rx_buffer_size); + + /* Set skb to point at the whole packet so that + * we only need one code path for finishing a + * packet. + */ + skb = priv->rx_1st_skb; + } + + switch (die_dt) { + case DT_FSINGLE: + case DT_FEND: + /* Finishing a packet: Determine protocol & + * checksum, hand off to NAPI and update our + * stats. + */ + skb->protocol = eth_type_trans(skb, ndev); if (ndev->features & NETIF_F_RXCSUM) - ravb_rx_csum_gbeth(priv->rx_1st_skb); - stats->rx_bytes += priv->rx_1st_skb->len; - napi_gro_receive(&priv->napi[q], - priv->rx_1st_skb); + ravb_rx_csum_gbeth(skb); + stats->rx_bytes += skb->len; + napi_gro_receive(&priv->napi[q], skb); rx_packets++; - break; + + /* Clear rx_1st_skb so that it will only be + * non-NULL when valid. + */ + priv->rx_1st_skb = NULL; } + + /* Mark this RX buffer as consumed. */ + rx_buff->page = NULL; } } +refill: /* Refill the RX ring buffers. */ - for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) { - entry = priv->dirty_rx[q] % priv->num_rx_ring[q]; - desc = &priv->rx_ring[q].desc[entry]; - desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use); - - if (!priv->rx_skb[q][entry]) { - skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC); - if (!skb) - break; - dma_addr = dma_map_single(ndev->dev.parent, - skb->data, - priv->info->rx_max_frame_size, - DMA_FROM_DEVICE); - skb_checksum_none_assert(skb); - /* We just set the data size to 0 for a failed mapping - * which should prevent DMA from happening... 
- */ - if (dma_mapping_error(ndev->dev.parent, dma_addr)) - desc->ds_cc = cpu_to_le16(0); - desc->dptr = cpu_to_le32(dma_addr); - priv->rx_skb[q][entry] = skb; - } - /* Descriptor type must be set after all the above writes */ - dma_wmb(); - desc->die_dt = DT_FEMPTY; - } + priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q, + priv->cur_rx[q] - priv->dirty_rx[q], + GFP_ATOMIC); stats->rx_packets += rx_packets; - *quota -= rx_packets; - return *quota == 0; + return rx_packets; } /* Packet receive function for Ethernet AVB */ -static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q) +static int ravb_rx_rcar(struct net_device *ndev, int budget, int q) { struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; @@ -895,7 +951,6 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q) struct ravb_ex_rx_desc *desc; unsigned int limit, i; struct sk_buff *skb; - dma_addr_t dma_addr; struct timespec64 ts; int rx_packets = 0; u8 desc_status; @@ -906,7 +961,7 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q) for (i = 0; i < limit; i++, priv->cur_rx[q]++) { entry = priv->cur_rx[q] % priv->num_rx_ring[q]; desc = &priv->rx_ring[q].ex_desc[entry]; - if (rx_packets == *quota || desc->die_dt == DT_FEMPTY) + if (rx_packets == budget || desc->die_dt == DT_FEMPTY) break; /* Descriptor type must be checked before all other reads */ @@ -934,12 +989,23 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q) stats->rx_missed_errors++; } else { u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE; - - skb = priv->rx_skb[q][entry]; - priv->rx_skb[q][entry] = NULL; - dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), - priv->info->rx_max_frame_size, - DMA_FROM_DEVICE); + struct ravb_rx_buffer *rx_buff; + void *rx_addr; + + rx_buff = &priv->rx_buffers[q][entry]; + rx_addr = page_address(rx_buff->page) + rx_buff->offset; + dma_sync_single_for_cpu(ndev->dev.parent, + le32_to_cpu(desc->dptr), + pkt_len, DMA_FROM_DEVICE); + + skb = napi_build_skb(rx_addr, info->rx_buffer_size); + if (unlikely(!skb)) { + stats->rx_errors++; + page_pool_put_page(priv->rx_pool[q], + rx_buff->page, 0, true); + break; + } + skb_mark_for_recycle(skb); get_ts &= (q == RAVB_NC) ? RAVB_RXTSTAMP_TYPE_V2_L2_EVENT : ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; @@ -961,48 +1027,28 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q) napi_gro_receive(&priv->napi[q], skb); rx_packets++; stats->rx_bytes += pkt_len; + + /* Mark this RX buffer as consumed. */ + rx_buff->page = NULL; } } /* Refill the RX ring buffers. */ - for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) { - entry = priv->dirty_rx[q] % priv->num_rx_ring[q]; - desc = &priv->rx_ring[q].ex_desc[entry]; - desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use); - - if (!priv->rx_skb[q][entry]) { - skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC); - if (!skb) - break; /* Better luck next round. */ - dma_addr = dma_map_single(ndev->dev.parent, skb->data, - priv->info->rx_max_frame_size, - DMA_FROM_DEVICE); - skb_checksum_none_assert(skb); - /* We just set the data size to 0 for a failed mapping - * which should prevent DMA from happening... 
- */ - if (dma_mapping_error(ndev->dev.parent, dma_addr)) - desc->ds_cc = cpu_to_le16(0); - desc->dptr = cpu_to_le32(dma_addr); - priv->rx_skb[q][entry] = skb; - } - /* Descriptor type must be set after all the above writes */ - dma_wmb(); - desc->die_dt = DT_FEMPTY; - } + priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q, + priv->cur_rx[q] - priv->dirty_rx[q], + GFP_ATOMIC); stats->rx_packets += rx_packets; - *quota -= rx_packets; - return *quota == 0; + return rx_packets; } /* Packet receive function for Ethernet AVB */ -static bool ravb_rx(struct net_device *ndev, int *quota, int q) +static int ravb_rx(struct net_device *ndev, int budget, int q) { struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; - return info->receive(ndev, quota, q); + return info->receive(ndev, budget, q); } static void ravb_rcv_snd_disable(struct net_device *ndev) @@ -1319,13 +1365,12 @@ static int ravb_poll(struct napi_struct *napi, int budget) unsigned long flags; int q = napi - priv->napi; int mask = BIT(q); - int quota = budget; - bool unmask; + int work_done; /* Processing RX Descriptor Ring */ /* Clear RX interrupt */ ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0); - unmask = !ravb_rx(ndev, "a, q); + work_done = ravb_rx(ndev, budget, q); /* Processing TX Descriptor Ring */ spin_lock_irqsave(&priv->lock, flags); @@ -1344,24 +1389,20 @@ static int ravb_poll(struct napi_struct *napi, int budget) if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) ndev->stats.rx_fifo_errors = priv->rx_fifo_errors; - if (!unmask) - goto out; - - napi_complete(napi); - - /* Re-enable RX/TX interrupts */ - spin_lock_irqsave(&priv->lock, flags); - if (!info->irq_en_dis) { - ravb_modify(ndev, RIC0, mask, mask); - ravb_modify(ndev, TIC, mask, mask); - } else { - ravb_write(ndev, mask, RIE0); - ravb_write(ndev, mask, TIE); + if (work_done < budget && napi_complete_done(napi, work_done)) { + /* Re-enable RX/TX interrupts */ + spin_lock_irqsave(&priv->lock, flags); + if (!info->irq_en_dis) { + ravb_modify(ndev, RIC0, mask, mask); + ravb_modify(ndev, TIC, mask, mask); + } else { + ravb_write(ndev, mask, RIE0); + ravb_write(ndev, mask, TIE); + } + spin_unlock_irqrestore(&priv->lock, flags); } - spin_unlock_irqrestore(&priv->lock, flags); -out: - return budget - quota; + return work_done; } static void ravb_set_duplex_gbeth(struct net_device *ndev) @@ -1696,7 +1737,7 @@ static int ravb_set_ringparam(struct net_device *ndev, } static int ravb_get_ts_info(struct net_device *ndev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *hw_info = priv->info; @@ -2621,6 +2662,28 @@ static int ravb_mdio_release(struct ravb_private *priv) return 0; } +static const struct ravb_hw_info ravb_gen2_hw_info = { + .receive = ravb_rx_rcar, + .set_rate = ravb_set_rate_rcar, + .set_feature = ravb_set_features_rcar, + .dmac_init = ravb_dmac_init_rcar, + .emac_init = ravb_emac_init_rcar, + .gstrings_stats = ravb_gstrings_stats, + .gstrings_size = sizeof(ravb_gstrings_stats), + .net_hw_features = NETIF_F_RXCSUM, + .net_features = NETIF_F_RXCSUM, + .stats_len = ARRAY_SIZE(ravb_gstrings_stats), + .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, + .rx_max_frame_size = SZ_2K, + .rx_buffer_size = SZ_2K + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), + .rx_desc_size = sizeof(struct ravb_ex_rx_desc), + .aligned_tx = 1, + .gptp = 1, + .nc_queues = 1, + .magic_pkt = 1, +}; + static const struct ravb_hw_info 
ravb_gen3_hw_info = { .receive = ravb_rx_rcar, .set_rate = ravb_set_rate_rcar, @@ -2634,7 +2697,8 @@ static const struct ravb_hw_info ravb_gen3_hw_info = { .stats_len = ARRAY_SIZE(ravb_gstrings_stats), .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, .rx_max_frame_size = SZ_2K, - .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16), + .rx_buffer_size = SZ_2K + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), .rx_desc_size = sizeof(struct ravb_ex_rx_desc), .internal_delay = 1, .tx_counters = 1, @@ -2645,12 +2709,12 @@ static const struct ravb_hw_info ravb_gen3_hw_info = { .magic_pkt = 1, }; -static const struct ravb_hw_info ravb_gen2_hw_info = { +static const struct ravb_hw_info ravb_gen4_hw_info = { .receive = ravb_rx_rcar, .set_rate = ravb_set_rate_rcar, .set_feature = ravb_set_features_rcar, .dmac_init = ravb_dmac_init_rcar, - .emac_init = ravb_emac_init_rcar, + .emac_init = ravb_emac_init_rcar_gen4, .gstrings_stats = ravb_gstrings_stats, .gstrings_size = sizeof(ravb_gstrings_stats), .net_hw_features = NETIF_F_RXCSUM, @@ -2658,10 +2722,14 @@ static const struct ravb_hw_info ravb_gen2_hw_info = { .stats_len = ARRAY_SIZE(ravb_gstrings_stats), .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, .rx_max_frame_size = SZ_2K, - .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16), + .rx_buffer_size = SZ_2K + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), .rx_desc_size = sizeof(struct ravb_ex_rx_desc), - .aligned_tx = 1, - .gptp = 1, + .internal_delay = 1, + .tx_counters = 1, + .multi_irqs = 1, + .irq_en_dis = 1, + .ccc_gac = 1, .nc_queues = 1, .magic_pkt = 1, }; @@ -2679,7 +2747,8 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = { .stats_len = ARRAY_SIZE(ravb_gstrings_stats), .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, .rx_max_frame_size = SZ_2K, - .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16), + .rx_buffer_size = SZ_2K + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), .rx_desc_size = sizeof(struct ravb_ex_rx_desc), .multi_irqs = 1, .err_mgmt_irqs = 1, @@ -2702,9 +2771,10 @@ static const struct ravb_hw_info gbeth_hw_info = { .stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth), .tccr_mask = TCCR_TSRQ0, .rx_max_frame_size = SZ_8K, - .rx_max_desc_use = 4080, + .rx_buffer_size = SZ_2K, .rx_desc_size = sizeof(struct ravb_rx_desc), .aligned_tx = 1, + .coalesce_irqs = 1, .tx_counters = 1, .carrier_counters = 1, .half_duplex = 1, @@ -2716,7 +2786,7 @@ static const struct of_device_id ravb_match_table[] = { { .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info }, { .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info }, { .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info }, - { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen3_hw_info }, + { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen4_hw_info }, { .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info }, { .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info }, { } @@ -2981,6 +3051,12 @@ static int ravb_probe(struct platform_device *pdev) if (info->nc_queues) netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll); + if (info->coalesce_irqs) { + netdev_sw_irq_coalesce_default_on(ndev); + if (num_present_cpus() == 1) + dev_set_threaded(ndev, true); + } + /* Network device register */ error = register_netdev(ndev); if (error) diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index 24c90d8f5a44..ff50e20856ec 100644 --- 
a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -1809,7 +1809,7 @@ static const struct net_device_ops rswitch_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, }; -static int rswitch_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info) +static int rswitch_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info) { struct rswitch_device *rdev = netdev_priv(ndev); diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c new file mode 100644 index 000000000000..577227c007ab --- /dev/null +++ b/drivers/net/ethernet/renesas/rtsn.c @@ -0,0 +1,1391 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Renesas Ethernet-TSN device driver + * + * Copyright (C) 2022 Renesas Electronics Corporation + * Copyright (C) 2023 Niklas Söderlund <niklas.soderlund@ragnatech.se> + */ + +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/module.h> +#include <linux/net_tstamp.h> +#include <linux/of.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/phy.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> +#include <linux/spinlock.h> + +#include "rtsn.h" +#include "rcar_gen4_ptp.h" + +struct rtsn_private { + struct net_device *ndev; + struct platform_device *pdev; + void __iomem *base; + struct rcar_gen4_ptp_private *ptp_priv; + struct clk *clk; + struct reset_control *reset; + + u32 num_tx_ring; + u32 num_rx_ring; + u32 tx_desc_bat_size; + dma_addr_t tx_desc_bat_dma; + struct rtsn_desc *tx_desc_bat; + u32 rx_desc_bat_size; + dma_addr_t rx_desc_bat_dma; + struct rtsn_desc *rx_desc_bat; + dma_addr_t tx_desc_dma; + dma_addr_t rx_desc_dma; + struct rtsn_ext_desc *tx_ring; + struct rtsn_ext_ts_desc *rx_ring; + struct sk_buff **tx_skb; + struct sk_buff **rx_skb; + spinlock_t lock; /* Register access lock */ + u32 cur_tx; + u32 dirty_tx; + u32 cur_rx; + u32 dirty_rx; + u8 ts_tag; + struct napi_struct napi; + struct rtnl_link_stats64 stats; + + struct mii_bus *mii; + phy_interface_t iface; + int link; + int speed; + + int tx_data_irq; + int rx_data_irq; +}; + +static u32 rtsn_read(struct rtsn_private *priv, enum rtsn_reg reg) +{ + return ioread32(priv->base + reg); +} + +static void rtsn_write(struct rtsn_private *priv, enum rtsn_reg reg, u32 data) +{ + iowrite32(data, priv->base + reg); +} + +static void rtsn_modify(struct rtsn_private *priv, enum rtsn_reg reg, + u32 clear, u32 set) +{ + rtsn_write(priv, reg, (rtsn_read(priv, reg) & ~clear) | set); +} + +static int rtsn_reg_wait(struct rtsn_private *priv, enum rtsn_reg reg, + u32 mask, u32 expected) +{ + u32 val; + + return readl_poll_timeout(priv->base + reg, val, + (val & mask) == expected, + RTSN_INTERVAL_US, RTSN_TIMEOUT_US); +} + +static void rtsn_ctrl_data_irq(struct rtsn_private *priv, bool enable) +{ + if (enable) { + rtsn_write(priv, TDIE0, TDIE_TDID_TDX(TX_CHAIN_IDX)); + rtsn_write(priv, RDIE0, RDIE_RDID_RDX(RX_CHAIN_IDX)); + } else { + rtsn_write(priv, TDID0, TDIE_TDID_TDX(TX_CHAIN_IDX)); + rtsn_write(priv, RDID0, RDIE_RDID_RDX(RX_CHAIN_IDX)); + } +} + +static void rtsn_get_timestamp(struct rtsn_private *priv, struct timespec64 *ts) +{ + struct rcar_gen4_ptp_private *ptp_priv = priv->ptp_priv; + + ptp_priv->info.gettime64(&ptp_priv->info, ts); +} + +static int rtsn_tx_free(struct net_device *ndev, bool free_txed_only) +{ + struct rtsn_private *priv = netdev_priv(ndev); + struct rtsn_ext_desc *desc; + struct sk_buff *skb; 
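/* The new rtsn register helpers above (rtsn_modify()/rtsn_reg_wait()) follow
 * the common read-modify-write plus polled-wait MMIO pattern; a minimal
 * sketch of that pattern for reference. The EXAMPLE_* constants and
 * example_* names are assumptions, not values from the driver.
 */
#include <linux/io.h>
#include <linux/iopoll.h>

#define EXAMPLE_POLL_INTERVAL_US	20
#define EXAMPLE_POLL_TIMEOUT_US		40000

static void example_reg_modify(void __iomem *base, u32 reg, u32 clear, u32 set)
{
	/* Read-modify-write: clear the given bits, then set the new ones. */
	iowrite32((ioread32(base + reg) & ~clear) | set, base + reg);
}

static int example_reg_wait(void __iomem *base, u32 reg, u32 mask, u32 expected)
{
	u32 val;

	/* Re-read the register until (val & mask) == expected or the timeout
	 * expires; returns 0 on success, -ETIMEDOUT otherwise.
	 */
	return readl_poll_timeout(base + reg, val, (val & mask) == expected,
				  EXAMPLE_POLL_INTERVAL_US,
				  EXAMPLE_POLL_TIMEOUT_US);
}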
+ int free_num = 0; + int entry, size; + + for (; priv->cur_tx - priv->dirty_tx > 0; priv->dirty_tx++) { + entry = priv->dirty_tx % priv->num_tx_ring; + desc = &priv->tx_ring[entry]; + if (free_txed_only && (desc->die_dt & DT_MASK) != DT_FEMPTY) + break; + + dma_rmb(); + size = le16_to_cpu(desc->info_ds) & TX_DS; + skb = priv->tx_skb[entry]; + if (skb) { + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + struct skb_shared_hwtstamps shhwtstamps; + struct timespec64 ts; + + rtsn_get_timestamp(priv, &ts); + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = timespec64_to_ktime(ts); + skb_tstamp_tx(skb, &shhwtstamps); + } + dma_unmap_single(ndev->dev.parent, + le32_to_cpu(desc->dptr), + size, DMA_TO_DEVICE); + dev_kfree_skb_any(priv->tx_skb[entry]); + free_num++; + + priv->stats.tx_packets++; + priv->stats.tx_bytes += size; + } + + desc->die_dt = DT_EEMPTY; + } + + desc = &priv->tx_ring[priv->num_tx_ring]; + desc->die_dt = DT_LINK; + + return free_num; +} + +static int rtsn_rx(struct net_device *ndev, int budget) +{ + struct rtsn_private *priv = netdev_priv(ndev); + unsigned int ndescriptors; + unsigned int rx_packets; + unsigned int i; + bool get_ts; + + get_ts = priv->ptp_priv->tstamp_rx_ctrl & + RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT; + + ndescriptors = priv->dirty_rx + priv->num_rx_ring - priv->cur_rx; + rx_packets = 0; + for (i = 0; i < ndescriptors; i++) { + const unsigned int entry = priv->cur_rx % priv->num_rx_ring; + struct rtsn_ext_ts_desc *desc = &priv->rx_ring[entry]; + struct sk_buff *skb; + dma_addr_t dma_addr; + u16 pkt_len; + + /* Stop processing descriptors if budget is consumed. */ + if (rx_packets >= budget) + break; + + /* Stop processing descriptors on first empty. */ + if ((desc->die_dt & DT_MASK) == DT_FEMPTY) + break; + + dma_rmb(); + pkt_len = le16_to_cpu(desc->info_ds) & RX_DS; + + skb = priv->rx_skb[entry]; + priv->rx_skb[entry] = NULL; + dma_addr = le32_to_cpu(desc->dptr); + dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ, + DMA_FROM_DEVICE); + + /* Get timestamp if enabled. */ + if (get_ts) { + struct skb_shared_hwtstamps *shhwtstamps; + struct timespec64 ts; + + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + + ts.tv_sec = (u64)le32_to_cpu(desc->ts_sec); + ts.tv_nsec = le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff)); + + shhwtstamps->hwtstamp = timespec64_to_ktime(ts); + } + + skb_put(skb, pkt_len); + skb->protocol = eth_type_trans(skb, ndev); + napi_gro_receive(&priv->napi, skb); + + /* Update statistics. */ + priv->stats.rx_packets++; + priv->stats.rx_bytes += pkt_len; + + /* Update counters. 
*/ + priv->cur_rx++; + rx_packets++; + } + + /* Refill the RX ring buffers */ + for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) { + const unsigned int entry = priv->dirty_rx % priv->num_rx_ring; + struct rtsn_ext_ts_desc *desc = &priv->rx_ring[entry]; + struct sk_buff *skb; + dma_addr_t dma_addr; + + desc->info_ds = cpu_to_le16(PKT_BUF_SZ); + + if (!priv->rx_skb[entry]) { + skb = napi_alloc_skb(&priv->napi, + PKT_BUF_SZ + RTSN_ALIGN - 1); + if (!skb) + break; + skb_reserve(skb, NET_IP_ALIGN); + dma_addr = dma_map_single(ndev->dev.parent, skb->data, + le16_to_cpu(desc->info_ds), + DMA_FROM_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + desc->info_ds = cpu_to_le16(0); + desc->dptr = cpu_to_le32(dma_addr); + skb_checksum_none_assert(skb); + priv->rx_skb[entry] = skb; + } + + dma_wmb(); + desc->die_dt = DT_FEMPTY | D_DIE; + } + + priv->rx_ring[priv->num_rx_ring].die_dt = DT_LINK; + + return rx_packets; +} + +static int rtsn_poll(struct napi_struct *napi, int budget) +{ + struct rtsn_private *priv; + struct net_device *ndev; + unsigned long flags; + int work_done; + + ndev = napi->dev; + priv = netdev_priv(ndev); + + /* Processing RX Descriptor Ring */ + work_done = rtsn_rx(ndev, budget); + + /* Processing TX Descriptor Ring */ + spin_lock_irqsave(&priv->lock, flags); + rtsn_tx_free(ndev, true); + netif_wake_subqueue(ndev, 0); + spin_unlock_irqrestore(&priv->lock, flags); + + /* Re-enable TX/RX interrupts */ + if (work_done < budget && napi_complete_done(napi, work_done)) { + spin_lock_irqsave(&priv->lock, flags); + rtsn_ctrl_data_irq(priv, true); + spin_unlock_irqrestore(&priv->lock, flags); + } + + return work_done; +} + +static int rtsn_desc_alloc(struct rtsn_private *priv) +{ + struct device *dev = &priv->pdev->dev; + unsigned int i; + + priv->tx_desc_bat_size = sizeof(struct rtsn_desc) * TX_NUM_CHAINS; + priv->tx_desc_bat = dma_alloc_coherent(dev, priv->tx_desc_bat_size, + &priv->tx_desc_bat_dma, + GFP_KERNEL); + + if (!priv->tx_desc_bat) + return -ENOMEM; + + for (i = 0; i < TX_NUM_CHAINS; i++) + priv->tx_desc_bat[i].die_dt = DT_EOS; + + priv->rx_desc_bat_size = sizeof(struct rtsn_desc) * RX_NUM_CHAINS; + priv->rx_desc_bat = dma_alloc_coherent(dev, priv->rx_desc_bat_size, + &priv->rx_desc_bat_dma, + GFP_KERNEL); + + if (!priv->rx_desc_bat) + return -ENOMEM; + + for (i = 0; i < RX_NUM_CHAINS; i++) + priv->rx_desc_bat[i].die_dt = DT_EOS; + + return 0; +} + +static void rtsn_desc_free(struct rtsn_private *priv) +{ + if (priv->tx_desc_bat) + dma_free_coherent(&priv->pdev->dev, priv->tx_desc_bat_size, + priv->tx_desc_bat, priv->tx_desc_bat_dma); + priv->tx_desc_bat = NULL; + + if (priv->rx_desc_bat) + dma_free_coherent(&priv->pdev->dev, priv->rx_desc_bat_size, + priv->rx_desc_bat, priv->rx_desc_bat_dma); + priv->rx_desc_bat = NULL; +} + +static void rtsn_chain_free(struct rtsn_private *priv) +{ + struct device *dev = &priv->pdev->dev; + + dma_free_coherent(dev, + sizeof(struct rtsn_ext_desc) * (priv->num_tx_ring + 1), + priv->tx_ring, priv->tx_desc_dma); + priv->tx_ring = NULL; + + dma_free_coherent(dev, + sizeof(struct rtsn_ext_ts_desc) * (priv->num_rx_ring + 1), + priv->rx_ring, priv->rx_desc_dma); + priv->rx_ring = NULL; + + kfree(priv->tx_skb); + priv->tx_skb = NULL; + + kfree(priv->rx_skb); + priv->rx_skb = NULL; +} + +static int rtsn_chain_init(struct rtsn_private *priv, int tx_size, int rx_size) +{ + struct net_device *ndev = priv->ndev; + struct sk_buff *skb; + int i; + + priv->num_tx_ring = tx_size; + priv->num_rx_ring = rx_size; + + priv->tx_skb = 
kcalloc(tx_size, sizeof(*priv->tx_skb), GFP_KERNEL); + priv->rx_skb = kcalloc(rx_size, sizeof(*priv->rx_skb), GFP_KERNEL); + + if (!priv->rx_skb || !priv->tx_skb) + goto error; + + for (i = 0; i < rx_size; i++) { + skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RTSN_ALIGN - 1); + if (!skb) + goto error; + skb_reserve(skb, NET_IP_ALIGN); + priv->rx_skb[i] = skb; + } + + /* Allocate TX, RX descriptors */ + priv->tx_ring = dma_alloc_coherent(ndev->dev.parent, + sizeof(struct rtsn_ext_desc) * (tx_size + 1), + &priv->tx_desc_dma, GFP_KERNEL); + priv->rx_ring = dma_alloc_coherent(ndev->dev.parent, + sizeof(struct rtsn_ext_ts_desc) * (rx_size + 1), + &priv->rx_desc_dma, GFP_KERNEL); + + if (!priv->tx_ring || !priv->rx_ring) + goto error; + + return 0; +error: + rtsn_chain_free(priv); + + return -ENOMEM; +} + +static void rtsn_chain_format(struct rtsn_private *priv) +{ + struct net_device *ndev = priv->ndev; + struct rtsn_ext_ts_desc *rx_desc; + struct rtsn_ext_desc *tx_desc; + struct rtsn_desc *bat_desc; + dma_addr_t dma_addr; + unsigned int i; + + priv->cur_tx = 0; + priv->cur_rx = 0; + priv->dirty_rx = 0; + priv->dirty_tx = 0; + + /* TX */ + memset(priv->tx_ring, 0, sizeof(*tx_desc) * priv->num_tx_ring); + for (i = 0, tx_desc = priv->tx_ring; i < priv->num_tx_ring; i++, tx_desc++) + tx_desc->die_dt = DT_EEMPTY | D_DIE; + + tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma); + tx_desc->die_dt = DT_LINK; + + bat_desc = &priv->tx_desc_bat[TX_CHAIN_IDX]; + bat_desc->die_dt = DT_LINK; + bat_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma); + + /* RX */ + memset(priv->rx_ring, 0, sizeof(*rx_desc) * priv->num_rx_ring); + for (i = 0, rx_desc = priv->rx_ring; i < priv->num_rx_ring; i++, rx_desc++) { + dma_addr = dma_map_single(ndev->dev.parent, + priv->rx_skb[i]->data, PKT_BUF_SZ, + DMA_FROM_DEVICE); + if (!dma_mapping_error(ndev->dev.parent, dma_addr)) + rx_desc->info_ds = cpu_to_le16(PKT_BUF_SZ); + rx_desc->dptr = cpu_to_le32((u32)dma_addr); + rx_desc->die_dt = DT_FEMPTY | D_DIE; + } + rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma); + rx_desc->die_dt = DT_LINK; + + bat_desc = &priv->rx_desc_bat[RX_CHAIN_IDX]; + bat_desc->die_dt = DT_LINK; + bat_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma); +} + +static int rtsn_dmac_init(struct rtsn_private *priv) +{ + int ret; + + ret = rtsn_chain_init(priv, TX_CHAIN_SIZE, RX_CHAIN_SIZE); + if (ret) + return ret; + + rtsn_chain_format(priv); + + return 0; +} + +static enum rtsn_mode rtsn_read_mode(struct rtsn_private *priv) +{ + return (rtsn_read(priv, OSR) & OSR_OPS) >> 1; +} + +static int rtsn_wait_mode(struct rtsn_private *priv, enum rtsn_mode mode) +{ + unsigned int i; + + /* Need to busy loop as mode changes can happen in atomic context. 
*/ + for (i = 0; i < RTSN_TIMEOUT_US / RTSN_INTERVAL_US; i++) { + if (rtsn_read_mode(priv) == mode) + return 0; + + udelay(RTSN_INTERVAL_US); + } + + return -ETIMEDOUT; +} + +static int rtsn_change_mode(struct rtsn_private *priv, enum rtsn_mode mode) +{ + int ret; + + rtsn_write(priv, OCR, mode); + ret = rtsn_wait_mode(priv, mode); + if (ret) + netdev_err(priv->ndev, "Failed to switch operation mode\n"); + return ret; +} + +static int rtsn_get_data_irq_status(struct rtsn_private *priv) +{ + u32 val; + + val = rtsn_read(priv, TDIS0) | TDIS_TDS(TX_CHAIN_IDX); + val |= rtsn_read(priv, RDIS0) | RDIS_RDS(RX_CHAIN_IDX); + + return val; +} + +static irqreturn_t rtsn_irq(int irq, void *dev_id) +{ + struct rtsn_private *priv = dev_id; + int ret = IRQ_NONE; + + spin_lock(&priv->lock); + + if (rtsn_get_data_irq_status(priv)) { + /* Clear TX/RX irq status */ + rtsn_write(priv, TDIS0, TDIS_TDS(TX_CHAIN_IDX)); + rtsn_write(priv, RDIS0, RDIS_RDS(RX_CHAIN_IDX)); + + if (napi_schedule_prep(&priv->napi)) { + /* Disable TX/RX interrupts */ + rtsn_ctrl_data_irq(priv, false); + + __napi_schedule(&priv->napi); + } + + ret = IRQ_HANDLED; + } + + spin_unlock(&priv->lock); + + return ret; +} + +static int rtsn_request_irq(unsigned int irq, irq_handler_t handler, + unsigned long flags, struct rtsn_private *priv, + const char *ch) +{ + char *name; + int ret; + + name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL, "%s:%s", + priv->ndev->name, ch); + if (!name) + return -ENOMEM; + + ret = request_irq(irq, handler, flags, name, priv); + if (ret) + netdev_err(priv->ndev, "Cannot request IRQ %s\n", name); + + return ret; +} + +static void rtsn_free_irqs(struct rtsn_private *priv) +{ + free_irq(priv->tx_data_irq, priv); + free_irq(priv->rx_data_irq, priv); +} + +static int rtsn_request_irqs(struct rtsn_private *priv) +{ + int ret; + + priv->rx_data_irq = platform_get_irq_byname(priv->pdev, "rx"); + if (priv->rx_data_irq < 0) + return priv->rx_data_irq; + + priv->tx_data_irq = platform_get_irq_byname(priv->pdev, "tx"); + if (priv->tx_data_irq < 0) + return priv->tx_data_irq; + + ret = rtsn_request_irq(priv->tx_data_irq, rtsn_irq, 0, priv, "tx"); + if (ret) + return ret; + + ret = rtsn_request_irq(priv->rx_data_irq, rtsn_irq, 0, priv, "rx"); + if (ret) { + free_irq(priv->tx_data_irq, priv); + return ret; + } + + return 0; +} + +static int rtsn_reset(struct rtsn_private *priv) +{ + reset_control_reset(priv->reset); + mdelay(1); + + return rtsn_wait_mode(priv, OCR_OPC_DISABLE); +} + +static int rtsn_axibmi_init(struct rtsn_private *priv) +{ + int ret; + + ret = rtsn_reg_wait(priv, RR, RR_RST, RR_RST_COMPLETE); + if (ret) + return ret; + + /* Set AXIWC */ + rtsn_write(priv, AXIWC, AXIWC_DEFAULT); + + /* Set AXIRC */ + rtsn_write(priv, AXIRC, AXIRC_DEFAULT); + + /* TX Descriptor chain setting */ + rtsn_write(priv, TATLS0, TATLS0_TEDE | TATLS0_TATEN(TX_CHAIN_IDX)); + rtsn_write(priv, TATLS1, priv->tx_desc_bat_dma + TX_CHAIN_ADDR_OFFSET); + rtsn_write(priv, TATLR, TATLR_TATL); + + ret = rtsn_reg_wait(priv, TATLR, TATLR_TATL, 0); + if (ret) + return ret; + + /* RX Descriptor chain setting */ + rtsn_write(priv, RATLS0, + RATLS0_RETS | RATLS0_REDE | RATLS0_RATEN(RX_CHAIN_IDX)); + rtsn_write(priv, RATLS1, priv->rx_desc_bat_dma + RX_CHAIN_ADDR_OFFSET); + rtsn_write(priv, RATLR, RATLR_RATL); + + ret = rtsn_reg_wait(priv, RATLR, RATLR_RATL, 0); + if (ret) + return ret; + + /* Enable TX/RX interrupts */ + rtsn_ctrl_data_irq(priv, true); + + return 0; +} + +static void rtsn_mhd_init(struct rtsn_private *priv) +{ + /* TX General setting */ 
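/* Reduced sketch of the interrupt-to-NAPI handoff implemented by rtsn_irq()
 * above: acknowledge the hardware status, mask the data interrupts, and hand
 * the remaining work to the poll routine. The struct layout and every
 * example_* helper are hypothetical placeholders, not driver code.
 */
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct example_priv {
	struct napi_struct napi;
	void __iomem *base;
	/* ... */
};

/* Hypothetical stand-ins for the device-specific interrupt plumbing. */
static bool example_irq_pending(struct example_priv *priv) { return true; }
static void example_ack_irqs(struct example_priv *priv) { }
static void example_mask_irqs(struct example_priv *priv) { }

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;

	if (!example_irq_pending(priv))
		return IRQ_NONE;

	/* Clear the hardware interrupt status. */
	example_ack_irqs(priv);

	if (napi_schedule_prep(&priv->napi)) {
		/* Mask device interrupts; the poll routine re-enables them
		 * once napi_complete_done() succeeds.
		 */
		example_mask_irqs(priv);
		__napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}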
+ rtsn_write(priv, TGC1, TGC1_STTV_DEFAULT | TGC1_TQTM_SFM); + rtsn_write(priv, TMS0, TMS_MFS_MAX); + + /* RX Filter IP */ + rtsn_write(priv, CFCR0, CFCR_SDID(RX_CHAIN_IDX)); + rtsn_write(priv, FMSCR, FMSCR_FMSIE(RX_CHAIN_IDX)); +} + +static int rtsn_get_phy_params(struct rtsn_private *priv) +{ + int ret; + + ret = of_get_phy_mode(priv->pdev->dev.of_node, &priv->iface); + if (ret) + return ret; + + switch (priv->iface) { + case PHY_INTERFACE_MODE_MII: + priv->speed = 100; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + priv->speed = 1000; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static void rtsn_set_phy_interface(struct rtsn_private *priv) +{ + u32 val; + + switch (priv->iface) { + case PHY_INTERFACE_MODE_MII: + val = MPIC_PIS_MII; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + val = MPIC_PIS_GMII; + break; + default: + return; + } + + rtsn_modify(priv, MPIC, MPIC_PIS_MASK, val); +} + +static void rtsn_set_rate(struct rtsn_private *priv) +{ + u32 val; + + switch (priv->speed) { + case 10: + val = MPIC_LSC_10M; + break; + case 100: + val = MPIC_LSC_100M; + break; + case 1000: + val = MPIC_LSC_1G; + break; + default: + return; + } + + rtsn_modify(priv, MPIC, MPIC_LSC_MASK, val); +} + +static int rtsn_rmac_init(struct rtsn_private *priv) +{ + const u8 *mac_addr = priv->ndev->dev_addr; + int ret; + + /* Set MAC address */ + rtsn_write(priv, MRMAC0, (mac_addr[0] << 8) | mac_addr[1]); + rtsn_write(priv, MRMAC1, (mac_addr[2] << 24) | (mac_addr[3] << 16) | + (mac_addr[4] << 8) | mac_addr[5]); + + /* Set xMII type */ + rtsn_set_phy_interface(priv); + rtsn_set_rate(priv); + + /* Enable MII */ + rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK, + MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT); + + /* Link verification */ + rtsn_modify(priv, MLVC, MLVC_PLV, MLVC_PLV); + ret = rtsn_reg_wait(priv, MLVC, MLVC_PLV, 0); + if (ret) + return ret; + + return ret; +} + +static int rtsn_hw_init(struct rtsn_private *priv) +{ + int ret; + + ret = rtsn_reset(priv); + if (ret) + return ret; + + /* Change to CONFIG mode */ + ret = rtsn_change_mode(priv, OCR_OPC_CONFIG); + if (ret) + return ret; + + ret = rtsn_axibmi_init(priv); + if (ret) + return ret; + + rtsn_mhd_init(priv); + + ret = rtsn_rmac_init(priv); + if (ret) + return ret; + + ret = rtsn_change_mode(priv, OCR_OPC_DISABLE); + if (ret) + return ret; + + /* Change to OPERATION mode */ + ret = rtsn_change_mode(priv, OCR_OPC_OPERATION); + + return ret; +} + +static int rtsn_mii_access(struct mii_bus *bus, bool read, int phyad, + int regad, u16 data) +{ + struct rtsn_private *priv = bus->priv; + u32 val; + int ret; + + val = MPSM_PDA(phyad) | MPSM_PRA(regad) | MPSM_PSME; + + if (!read) + val |= MPSM_PSMAD | MPSM_PRD_SET(data); + + rtsn_write(priv, MPSM, val); + + ret = rtsn_reg_wait(priv, MPSM, MPSM_PSME, 0); + if (ret) + return ret; + + if (read) + ret = MPSM_PRD_GET(rtsn_read(priv, MPSM)); + + return ret; +} + +static int rtsn_mii_read(struct mii_bus *bus, int addr, int regnum) +{ + return rtsn_mii_access(bus, true, addr, regnum, 0); +} + +static int rtsn_mii_write(struct mii_bus *bus, int addr, int regnum, u16 val) +{ + return rtsn_mii_access(bus, false, addr, regnum, val); +} + +static int rtsn_mdio_alloc(struct rtsn_private *priv) +{ + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; + 
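For reference, rtsn_mii_access() above drives a whole MDIO transaction through the single MPSM register: the PHY and register addresses go into the PDA/PRA fields, PSMAD selects a write, and the 16-bit data travels in the PRD field. A standalone sketch of that packing is shown below; it is illustrative only, the field positions are assumed to match the MPSM_* macros in rtsn.h, and the masks are written out numerically rather than with the kernel's BIT()/GENMASK() helpers.

/* Illustrative only: how an MDIO read/write command could be packed into
 * a single 32-bit register word, following the MPSM field layout.
 */
#include <stdio.h>
#include <stdint.h>

#define MPSM_PSME       (1u << 0)             /* start the transaction */
#define MPSM_PSMAD      (1u << 1)             /* clear = read, set = write */
#define MPSM_PDA(n)     (((n) & 0x1fu) << 3)  /* PHY address, 5 bits */
#define MPSM_PRA(n)     (((n) & 0x1fu) << 8)  /* register address, 5 bits */
#define MPSM_PRD_SET(d) ((uint32_t)(d) << 16) /* data to write */
#define MPSM_PRD_GET(v) ((uint16_t)((v) >> 16))

int main(void)
{
	/* Write 0x1234 to register 2 of the PHY at address 1. */
	uint32_t wr = MPSM_PDA(1) | MPSM_PRA(2) | MPSM_PSME |
		      MPSM_PSMAD | MPSM_PRD_SET(0x1234);

	/* A completed read would return the data in the same PRD field. */
	uint32_t rd_result = MPSM_PRD_SET(0xbeef);

	printf("write cmd = 0x%08x\n", (unsigned int)wr);
	printf("read data = 0x%04x\n", (unsigned int)MPSM_PRD_GET(rd_result));
	return 0;
}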
struct device_node *mdio_node; + struct mii_bus *mii; + int ret; + + mii = mdiobus_alloc(); + if (!mii) + return -ENOMEM; + + mdio_node = of_get_child_by_name(dev->of_node, "mdio"); + if (!mdio_node) { + ret = -ENODEV; + goto out_free_bus; + } + + /* Enter config mode before registering the MDIO bus */ + ret = rtsn_reset(priv); + if (ret) + goto out_free_bus; + + ret = rtsn_change_mode(priv, OCR_OPC_CONFIG); + if (ret) + goto out_free_bus; + + rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK, + MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT); + + /* Register the MDIO bus */ + mii->name = "rtsn_mii"; + snprintf(mii->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + mii->priv = priv; + mii->read = rtsn_mii_read; + mii->write = rtsn_mii_write; + mii->parent = dev; + + ret = of_mdiobus_register(mii, mdio_node); + of_node_put(mdio_node); + if (ret) + goto out_free_bus; + + priv->mii = mii; + + return 0; + +out_free_bus: + mdiobus_free(mii); + return ret; +} + +static void rtsn_mdio_free(struct rtsn_private *priv) +{ + mdiobus_unregister(priv->mii); + mdiobus_free(priv->mii); + priv->mii = NULL; +} + +static void rtsn_adjust_link(struct net_device *ndev) +{ + struct rtsn_private *priv = netdev_priv(ndev); + struct phy_device *phydev = ndev->phydev; + bool new_state = false; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + if (phydev->link) { + if (phydev->speed != priv->speed) { + new_state = true; + priv->speed = phydev->speed; + } + + if (!priv->link) { + new_state = true; + priv->link = phydev->link; + } + } else if (priv->link) { + new_state = true; + priv->link = 0; + priv->speed = 0; + } + + if (new_state) { + /* Need to transition to CONFIG mode before reconfiguring and + * then back to the original mode. Any state change to/from + * CONFIG or OPERATION must go over DISABLED to stop Rx/Tx. 
+ */ + enum rtsn_mode orgmode = rtsn_read_mode(priv); + + /* Transit to CONFIG */ + if (orgmode != OCR_OPC_CONFIG) { + if (orgmode != OCR_OPC_DISABLE && + rtsn_change_mode(priv, OCR_OPC_DISABLE)) + goto out; + if (rtsn_change_mode(priv, OCR_OPC_CONFIG)) + goto out; + } + + rtsn_set_rate(priv); + + /* Transition to original mode */ + if (orgmode != OCR_OPC_CONFIG) { + if (rtsn_change_mode(priv, OCR_OPC_DISABLE)) + goto out; + if (orgmode != OCR_OPC_DISABLE && + rtsn_change_mode(priv, orgmode)) + goto out; + } + } +out: + spin_unlock_irqrestore(&priv->lock, flags); + + if (new_state) + phy_print_status(phydev); +} + +static int rtsn_phy_init(struct rtsn_private *priv) +{ + struct device_node *np = priv->ndev->dev.parent->of_node; + struct phy_device *phydev; + struct device_node *phy; + + priv->link = 0; + + phy = of_parse_phandle(np, "phy-handle", 0); + if (!phy) + return -ENOENT; + + phydev = of_phy_connect(priv->ndev, phy, rtsn_adjust_link, 0, + priv->iface); + of_node_put(phy); + if (!phydev) + return -ENOENT; + + /* Only support full-duplex mode */ + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + + phy_attached_info(phydev); + + return 0; +} + +static void rtsn_phy_deinit(struct rtsn_private *priv) +{ + phy_disconnect(priv->ndev->phydev); + priv->ndev->phydev = NULL; +} + +static int rtsn_init(struct rtsn_private *priv) +{ + int ret; + + ret = rtsn_desc_alloc(priv); + if (ret) + return ret; + + ret = rtsn_dmac_init(priv); + if (ret) + goto error_free_desc; + + ret = rtsn_hw_init(priv); + if (ret) + goto error_free_chain; + + ret = rtsn_phy_init(priv); + if (ret) + goto error_free_chain; + + ret = rtsn_request_irqs(priv); + if (ret) + goto error_free_phy; + + return 0; +error_free_phy: + rtsn_phy_deinit(priv); +error_free_chain: + rtsn_chain_free(priv); +error_free_desc: + rtsn_desc_free(priv); + return ret; +} + +static void rtsn_deinit(struct rtsn_private *priv) +{ + rtsn_free_irqs(priv); + rtsn_phy_deinit(priv); + rtsn_chain_free(priv); + rtsn_desc_free(priv); +} + +static void rtsn_parse_mac_address(struct device_node *np, + struct net_device *ndev) +{ + struct rtsn_private *priv = netdev_priv(ndev); + u8 addr[ETH_ALEN]; + u32 mrmac0; + u32 mrmac1; + + /* Try to read address from Device Tree. */ + if (!of_get_mac_address(np, addr)) { + eth_hw_addr_set(ndev, addr); + return; + } + + /* Try to read address from device. 
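The address handling that follows, together with the matching writes in rtsn_rmac_init() earlier, splits the six MAC bytes across MRMAC0 (upper two bytes) and MRMAC1 (lower four bytes). Below is a small, self-contained round-trip of that packing in plain C; it is a sketch for illustration and does not use the driver's register accessors.

/* Illustrative only: pack a MAC address the way the driver writes
 * MRMAC0/MRMAC1, then unpack it again as the DT-less fallback does.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t out[6];
	uint32_t mrmac0, mrmac1;
	int i;

	mrmac0 = (mac[0] << 8) | mac[1];
	mrmac1 = ((uint32_t)mac[2] << 24) | (mac[3] << 16) |
		 (mac[4] << 8) | mac[5];

	out[0] = (mrmac0 >> 8) & 0xff;
	out[1] = (mrmac0 >> 0) & 0xff;
	out[2] = (mrmac1 >> 24) & 0xff;
	out[3] = (mrmac1 >> 16) & 0xff;
	out[4] = (mrmac1 >> 8) & 0xff;
	out[5] = (mrmac1 >> 0) & 0xff;

	for (i = 0; i < 6; i++)
		printf("%02x%s", out[i], i < 5 ? ":" : "\n");
	return 0;
}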
*/ + mrmac0 = rtsn_read(priv, MRMAC0); + mrmac1 = rtsn_read(priv, MRMAC1); + + addr[0] = (mrmac0 >> 8) & 0xff; + addr[1] = (mrmac0 >> 0) & 0xff; + addr[2] = (mrmac1 >> 24) & 0xff; + addr[3] = (mrmac1 >> 16) & 0xff; + addr[4] = (mrmac1 >> 8) & 0xff; + addr[5] = (mrmac1 >> 0) & 0xff; + + if (is_valid_ether_addr(addr)) { + eth_hw_addr_set(ndev, addr); + return; + } + + /* Fallback to a random address */ + eth_hw_addr_random(ndev); +} + +static int rtsn_open(struct net_device *ndev) +{ + struct rtsn_private *priv = netdev_priv(ndev); + int ret; + + napi_enable(&priv->napi); + + ret = rtsn_init(priv); + if (ret) { + napi_disable(&priv->napi); + return ret; + } + + phy_start(ndev->phydev); + + netif_start_queue(ndev); + + return 0; +} + +static int rtsn_stop(struct net_device *ndev) +{ + struct rtsn_private *priv = netdev_priv(ndev); + + phy_stop(priv->ndev->phydev); + napi_disable(&priv->napi); + rtsn_change_mode(priv, OCR_OPC_DISABLE); + rtsn_deinit(priv); + + return 0; +} + +static netdev_tx_t rtsn_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct rtsn_private *priv = netdev_priv(ndev); + struct rtsn_ext_desc *desc; + int ret = NETDEV_TX_OK; + unsigned long flags; + dma_addr_t dma_addr; + int entry; + + spin_lock_irqsave(&priv->lock, flags); + + /* Drop packet if it won't fit in a single descriptor. */ + if (skb->len >= TX_DS) { + priv->stats.tx_dropped++; + priv->stats.tx_errors++; + goto out; + } + + if (priv->cur_tx - priv->dirty_tx > priv->num_tx_ring) { + netif_stop_subqueue(ndev, 0); + ret = NETDEV_TX_BUSY; + goto out; + } + + if (skb_put_padto(skb, ETH_ZLEN)) + goto out; + + dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) { + dev_kfree_skb_any(skb); + goto out; + } + + entry = priv->cur_tx % priv->num_tx_ring; + priv->tx_skb[entry] = skb; + desc = &priv->tx_ring[entry]; + desc->dptr = cpu_to_le32(dma_addr); + desc->info_ds = cpu_to_le16(skb->len); + desc->info1 = cpu_to_le64(skb->len); + + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + priv->ts_tag++; + desc->info_ds |= cpu_to_le16(TXC); + desc->info = priv->ts_tag; + } + + skb_tx_timestamp(skb); + dma_wmb(); + + desc->die_dt = DT_FSINGLE | D_DIE; + priv->cur_tx++; + + /* Start xmit */ + rtsn_write(priv, TRCR0, BIT(TX_CHAIN_IDX)); +out: + spin_unlock_irqrestore(&priv->lock, flags); + return ret; +} + +static void rtsn_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *storage) +{ + struct rtsn_private *priv = netdev_priv(ndev); + *storage = priv->stats; +} + +static int rtsn_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) +{ + if (!netif_running(ndev)) + return -ENODEV; + + return phy_do_ioctl_running(ndev, ifr, cmd); +} + +static int rtsn_hwtstamp_get(struct net_device *ndev, + struct kernel_hwtstamp_config *config) +{ + struct rcar_gen4_ptp_private *ptp_priv; + struct rtsn_private *priv; + + if (!netif_running(ndev)) + return -ENODEV; + + priv = netdev_priv(ndev); + ptp_priv = priv->ptp_priv; + + config->flags = 0; + + config->tx_type = + ptp_priv->tstamp_tx_ctrl ? 
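One detail of rtsn_start_xmit() above worth calling out: cur_tx and dirty_tx are free-running counters, so ring fullness is a simple subtraction and the descriptor slot is recovered with a modulo. The fragment below demonstrates that counter scheme in isolation; the 1024-entry size follows TX_CHAIN_SIZE in rtsn.h, the fullness test mirrors the driver's comparison, and everything else is illustrative.

/* Illustrative only: free-running producer/consumer counters for a TX
 * ring, with the slot index derived by modulo rather than by wrapping
 * the counters themselves.
 */
#include <stdio.h>

#define NUM_TX_RING 1024

static unsigned int cur_tx;    /* producer: frames queued to the ring */
static unsigned int dirty_tx;  /* consumer: frames completed by the HW */

static int ring_full(void)
{
	return cur_tx - dirty_tx > NUM_TX_RING;
}

int main(void)
{
	unsigned int entry;

	for (cur_tx = 0; cur_tx < 5; cur_tx++) {
		entry = cur_tx % NUM_TX_RING;   /* descriptor slot to fill */
		printf("queue frame %u in slot %u (full=%d)\n",
		       cur_tx, entry, ring_full());
	}
	return 0;
}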
HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + + switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) { + case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT: + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; + break; + case RCAR_GEN4_RXTSTAMP_TYPE_ALL: + config->rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + config->rx_filter = HWTSTAMP_FILTER_NONE; + break; + } + + return 0; +} + +static int rtsn_hwtstamp_set(struct net_device *ndev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct rcar_gen4_ptp_private *ptp_priv; + struct rtsn_private *priv; + u32 tstamp_rx_ctrl; + u32 tstamp_tx_ctrl; + + if (!netif_running(ndev)) + return -ENODEV; + + priv = netdev_priv(ndev); + ptp_priv = priv->ptp_priv; + + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + tstamp_tx_ctrl = 0; + break; + case HWTSTAMP_TX_ON: + tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED; + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + tstamp_rx_ctrl = 0; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED | + RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT; + break; + default: + config->rx_filter = HWTSTAMP_FILTER_ALL; + tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED | + RCAR_GEN4_RXTSTAMP_TYPE_ALL; + break; + } + + ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl; + ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl; + + return 0; +} + +static const struct net_device_ops rtsn_netdev_ops = { + .ndo_open = rtsn_open, + .ndo_stop = rtsn_stop, + .ndo_start_xmit = rtsn_start_xmit, + .ndo_get_stats64 = rtsn_get_stats64, + .ndo_eth_ioctl = rtsn_do_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_hwtstamp_set = rtsn_hwtstamp_set, + .ndo_hwtstamp_get = rtsn_hwtstamp_get, +}; + +static int rtsn_get_ts_info(struct net_device *ndev, + struct kernel_ethtool_ts_info *info) +{ + struct rtsn_private *priv = netdev_priv(ndev); + + info->phc_index = ptp_clock_index(priv->ptp_priv->clock); + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); + + return 0; +} + +static const struct ethtool_ops rtsn_ethtool_ops = { + .nway_reset = phy_ethtool_nway_reset, + .get_link = ethtool_op_get_link, + .get_ts_info = rtsn_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, +}; + +static const struct of_device_id rtsn_match_table[] = { + { .compatible = "renesas,r8a779g0-ethertsn", }, + { /* Sentinel */ } +}; + +MODULE_DEVICE_TABLE(of, rtsn_match_table); + +static int rtsn_probe(struct platform_device *pdev) +{ + struct rtsn_private *priv; + struct net_device *ndev; + struct resource *res; + int ret; + + ndev = alloc_etherdev_mqs(sizeof(struct rtsn_private), TX_NUM_CHAINS, + RX_NUM_CHAINS); + if (!ndev) + return -ENOMEM; + + priv = netdev_priv(ndev); + priv->pdev = pdev; + priv->ndev = ndev; + priv->ptp_priv = rcar_gen4_ptp_alloc(pdev); + + spin_lock_init(&priv->lock); + platform_set_drvdata(pdev, priv); + + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) { + ret = PTR_ERR(priv->clk); + goto error_free; + } + + priv->reset = devm_reset_control_get(&pdev->dev, NULL); + if 
(IS_ERR(priv->reset)) { + ret = PTR_ERR(priv->reset); + goto error_free; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tsnes"); + if (!res) { + dev_err(&pdev->dev, "Can't find tsnes resource\n"); + ret = -EINVAL; + goto error_free; + } + + priv->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->base)) { + ret = PTR_ERR(priv->base); + goto error_free; + } + + SET_NETDEV_DEV(ndev, &pdev->dev); + + ndev->features = NETIF_F_RXCSUM; + ndev->hw_features = NETIF_F_RXCSUM; + ndev->base_addr = res->start; + ndev->netdev_ops = &rtsn_netdev_ops; + ndev->ethtool_ops = &rtsn_ethtool_ops; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gptp"); + if (!res) { + dev_err(&pdev->dev, "Can't find gptp resource\n"); + ret = -EINVAL; + goto error_free; + } + + priv->ptp_priv->addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->ptp_priv->addr)) { + ret = PTR_ERR(priv->ptp_priv->addr); + goto error_free; + } + + ret = rtsn_get_phy_params(priv); + if (ret) + goto error_free; + + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + + netif_napi_add(ndev, &priv->napi, rtsn_poll); + + rtsn_parse_mac_address(pdev->dev.of_node, ndev); + + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + + device_set_wakeup_capable(&pdev->dev, 1); + + ret = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT, + clk_get_rate(priv->clk)); + if (ret) + goto error_pm; + + ret = rtsn_mdio_alloc(priv); + if (ret) + goto error_ptp; + + ret = register_netdev(ndev); + if (ret) + goto error_mdio; + + netdev_info(ndev, "MAC address %pM\n", ndev->dev_addr); + + return 0; + +error_mdio: + rtsn_mdio_free(priv); +error_ptp: + rcar_gen4_ptp_unregister(priv->ptp_priv); +error_pm: + netif_napi_del(&priv->napi); + rtsn_change_mode(priv, OCR_OPC_DISABLE); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); +error_free: + free_netdev(ndev); + + return ret; +} + +static int rtsn_remove(struct platform_device *pdev) +{ + struct rtsn_private *priv = platform_get_drvdata(pdev); + + unregister_netdev(priv->ndev); + rtsn_mdio_free(priv); + rcar_gen4_ptp_unregister(priv->ptp_priv); + rtsn_change_mode(priv, OCR_OPC_DISABLE); + netif_napi_del(&priv->napi); + + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + free_netdev(priv->ndev); + + return 0; +} + +static struct platform_driver rtsn_driver = { + .probe = rtsn_probe, + .remove = rtsn_remove, + .driver = { + .name = "rtsn", + .of_match_table = rtsn_match_table, + } +}; +module_platform_driver(rtsn_driver); + +MODULE_AUTHOR("Phong Hoang, Niklas Söderlund"); +MODULE_DESCRIPTION("Renesas Ethernet-TSN device driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/renesas/rtsn.h b/drivers/net/ethernet/renesas/rtsn.h new file mode 100644 index 000000000000..3183e80d7e6b --- /dev/null +++ b/drivers/net/ethernet/renesas/rtsn.h @@ -0,0 +1,464 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Renesas Ethernet-TSN device driver + * + * Copyright (C) 2022 Renesas Electronics Corporation + * Copyright (C) 2023 Niklas Söderlund <niklas.soderlund@ragnatech.se> + */ + +#ifndef __RTSN_H__ +#define __RTSN_H__ + +#include <linux/types.h> + +#define AXIBMI 0x0000 +#define TSNMHD 0x1000 +#define RMSO 0x2000 +#define RMRO 0x3800 + +enum rtsn_reg { + AXIWC = AXIBMI + 0x0000, + AXIRC = AXIBMI + 0x0004, + TDPC0 = AXIBMI + 0x0010, + TFT = AXIBMI + 0x0090, + TATLS0 = AXIBMI + 0x00a0, + TATLS1 = AXIBMI + 0x00a4, + TATLR = AXIBMI + 0x00a8, + RATLS0 = AXIBMI + 0x00b0, + RATLS1 = AXIBMI + 0x00b4, 
+ RATLR = AXIBMI + 0x00b8, + TSA0 = AXIBMI + 0x00c0, + TSS0 = AXIBMI + 0x00c4, + TRCR0 = AXIBMI + 0x0140, + RIDAUAS0 = AXIBMI + 0x0180, + RR = AXIBMI + 0x0200, + TATS = AXIBMI + 0x0210, + TATSR0 = AXIBMI + 0x0214, + TATSR1 = AXIBMI + 0x0218, + TATSR2 = AXIBMI + 0x021c, + RATS = AXIBMI + 0x0220, + RATSR0 = AXIBMI + 0x0224, + RATSR1 = AXIBMI + 0x0228, + RATSR2 = AXIBMI + 0x022c, + RIDASM0 = AXIBMI + 0x0240, + RIDASAM0 = AXIBMI + 0x0244, + RIDACAM0 = AXIBMI + 0x0248, + EIS0 = AXIBMI + 0x0300, + EIE0 = AXIBMI + 0x0304, + EID0 = AXIBMI + 0x0308, + EIS1 = AXIBMI + 0x0310, + EIE1 = AXIBMI + 0x0314, + EID1 = AXIBMI + 0x0318, + TCEIS0 = AXIBMI + 0x0340, + TCEIE0 = AXIBMI + 0x0344, + TCEID0 = AXIBMI + 0x0348, + RFSEIS0 = AXIBMI + 0x04c0, + RFSEIE0 = AXIBMI + 0x04c4, + RFSEID0 = AXIBMI + 0x04c8, + RFEIS0 = AXIBMI + 0x0540, + RFEIE0 = AXIBMI + 0x0544, + RFEID0 = AXIBMI + 0x0548, + RCEIS0 = AXIBMI + 0x05c0, + RCEIE0 = AXIBMI + 0x05c4, + RCEID0 = AXIBMI + 0x05c8, + RIDAOIS = AXIBMI + 0x0640, + RIDAOIE = AXIBMI + 0x0644, + RIDAOID = AXIBMI + 0x0648, + TSFEIS = AXIBMI + 0x06c0, + TSFEIE = AXIBMI + 0x06c4, + TSFEID = AXIBMI + 0x06c8, + TSCEIS = AXIBMI + 0x06d0, + TSCEIE = AXIBMI + 0x06d4, + TSCEID = AXIBMI + 0x06d8, + DIS = AXIBMI + 0x0b00, + DIE = AXIBMI + 0x0b04, + DID = AXIBMI + 0x0b08, + TDIS0 = AXIBMI + 0x0b10, + TDIE0 = AXIBMI + 0x0b14, + TDID0 = AXIBMI + 0x0b18, + RDIS0 = AXIBMI + 0x0b90, + RDIE0 = AXIBMI + 0x0b94, + RDID0 = AXIBMI + 0x0b98, + TSDIS = AXIBMI + 0x0c10, + TSDIE = AXIBMI + 0x0c14, + TSDID = AXIBMI + 0x0c18, + GPOUT = AXIBMI + 0x6000, + + OCR = TSNMHD + 0x0000, + OSR = TSNMHD + 0x0004, + SWR = TSNMHD + 0x0008, + SIS = TSNMHD + 0x000c, + GIS = TSNMHD + 0x0010, + GIE = TSNMHD + 0x0014, + GID = TSNMHD + 0x0018, + TIS1 = TSNMHD + 0x0020, + TIE1 = TSNMHD + 0x0024, + TID1 = TSNMHD + 0x0028, + TIS2 = TSNMHD + 0x0030, + TIE2 = TSNMHD + 0x0034, + TID2 = TSNMHD + 0x0038, + RIS = TSNMHD + 0x0040, + RIE = TSNMHD + 0x0044, + RID = TSNMHD + 0x0048, + TGC1 = TSNMHD + 0x0050, + TGC2 = TSNMHD + 0x0054, + TFS0 = TSNMHD + 0x0060, + TCF0 = TSNMHD + 0x0070, + TCR1 = TSNMHD + 0x0080, + TCR2 = TSNMHD + 0x0084, + TCR3 = TSNMHD + 0x0088, + TCR4 = TSNMHD + 0x008c, + TMS0 = TSNMHD + 0x0090, + TSR1 = TSNMHD + 0x00b0, + TSR2 = TSNMHD + 0x00b4, + TSR3 = TSNMHD + 0x00b8, + TSR4 = TSNMHD + 0x00bc, + TSR5 = TSNMHD + 0x00c0, + RGC = TSNMHD + 0x00d0, + RDFCR = TSNMHD + 0x00d4, + RCFCR = TSNMHD + 0x00d8, + REFCNCR = TSNMHD + 0x00dc, + RSR1 = TSNMHD + 0x00e0, + RSR2 = TSNMHD + 0x00e4, + RSR3 = TSNMHD + 0x00e8, + TCIS = TSNMHD + 0x01e0, + TCIE = TSNMHD + 0x01e4, + TCID = TSNMHD + 0x01e8, + TPTPC = TSNMHD + 0x01f0, + TTML = TSNMHD + 0x01f4, + TTJ = TSNMHD + 0x01f8, + TCC = TSNMHD + 0x0200, + TCS = TSNMHD + 0x0204, + TGS = TSNMHD + 0x020c, + TACST0 = TSNMHD + 0x0210, + TACST1 = TSNMHD + 0x0214, + TACST2 = TSNMHD + 0x0218, + TALIT0 = TSNMHD + 0x0220, + TALIT1 = TSNMHD + 0x0224, + TALIT2 = TSNMHD + 0x0228, + TAEN0 = TSNMHD + 0x0230, + TAEN1 = TSNMHD + 0x0234, + TASFE = TSNMHD + 0x0240, + TACLL0 = TSNMHD + 0x0250, + TACLL1 = TSNMHD + 0x0254, + TACLL2 = TSNMHD + 0x0258, + CACC = TSNMHD + 0x0260, + CCS = TSNMHD + 0x0264, + CAIV0 = TSNMHD + 0x0270, + CAUL0 = TSNMHD + 0x0290, + TOCST0 = TSNMHD + 0x0300, + TOCST1 = TSNMHD + 0x0304, + TOCST2 = TSNMHD + 0x0308, + TOLIT0 = TSNMHD + 0x0310, + TOLIT1 = TSNMHD + 0x0314, + TOLIT2 = TSNMHD + 0x0318, + TOEN0 = TSNMHD + 0x0320, + TOEN1 = TSNMHD + 0x0324, + TOSFE = TSNMHD + 0x0330, + TCLR0 = TSNMHD + 0x0340, + TCLR1 = TSNMHD + 0x0344, + TCLR2 = TSNMHD + 0x0348, + TSMS = TSNMHD + 0x0350, + COCC 
= TSNMHD + 0x0360, + COIV0 = TSNMHD + 0x03b0, + COUL0 = TSNMHD + 0x03d0, + QSTMACU0 = TSNMHD + 0x0400, + QSTMACD0 = TSNMHD + 0x0404, + QSTMAMU0 = TSNMHD + 0x0408, + QSTMAMD0 = TSNMHD + 0x040c, + QSFTVL0 = TSNMHD + 0x0410, + QSFTVLM0 = TSNMHD + 0x0414, + QSFTMSD0 = TSNMHD + 0x0418, + QSFTGMI0 = TSNMHD + 0x041c, + QSFTLS = TSNMHD + 0x0600, + QSFTLIS = TSNMHD + 0x0604, + QSFTLIE = TSNMHD + 0x0608, + QSFTLID = TSNMHD + 0x060c, + QSMSMC = TSNMHD + 0x0610, + QSGTMC = TSNMHD + 0x0614, + QSEIS = TSNMHD + 0x0618, + QSEIE = TSNMHD + 0x061c, + QSEID = TSNMHD + 0x0620, + QGACST0 = TSNMHD + 0x0630, + QGACST1 = TSNMHD + 0x0634, + QGACST2 = TSNMHD + 0x0638, + QGALIT1 = TSNMHD + 0x0640, + QGALIT2 = TSNMHD + 0x0644, + QGAEN0 = TSNMHD + 0x0648, + QGAEN1 = TSNMHD + 0x074c, + QGIGS = TSNMHD + 0x0650, + QGGC = TSNMHD + 0x0654, + QGATL0 = TSNMHD + 0x0664, + QGATL1 = TSNMHD + 0x0668, + QGATL2 = TSNMHD + 0x066c, + QGOCST0 = TSNMHD + 0x0670, + QGOCST1 = TSNMHD + 0x0674, + QGOCST2 = TSNMHD + 0x0678, + QGOLIT0 = TSNMHD + 0x067c, + QGOLIT1 = TSNMHD + 0x0680, + QGOLIT2 = TSNMHD + 0x0684, + QGOEN0 = TSNMHD + 0x0688, + QGOEN1 = TSNMHD + 0x068c, + QGTRO = TSNMHD + 0x0690, + QGTR1 = TSNMHD + 0x0694, + QGTR2 = TSNMHD + 0x0698, + QGFSMS = TSNMHD + 0x069c, + QTMIS = TSNMHD + 0x06e0, + QTMIE = TSNMHD + 0x06e4, + QTMID = TSNMHD + 0x06e8, + QMEC = TSNMHD + 0x0700, + QMMC = TSNMHD + 0x0704, + QRFDC = TSNMHD + 0x0708, + QYFDC = TSNMHD + 0x070c, + QVTCMC0 = TSNMHD + 0x0710, + QMCBSC0 = TSNMHD + 0x0750, + QMCIRC0 = TSNMHD + 0x0790, + QMEBSC0 = TSNMHD + 0x07d0, + QMEIRC0 = TSNMHD + 0x0710, + QMCFC = TSNMHD + 0x0850, + QMEIS = TSNMHD + 0x0860, + QMEIE = TSNMHD + 0x0864, + QMEID = TSNMHD + 0x086c, + QSMFC0 = TSNMHD + 0x0870, + QMSPPC0 = TSNMHD + 0x08b0, + QMSRPC0 = TSNMHD + 0x08f0, + QGPPC0 = TSNMHD + 0x0930, + QGRPC0 = TSNMHD + 0x0950, + QMDPC0 = TSNMHD + 0x0970, + QMGPC0 = TSNMHD + 0x09b0, + QMYPC0 = TSNMHD + 0x09f0, + QMRPC0 = TSNMHD + 0x0a30, + MQSTMACU = TSNMHD + 0x0a70, + MQSTMACD = TSNMHD + 0x0a74, + MQSTMAMU = TSNMHD + 0x0a78, + MQSTMAMD = TSNMHD + 0x0a7c, + MQSFTVL = TSNMHD + 0x0a80, + MQSFTVLM = TSNMHD + 0x0a84, + MQSFTMSD = TSNMHD + 0x0a88, + MQSFTGMI = TSNMHD + 0x0a8c, + + CFCR0 = RMSO + 0x0800, + FMSCR = RMSO + 0x0c10, + + MMC = RMRO + 0x0000, + MPSM = RMRO + 0x0010, + MPIC = RMRO + 0x0014, + MTFFC = RMRO + 0x0020, + MTPFC = RMRO + 0x0024, + MTATC0 = RMRO + 0x0040, + MRGC = RMRO + 0x0080, + MRMAC0 = RMRO + 0x0084, + MRMAC1 = RMRO + 0x0088, + MRAFC = RMRO + 0x008c, + MRSCE = RMRO + 0x0090, + MRSCP = RMRO + 0x0094, + MRSCC = RMRO + 0x0098, + MRFSCE = RMRO + 0x009c, + MRFSCP = RMRO + 0x00a0, + MTRC = RMRO + 0x00a4, + MPFC = RMRO + 0x0100, + MLVC = RMRO + 0x0340, + MEEEC = RMRO + 0x0350, + MLBC = RMRO + 0x0360, + MGMR = RMRO + 0x0400, + MMPFTCT = RMRO + 0x0410, + MAPFTCT = RMRO + 0x0414, + MPFRCT = RMRO + 0x0418, + MFCICT = RMRO + 0x041c, + MEEECT = RMRO + 0x0420, + MEIS = RMRO + 0x0500, + MEIE = RMRO + 0x0504, + MEID = RMRO + 0x0508, + MMIS0 = RMRO + 0x0510, + MMIE0 = RMRO + 0x0514, + MMID0 = RMRO + 0x0518, + MMIS1 = RMRO + 0x0520, + MMIE1 = RMRO + 0x0524, + MMID1 = RMRO + 0x0528, + MMIS2 = RMRO + 0x0530, + MMIE2 = RMRO + 0x0534, + MMID2 = RMRO + 0x0538, + MXMS = RMRO + 0x0600, + +}; + +/* AXIBMI */ +#define RR_RATRR BIT(0) +#define RR_TATRR BIT(1) +#define RR_RST (RR_RATRR | RR_TATRR) +#define RR_RST_COMPLETE 0x03 + +#define AXIWC_DEFAULT 0xffff +#define AXIRC_DEFAULT 0xffff + +#define TATLS0_TEDE BIT(1) +#define TATLS0_TATEN_SHIFT 24 +#define TATLS0_TATEN(n) ((n) << TATLS0_TATEN_SHIFT) +#define TATLR_TATL BIT(31) + 
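The register enum above encodes each register as a block base (AXIBMI, TSNMHD, RMSO, RMRO) plus an offset within that block. The short program below just evaluates a few of those sums to show the resulting absolute offsets; it is a demonstration only and lists a hand-picked subset of the map.

/* Illustrative only: the base-plus-offset layout used by the rtsn.h
 * register map, evaluated for a few representative registers.
 */
#include <stdio.h>

#define AXIBMI 0x0000
#define TSNMHD 0x1000
#define RMSO   0x2000
#define RMRO   0x3800

enum {
	AXIWC = AXIBMI + 0x0000,
	OCR   = TSNMHD + 0x0000,
	CFCR0 = RMSO   + 0x0800,
	MPIC  = RMRO   + 0x0014,
};

int main(void)
{
	printf("AXIWC at 0x%04x\n", (unsigned)AXIWC);  /* 0x0000 */
	printf("OCR   at 0x%04x\n", (unsigned)OCR);    /* 0x1000 */
	printf("CFCR0 at 0x%04x\n", (unsigned)CFCR0);  /* 0x2800 */
	printf("MPIC  at 0x%04x\n", (unsigned)MPIC);   /* 0x3814 */
	return 0;
}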
+#define RATLS0_RETS BIT(2) +#define RATLS0_REDE BIT(3) +#define RATLS0_RATEN_SHIFT 24 +#define RATLS0_RATEN(n) ((n) << RATLS0_RATEN_SHIFT) +#define RATLR_RATL BIT(31) + +#define DIE_DID_TDICX(n) BIT((n)) +#define DIE_DID_RDICX(n) BIT((n) + 8) +#define TDIE_TDID_TDX(n) BIT(n) +#define RDIE_RDID_RDX(n) BIT(n) +#define TDIS_TDS(n) BIT(n) +#define RDIS_RDS(n) BIT(n) + +/* MHD */ +#define OSR_OPS 0x07 +#define SWR_SWR BIT(0) + +#define TGC1_TQTM_SFM 0xff00 +#define TGC1_STTV_DEFAULT 0x03 + +#define TMS_MFS_MAX 0x2800 + +/* RMAC System */ +#define CFCR_SDID(n) ((n) << 16) +#define FMSCR_FMSIE(n) ((n) << 0) + +/* RMAC */ +#define MPIC_PIS_MASK GENMASK(1, 0) +#define MPIC_PIS_MII 0 +#define MPIC_PIS_RMII 0x01 +#define MPIC_PIS_GMII 0x02 +#define MPIC_PIS_RGMII 0x03 +#define MPIC_LSC_SHIFT 2 +#define MPIC_LSC_MASK GENMASK(3, MPIC_LSC_SHIFT) +#define MPIC_LSC_10M (0 << MPIC_LSC_SHIFT) +#define MPIC_LSC_100M (0x01 << MPIC_LSC_SHIFT) +#define MPIC_LSC_1G (0x02 << MPIC_LSC_SHIFT) +#define MPIC_PSMCS_SHIFT 16 +#define MPIC_PSMCS_MASK GENMASK(21, MPIC_PSMCS_SHIFT) +#define MPIC_PSMCS_DEFAULT (0x0a << MPIC_PSMCS_SHIFT) +#define MPIC_PSMHT_SHIFT 24 +#define MPIC_PSMHT_MASK GENMASK(26, MPIC_PSMHT_SHIFT) +#define MPIC_PSMHT_DEFAULT (0x07 << MPIC_PSMHT_SHIFT) + +#define MLVC_PASE BIT(8) +#define MLVC_PSE BIT(16) +#define MLVC_PLV BIT(17) + +#define MPSM_PSME BIT(0) +#define MPSM_PSMAD BIT(1) +#define MPSM_PDA_SHIFT 3 +#define MPSM_PDA_MASK GENMASK(7, 3) +#define MPSM_PDA(n) (((n) << MPSM_PDA_SHIFT) & MPSM_PDA_MASK) +#define MPSM_PRA_SHIFT 8 +#define MPSM_PRA_MASK GENMASK(12, 8) +#define MPSM_PRA(n) (((n) << MPSM_PRA_SHIFT) & MPSM_PRA_MASK) +#define MPSM_PRD_SHIFT 16 +#define MPSM_PRD_SET(n) ((n) << MPSM_PRD_SHIFT) +#define MPSM_PRD_GET(n) ((n) >> MPSM_PRD_SHIFT) + +#define GPOUT_RDM BIT(13) +#define GPOUT_TDM BIT(14) + +/* RTSN */ +#define RTSN_INTERVAL_US 1000 +#define RTSN_TIMEOUT_US 1000000 + +#define TX_NUM_CHAINS 1 +#define RX_NUM_CHAINS 1 + +#define TX_CHAIN_SIZE 1024 +#define RX_CHAIN_SIZE 1024 + +#define TX_CHAIN_IDX 0 +#define RX_CHAIN_IDX 0 + +#define TX_CHAIN_ADDR_OFFSET (sizeof(struct rtsn_desc) * TX_CHAIN_IDX) +#define RX_CHAIN_ADDR_OFFSET (sizeof(struct rtsn_desc) * RX_CHAIN_IDX) + +#define PKT_BUF_SZ 1584 +#define RTSN_ALIGN 128 + +enum rtsn_mode { + OCR_OPC_DISABLE, + OCR_OPC_CONFIG, + OCR_OPC_OPERATION, +}; + +/* Descriptors */ +enum RX_DS_CC_BIT { + RX_DS = 0x0fff, /* Data size */ + RX_TR = 0x1000, /* Truncation indication */ + RX_EI = 0x2000, /* Error indication */ + RX_PS = 0xc000, /* Padding selection */ +}; + +enum TX_FS_TAGL_BIT { + TX_DS = 0x0fff, /* Data size */ + TX_TAGL = 0xf000, /* Frame tag LSBs */ +}; + +enum DIE_DT { + /* HW/SW arbitration */ + DT_FEMPTY_IS = 0x10, + DT_FEMPTY_IC = 0x20, + DT_FEMPTY_ND = 0x30, + DT_FEMPTY = 0x40, + DT_FEMPTY_START = 0x50, + DT_FEMPTY_MID = 0x60, + DT_FEMPTY_END = 0x70, + + /* Frame data */ + DT_FSINGLE = 0x80, + DT_FSTART = 0x90, + DT_FMID = 0xa0, + DT_FEND = 0xb0, + + /* Chain control */ + DT_LEMPTY = 0xc0, + DT_EEMPTY = 0xd0, + DT_LINK = 0xe0, + DT_EOS = 0xf0, + + DT_MASK = 0xf0, + D_DIE = 0x08, +}; + +struct rtsn_desc { + __le16 info_ds; + __u8 info; + u8 die_dt; + __le32 dptr; +} __packed; + +struct rtsn_ts_desc { + __le16 info_ds; + __u8 info; + u8 die_dt; + __le32 dptr; + __le32 ts_nsec; + __le32 ts_sec; +} __packed; + +struct rtsn_ext_desc { + __le16 info_ds; + __u8 info; + u8 die_dt; + __le32 dptr; + __le64 info1; +} __packed; + +struct rtsn_ext_ts_desc { + __le16 info_ds; + __u8 info; + u8 die_dt; + __le32 dptr; + __le64 info1; + 
__le32 ts_nsec; + __le32 ts_sec; +} __packed; + +enum EXT_INFO_DS_BIT { + TXC = 0x4000, +}; + +#endif diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 8fa6c0e9195b..7d69302ffa0a 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1396,7 +1396,7 @@ static void efx_ef10_table_reset_mc_allocations(struct efx_nic *efx) efx_mcdi_filter_table_reset_mc_allocations(efx); nic_data->must_restore_piobufs = true; efx_ef10_forget_old_piobufs(efx); - efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; + efx->rss_context.priv.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; /* Driver-created vswitches and vports must be re-created */ nic_data->must_probe_vswitching = true; diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c index cf55202b3a7b..896ffca4aee2 100644 --- a/drivers/net/ethernet/sfc/ef100_ethtool.c +++ b/drivers/net/ethernet/sfc/ef100_ethtool.c @@ -59,8 +59,12 @@ const struct ethtool_ops ef100_ethtool_ops = { .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, .get_rxfh_key_size = efx_ethtool_get_rxfh_key_size, + .rxfh_priv_size = sizeof(struct efx_rss_context_priv), .get_rxfh = efx_ethtool_get_rxfh, .set_rxfh = efx_ethtool_set_rxfh, + .create_rxfh_context = efx_ethtool_create_rxfh_context, + .modify_rxfh_context = efx_ethtool_modify_rxfh_context, + .remove_rxfh_context = efx_ethtool_remove_rxfh_context, .get_module_info = efx_ethtool_get_module_info, .get_module_eeprom = efx_ethtool_get_module_eeprom, diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index e9d9de8e648a..6f1a01ded7d4 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -299,7 +299,7 @@ static int efx_probe_nic(struct efx_nic *efx) if (efx->n_channels > 1) netdev_rss_key_fill(efx->rss_context.rx_hash_key, sizeof(efx->rss_context.rx_hash_key)); - efx_set_default_rx_indir_table(efx, &efx->rss_context); + efx_set_default_rx_indir_table(efx, efx->rss_context.rx_indir_table); /* Initialise the interrupt moderation settings */ efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000); diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 48d3623735ba..7a6cab883d66 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -158,7 +158,7 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx, } /* RSS contexts */ -static inline bool efx_rss_active(struct efx_rss_context *ctx) +static inline bool efx_rss_active(struct efx_rss_context_priv *ctx) { return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID; } diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index 4ebd5ae23eca..13cf647051af 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -714,7 +714,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method) mutex_lock(&efx->mac_lock); down_write(&efx->filter_sem); - mutex_lock(&efx->rss_lock); + mutex_lock(&efx->net_dev->ethtool->rss_lock); efx->type->fini(efx); } @@ -777,7 +777,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) if (efx->type->rx_restore_rss_contexts) efx->type->rx_restore_rss_contexts(efx); - mutex_unlock(&efx->rss_lock); + mutex_unlock(&efx->net_dev->ethtool->rss_lock); efx->type->filter_table_restore(efx); up_write(&efx->filter_sem); @@ -793,7 +793,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) fail: efx->port_initialized = 
false; - mutex_unlock(&efx->rss_lock); + mutex_unlock(&efx->net_dev->ethtool->rss_lock); up_write(&efx->filter_sem); mutex_unlock(&efx->mac_lock); @@ -1000,9 +1000,7 @@ int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev) efx->type->rx_hash_offset - efx->type->rx_prefix_size; efx->rx_packet_ts_offset = efx->type->rx_ts_offset - efx->type->rx_prefix_size; - INIT_LIST_HEAD(&efx->rss_context.list); - efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; - mutex_init(&efx->rss_lock); + efx->rss_context.priv.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; efx->vport_id = EVB_PORT_ID_ASSIGNED; spin_lock_init(&efx->stats_lock); efx->vi_stride = EFX_DEFAULT_VI_STRIDE; diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 37c69c8d90b1..7c887160e2ef 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -226,7 +226,7 @@ static void efx_ethtool_get_fec_stats(struct net_device *net_dev, } static int efx_ethtool_get_ts_info(struct net_device *net_dev, - struct ethtool_ts_info *ts_info) + struct kernel_ethtool_ts_info *ts_info) { struct efx_nic *efx = efx_netdev_priv(net_dev); @@ -268,8 +268,12 @@ const struct ethtool_ops efx_ethtool_ops = { .set_rxnfc = efx_ethtool_set_rxnfc, .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, .get_rxfh_key_size = efx_ethtool_get_rxfh_key_size, + .rxfh_priv_size = sizeof(struct efx_rss_context_priv), .get_rxfh = efx_ethtool_get_rxfh, .set_rxfh = efx_ethtool_set_rxfh, + .create_rxfh_context = efx_ethtool_create_rxfh_context, + .modify_rxfh_context = efx_ethtool_modify_rxfh_context, + .remove_rxfh_context = efx_ethtool_remove_rxfh_context, .get_ts_info = efx_ethtool_get_ts_info, .get_module_info = efx_ethtool_get_module_info, .get_module_eeprom = efx_ethtool_get_module_eeprom, diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c index 7d5e5db4eac5..6ded44b86052 100644 --- a/drivers/net/ethernet/sfc/ethtool_common.c +++ b/drivers/net/ethernet/sfc/ethtool_common.c @@ -820,10 +820,10 @@ int efx_ethtool_get_rxnfc(struct net_device *net_dev, return 0; case ETHTOOL_GRXFH: { - struct efx_rss_context *ctx = &efx->rss_context; + struct efx_rss_context_priv *ctx = &efx->rss_context.priv; __u64 data; - mutex_lock(&efx->rss_lock); + mutex_lock(&net_dev->ethtool->rss_lock); if (info->flow_type & FLOW_RSS && info->rss_context) { ctx = efx_find_rss_context_entry(efx, info->rss_context); if (!ctx) { @@ -864,7 +864,7 @@ int efx_ethtool_get_rxnfc(struct net_device *net_dev, out_setdata_unlock: info->data = data; out_unlock: - mutex_unlock(&efx->rss_lock); + mutex_unlock(&net_dev->ethtool->rss_lock); return rc; } @@ -1163,46 +1163,14 @@ u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev) return efx->type->rx_hash_key_size; } -static int efx_ethtool_get_rxfh_context(struct net_device *net_dev, - struct ethtool_rxfh_param *rxfh) -{ - struct efx_nic *efx = efx_netdev_priv(net_dev); - struct efx_rss_context *ctx; - int rc = 0; - - if (!efx->type->rx_pull_rss_context_config) - return -EOPNOTSUPP; - - mutex_lock(&efx->rss_lock); - ctx = efx_find_rss_context_entry(efx, rxfh->rss_context); - if (!ctx) { - rc = -ENOENT; - goto out_unlock; - } - rc = efx->type->rx_pull_rss_context_config(efx, ctx); - if (rc) - goto out_unlock; - - rxfh->hfunc = ETH_RSS_HASH_TOP; - if (rxfh->indir) - memcpy(rxfh->indir, ctx->rx_indir_table, - sizeof(ctx->rx_indir_table)); - if (rxfh->key) - memcpy(rxfh->key, ctx->rx_hash_key, - efx->type->rx_hash_key_size); -out_unlock: - 
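Context for the sfc hunks in this area: custom RSS contexts move from a driver-private list guarded by efx->rss_lock to objects owned by the ethtool core, with the driver's per-context state carved out of a trailing private area sized by rxfh_priv_size and reached through ethtool_rxfh_context_priv(). A minimal userspace sketch of that trailing-private-area pattern follows; the demo_* structs and helpers are invented stand-ins, not the real ethtool API.

/* Illustrative only: a core-owned object that carries an opaque,
 * driver-sized private area at its tail, similar in spirit to
 * ethtool_rxfh_context plus rxfh_priv_size.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_ctx {
	unsigned int indir_size;
	unsigned int key_size;
	unsigned char data[];          /* driver-private bytes follow */
};

struct demo_drv_priv {                /* what a driver keeps per context */
	unsigned int context_id;
	int rx_hash_udp_4tuple;
};

static struct demo_ctx *demo_ctx_alloc(size_t priv_size)
{
	return calloc(1, sizeof(struct demo_ctx) + priv_size);
}

static void *demo_ctx_priv(struct demo_ctx *ctx)
{
	return ctx->data;
}

int main(void)
{
	struct demo_ctx *ctx = demo_ctx_alloc(sizeof(struct demo_drv_priv));
	struct demo_drv_priv *priv;

	if (!ctx)
		return 1;

	priv = demo_ctx_priv(ctx);
	priv->context_id = 0xffffffff;  /* "invalid" until pushed to HW */
	priv->rx_hash_udp_4tuple = 0;

	printf("ctx=%p priv=%p id=0x%x\n", (void *)ctx, (void *)priv,
	       priv->context_id);
	free(ctx);
	return 0;
}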
mutex_unlock(&efx->rss_lock); - return rc; -} - int efx_ethtool_get_rxfh(struct net_device *net_dev, struct ethtool_rxfh_param *rxfh) { struct efx_nic *efx = efx_netdev_priv(net_dev); int rc; - if (rxfh->rss_context) - return efx_ethtool_get_rxfh_context(net_dev, rxfh); + if (rxfh->rss_context) /* core should never call us for these */ + return -EINVAL; rc = efx->type->rx_pull_rss_config(efx); if (rc) @@ -1218,68 +1186,85 @@ int efx_ethtool_get_rxfh(struct net_device *net_dev, return 0; } -static int efx_ethtool_set_rxfh_context(struct net_device *net_dev, - struct ethtool_rxfh_param *rxfh, - struct netlink_ext_ack *extack) +int efx_ethtool_modify_rxfh_context(struct net_device *net_dev, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) { struct efx_nic *efx = efx_netdev_priv(net_dev); - u32 *rss_context = &rxfh->rss_context; - struct efx_rss_context *ctx; - u32 *indir = rxfh->indir; - bool allocated = false; - u8 *key = rxfh->key; - int rc; + struct efx_rss_context_priv *priv; + const u32 *indir = rxfh->indir; + const u8 *key = rxfh->key; - if (!efx->type->rx_push_rss_context_config) + if (!efx->type->rx_push_rss_context_config) { + NL_SET_ERR_MSG_MOD(extack, + "NIC type does not support custom contexts"); return -EOPNOTSUPP; - - mutex_lock(&efx->rss_lock); - - if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) { - if (rxfh->rss_delete) { - /* alloc + delete == Nothing to do */ - rc = -EINVAL; - goto out_unlock; - } - ctx = efx_alloc_rss_context_entry(efx); - if (!ctx) { - rc = -ENOMEM; - goto out_unlock; - } - ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; - /* Initialise indir table and key to defaults */ - efx_set_default_rx_indir_table(efx, ctx); - netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key)); - allocated = true; - } else { - ctx = efx_find_rss_context_entry(efx, *rss_context); - if (!ctx) { - rc = -ENOENT; - goto out_unlock; - } } - - if (rxfh->rss_delete) { - /* delete this context */ - rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL); - if (!rc) - efx_free_rss_context_entry(ctx); - goto out_unlock; + /* Hash function is Toeplitz, cannot be changed */ + if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && + rxfh->hfunc != ETH_RSS_HASH_TOP) { + NL_SET_ERR_MSG_MOD(extack, "Only Toeplitz hash is supported"); + return -EOPNOTSUPP; } + priv = ethtool_rxfh_context_priv(ctx); + if (!key) - key = ctx->rx_hash_key; + key = ethtool_rxfh_context_key(ctx); if (!indir) - indir = ctx->rx_indir_table; + indir = ethtool_rxfh_context_indir(ctx); - rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key); - if (rc && allocated) - efx_free_rss_context_entry(ctx); - else - *rss_context = ctx->user_id; -out_unlock: - mutex_unlock(&efx->rss_lock); - return rc; + return efx->type->rx_push_rss_context_config(efx, priv, indir, key, + false); +} + +int efx_ethtool_create_rxfh_context(struct net_device *net_dev, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + struct efx_rss_context_priv *priv; + + priv = ethtool_rxfh_context_priv(ctx); + + priv->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; + priv->rx_hash_udp_4tuple = false; + /* Generate default indir table and/or key if not specified. + * We use ctx as a place to store these; this is fine because + * we're doing a create, so if we fail then the ctx will just + * be deleted. 
+ */ + if (!rxfh->indir) + efx_set_default_rx_indir_table(efx, ethtool_rxfh_context_indir(ctx)); + if (!rxfh->key) + netdev_rss_key_fill(ethtool_rxfh_context_key(ctx), + ctx->key_size); + if (rxfh->hfunc == ETH_RSS_HASH_NO_CHANGE) + ctx->hfunc = ETH_RSS_HASH_TOP; + if (rxfh->input_xfrm == RXH_XFRM_NO_CHANGE) + ctx->input_xfrm = 0; + return efx_ethtool_modify_rxfh_context(net_dev, ctx, rxfh, extack); +} + +int efx_ethtool_remove_rxfh_context(struct net_device *net_dev, + struct ethtool_rxfh_context *ctx, + u32 rss_context, + struct netlink_ext_ack *extack) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + struct efx_rss_context_priv *priv; + + if (!efx->type->rx_push_rss_context_config) { + NL_SET_ERR_MSG_MOD(extack, + "NIC type does not support custom contexts"); + return -EOPNOTSUPP; + } + + priv = ethtool_rxfh_context_priv(ctx); + return efx->type->rx_push_rss_context_config(efx, priv, NULL, NULL, + true); } int efx_ethtool_set_rxfh(struct net_device *net_dev, @@ -1295,8 +1280,9 @@ int efx_ethtool_set_rxfh(struct net_device *net_dev, rxfh->hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - if (rxfh->rss_context) - return efx_ethtool_set_rxfh_context(net_dev, rxfh, extack); + /* Custom contexts should use new API */ + if (WARN_ON_ONCE(rxfh->rss_context)) + return -EIO; if (!indir && !key) return 0; diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h index a680e5980213..fc52e891637d 100644 --- a/drivers/net/ethernet/sfc/ethtool_common.h +++ b/drivers/net/ethernet/sfc/ethtool_common.h @@ -49,6 +49,18 @@ int efx_ethtool_get_rxfh(struct net_device *net_dev, int efx_ethtool_set_rxfh(struct net_device *net_dev, struct ethtool_rxfh_param *rxfh, struct netlink_ext_ack *extack); +int efx_ethtool_create_rxfh_context(struct net_device *net_dev, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack); +int efx_ethtool_modify_rxfh_context(struct net_device *net_dev, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack); +int efx_ethtool_remove_rxfh_context(struct net_device *net_dev, + struct ethtool_rxfh_context *ctx, + u32 rss_context, + struct netlink_ext_ack *extack); int efx_ethtool_reset(struct net_device *net_dev, u32 *flags); int efx_ethtool_get_module_eeprom(struct net_device *net_dev, struct ethtool_eeprom *ee, diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c index 7a1c9337081b..36114ce88034 100644 --- a/drivers/net/ethernet/sfc/falcon/falcon.c +++ b/drivers/net/ethernet/sfc/falcon/falcon.c @@ -367,7 +367,7 @@ static const struct i2c_algo_bit_data falcon_i2c_bit_operations = { .getsda = falcon_getsda, .getscl = falcon_getscl, .udelay = 5, - /* Wait up to 50 ms for slave to let us pull SCL high */ + /* Wait up to 50 ms for target to let us pull SCL high */ .timeout = DIV_ROUND_UP(HZ, 20), }; diff --git a/drivers/net/ethernet/sfc/falcon/nic.h b/drivers/net/ethernet/sfc/falcon/nic.h index 9f413474bd9f..ada6e036fd97 100644 --- a/drivers/net/ethernet/sfc/falcon/nic.h +++ b/drivers/net/ethernet/sfc/falcon/nic.h @@ -297,7 +297,7 @@ static inline struct falcon_board *falcon_board(struct ef4_nic *efx) return &data->board; } -struct ethtool_ts_info; +struct kernel_ethtool_ts_info; extern const struct ef4_nic_type falcon_a1_nic_type; extern const struct ef4_nic_type falcon_b0_nic_type; diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c index 
4ff6586116ee..6ef96292909a 100644 --- a/drivers/net/ethernet/sfc/mcdi_filters.c +++ b/drivers/net/ethernet/sfc/mcdi_filters.c @@ -194,7 +194,7 @@ efx_mcdi_filter_push_prep_set_match_fields(struct efx_nic *efx, static void efx_mcdi_filter_push_prep(struct efx_nic *efx, const struct efx_filter_spec *spec, efx_dword_t *inbuf, u64 handle, - struct efx_rss_context *ctx, + struct efx_rss_context_priv *ctx, bool replacing) { u32 flags = spec->flags; @@ -245,7 +245,7 @@ static void efx_mcdi_filter_push_prep(struct efx_nic *efx, static int efx_mcdi_filter_push(struct efx_nic *efx, const struct efx_filter_spec *spec, u64 *handle, - struct efx_rss_context *ctx, bool replacing) + struct efx_rss_context_priv *ctx, bool replacing) { MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN); @@ -345,9 +345,9 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx, bool replace_equal) { DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); + struct efx_rss_context_priv *ctx = NULL; struct efx_mcdi_filter_table *table; struct efx_filter_spec *saved_spec; - struct efx_rss_context *ctx = NULL; unsigned int match_pri, hash; unsigned int priv_flags; bool rss_locked = false; @@ -380,12 +380,12 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx, bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); if (spec->flags & EFX_FILTER_FLAG_RX_RSS) { - mutex_lock(&efx->rss_lock); + mutex_lock(&efx->net_dev->ethtool->rss_lock); rss_locked = true; if (spec->rss_context) ctx = efx_find_rss_context_entry(efx, spec->rss_context); else - ctx = &efx->rss_context; + ctx = &efx->rss_context.priv; if (!ctx) { rc = -ENOENT; goto out_unlock; @@ -548,7 +548,7 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx, out_unlock: if (rss_locked) - mutex_unlock(&efx->rss_lock); + mutex_unlock(&efx->net_dev->ethtool->rss_lock); up_write(&table->lock); return rc; } @@ -611,13 +611,13 @@ static int efx_mcdi_filter_remove_internal(struct efx_nic *efx, new_spec.priority = EFX_FILTER_PRI_AUTO; new_spec.flags = (EFX_FILTER_FLAG_RX | - (efx_rss_active(&efx->rss_context) ? + (efx_rss_active(&efx->rss_context.priv) ? EFX_FILTER_FLAG_RX_RSS : 0)); new_spec.dmaq_id = 0; new_spec.rss_context = 0; rc = efx_mcdi_filter_push(efx, &new_spec, &table->entry[filter_idx].handle, - &efx->rss_context, + &efx->rss_context.priv, true); if (rc == 0) @@ -764,7 +764,7 @@ static int efx_mcdi_filter_insert_addr_list(struct efx_nic *efx, ids = vlan->uc; } - filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0; + filter_flags = efx_rss_active(&efx->rss_context.priv) ? EFX_FILTER_FLAG_RX_RSS : 0; /* Insert/renew filters */ for (i = 0; i < addr_count; i++) { @@ -833,7 +833,7 @@ static int efx_mcdi_filter_insert_def(struct efx_nic *efx, int rc; u16 *id; - filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0; + filter_flags = efx_rss_active(&efx->rss_context.priv) ? 
EFX_FILTER_FLAG_RX_RSS : 0; efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); @@ -1375,8 +1375,8 @@ void efx_mcdi_filter_table_restore(struct efx_nic *efx) struct efx_mcdi_filter_table *table = efx->filter_state; unsigned int invalid_filters = 0, failed = 0; struct efx_mcdi_filter_vlan *vlan; + struct efx_rss_context_priv *ctx; struct efx_filter_spec *spec; - struct efx_rss_context *ctx; unsigned int filter_idx; u32 mcdi_flags; int match_pri; @@ -1388,7 +1388,7 @@ void efx_mcdi_filter_table_restore(struct efx_nic *efx) return; down_write(&table->lock); - mutex_lock(&efx->rss_lock); + mutex_lock(&efx->net_dev->ethtool->rss_lock); for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) { spec = efx_mcdi_filter_entry_spec(table, filter_idx); @@ -1407,7 +1407,7 @@ void efx_mcdi_filter_table_restore(struct efx_nic *efx) if (spec->rss_context) ctx = efx_find_rss_context_entry(efx, spec->rss_context); else - ctx = &efx->rss_context; + ctx = &efx->rss_context.priv; if (spec->flags & EFX_FILTER_FLAG_RX_RSS) { if (!ctx) { netif_warn(efx, drv, efx->net_dev, @@ -1444,7 +1444,7 @@ not_restored: } } - mutex_unlock(&efx->rss_lock); + mutex_unlock(&efx->net_dev->ethtool->rss_lock); up_write(&table->lock); /* @@ -1861,7 +1861,8 @@ out_unlock: RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\ RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN) -int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context, u32 *flags) +static int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context, + u32 *flags) { /* * Firmware had a bug (sfc bug 61952) where it would not actually @@ -1909,8 +1910,8 @@ int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context, u32 *flags) * Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we * just need to set the UDP ports flags (for both IP versions). 
*/ -void efx_mcdi_set_rss_context_flags(struct efx_nic *efx, - struct efx_rss_context *ctx) +static void efx_mcdi_set_rss_context_flags(struct efx_nic *efx, + struct efx_rss_context_priv *ctx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN); u32 flags; @@ -1931,7 +1932,7 @@ void efx_mcdi_set_rss_context_flags(struct efx_nic *efx, } static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive, - struct efx_rss_context *ctx, + struct efx_rss_context_priv *ctx, unsigned *context_size) { MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN); @@ -2032,25 +2033,26 @@ void efx_mcdi_rx_free_indir_table(struct efx_nic *efx) { int rc; - if (efx->rss_context.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) { - rc = efx_mcdi_filter_free_rss_context(efx, efx->rss_context.context_id); + if (efx->rss_context.priv.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) { + rc = efx_mcdi_filter_free_rss_context(efx, efx->rss_context.priv.context_id); WARN_ON(rc != 0); } - efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; + efx->rss_context.priv.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; } static int efx_mcdi_filter_rx_push_shared_rss_config(struct efx_nic *efx, unsigned *context_size) { struct efx_mcdi_filter_table *table = efx->filter_state; - int rc = efx_mcdi_filter_alloc_rss_context(efx, false, &efx->rss_context, - context_size); + int rc = efx_mcdi_filter_alloc_rss_context(efx, false, + &efx->rss_context.priv, + context_size); if (rc != 0) return rc; table->rx_rss_context_exclusive = false; - efx_set_default_rx_indir_table(efx, &efx->rss_context); + efx_set_default_rx_indir_table(efx, efx->rss_context.rx_indir_table); return 0; } @@ -2058,26 +2060,27 @@ static int efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx, const u32 *rx_indir_table, const u8 *key) { + u32 old_rx_rss_context = efx->rss_context.priv.context_id; struct efx_mcdi_filter_table *table = efx->filter_state; - u32 old_rx_rss_context = efx->rss_context.context_id; int rc; - if (efx->rss_context.context_id == EFX_MCDI_RSS_CONTEXT_INVALID || + if (efx->rss_context.priv.context_id == EFX_MCDI_RSS_CONTEXT_INVALID || !table->rx_rss_context_exclusive) { - rc = efx_mcdi_filter_alloc_rss_context(efx, true, &efx->rss_context, - NULL); + rc = efx_mcdi_filter_alloc_rss_context(efx, true, + &efx->rss_context.priv, + NULL); if (rc == -EOPNOTSUPP) return rc; else if (rc != 0) goto fail1; } - rc = efx_mcdi_filter_populate_rss_table(efx, efx->rss_context.context_id, - rx_indir_table, key); + rc = efx_mcdi_filter_populate_rss_table(efx, efx->rss_context.priv.context_id, + rx_indir_table, key); if (rc != 0) goto fail2; - if (efx->rss_context.context_id != old_rx_rss_context && + if (efx->rss_context.priv.context_id != old_rx_rss_context && old_rx_rss_context != EFX_MCDI_RSS_CONTEXT_INVALID) WARN_ON(efx_mcdi_filter_free_rss_context(efx, old_rx_rss_context) != 0); table->rx_rss_context_exclusive = true; @@ -2091,9 +2094,9 @@ static int efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx, return 0; fail2: - if (old_rx_rss_context != efx->rss_context.context_id) { - WARN_ON(efx_mcdi_filter_free_rss_context(efx, efx->rss_context.context_id) != 0); - efx->rss_context.context_id = old_rx_rss_context; + if (old_rx_rss_context != efx->rss_context.priv.context_id) { + WARN_ON(efx_mcdi_filter_free_rss_context(efx, efx->rss_context.priv.context_id) != 0); + efx->rss_context.priv.context_id = old_rx_rss_context; } fail1: netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); @@ -2101,33 +2104,28 
@@ fail1: } int efx_mcdi_rx_push_rss_context_config(struct efx_nic *efx, - struct efx_rss_context *ctx, + struct efx_rss_context_priv *ctx, const u32 *rx_indir_table, - const u8 *key) + const u8 *key, bool delete) { int rc; - WARN_ON(!mutex_is_locked(&efx->rss_lock)); + WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock)); if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) { + if (delete) + /* already wasn't in HW, nothing to do */ + return 0; rc = efx_mcdi_filter_alloc_rss_context(efx, true, ctx, NULL); if (rc) return rc; } - if (!rx_indir_table) /* Delete this context */ + if (delete) /* Delete this context */ return efx_mcdi_filter_free_rss_context(efx, ctx->context_id); - rc = efx_mcdi_filter_populate_rss_table(efx, ctx->context_id, - rx_indir_table, key); - if (rc) - return rc; - - memcpy(ctx->rx_indir_table, rx_indir_table, - sizeof(efx->rss_context.rx_indir_table)); - memcpy(ctx->rx_hash_key, key, efx->type->rx_hash_key_size); - - return 0; + return efx_mcdi_filter_populate_rss_table(efx, ctx->context_id, + rx_indir_table, key); } int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx, @@ -2139,16 +2137,16 @@ int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx, size_t outlen; int rc, i; - WARN_ON(!mutex_is_locked(&efx->rss_lock)); + WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock)); BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN != MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN); - if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) + if (ctx->priv.context_id == EFX_MCDI_RSS_CONTEXT_INVALID) return -ENOENT; MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID, - ctx->context_id); + ctx->priv.context_id); BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_indir_table) != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN); rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf), @@ -2164,7 +2162,7 @@ int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx, RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i]; MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID, - ctx->context_id); + ctx->priv.context_id); BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_hash_key) != MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf), @@ -2186,35 +2184,42 @@ int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx) { int rc; - mutex_lock(&efx->rss_lock); + mutex_lock(&efx->net_dev->ethtool->rss_lock); rc = efx_mcdi_rx_pull_rss_context_config(efx, &efx->rss_context); - mutex_unlock(&efx->rss_lock); + mutex_unlock(&efx->net_dev->ethtool->rss_lock); return rc; } void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx) { struct efx_mcdi_filter_table *table = efx->filter_state; - struct efx_rss_context *ctx; + struct ethtool_rxfh_context *ctx; + unsigned long context; int rc; - WARN_ON(!mutex_is_locked(&efx->rss_lock)); + WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock)); if (!table->must_restore_rss_contexts) return; - list_for_each_entry(ctx, &efx->rss_context.list, list) { + xa_for_each(&efx->net_dev->ethtool->rss_ctx, context, ctx) { + struct efx_rss_context_priv *priv; + u32 *indir; + u8 *key; + + priv = ethtool_rxfh_context_priv(ctx); /* previous NIC RSS context is gone */ - ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; + priv->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; /* so try to allocate a new one */ - rc = efx_mcdi_rx_push_rss_context_config(efx, ctx, - ctx->rx_indir_table, - ctx->rx_hash_key); + indir = ethtool_rxfh_context_indir(ctx); + key = ethtool_rxfh_context_key(ctx); + rc = 
efx_mcdi_rx_push_rss_context_config(efx, priv, indir, key, + false); if (rc) netif_warn(efx, probe, efx->net_dev, - "failed to restore RSS context %u, rc=%d" + "failed to restore RSS context %lu, rc=%d" "; RSS filters may fail to be applied\n", - ctx->user_id, rc); + context, rc); } table->must_restore_rss_contexts = false; } @@ -2276,7 +2281,7 @@ int efx_mcdi_vf_rx_push_rss_config(struct efx_nic *efx, bool user, { if (user) return -EOPNOTSUPP; - if (efx->rss_context.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) + if (efx->rss_context.priv.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) return 0; return efx_mcdi_filter_rx_push_shared_rss_config(efx, NULL); } @@ -2295,7 +2300,7 @@ int efx_mcdi_push_default_indir_table(struct efx_nic *efx, efx_mcdi_rx_free_indir_table(efx); if (rss_spread > 1) { - efx_set_default_rx_indir_table(efx, &efx->rss_context); + efx_set_default_rx_indir_table(efx, efx->rss_context.rx_indir_table); rc = efx->type->rx_push_rss_config(efx, false, efx->rss_context.rx_indir_table, NULL); } diff --git a/drivers/net/ethernet/sfc/mcdi_filters.h b/drivers/net/ethernet/sfc/mcdi_filters.h index c0d6558b9fd2..11b9f87ed9e1 100644 --- a/drivers/net/ethernet/sfc/mcdi_filters.h +++ b/drivers/net/ethernet/sfc/mcdi_filters.h @@ -145,9 +145,9 @@ void efx_mcdi_filter_del_vlan(struct efx_nic *efx, u16 vid); void efx_mcdi_rx_free_indir_table(struct efx_nic *efx); int efx_mcdi_rx_push_rss_context_config(struct efx_nic *efx, - struct efx_rss_context *ctx, + struct efx_rss_context_priv *ctx, const u32 *rx_indir_table, - const u8 *key); + const u8 *key, bool delete); int efx_mcdi_pf_rx_push_rss_config(struct efx_nic *efx, bool user, const u32 *rx_indir_table, const u8 *key); @@ -161,10 +161,6 @@ int efx_mcdi_push_default_indir_table(struct efx_nic *efx, int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx); int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx, struct efx_rss_context *ctx); -int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context, - u32 *flags); -void efx_mcdi_set_rss_context_flags(struct efx_nic *efx, - struct efx_rss_context *ctx); void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx); static inline void efx_mcdi_update_rx_scatter(struct efx_nic *efx) diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index f2dd7feb0e0c..b85c51cbe7f9 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -737,21 +737,24 @@ struct vfdi_status; /* The reserved RSS context value */ #define EFX_MCDI_RSS_CONTEXT_INVALID 0xffffffff /** - * struct efx_rss_context - A user-defined RSS context for filtering - * @list: node of linked list on which this struct is stored + * struct efx_rss_context_priv - driver private data for an RSS context * @context_id: the RSS_CONTEXT_ID returned by MC firmware, or * %EFX_MCDI_RSS_CONTEXT_INVALID if this context is not present on the NIC. - * For Siena, 0 if RSS is active, else %EFX_MCDI_RSS_CONTEXT_INVALID. - * @user_id: the rss_context ID exposed to userspace over ethtool. 
* @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled + */ +struct efx_rss_context_priv { + u32 context_id; + bool rx_hash_udp_4tuple; +}; + +/** + * struct efx_rss_context - an RSS context + * @priv: hardware-specific state * @rx_hash_key: Toeplitz hash key for this RSS context * @indir_table: Indirection table for this RSS context */ struct efx_rss_context { - struct list_head list; - u32 context_id; - u32 user_id; - bool rx_hash_udp_4tuple; + struct efx_rss_context_priv priv; u8 rx_hash_key[40]; u32 rx_indir_table[128]; }; @@ -883,9 +886,7 @@ struct efx_mae; * @rx_packet_ts_offset: Offset of timestamp from start of packet data * (valid only if channel->sync_timestamps_enabled; always negative) * @rx_scatter: Scatter mode enabled for receives - * @rss_context: Main RSS context. Its @list member is the head of the list of - * RSS contexts created by user requests - * @rss_lock: Protects custom RSS context software state in @rss_context.list + * @rss_context: Main RSS context. * @vport_id: The function's vport ID, only relevant for PFs * @int_error_count: Number of internal errors seen recently * @int_error_expire: Time at which error count will be expired @@ -1052,7 +1053,6 @@ struct efx_nic { int rx_packet_ts_offset; bool rx_scatter; struct efx_rss_context rss_context; - struct mutex rss_lock; u32 vport_id; unsigned int_error_count; @@ -1416,9 +1416,9 @@ struct efx_nic_type { const u32 *rx_indir_table, const u8 *key); int (*rx_pull_rss_config)(struct efx_nic *efx); int (*rx_push_rss_context_config)(struct efx_nic *efx, - struct efx_rss_context *ctx, + struct efx_rss_context_priv *ctx, const u32 *rx_indir_table, - const u8 *key); + const u8 *key, bool delete); int (*rx_pull_rss_context_config)(struct efx_nic *efx, struct efx_rss_context *ctx); void (*rx_restore_rss_contexts)(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index c3bffbf0ba2b..6fd2fdbaa418 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -1864,7 +1864,7 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct kernel_hwtstamp_config *i return 0; } -void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info) +void efx_ptp_get_ts_info(struct efx_nic *efx, struct kernel_ethtool_ts_info *ts_info) { struct efx_ptp_data *ptp = efx->ptp_data; struct efx_nic *primary = efx->primary; diff --git a/drivers/net/ethernet/sfc/ptp.h b/drivers/net/ethernet/sfc/ptp.h index 2f30dbb490d2..6946203499ef 100644 --- a/drivers/net/ethernet/sfc/ptp.h +++ b/drivers/net/ethernet/sfc/ptp.h @@ -12,7 +12,7 @@ #include <linux/net_tstamp.h> #include "net_driver.h" -struct ethtool_ts_info; +struct kernel_ethtool_ts_info; int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel); void efx_ptp_defer_probe_with_channel(struct efx_nic *efx); struct efx_channel *efx_ptp_channel(struct efx_nic *efx); @@ -23,7 +23,8 @@ int efx_ptp_set_ts_config(struct efx_nic *efx, struct netlink_ext_ack *extack); int efx_ptp_get_ts_config(struct efx_nic *efx, struct kernel_hwtstamp_config *config); -void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info); +void efx_ptp_get_ts_info(struct efx_nic *efx, + struct kernel_ethtool_ts_info *ts_info); bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); int efx_ptp_get_mode(struct efx_nic *efx); int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c index dcd901eccfc8..0b7dc75c40f9 100644 --- 
a/drivers/net/ethernet/sfc/rx_common.c +++ b/drivers/net/ethernet/sfc/rx_common.c @@ -557,69 +557,25 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, napi_gro_frags(napi); } -/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because - * (a) this is an infrequent control-plane operation and (b) n is small (max 64) - */ -struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx) +struct efx_rss_context_priv *efx_find_rss_context_entry(struct efx_nic *efx, + u32 id) { - struct list_head *head = &efx->rss_context.list; - struct efx_rss_context *ctx, *new; - u32 id = 1; /* Don't use zero, that refers to the master RSS context */ - - WARN_ON(!mutex_is_locked(&efx->rss_lock)); + struct ethtool_rxfh_context *ctx; - /* Search for first gap in the numbering */ - list_for_each_entry(ctx, head, list) { - if (ctx->user_id != id) - break; - id++; - /* Check for wrap. If this happens, we have nearly 2^32 - * allocated RSS contexts, which seems unlikely. - */ - if (WARN_ON_ONCE(!id)) - return NULL; - } + WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock)); - /* Create the new entry */ - new = kmalloc(sizeof(*new), GFP_KERNEL); - if (!new) + ctx = xa_load(&efx->net_dev->ethtool->rss_ctx, id); + if (!ctx) return NULL; - new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; - new->rx_hash_udp_4tuple = false; - - /* Insert the new entry into the gap */ - new->user_id = id; - list_add_tail(&new->list, &ctx->list); - return new; -} - -struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id) -{ - struct list_head *head = &efx->rss_context.list; - struct efx_rss_context *ctx; - - WARN_ON(!mutex_is_locked(&efx->rss_lock)); - - list_for_each_entry(ctx, head, list) - if (ctx->user_id == id) - return ctx; - return NULL; -} - -void efx_free_rss_context_entry(struct efx_rss_context *ctx) -{ - list_del(&ctx->list); - kfree(ctx); + return ethtool_rxfh_context_priv(ctx); } -void efx_set_default_rx_indir_table(struct efx_nic *efx, - struct efx_rss_context *ctx) +void efx_set_default_rx_indir_table(struct efx_nic *efx, u32 *indir) { size_t i; - for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++) - ctx->rx_indir_table[i] = - ethtool_rxfh_indir_default(i, efx->rss_spread); + for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); i++) + indir[i] = ethtool_rxfh_indir_default(i, efx->rss_spread); } /** diff --git a/drivers/net/ethernet/sfc/rx_common.h b/drivers/net/ethernet/sfc/rx_common.h index fbd2769307f9..75fa84192362 100644 --- a/drivers/net/ethernet/sfc/rx_common.h +++ b/drivers/net/ethernet/sfc/rx_common.h @@ -84,11 +84,9 @@ void efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, unsigned int n_frags, u8 *eh, __wsum csum); -struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx); -struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id); -void efx_free_rss_context_entry(struct efx_rss_context *ctx); -void efx_set_default_rx_indir_table(struct efx_nic *efx, - struct efx_rss_context *ctx); +struct efx_rss_context_priv *efx_find_rss_context_entry(struct efx_nic *efx, + u32 id); +void efx_set_default_rx_indir_table(struct efx_nic *efx, u32 *indir); bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec); bool efx_filter_spec_equal(const struct efx_filter_spec *left, diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c index 14dd3893bdef..4c182d4edfc2 100644 --- a/drivers/net/ethernet/sfc/siena/ethtool.c +++ 
b/drivers/net/ethernet/sfc/siena/ethtool.c @@ -226,7 +226,7 @@ static void efx_ethtool_get_fec_stats(struct net_device *net_dev, } static int efx_ethtool_get_ts_info(struct net_device *net_dev, - struct ethtool_ts_info *ts_info) + struct kernel_ethtool_ts_info *ts_info) { struct efx_nic *efx = netdev_priv(net_dev); diff --git a/drivers/net/ethernet/sfc/siena/ptp.c b/drivers/net/ethernet/sfc/siena/ptp.c index 4b5e2f0ba350..c473a4b6dd44 100644 --- a/drivers/net/ethernet/sfc/siena/ptp.c +++ b/drivers/net/ethernet/sfc/siena/ptp.c @@ -1780,7 +1780,7 @@ static int efx_ptp_ts_init(struct efx_nic *efx, } void efx_siena_ptp_get_ts_info(struct efx_nic *efx, - struct ethtool_ts_info *ts_info) + struct kernel_ethtool_ts_info *ts_info) { struct efx_ptp_data *ptp = efx->ptp_data; struct efx_nic *primary = efx->primary; diff --git a/drivers/net/ethernet/sfc/siena/ptp.h b/drivers/net/ethernet/sfc/siena/ptp.h index 6352f84424f6..b6133e7c5608 100644 --- a/drivers/net/ethernet/sfc/siena/ptp.h +++ b/drivers/net/ethernet/sfc/siena/ptp.h @@ -12,7 +12,7 @@ #include <linux/net_tstamp.h> #include "net_driver.h" -struct ethtool_ts_info; +struct kernel_ethtool_ts_info; void efx_siena_ptp_defer_probe_with_channel(struct efx_nic *efx); struct efx_channel *efx_siena_ptp_channel(struct efx_nic *efx); int efx_siena_ptp_set_ts_config(struct efx_nic *efx, @@ -21,7 +21,7 @@ int efx_siena_ptp_set_ts_config(struct efx_nic *efx, int efx_siena_ptp_get_ts_config(struct efx_nic *efx, struct kernel_hwtstamp_config *config); void efx_siena_ptp_get_ts_info(struct efx_nic *efx, - struct ethtool_ts_info *ts_info); + struct kernel_ethtool_ts_info *ts_info); bool efx_siena_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); int efx_siena_ptp_get_mode(struct efx_nic *efx); int efx_siena_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c index 9d140203e273..0d93164988fc 100644 --- a/drivers/net/ethernet/sfc/tc.c +++ b/drivers/net/ethernet/sfc/tc.c @@ -387,11 +387,8 @@ static int efx_tc_flower_parse_match(struct efx_nic *efx, struct flow_match_control fm; flow_rule_match_enc_control(rule, &fm); - if (fm.mask->flags) { - NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported match on enc_control.flags %#x", - fm.mask->flags); + if (flow_rule_has_enc_control_flags(fm.mask->flags, extack)) return -EOPNOTSUPP; - } if (!IS_ALL_ONES(fm.mask->addr_type)) { NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported enc addr_type mask %u (key %u)", fm.mask->addr_type, diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c index af661c65ffe2..e2e7b1c68563 100644 --- a/drivers/net/ethernet/smsc/smc9194.c +++ b/drivers/net/ethernet/smsc/smc9194.c @@ -1501,6 +1501,7 @@ static void smc_set_multicast_list(struct net_device *dev) #ifdef MODULE static struct net_device *devSMC9194; +MODULE_DESCRIPTION("SMC 9194 Ethernet driver"); MODULE_LICENSE("GPL"); module_param_hw(io, int, ioport, 0); diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 78ff3af7911a..907498848028 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -1574,12 +1574,8 @@ smc_ethtool_set_link_ksettings(struct net_device *dev, (cmd->base.port != PORT_TP && cmd->base.port != PORT_AUI)) return -EINVAL; -// lp->port = cmd->base.port; lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL; -// if (netif_running(dev)) -// smc_set_port(dev); - ret = 0; } diff --git a/drivers/net/ethernet/smsc/smc91x.h 
b/drivers/net/ethernet/smsc/smc91x.h index 45ef5ac0788a..38aa4374e813 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h @@ -142,14 +142,14 @@ static inline void _SMC_outw_align4(u16 val, void __iomem *ioaddr, int reg, #define SMC_CAN_USE_32BIT 0 #define SMC_NOWAIT 1 -static inline void mcf_insw(void *a, unsigned char *p, int l) +static inline void mcf_insw(void __iomem *a, unsigned char *p, int l) { u16 *wp = (u16 *) p; while (l-- > 0) *wp++ = readw(a); } -static inline void mcf_outsw(void *a, unsigned char *p, int l) +static inline void mcf_outsw(void __iomem *a, unsigned char *p, int l) { u16 *wp = (u16 *) p; while (l-- > 0) diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 9cd62b2110a1..cd36ff4da68c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -271,8 +271,6 @@ struct stmmac_safety_stats { /* PCS defines */ #define STMMAC_PCS_RGMII (1 << 0) #define STMMAC_PCS_SGMII (1 << 1) -#define STMMAC_PCS_TBI (1 << 2) -#define STMMAC_PCS_RTBI (1 << 3) #define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index e73fa34237d3..83ad7c7935e3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -248,7 +248,7 @@ static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data) dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n"); priv->plat->max_speed = 2500; priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX; - priv->plat->mdio_bus_data->xpcs_an_inband = false; + priv->plat->mdio_bus_data->default_an_inband = false; } else { priv->plat->max_speed = 1000; } @@ -444,6 +444,16 @@ static void common_default_data(struct plat_stmmacenet_data *plat) plat->rx_queues_cfg[0].pkt_route = 0x0; } +static struct phylink_pcs *intel_mgbe_select_pcs(struct stmmac_priv *priv, + phy_interface_t interface) +{ + /* plat->mdio_bus_data->has_xpcs has been set true, so there + * should always be an XPCS. The original code would always + * return this if present. 
+ */ + return &priv->hw->xpcs->pcs; +} + static int intel_mgbe_common_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { @@ -586,19 +596,9 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, /* Intel mgbe SGMII interface uses pcs-xcps */ if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII || plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) { - plat->mdio_bus_data->has_xpcs = true; - plat->mdio_bus_data->xpcs_an_inband = true; - } - - /* For fixed-link setup, we clear xpcs_an_inband */ - if (fwnode) { - struct fwnode_handle *fixed_node; - - fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link"); - if (fixed_node) - plat->mdio_bus_data->xpcs_an_inband = false; - - fwnode_handle_put(fixed_node); + plat->mdio_bus_data->pcs_mask = BIT(INTEL_MGBE_XPCS_ADDR); + plat->mdio_bus_data->default_an_inband = true; + plat->select_pcs = intel_mgbe_select_pcs; } /* Ensure mdio bus scan skips intel serdes and pcs-xpcs */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index 466c4002f00d..901a3c1959fa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -21,6 +21,7 @@ #define RGMII_IO_MACRO_CONFIG2 0x1C #define RGMII_IO_MACRO_DEBUG1 0x20 #define EMAC_SYSTEM_LOW_POWER_DEBUG 0x28 +#define EMAC_WRAPPER_SGMII_PHY_CNTRL1 0xf4 /* RGMII_IO_MACRO_CONFIG fields */ #define RGMII_CONFIG_FUNC_CLK_EN BIT(30) @@ -79,6 +80,9 @@ #define ETHQOS_MAC_CTRL_SPEED_MODE BIT(14) #define ETHQOS_MAC_CTRL_PORT_SEL BIT(15) +/* EMAC_WRAPPER_SGMII_PHY_CNTRL1 bits */ +#define SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN BIT(3) + #define SGMII_10M_RX_CLK_DVDR 0x31 struct ethqos_emac_por { @@ -95,6 +99,7 @@ struct ethqos_emac_driver_data { bool has_integrated_pcs; u32 dma_addr_width; struct dwmac4_addrs dwmac4_addrs; + bool needs_sgmii_loopback; }; struct qcom_ethqos { @@ -114,6 +119,7 @@ struct qcom_ethqos { unsigned int num_por; bool rgmii_config_loopback_en; bool has_emac_ge_3; + bool needs_sgmii_loopback; }; static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset) @@ -191,8 +197,22 @@ ethqos_update_link_clk(struct qcom_ethqos *ethqos, unsigned int speed) clk_set_rate(ethqos->link_clk, ethqos->link_clk_rate); } +static void +qcom_ethqos_set_sgmii_loopback(struct qcom_ethqos *ethqos, bool enable) +{ + if (!ethqos->needs_sgmii_loopback || + ethqos->phy_mode != PHY_INTERFACE_MODE_2500BASEX) + return; + + rgmii_updatel(ethqos, + SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN, + enable ? SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN : 0, + EMAC_WRAPPER_SGMII_PHY_CNTRL1); +} + static void ethqos_set_func_clk_en(struct qcom_ethqos *ethqos) { + qcom_ethqos_set_sgmii_loopback(ethqos, true); rgmii_updatel(ethqos, RGMII_CONFIG_FUNC_CLK_EN, RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG); } @@ -277,6 +297,7 @@ static const struct ethqos_emac_driver_data emac_v4_0_0_data = { .has_emac_ge_3 = true, .link_clk_name = "phyaux", .has_integrated_pcs = true, + .needs_sgmii_loopback = true, .dma_addr_width = 36, .dwmac4_addrs = { .dma_chan = 0x00008100, @@ -607,6 +628,14 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos) return 0; } +static void ethqos_set_serdes_speed(struct qcom_ethqos *ethqos, int speed) +{ + if (ethqos->serdes_speed != speed) { + phy_set_speed(ethqos->serdes_phy, speed); + ethqos->serdes_speed = speed; + } +} + /* On interface toggle MAC registers gets reset. 
* Configure MAC block for SGMII on ethernet phy link up */ @@ -624,9 +653,7 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos) rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG, RGMII_CONFIG2_RGMII_CLK_SEL_CFG, RGMII_IO_MACRO_CONFIG2); - if (ethqos->serdes_speed != SPEED_2500) - phy_set_speed(ethqos->serdes_phy, SPEED_2500); - ethqos->serdes_speed = SPEED_2500; + ethqos_set_serdes_speed(ethqos, SPEED_2500); stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 0, 0, 0); break; case SPEED_1000: @@ -634,16 +661,12 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos) rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG, RGMII_CONFIG2_RGMII_CLK_SEL_CFG, RGMII_IO_MACRO_CONFIG2); - if (ethqos->serdes_speed != SPEED_1000) - phy_set_speed(ethqos->serdes_phy, SPEED_1000); - ethqos->serdes_speed = SPEED_1000; + ethqos_set_serdes_speed(ethqos, SPEED_1000); stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0); break; case SPEED_100: val |= ETHQOS_MAC_CTRL_PORT_SEL | ETHQOS_MAC_CTRL_SPEED_MODE; - if (ethqos->serdes_speed != SPEED_1000) - phy_set_speed(ethqos->serdes_phy, SPEED_1000); - ethqos->serdes_speed = SPEED_1000; + ethqos_set_serdes_speed(ethqos, SPEED_1000); stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0); break; case SPEED_10: @@ -653,9 +676,7 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos) FIELD_PREP(RGMII_CONFIG_SGMII_CLK_DVDR, SGMII_10M_RX_CLK_DVDR), RGMII_IO_MACRO_CONFIG); - if (ethqos->serdes_speed != SPEED_1000) - phy_set_speed(ethqos->serdes_phy, ethqos->speed); - ethqos->serdes_speed = SPEED_1000; + ethqos_set_serdes_speed(ethqos, SPEED_1000); stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0); break; } @@ -665,6 +686,14 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos) return val; } +static void qcom_ethqos_speed_mode_2500(struct net_device *ndev, void *data) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + + priv->plat->max_speed = 2500; + priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX; +} + static int ethqos_configure(struct qcom_ethqos *ethqos) { return ethqos->configure_func(ethqos); @@ -674,6 +703,7 @@ static void ethqos_fix_mac_speed(void *priv, unsigned int speed, unsigned int mo { struct qcom_ethqos *ethqos = priv; + qcom_ethqos_set_sgmii_loopback(ethqos, false); ethqos->speed = speed; ethqos_update_link_clk(ethqos, speed); ethqos_configure(ethqos); @@ -787,6 +817,9 @@ static int qcom_ethqos_probe(struct platform_device *pdev) case PHY_INTERFACE_MODE_RGMII_TXID: ethqos->configure_func = ethqos_configure_rgmii; break; + case PHY_INTERFACE_MODE_2500BASEX: + plat_dat->speed_mode_2500 = qcom_ethqos_speed_mode_2500; + fallthrough; case PHY_INTERFACE_MODE_SGMII: ethqos->configure_func = ethqos_configure_sgmii; break; @@ -809,6 +842,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev) ethqos->num_por = data->num_por; ethqos->rgmii_config_loopback_en = data->rgmii_config_loopback_en; ethqos->has_emac_ge_3 = data->has_emac_ge_3; + ethqos->needs_sgmii_loopback = data->needs_sgmii_loopback; ethqos->link_clk = devm_clk_get(dev, data->link_clk_name ?: "rgmii"); if (IS_ERR(ethqos->link_clk)) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c index 848cf3c01f4a..59a7bd560f96 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c @@ -39,6 +39,12 @@ static void rzn1_dwmac_pcs_exit(struct stmmac_priv *priv) miic_destroy(priv->hw->phylink_pcs); } +static struct phylink_pcs *rzn1_dwmac_select_pcs(struct 
stmmac_priv *priv, + phy_interface_t interface) +{ + return priv->hw->phylink_pcs; +} + static int rzn1_dwmac_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; @@ -57,6 +63,7 @@ static int rzn1_dwmac_probe(struct platform_device *pdev) plat_dat->bsp_priv = plat_dat; plat_dat->pcs_init = rzn1_dwmac_pcs_init; plat_dat->pcs_exit = rzn1_dwmac_pcs_exit; + plat_dat->select_pcs = rzn1_dwmac_select_pcs; ret = stmmac_dvr_probe(dev, plat_dat, &stmmac_res); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index b3d45f9dfb55..fdb4c773ec98 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -429,6 +429,12 @@ static void socfpga_dwmac_pcs_exit(struct stmmac_priv *priv) lynx_pcs_destroy(priv->hw->phylink_pcs); } +static struct phylink_pcs *socfpga_dwmac_select_pcs(struct stmmac_priv *priv, + phy_interface_t interface) +{ + return priv->hw->phylink_pcs; +} + static int socfpga_dwmac_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; @@ -478,6 +484,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed; plat_dat->pcs_init = socfpga_dwmac_pcs_init; plat_dat->pcs_exit = socfpga_dwmac_pcs_exit; + plat_dat->select_pcs = socfpga_dwmac_select_pcs; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index c92dfc4ecf57..c1732955a697 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -53,12 +53,23 @@ #define SYSCFG_MCU_ETH_SEL_MII 0 #define SYSCFG_MCU_ETH_SEL_RMII 1 -/* STM32MP1 register definitions +/* STM32MP2 register definitions */ +#define SYSCFG_MP2_ETH_MASK GENMASK(31, 0) + +#define SYSCFG_ETHCR_ETH_PTP_CLK_SEL BIT(2) +#define SYSCFG_ETHCR_ETH_CLK_SEL BIT(1) +#define SYSCFG_ETHCR_ETH_REF_CLK_SEL BIT(0) + +#define SYSCFG_ETHCR_ETH_SEL_MII 0 +#define SYSCFG_ETHCR_ETH_SEL_RGMII BIT(4) +#define SYSCFG_ETHCR_ETH_SEL_RMII BIT(6) + +/* STM32MPx register definitions * * Below table summarizes the clock requirement and clock sources for * supported phy interface modes. 
* __________________________________________________________________________ - *|PHY_MODE | Normal | PHY wo crystal| PHY wo crystal |No 125Mhz from PHY| + *|PHY_MODE | Normal | PHY wo crystal| PHY wo crystal |No 125MHz from PHY| *| | | 25MHz | 50MHz | | * --------------------------------------------------------------------------- *| MII | - | eth-ck | n/a | n/a | @@ -90,6 +101,7 @@ struct stm32_dwmac { int eth_ref_clk_sel_reg; int irq_pwr_wakeup; u32 mode_reg; /* MAC glue-logic mode register */ + u32 mode_mask; struct regmap *regmap; u32 speed; const struct stm32_ops *ops; @@ -102,8 +114,9 @@ struct stm32_ops { void (*resume)(struct stm32_dwmac *dwmac); int (*parse_data)(struct stm32_dwmac *dwmac, struct device *dev); - u32 syscfg_eth_mask; bool clk_rx_enable_in_suspend; + bool is_mp13, is_mp2; + u32 syscfg_clr_off; }; static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume) @@ -157,65 +170,190 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat, bool resume) return stm32_dwmac_clk_enable(dwmac, resume); } -static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat) +static int stm32mp1_select_ethck_external(struct plat_stmmacenet_data *plat_dat) { struct stm32_dwmac *dwmac = plat_dat->bsp_priv; - u32 reg = dwmac->mode_reg, clk_rate; - int val; - clk_rate = clk_get_rate(dwmac->clk_eth_ck); - dwmac->enable_eth_ck = false; switch (plat_dat->mac_interface) { case PHY_INTERFACE_MODE_MII: - if (clk_rate == ETH_CK_F_25M && dwmac->ext_phyclk) - dwmac->enable_eth_ck = true; - val = SYSCFG_PMCR_ETH_SEL_MII; - pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n"); + dwmac->enable_eth_ck = dwmac->ext_phyclk; + return 0; + case PHY_INTERFACE_MODE_GMII: + dwmac->enable_eth_ck = dwmac->eth_clk_sel_reg || + dwmac->ext_phyclk; + return 0; + case PHY_INTERFACE_MODE_RMII: + dwmac->enable_eth_ck = dwmac->eth_ref_clk_sel_reg || + dwmac->ext_phyclk; + return 0; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + dwmac->enable_eth_ck = dwmac->eth_clk_sel_reg || + dwmac->ext_phyclk; + return 0; + default: + dwmac->enable_eth_ck = false; + dev_err(dwmac->dev, "Mode %s not supported", + phy_modes(plat_dat->mac_interface)); + return -EINVAL; + } +} + +static int stm32mp1_validate_ethck_rate(struct plat_stmmacenet_data *plat_dat) +{ + struct stm32_dwmac *dwmac = plat_dat->bsp_priv; + const u32 clk_rate = clk_get_rate(dwmac->clk_eth_ck); + + if (!dwmac->enable_eth_ck) + return 0; + + switch (plat_dat->mac_interface) { + case PHY_INTERFACE_MODE_MII: + case PHY_INTERFACE_MODE_GMII: + if (clk_rate == ETH_CK_F_25M) + return 0; + break; + case PHY_INTERFACE_MODE_RMII: + if (clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_50M) + return 0; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + if (clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_125M) + return 0; + break; + default: + break; + } + + dev_err(dwmac->dev, "Mode %s does not match eth-ck frequency %d Hz", + phy_modes(plat_dat->mac_interface), clk_rate); + return -EINVAL; +} + +static int stm32mp1_configure_pmcr(struct plat_stmmacenet_data *plat_dat) +{ + struct stm32_dwmac *dwmac = plat_dat->bsp_priv; + u32 reg = dwmac->mode_reg; + int val = 0; + + switch (plat_dat->mac_interface) { + case PHY_INTERFACE_MODE_MII: + /* + * STM32MP15xx supports both MII and GMII, STM32MP13xx MII only. 
+ * SYSCFG_PMCSETR ETH_SELMII is present only on STM32MP15xx and + * acts as a selector between 0:GMII and 1:MII. As STM32MP13xx + * supports only MII, ETH_SELMII is not present. + */ + if (!dwmac->ops->is_mp13) /* Select MII mode on STM32MP15xx */ + val |= SYSCFG_PMCR_ETH_SEL_MII; break; case PHY_INTERFACE_MODE_GMII: val = SYSCFG_PMCR_ETH_SEL_GMII; - if (clk_rate == ETH_CK_F_25M && - (dwmac->eth_clk_sel_reg || dwmac->ext_phyclk)) { - dwmac->enable_eth_ck = true; + if (dwmac->enable_eth_ck) val |= SYSCFG_PMCR_ETH_CLK_SEL; - } - pr_debug("SYSCFG init : PHY_INTERFACE_MODE_GMII\n"); break; case PHY_INTERFACE_MODE_RMII: val = SYSCFG_PMCR_ETH_SEL_RMII; - if ((clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_50M) && - (dwmac->eth_ref_clk_sel_reg || dwmac->ext_phyclk)) { - dwmac->enable_eth_ck = true; + if (dwmac->enable_eth_ck) val |= SYSCFG_PMCR_ETH_REF_CLK_SEL; - } - pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n"); break; case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: val = SYSCFG_PMCR_ETH_SEL_RGMII; - if ((clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_125M) && - (dwmac->eth_clk_sel_reg || dwmac->ext_phyclk)) { - dwmac->enable_eth_ck = true; + if (dwmac->enable_eth_ck) val |= SYSCFG_PMCR_ETH_CLK_SEL; - } - pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RGMII\n"); break; default: - pr_debug("SYSCFG init : Do not manage %d interface\n", - plat_dat->mac_interface); + dev_err(dwmac->dev, "Mode %s not supported", + phy_modes(plat_dat->mac_interface)); /* Do not manage others interfaces */ return -EINVAL; } + dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->mac_interface)); + + /* Shift value at correct ethernet MAC offset in SYSCFG_PMCSETR */ + val <<= ffs(dwmac->mode_mask) - ffs(SYSCFG_MP1_ETH_MASK); + /* Need to update PMCCLRR (clear register) */ - regmap_write(dwmac->regmap, reg + SYSCFG_PMCCLRR_OFFSET, - dwmac->ops->syscfg_eth_mask); + regmap_write(dwmac->regmap, dwmac->ops->syscfg_clr_off, + dwmac->mode_mask); /* Update PMCSETR (set register) */ return regmap_update_bits(dwmac->regmap, reg, - dwmac->ops->syscfg_eth_mask, val); + dwmac->mode_mask, val); +} + +static int stm32mp2_configure_syscfg(struct plat_stmmacenet_data *plat_dat) +{ + struct stm32_dwmac *dwmac = plat_dat->bsp_priv; + u32 reg = dwmac->mode_reg; + int val = 0; + + switch (plat_dat->mac_interface) { + case PHY_INTERFACE_MODE_MII: + /* ETH_REF_CLK_SEL bit in SYSCFG register is not applicable in MII mode */ + break; + case PHY_INTERFACE_MODE_RMII: + val = SYSCFG_ETHCR_ETH_SEL_RMII; + if (dwmac->enable_eth_ck) { + /* Internal clock ETH_CLK of 50MHz from RCC is used */ + val |= SYSCFG_ETHCR_ETH_REF_CLK_SEL; + } + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + val = SYSCFG_ETHCR_ETH_SEL_RGMII; + fallthrough; + case PHY_INTERFACE_MODE_GMII: + if (dwmac->enable_eth_ck) { + /* Internal clock ETH_CLK of 125MHz from RCC is used */ + val |= SYSCFG_ETHCR_ETH_CLK_SEL; + } + break; + default: + dev_err(dwmac->dev, "Mode %s not supported", + phy_modes(plat_dat->mac_interface)); + /* Do not manage others interfaces */ + return -EINVAL; + } + + dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->mac_interface)); + + /* Select PTP (IEEE1588) clock selection from RCC (ck_ker_ethxptp) */ + val |= SYSCFG_ETHCR_ETH_PTP_CLK_SEL; + + /* Update ETHCR (set register) */ + return regmap_update_bits(dwmac->regmap, reg, + SYSCFG_MP2_ETH_MASK, val); +} + 
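The stm32mp1_configure_pmcr() hunk above relocates the PMCR field value into the per-instance SYSCFG mask with "val <<= ffs(dwmac->mode_mask) - ffs(SYSCFG_MP1_ETH_MASK);". Below is a minimal standalone C sketch of that ffs() trick only; the masks and selector value are made up for illustration and assume the instance mask sits at or above the reference mask, as the driver does.

#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

/* The field #defines are written against this reference position (bits 4..7). */
#define REF_MASK	0x000000f0u

/* Shift a value expressed against REF_MASK into a per-instance mask whose
 * lowest set bit is at or above REF_MASK's, mirroring the driver's ffs() math.
 */
static uint32_t relocate_field(uint32_t val, uint32_t inst_mask)
{
	return val << (ffs(inst_mask) - ffs(REF_MASK));
}

int main(void)
{
	uint32_t val = 0x00000060;	/* an interface selector against REF_MASK */

	/* This (hypothetical) SoC variant keeps the same field at bits 12..15. */
	printf("0x%08x\n", relocate_field(val, 0x0000f000u));	/* prints 0x00006000 */
	return 0;
}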
+static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat) +{ + struct stm32_dwmac *dwmac = plat_dat->bsp_priv; + int ret; + + ret = stm32mp1_select_ethck_external(plat_dat); + if (ret) + return ret; + + ret = stm32mp1_validate_ethck_rate(plat_dat); + if (ret) + return ret; + + if (!dwmac->ops->is_mp2) + return stm32mp1_configure_pmcr(plat_dat); + else + return stm32mp2_configure_syscfg(plat_dat); } static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat) @@ -227,21 +365,21 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat) switch (plat_dat->mac_interface) { case PHY_INTERFACE_MODE_MII: val = SYSCFG_MCU_ETH_SEL_MII; - pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n"); break; case PHY_INTERFACE_MODE_RMII: val = SYSCFG_MCU_ETH_SEL_RMII; - pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n"); break; default: - pr_debug("SYSCFG init : Do not manage %d interface\n", - plat_dat->mac_interface); + dev_err(dwmac->dev, "Mode %s not supported", + phy_modes(plat_dat->mac_interface)); /* Do not manage others interfaces */ return -EINVAL; } + dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->mac_interface)); + return regmap_update_bits(dwmac->regmap, reg, - dwmac->ops->syscfg_eth_mask, val << 23); + SYSCFG_MCU_ETH_MASK, val << 23); } static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac, bool suspend) @@ -286,8 +424,24 @@ static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac, return PTR_ERR(dwmac->regmap); err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->mode_reg); - if (err) + if (err) { dev_err(dev, "Can't get sysconfig mode offset (%d)\n", err); + return err; + } + + if (dwmac->ops->is_mp2) + return 0; + + dwmac->mode_mask = SYSCFG_MP1_ETH_MASK; + err = of_property_read_u32_index(np, "st,syscon", 2, &dwmac->mode_mask); + if (err) { + if (dwmac->ops->is_mp13) { + dev_err(dev, "Sysconfig register mask must be set (%d)\n", err); + } else { + dev_dbg(dev, "Warning sysconfig register mask not set\n"); + err = 0; + } + } return err; } @@ -305,7 +459,7 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac, /* Gigabit Ethernet 125MHz clock selection. 
*/ dwmac->eth_clk_sel_reg = of_property_read_bool(np, "st,eth-clk-sel"); - /* Ethernet 50Mhz RMII clock selection */ + /* Ethernet 50MHz RMII clock selection */ dwmac->eth_ref_clk_sel_reg = of_property_read_bool(np, "st,eth-ref-clk-sel"); @@ -478,8 +632,7 @@ static SIMPLE_DEV_PM_OPS(stm32_dwmac_pm_ops, stm32_dwmac_suspend, stm32_dwmac_resume); static struct stm32_ops stm32mcu_dwmac_data = { - .set_mode = stm32mcu_set_mode, - .syscfg_eth_mask = SYSCFG_MCU_ETH_MASK + .set_mode = stm32mcu_set_mode }; static struct stm32_ops stm32mp1_dwmac_data = { @@ -487,13 +640,35 @@ static struct stm32_ops stm32mp1_dwmac_data = { .suspend = stm32mp1_suspend, .resume = stm32mp1_resume, .parse_data = stm32mp1_parse_data, - .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK, + .syscfg_clr_off = 0x44, + .is_mp13 = false, + .clk_rx_enable_in_suspend = true +}; + +static struct stm32_ops stm32mp13_dwmac_data = { + .set_mode = stm32mp1_set_mode, + .suspend = stm32mp1_suspend, + .resume = stm32mp1_resume, + .parse_data = stm32mp1_parse_data, + .syscfg_clr_off = 0x08, + .is_mp13 = true, + .clk_rx_enable_in_suspend = true +}; + +static struct stm32_ops stm32mp25_dwmac_data = { + .set_mode = stm32mp1_set_mode, + .suspend = stm32mp1_suspend, + .resume = stm32mp1_resume, + .parse_data = stm32mp1_parse_data, + .is_mp2 = true, .clk_rx_enable_in_suspend = true }; static const struct of_device_id stm32_dwmac_match[] = { { .compatible = "st,stm32-dwmac", .data = &stm32mcu_dwmac_data}, { .compatible = "st,stm32mp1-dwmac", .data = &stm32mp1_dwmac_data}, + { .compatible = "st,stm32mp13-dwmac", .data = &stm32mp13_dwmac_data}, + { .compatible = "st,stm32mp25-dwmac", .data = &stm32mp25_dwmac_data}, { } }; MODULE_DEVICE_TABLE(of, stm32_dwmac_match); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index 8555299443f4..d413d76a8936 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -15,7 +15,7 @@ #include <linux/crc32.h> #include <linux/slab.h> #include <linux/ethtool.h> -#include <asm/io.h> +#include <linux/io.h> #include "stmmac.h" #include "stmmac_pcs.h" #include "dwmac1000.h" @@ -404,11 +404,6 @@ static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral, dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback); } -static void dwmac1000_rane(void __iomem *ioaddr, bool restart) -{ - dwmac_rane(ioaddr, GMAC_PCS_BASE, restart); -} - static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv) { dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv); @@ -519,7 +514,6 @@ const struct stmmac_ops dwmac1000_ops = { .set_eee_pls = dwmac1000_set_eee_pls, .debug = dwmac1000_debug, .pcs_ctrl_ane = dwmac1000_ctrl_ane, - .pcs_rane = dwmac1000_rane, .pcs_get_adv_lp = dwmac1000_get_adv_lp, .set_mac_loopback = dwmac1000_set_mac_loopback, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index daf79cdbd3ec..adccdd816ea9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -12,7 +12,7 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ -#include <asm/io.h> +#include <linux/io.h> #include "dwmac1000.h" #include "dwmac_dma.h" diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c index 
7667d103cd0e..14e847c0e1a9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c @@ -15,7 +15,7 @@ *******************************************************************************/ #include <linux/crc32.h> -#include <asm/io.h> +#include <linux/io.h> #include "stmmac.h" #include "dwmac100.h" diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c index dea270f60cc3..b402fb54f613 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c @@ -14,7 +14,7 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ -#include <asm/io.h> +#include <linux/io.h> #include "dwmac100.h" #include "dwmac_dma.h" diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index b25774d69195..dbd9f93b2460 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -758,11 +758,6 @@ static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral, dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback); } -static void dwmac4_rane(void __iomem *ioaddr, bool restart) -{ - dwmac_rane(ioaddr, GMAC_PCS_BASE, restart); -} - static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv) { dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv); @@ -1215,7 +1210,6 @@ const struct stmmac_ops dwmac4_ops = { .set_eee_timer = dwmac4_set_eee_timer, .set_eee_pls = dwmac4_set_eee_pls, .pcs_ctrl_ane = dwmac4_ctrl_ane, - .pcs_rane = dwmac4_rane, .pcs_get_adv_lp = dwmac4_get_adv_lp, .debug = dwmac4_debug, .set_filter = dwmac4_set_filter, @@ -1260,7 +1254,6 @@ const struct stmmac_ops dwmac410_ops = { .set_eee_timer = dwmac4_set_eee_timer, .set_eee_pls = dwmac4_set_eee_pls, .pcs_ctrl_ane = dwmac4_ctrl_ane, - .pcs_rane = dwmac4_rane, .pcs_get_adv_lp = dwmac4_get_adv_lp, .debug = dwmac4_debug, .set_filter = dwmac4_set_filter, @@ -1309,7 +1302,6 @@ const struct stmmac_ops dwmac510_ops = { .set_eee_timer = dwmac4_set_eee_timer, .set_eee_pls = dwmac4_set_eee_pls, .pcs_ctrl_ane = dwmac4_ctrl_ane, - .pcs_rane = dwmac4_rane, .pcs_get_adv_lp = dwmac4_get_adv_lp, .debug = dwmac4_debug, .set_filter = dwmac4_set_filter, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index f8e7775bb633..6a987cf598e4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -1554,9 +1554,6 @@ const struct stmmac_ops dwxgmac210_ops = { .reset_eee_mode = dwxgmac2_reset_eee_mode, .set_eee_timer = dwxgmac2_set_eee_timer, .set_eee_pls = dwxgmac2_set_eee_pls, - .pcs_ctrl_ane = NULL, - .pcs_rane = NULL, - .pcs_get_adv_lp = NULL, .debug = NULL, .set_filter = dwxgmac2_set_filter, .safety_feat_config = dwxgmac3_safety_feat_config, @@ -1614,9 +1611,6 @@ const struct stmmac_ops dwxlgmac2_ops = { .reset_eee_mode = dwxgmac2_reset_eee_mode, .set_eee_timer = dwxgmac2_set_eee_timer, .set_eee_pls = dwxgmac2_set_eee_pls, - .pcs_ctrl_ane = NULL, - .pcs_rane = NULL, - .pcs_get_adv_lp = NULL, .debug = NULL, .set_filter = dwxgmac2_set_filter, .safety_feat_config = dwxgmac3_safety_feat_config, diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 90384db228b5..97934ccba5b1 100644 --- 
a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -370,7 +370,6 @@ struct stmmac_ops { /* PCS calls */ void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral, bool loopback); - void (*pcs_rane)(void __iomem *ioaddr, bool restart); void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv); /* Safety Features */ int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp, @@ -484,8 +483,6 @@ struct stmmac_ops { stmmac_do_void_callback(__priv, mac, debug, __priv, __args) #define stmmac_pcs_ctrl_ane(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, pcs_ctrl_ane, __args) -#define stmmac_pcs_rane(__priv, __args...) \ - stmmac_do_void_callback(__priv, mac, pcs_rane, __priv, __args) #define stmmac_pcs_get_adv_lp(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, pcs_get_adv_lp, __args) #define stmmac_safety_feat_config(__priv, __args...) \ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 542e2633a6f5..7008219fd88d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -11,10 +11,10 @@ #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/interrupt.h> +#include <linux/io.h> #include <linux/mii.h> #include <linux/phylink.h> #include <linux/net_tstamp.h> -#include <asm/io.h> #include "stmmac.h" #include "dwmac_dma.h" @@ -1199,7 +1199,7 @@ static int stmmac_set_channels(struct net_device *dev, } static int stmmac_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct stmmac_priv *priv = netdev_priv(dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index c58782c41417..4b6a359e5a94 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -471,13 +471,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv) { int eee_tw_timer = priv->eee_tw_timer; - /* Using PCS we cannot dial with the phy registers at this stage - * so we do not support extra feature like EEE. - */ - if (priv->hw->pcs == STMMAC_PCS_TBI || - priv->hw->pcs == STMMAC_PCS_RTBI) - return false; - /* Check if MAC core supports the EEE feature. */ if (!priv->dma_cap.eee) return false; @@ -956,11 +949,15 @@ static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config, phy_interface_t interface) { struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); + struct phylink_pcs *pcs; - if (priv->hw->xpcs) - return &priv->hw->xpcs->pcs; + if (priv->plat->select_pcs) { + pcs = priv->plat->select_pcs(priv, interface); + if (!IS_ERR(pcs)) + return pcs; + } - return priv->hw->phylink_pcs; + return NULL; } static void stmmac_mac_config(struct phylink_config *config, unsigned int mode, @@ -1228,8 +1225,8 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) mdio_bus_data = priv->plat->mdio_bus_data; if (mdio_bus_data) - priv->phylink_config.ovr_an_inband = - mdio_bus_data->xpcs_an_inband; + priv->phylink_config.default_an_inband = + mdio_bus_data->default_an_inband; /* Set the platform/firmware specified interface mode. Note, phylink * deals with the PHY interface mode, not the MAC interface mode. 
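With the stmmac_mac_select_pcs() change above, phylink now obtains the PCS from an optional plat->select_pcs() hook rather than hard-wired priv->hw state. The fragment below is a hedged sketch of what such a glue-driver hook could look like, not part of the patch; foo_dwmac is an invented name, and the real users added in this series are dwmac-intel, dwmac-rzn1 and dwmac-socfpga.

/* Sketch only: hand phylink the XPCS for the serdes modes it drives and let
 * everything else fall back to "no PCS" (stmmac_mac_select_pcs() returns NULL
 * when the hook returns an ERR_PTR).
 */
static struct phylink_pcs *foo_dwmac_select_pcs(struct stmmac_priv *priv,
						phy_interface_t interface)
{
	if (interface == PHY_INTERFACE_MODE_SGMII ||
	    interface == PHY_INTERFACE_MODE_1000BASEX)
		return &priv->hw->xpcs->pcs;

	return ERR_PTR(-EOPNOTSUPP);
}

The glue driver would then set plat_dat->select_pcs = foo_dwmac_select_pcs in its probe path, in the same way the rzn1 and socfpga probes do above.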
@@ -3953,9 +3950,7 @@ static int __stmmac_open(struct net_device *dev, if (ret < 0) return ret; - if (priv->hw->pcs != STMMAC_PCS_TBI && - priv->hw->pcs != STMMAC_PCS_RTBI && - (!priv->hw->xpcs || + if ((!priv->hw->xpcs || xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) { ret = stmmac_init_phy(dev); if (ret) { @@ -4097,8 +4092,6 @@ static int stmmac_release(struct net_device *dev) if (priv->plat->serdes_powerdown) priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv); - netif_carrier_off(dev); - stmmac_release_ptp(priv); pm_runtime_put(priv->device); @@ -4244,18 +4237,32 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) { struct dma_desc *desc, *first, *mss_desc = NULL; struct stmmac_priv *priv = netdev_priv(dev); - int nfrags = skb_shinfo(skb)->nr_frags; - u32 queue = skb_get_queue_mapping(skb); + int tmp_pay_len = 0, first_tx, nfrags; unsigned int first_entry, tx_packets; struct stmmac_txq_stats *txq_stats; - int tmp_pay_len = 0, first_tx; struct stmmac_tx_queue *tx_q; - bool has_vlan, set_ic; + u32 pay_len, mss, queue; u8 proto_hdr_len, hdr; - u32 pay_len, mss; dma_addr_t des; + bool set_ic; int i; + /* Always insert VLAN tag to SKB payload for TSO frames. + * + * Never insert VLAN tag by HW, since segments splited by + * TSO engine will be un-tagged by mistake. + */ + if (skb_vlan_tag_present(skb)) { + skb = __vlan_hwaccel_push_inside(skb); + if (unlikely(!skb)) { + priv->xstats.tx_dropped++; + return NETDEV_TX_OK; + } + } + + nfrags = skb_shinfo(skb)->nr_frags; + queue = skb_get_queue_mapping(skb); + tx_q = &priv->dma_conf.tx_queue[queue]; txq_stats = &priv->xstats.txq_stats[queue]; first_tx = tx_q->cur_tx; @@ -4308,9 +4315,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) skb->data_len); } - /* Check if VLAN can be inserted by HW */ - has_vlan = stmmac_vlan_insert(priv, skb, tx_q); - first_entry = tx_q->cur_tx; WARN_ON(tx_q->tx_skbuff[first_entry]); @@ -4320,9 +4324,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) desc = &tx_q->dma_tx[first_entry]; first = desc; - if (has_vlan) - stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); - /* first descriptor: fill Headers on Buf1 */ des = dma_map_single(priv->device, skb->data, skb_headlen(skb), DMA_TO_DEVICE); @@ -7690,8 +7691,6 @@ int stmmac_dvr_probe(struct device *device, ndev->features |= NETIF_F_RXHASH; ndev->vlan_features |= ndev->features; - /* TSO doesn't work on VLANs yet */ - ndev->vlan_features &= ~NETIF_F_TSO; /* MTU range: 46 - hw-specific max */ ndev->min_mtu = ETH_ZLEN - ETH_HLEN; @@ -7740,16 +7739,12 @@ int stmmac_dvr_probe(struct device *device, if (!pm_runtime_enabled(device)) pm_runtime_enable(device); - if (priv->hw->pcs != STMMAC_PCS_TBI && - priv->hw->pcs != STMMAC_PCS_RTBI) { - /* MDIO bus Registration */ - ret = stmmac_mdio_register(ndev); - if (ret < 0) { - dev_err_probe(priv->device, ret, - "%s: MDIO bus (id: %d) registration failed\n", - __func__, priv->plat->bus_id); - goto error_mdio_register; - } + ret = stmmac_mdio_register(ndev); + if (ret < 0) { + dev_err_probe(priv->device, ret, + "MDIO bus (id: %d) registration failed\n", + priv->plat->bus_id); + goto error_mdio_register; } if (priv->plat->speed_mode_2500) @@ -7791,9 +7786,7 @@ error_netdev_register: error_phy_setup: stmmac_pcs_clean(ndev); error_pcs_setup: - if (priv->hw->pcs != STMMAC_PCS_TBI && - priv->hw->pcs != STMMAC_PCS_RTBI) - stmmac_mdio_unregister(ndev); + stmmac_mdio_unregister(ndev); error_mdio_register: stmmac_napi_del(ndev); 
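The stmmac_tso_xmit() change above stops asking the hardware to insert VLAN tags for TSO frames and instead pushes the tag into the payload before segmentation, so segments cannot come out untagged. The sketch below shows the shape of that pattern in a generic driver; foo_xmit_tso is a made-up function, not stmmac code, and the key detail is that __vlan_hwaccel_push_inside() frees the skb on failure, so the caller only accounts the drop.

static netdev_tx_t foo_xmit_tso(struct sk_buff *skb, struct net_device *dev)
{
	if (skb_vlan_tag_present(skb)) {
		/* Move the out-of-band tag into the packet so the TSO engine
		 * cannot emit untagged segments; NULL means the skb is gone.
		 */
		skb = __vlan_hwaccel_push_inside(skb);
		if (unlikely(!skb)) {
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
	}

	/* Descriptor setup for the (now in-band tagged) skb would follow. */
	return NETDEV_TX_OK;
}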
error_hw_init: @@ -7822,7 +7815,6 @@ void stmmac_dvr_remove(struct device *dev) stmmac_stop_all_dma(priv); stmmac_mac_set(priv, priv->ioaddr, false); - netif_carrier_off(ndev); unregister_netdev(ndev); #ifdef CONFIG_DEBUG_FS @@ -7834,10 +7826,8 @@ void stmmac_dvr_remove(struct device *dev) reset_control_assert(priv->plat->stmmac_ahb_rst); stmmac_pcs_clean(ndev); + stmmac_mdio_unregister(ndev); - if (priv->hw->pcs != STMMAC_PCS_TBI && - priv->hw->pcs != STMMAC_PCS_RTBI) - stmmac_mdio_unregister(ndev); destroy_workqueue(priv->wq); mutex_destroy(&priv->lock); bitmap_free(priv->af_xdp_zc_qps); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index aa43117134d3..03f90676b3ad 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -497,35 +497,33 @@ int stmmac_mdio_reset(struct mii_bus *bus) int stmmac_pcs_setup(struct net_device *ndev) { + struct fwnode_handle *devnode, *pcsnode; struct dw_xpcs *xpcs = NULL; struct stmmac_priv *priv; - int ret = -ENODEV; - int mode, addr; + int addr, mode, ret; priv = netdev_priv(ndev); mode = priv->plat->phy_interface; + devnode = priv->plat->port_node; if (priv->plat->pcs_init) { ret = priv->plat->pcs_init(priv); + } else if (fwnode_property_present(devnode, "pcs-handle")) { + pcsnode = fwnode_find_reference(devnode, "pcs-handle", 0); + xpcs = xpcs_create_fwnode(pcsnode, mode); + fwnode_handle_put(pcsnode); + ret = PTR_ERR_OR_ZERO(xpcs); } else if (priv->plat->mdio_bus_data && - priv->plat->mdio_bus_data->has_xpcs) { - /* Try to probe the XPCS by scanning all addresses */ - for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - xpcs = xpcs_create_mdiodev(priv->mii, addr, mode); - if (IS_ERR(xpcs)) - continue; - - ret = 0; - break; - } + priv->plat->mdio_bus_data->pcs_mask) { + addr = ffs(priv->plat->mdio_bus_data->pcs_mask) - 1; + xpcs = xpcs_create_mdiodev(priv->mii, addr, mode); + ret = PTR_ERR_OR_ZERO(xpcs); } else { return 0; } - if (ret) { - dev_warn(priv->device, "No xPCS found\n"); - return ret; - } + if (ret) + return dev_err_probe(priv->device, ret, "No xPCS found\n"); priv->hw->xpcs = xpcs; @@ -610,7 +608,7 @@ int stmmac_mdio_register(struct net_device *ndev) snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x", new_bus->name, priv->plat->bus_id); new_bus->priv = ndev; - new_bus->phy_mask = mdio_bus_data->phy_mask; + new_bus->phy_mask = mdio_bus_data->phy_mask | mdio_bus_data->pcs_mask; new_bus->parent = priv->device; err = of_mdiobus_register(new_bus, mdio_node); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h index 13a30e6df4c1..1bdf87b237c4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h @@ -75,23 +75,6 @@ static inline void dwmac_pcs_isr(void __iomem *ioaddr, u32 reg, } /** - * dwmac_rane - To restart ANE - * @ioaddr: IO registers pointer - * @reg: Base address of the AN Control Register. - * @restart: to restart ANE - * Description: this is to just restart the Auto-Negotiation. - */ -static inline void dwmac_rane(void __iomem *ioaddr, u32 reg, bool restart) -{ - u32 value = readl(ioaddr + GMAC_AN_CTRL(reg)); - - if (restart) - value |= GMAC_AN_CTRL_RAN; - - writel(value, ioaddr + GMAC_AN_CTRL(reg)); -} - -/** * dwmac_ctrl_ane - To program the AN Control Register. * @ioaddr: IO registers pointer * @reg: Base address of the AN Control Register. 
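stmmac_pcs_setup() and stmmac_mdio_register() above replace the old scan-all-addresses XPCS probe with a pcs_mask: the lowest set bit gives the XPCS MDIO address, and OR-ing the mask into phy_mask keeps the PHY scan away from that address. A tiny standalone C illustration with made-up values:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int pcs_mask = 1u << 18;	/* XPCS at MDIO address 18 (example) */
	unsigned int phy_mask = 0x00000001;	/* bus already skips address 0 */

	int xpcs_addr = ffs(pcs_mask) - 1;	/* -> 18 */

	phy_mask |= pcs_mask;			/* PHY scan now skips address 18 too */
	printf("xpcs addr %d, phy_mask %#x\n", xpcs_addr, phy_mask);
	return 0;
}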
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 54797edc9b38..ad868e8d195d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -764,8 +764,8 @@ EXPORT_SYMBOL_GPL(stmmac_get_platform_resources); * Description: Call the platform's init callback (if any) and propagate * the return value. */ -int stmmac_pltfr_init(struct platform_device *pdev, - struct plat_stmmacenet_data *plat) +static int stmmac_pltfr_init(struct platform_device *pdev, + struct plat_stmmacenet_data *plat) { int ret = 0; @@ -774,7 +774,6 @@ int stmmac_pltfr_init(struct platform_device *pdev, return ret; } -EXPORT_SYMBOL_GPL(stmmac_pltfr_init); /** * stmmac_pltfr_exit @@ -782,13 +781,12 @@ EXPORT_SYMBOL_GPL(stmmac_pltfr_init); * @plat: driver data platform structure * Description: Call the platform's exit callback (if any). */ -void stmmac_pltfr_exit(struct platform_device *pdev, - struct plat_stmmacenet_data *plat) +static void stmmac_pltfr_exit(struct platform_device *pdev, + struct plat_stmmacenet_data *plat) { if (plat->exit) plat->exit(pdev, plat->bsp_priv); } -EXPORT_SYMBOL_GPL(stmmac_pltfr_exit); /** * stmmac_pltfr_probe diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h index bb6fc7e59aed..72dc1a32e46d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h @@ -17,11 +17,6 @@ devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac); int stmmac_get_platform_resources(struct platform_device *pdev, struct stmmac_resources *stmmac_res); -int stmmac_pltfr_init(struct platform_device *pdev, - struct plat_stmmacenet_data *plat); -void stmmac_pltfr_exit(struct platform_device *pdev, - struct plat_stmmacenet_data *plat); - int stmmac_pltfr_probe(struct platform_device *pdev, struct plat_stmmacenet_data *plat, struct stmmac_resources *res); diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c index f8e133604146..131786aa4d5b 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c @@ -21,8 +21,6 @@ #include "dwc-xlgmac.h" #include "dwc-xlgmac-reg.h" -MODULE_LICENSE("Dual BSD/GPL"); - static int debug = -1; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "DWC ethernet debug level (0=none,...,16=all)"); @@ -725,3 +723,8 @@ void xlgmac_print_all_hw_features(struct xlgmac_pdata *pdata) XLGMAC_PR("=====================================================\n"); XLGMAC_PR("\n"); } + +MODULE_DESCRIPTION(XLGMAC_DRV_DESC); +MODULE_VERSION(XLGMAC_DRV_VERSION); +MODULE_AUTHOR("Jie Deng <jiedeng@synopsys.com>"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c index fa8604d7b797..36fe538e3332 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c @@ -71,8 +71,3 @@ static struct pci_driver xlgmac_pci_driver = { }; module_pci_driver(xlgmac_pci_driver); - -MODULE_DESCRIPTION(XLGMAC_DRV_DESC); -MODULE_VERSION(XLGMAC_DRV_VERSION); -MODULE_AUTHOR("Jie Deng <jiedeng@synopsys.com>"); -MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/tehuti/Kconfig b/drivers/net/ethernet/tehuti/Kconfig index 8735633765a1..6db2c9817445 100644 --- 
a/drivers/net/ethernet/tehuti/Kconfig +++ b/drivers/net/ethernet/tehuti/Kconfig @@ -23,4 +23,19 @@ config TEHUTI help Tehuti Networks 10G Ethernet NIC +config TEHUTI_TN40 + tristate "Tehuti Networks TN40xx 10G Ethernet adapters" + depends on PCI + select PAGE_POOL + select FW_LOADER + select PHYLINK + help + This driver supports 10G Ethernet adapters using Tehuti Networks + TN40xx chips. Currently, adapters with Applied Micro Circuits + Corporation QT2025 are supported; Tehuti Networks TN9310, + DLink DXE-810S, ASUS XG-C100F, and Edimax EN-9320. + + To compile this driver as a module, choose M here: the module + will be called tn40xx. + endif # NET_VENDOR_TEHUTI diff --git a/drivers/net/ethernet/tehuti/Makefile b/drivers/net/ethernet/tehuti/Makefile index 13a0ddd62088..0d4f4d63a65c 100644 --- a/drivers/net/ethernet/tehuti/Makefile +++ b/drivers/net/ethernet/tehuti/Makefile @@ -4,3 +4,6 @@ # obj-$(CONFIG_TEHUTI) += tehuti.o + +tn40xx-y := tn40.o tn40_mdio.o tn40_phy.o +obj-$(CONFIG_TEHUTI_TN40) += tn40xx.o diff --git a/drivers/net/ethernet/tehuti/tn40.c b/drivers/net/ethernet/tehuti/tn40.c new file mode 100644 index 000000000000..259bdac24cf2 --- /dev/null +++ b/drivers/net/ethernet/tehuti/tn40.c @@ -0,0 +1,1850 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) Tehuti Networks Ltd. */ + +#include <linux/bitfield.h> +#include <linux/ethtool.h> +#include <linux/firmware.h> +#include <linux/if_vlan.h> +#include <linux/iopoll.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/phylink.h> +#include <linux/vmalloc.h> +#include <net/netdev_queues.h> +#include <net/page_pool/helpers.h> + +#include "tn40.h" + +#define TN40_SHORT_PACKET_SIZE 60 +#define TN40_FIRMWARE_NAME "tehuti/bdx.bin" + +static void tn40_enable_interrupts(struct tn40_priv *priv) +{ + tn40_write_reg(priv, TN40_REG_IMR, priv->isr_mask); +} + +static void tn40_disable_interrupts(struct tn40_priv *priv) +{ + tn40_write_reg(priv, TN40_REG_IMR, 0); +} + +static int tn40_fifo_alloc(struct tn40_priv *priv, struct tn40_fifo *f, + int fsz_type, + u16 reg_cfg0, u16 reg_cfg1, + u16 reg_rptr, u16 reg_wptr) +{ + u16 memsz = TN40_FIFO_SIZE * (1 << fsz_type); + u64 cfg_base; + + memset(f, 0, sizeof(struct tn40_fifo)); + /* 1K extra space is allocated at the end of the fifo to simplify + * processing of descriptors that wraps around fifo's end. 
+ */ + f->va = dma_alloc_coherent(&priv->pdev->dev, + memsz + TN40_FIFO_EXTRA_SPACE, &f->da, + GFP_KERNEL); + if (!f->va) + return -ENOMEM; + + f->reg_cfg0 = reg_cfg0; + f->reg_cfg1 = reg_cfg1; + f->reg_rptr = reg_rptr; + f->reg_wptr = reg_wptr; + f->rptr = 0; + f->wptr = 0; + f->memsz = memsz; + f->size_mask = memsz - 1; + cfg_base = lower_32_bits((f->da & TN40_TX_RX_CFG0_BASE) | fsz_type); + tn40_write_reg(priv, reg_cfg0, cfg_base); + tn40_write_reg(priv, reg_cfg1, upper_32_bits(f->da)); + return 0; +} + +static void tn40_fifo_free(struct tn40_priv *priv, struct tn40_fifo *f) +{ + dma_free_coherent(&priv->pdev->dev, + f->memsz + TN40_FIFO_EXTRA_SPACE, f->va, f->da); +} + +static struct tn40_rxdb *tn40_rxdb_alloc(int nelem) +{ + size_t size = sizeof(struct tn40_rxdb) + (nelem * sizeof(int)) + + (nelem * sizeof(struct tn40_rx_map)); + struct tn40_rxdb *db; + int i; + + db = vzalloc(size); + if (db) { + db->stack = (int *)(db + 1); + db->elems = (void *)(db->stack + nelem); + db->nelem = nelem; + db->top = nelem; + /* make the first alloc close to db struct */ + for (i = 0; i < nelem; i++) + db->stack[i] = nelem - i - 1; + } + return db; +} + +static void tn40_rxdb_free(struct tn40_rxdb *db) +{ + vfree(db); +} + +static int tn40_rxdb_alloc_elem(struct tn40_rxdb *db) +{ + return db->stack[--db->top]; +} + +static void *tn40_rxdb_addr_elem(struct tn40_rxdb *db, unsigned int n) +{ + return db->elems + n; +} + +static int tn40_rxdb_available(struct tn40_rxdb *db) +{ + return db->top; +} + +static void tn40_rxdb_free_elem(struct tn40_rxdb *db, unsigned int n) +{ + db->stack[db->top++] = n; +} + +/** + * tn40_create_rx_ring - Initialize all RX-related HW and SW resources + * @priv: NIC private structure + * + * create_rx_ring creates rxf and rxd fifos, updates the relevant HW registers, + * preallocates skbs for rx. It assumes that Rx is disabled in HW. Functions are + * grouped for better cache usage. + * + * RxD fifo is smaller than RxF fifo by design. Upon high load, RxD will be + * filled and packets will be dropped by the NIC without getting into the host + * or generating interrupts. In this situation the host has no chance of + * processing all the packets. Dropping packets by the NIC is cheaper, since it + * takes 0 CPU cycles. + * + * Return: 0 on success and a negative value on error. 
+ */ +static int tn40_create_rx_ring(struct tn40_priv *priv) +{ + struct page_pool_params pp = { + .dev = &priv->pdev->dev, + .napi = &priv->napi, + .dma_dir = DMA_FROM_DEVICE, + .netdev = priv->ndev, + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .max_len = PAGE_SIZE, + }; + int ret, pkt_size, nr; + + priv->page_pool = page_pool_create(&pp); + if (IS_ERR(priv->page_pool)) + return PTR_ERR(priv->page_pool); + + ret = tn40_fifo_alloc(priv, &priv->rxd_fifo0.m, priv->rxd_size, + TN40_REG_RXD_CFG0_0, TN40_REG_RXD_CFG1_0, + TN40_REG_RXD_RPTR_0, TN40_REG_RXD_WPTR_0); + if (ret) + goto err_destroy_page_pool; + + ret = tn40_fifo_alloc(priv, &priv->rxf_fifo0.m, priv->rxf_size, + TN40_REG_RXF_CFG0_0, TN40_REG_RXF_CFG1_0, + TN40_REG_RXF_RPTR_0, TN40_REG_RXF_WPTR_0); + if (ret) + goto err_free_rxd; + + pkt_size = priv->ndev->mtu + VLAN_ETH_HLEN; + priv->rxf_fifo0.m.pktsz = pkt_size; + nr = priv->rxf_fifo0.m.memsz / sizeof(struct tn40_rxf_desc); + priv->rxdb0 = tn40_rxdb_alloc(nr); + if (!priv->rxdb0) { + ret = -ENOMEM; + goto err_free_rxf; + } + return 0; +err_free_rxf: + tn40_fifo_free(priv, &priv->rxf_fifo0.m); +err_free_rxd: + tn40_fifo_free(priv, &priv->rxd_fifo0.m); +err_destroy_page_pool: + page_pool_destroy(priv->page_pool); + return ret; +} + +static void tn40_rx_free_buffers(struct tn40_priv *priv) +{ + struct tn40_rxdb *db = priv->rxdb0; + struct tn40_rx_map *dm; + u16 i; + + netdev_dbg(priv->ndev, "total =%d free =%d busy =%d\n", db->nelem, + tn40_rxdb_available(db), + db->nelem - tn40_rxdb_available(db)); + + for (i = 0; i < db->nelem; i++) { + dm = tn40_rxdb_addr_elem(db, i); + if (dm->page) + page_pool_put_full_page(priv->page_pool, dm->page, + false); + } +} + +static void tn40_destroy_rx_ring(struct tn40_priv *priv) +{ + if (priv->rxdb0) { + tn40_rx_free_buffers(priv); + tn40_rxdb_free(priv->rxdb0); + priv->rxdb0 = NULL; + } + tn40_fifo_free(priv, &priv->rxf_fifo0.m); + tn40_fifo_free(priv, &priv->rxd_fifo0.m); + page_pool_destroy(priv->page_pool); +} + +static void tn40_set_rx_desc(struct tn40_priv *priv, int idx, u64 dma) +{ + struct tn40_rxf_fifo *f = &priv->rxf_fifo0; + struct tn40_rxf_desc *rxfd; + int delta; + + rxfd = (struct tn40_rxf_desc *)(f->m.va + f->m.wptr); + rxfd->info = cpu_to_le32(0x10003); /* INFO =1 BC =3 */ + rxfd->va_lo = cpu_to_le32(idx); + rxfd->pa_lo = cpu_to_le32(lower_32_bits(dma)); + rxfd->pa_hi = cpu_to_le32(upper_32_bits(dma)); + rxfd->len = cpu_to_le32(f->m.pktsz); + f->m.wptr += sizeof(struct tn40_rxf_desc); + delta = f->m.wptr - f->m.memsz; + if (unlikely(delta >= 0)) { + f->m.wptr = delta; + if (delta > 0) { + memcpy(f->m.va, f->m.va + f->m.memsz, delta); + netdev_dbg(priv->ndev, + "wrapped rxd descriptor\n"); + } + } +} + +/** + * tn40_rx_alloc_buffers - Fill rxf fifo with buffers. + * + * @priv: NIC's private structure + * + * rx_alloc_buffers allocates buffers via the page pool API, builds rxf descs + * and pushes them (rxf descr) into the rxf fifo. The pages are stored in rxdb. + * To calculate the free space, we uses the cached values of RPTR and WPTR + * when needed. This function also updates RPTR and WPTR. 
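+ *
+ * Per-buffer flow, as a rough sketch of the loop below (all helpers are
+ * the ones defined above; error handling omitted):
+ *
+ *	page = page_pool_dev_alloc_pages(priv->page_pool);
+ *	idx = tn40_rxdb_alloc_elem(db);
+ *	tn40_set_rx_desc(priv, idx, page_pool_get_dma_addr(page));
+ *	dm = tn40_rxdb_addr_elem(db, idx);
+ *	dm->page = page;
+ *
+ * tn40_rxdb_alloc_elem() simply pops a free slot index off the rxdb
+ * stack; the completion (RXD) descriptor later carries that index back
+ * in its va_lo field, which is how tn40_rx_receive() finds the page and
+ * eventually returns the slot via tn40_rxdb_free_elem().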
+ */ +static void tn40_rx_alloc_buffers(struct tn40_priv *priv) +{ + struct tn40_rxf_fifo *f = &priv->rxf_fifo0; + struct tn40_rxdb *db = priv->rxdb0; + struct tn40_rx_map *dm; + struct page *page; + int dno, i, idx; + + dno = tn40_rxdb_available(db) - 1; + for (i = dno; i > 0; i--) { + page = page_pool_dev_alloc_pages(priv->page_pool); + if (!page) + break; + + idx = tn40_rxdb_alloc_elem(db); + tn40_set_rx_desc(priv, idx, page_pool_get_dma_addr(page)); + dm = tn40_rxdb_addr_elem(db, idx); + dm->page = page; + } + if (i != dno) + tn40_write_reg(priv, f->m.reg_wptr, + f->m.wptr & TN40_TXF_WPTR_WR_PTR); + netdev_dbg(priv->ndev, "write_reg 0x%04x f->m.reg_wptr 0x%x\n", + f->m.reg_wptr, f->m.wptr & TN40_TXF_WPTR_WR_PTR); + netdev_dbg(priv->ndev, "read_reg 0x%04x f->m.reg_rptr=0x%x\n", + f->m.reg_rptr, tn40_read_reg(priv, f->m.reg_rptr)); + netdev_dbg(priv->ndev, "write_reg 0x%04x f->m.reg_wptr=0x%x\n", + f->m.reg_wptr, tn40_read_reg(priv, f->m.reg_wptr)); +} + +static void tn40_recycle_rx_buffer(struct tn40_priv *priv, + struct tn40_rxd_desc *rxdd) +{ + struct tn40_rxf_fifo *f = &priv->rxf_fifo0; + struct tn40_rx_map *dm; + int idx; + + idx = le32_to_cpu(rxdd->va_lo); + dm = tn40_rxdb_addr_elem(priv->rxdb0, idx); + tn40_set_rx_desc(priv, idx, page_pool_get_dma_addr(dm->page)); + + tn40_write_reg(priv, f->m.reg_wptr, f->m.wptr & TN40_TXF_WPTR_WR_PTR); +} + +static int tn40_rx_receive(struct tn40_priv *priv, int budget) +{ + struct tn40_rxd_fifo *f = &priv->rxd_fifo0; + u32 rxd_val1, rxd_err, pkt_id; + int tmp_len, size, done = 0; + struct tn40_rxdb *db = NULL; + struct tn40_rxd_desc *rxdd; + struct tn40_rx_map *dm; + struct sk_buff *skb; + u16 len, rxd_vlan; + int idx; + + f->m.wptr = tn40_read_reg(priv, f->m.reg_wptr) & TN40_TXF_WPTR_WR_PTR; + size = f->m.wptr - f->m.rptr; + if (size < 0) + size += f->m.memsz; /* Size is negative :-) */ + + while (size > 0) { + rxdd = (struct tn40_rxd_desc *)(f->m.va + f->m.rptr); + db = priv->rxdb0; + + /* We have a chicken and egg problem here. If the + * descriptor is wrapped we first need to copy the tail + * of the descriptor to the end of the buffer before + * extracting values from the descriptor. However in + * order to know if the descriptor is wrapped we need to + * obtain the length of the descriptor from (the + * wrapped) descriptor. Luckily the length is the first + * word of the descriptor. Descriptor lengths are + * multiples of 8 bytes so in case of a wrapped + * descriptor the first 8 bytes guaranteed to appear + * before the end of the buffer. We first obtain the + * length, we then copy the rest of the descriptor if + * needed and then extract the rest of the values from + * the descriptor. + * + * Do not change the order of operations as it will + * break the code!!! + */ + rxd_val1 = le32_to_cpu(rxdd->rxd_val1); + tmp_len = TN40_GET_RXD_BC(rxd_val1) << 3; + pkt_id = TN40_GET_RXD_PKT_ID(rxd_val1); + size -= tmp_len; + /* CHECK FOR A PARTIALLY ARRIVED DESCRIPTOR */ + if (size < 0) { + netdev_dbg(priv->ndev, + "%s partially arrived desc tmp_len %d\n", + __func__, tmp_len); + break; + } + /* make sure that the descriptor fully is arrived + * before reading the rest of the descriptor. + */ + rmb(); + + /* A special treatment is given to non-contiguous + * descriptors that start near the end, wraps around + * and continue at the beginning. The second part is + * copied right after the first, and then descriptor + * is interpreted as normal. The fifo has an extra + * space to allow such operations. 
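+ *
+ * Concretely, in the pointer math below: once rptr passes memsz, the
+ * first (rptr - memsz) bytes of the fifo are memcpy()ed into the extra
+ * area just past memsz, so the wrapped descriptor can be parsed as one
+ * contiguous block before rptr is folded back to the start.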
+ */ + + /* HAVE WE REACHED THE END OF THE QUEUE? */ + f->m.rptr += tmp_len; + tmp_len = f->m.rptr - f->m.memsz; + if (unlikely(tmp_len >= 0)) { + f->m.rptr = tmp_len; + if (tmp_len > 0) { + /* COPY PARTIAL DESCRIPTOR + * TO THE END OF THE QUEUE + */ + netdev_dbg(priv->ndev, + "wrapped desc rptr=%d tmp_len=%d\n", + f->m.rptr, tmp_len); + memcpy(f->m.va + f->m.memsz, f->m.va, tmp_len); + } + } + idx = le32_to_cpu(rxdd->va_lo); + dm = tn40_rxdb_addr_elem(db, idx); + prefetch(dm); + + len = le16_to_cpu(rxdd->len); + rxd_vlan = le16_to_cpu(rxdd->rxd_vlan); + /* CHECK FOR ERRORS */ + rxd_err = TN40_GET_RXD_ERR(rxd_val1); + if (unlikely(rxd_err)) { + u64_stats_update_begin(&priv->syncp); + priv->stats.rx_errors++; + u64_stats_update_end(&priv->syncp); + tn40_recycle_rx_buffer(priv, rxdd); + continue; + } + + skb = napi_build_skb(page_address(dm->page), PAGE_SIZE); + if (!skb) { + u64_stats_update_begin(&priv->syncp); + priv->stats.rx_dropped++; + priv->alloc_fail++; + u64_stats_update_end(&priv->syncp); + tn40_recycle_rx_buffer(priv, rxdd); + break; + } + skb_mark_for_recycle(skb); + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, priv->ndev); + skb->ip_summed = + (pkt_id == 0) ? CHECKSUM_NONE : CHECKSUM_UNNECESSARY; + if (TN40_GET_RXD_VTAG(rxd_val1)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + TN40_GET_RXD_VLAN_TCI(rxd_vlan)); + + dm->page = NULL; + tn40_rxdb_free_elem(db, idx); + + napi_gro_receive(&priv->napi, skb); + + u64_stats_update_begin(&priv->syncp); + priv->stats.rx_bytes += len; + u64_stats_update_end(&priv->syncp); + + if (unlikely(++done >= budget)) + break; + } + u64_stats_update_begin(&priv->syncp); + priv->stats.rx_packets += done; + u64_stats_update_end(&priv->syncp); + /* FIXME: Do something to minimize pci accesses */ + tn40_write_reg(priv, f->m.reg_rptr, f->m.rptr & TN40_TXF_WPTR_WR_PTR); + tn40_rx_alloc_buffers(priv); + return done; +} + +/* TX HW/SW interaction overview + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * There are 2 types of TX communication channels between driver and NIC. + * 1) TX Free Fifo - TXF - Holds ack descriptors for sent packets. + * 2) TX Data Fifo - TXD - Holds descriptors of full buffers. + * + * Currently the NIC supports TSO, checksumming and gather DMA + * UFO and IP fragmentation is on the way. + * + * RX SW Data Structures + * ~~~~~~~~~~~~~~~~~~~~~ + * TXDB is used to keep track of all skbs owned by SW and their DMA addresses. + * For TX case, ownership lasts from getting the packet via hard_xmit and + * until the HW acknowledges sending the packet by TXF descriptors. + * TXDB is implemented as a cyclic buffer. + * + * FIFO objects keep info about the fifo's size and location, relevant HW + * registers, usage and skb db. Each RXD and RXF fifo has their own fifo + * structure. Implemented as simple struct. + * + * TX SW Execution Flow + * ~~~~~~~~~~~~~~~~~~~~ + * OS calls the driver's hard_xmit method with a packet to send. The driver + * creates DMA mappings, builds TXD descriptors and kicks the HW by updating + * TXD WPTR. + * + * When a packet is sent, The HW write a TXF descriptor and the SW + * frees the original skb. To prevent TXD fifo overflow without + * reading HW registers every time, the SW deploys "tx level" + * technique. Upon startup, the tx level is initialized to TXD fifo + * length. For every sent packet, the SW gets its TXD descriptor size + * (from a pre-calculated array) and subtracts it from tx level. The + * size is also stored in txdb. 
When a TXF ack arrives, the SW fetched + * the size of the original TXD descriptor from the txdb and adds it + * to the tx level. When the Tx level drops below some predefined + * threshold, the driver stops the TX queue. When the TX level rises + * above that level, the tx queue is enabled again. + * + * This technique avoids excessive reading of RPTR and WPTR registers. + * As our benchmarks shows, it adds 1.5 Gbit/sec to NIC's throughput. + */ +static void tn40_do_tx_db_ptr_next(struct tn40_txdb *db, + struct tn40_tx_map **pptr) +{ + ++*pptr; + if (unlikely(*pptr == db->end)) + *pptr = db->start; +} + +static void tn40_tx_db_inc_rptr(struct tn40_txdb *db) +{ + tn40_do_tx_db_ptr_next(db, &db->rptr); +} + +static void tn40_tx_db_inc_wptr(struct tn40_txdb *db) +{ + tn40_do_tx_db_ptr_next(db, &db->wptr); +} + +static int tn40_tx_db_init(struct tn40_txdb *d, int sz_type) +{ + int memsz = TN40_FIFO_SIZE * (1 << (sz_type + 1)); + + d->start = vzalloc(memsz); + if (!d->start) + return -ENOMEM; + /* In order to differentiate between an empty db state and a full db + * state at least one element should always be empty in order to + * avoid rptr == wptr, which means that the db is empty. + */ + d->size = memsz / sizeof(struct tn40_tx_map) - 1; + d->end = d->start + d->size + 1; /* just after last element */ + + /* All dbs are created empty */ + d->rptr = d->start; + d->wptr = d->start; + return 0; +} + +static void tn40_tx_db_close(struct tn40_txdb *d) +{ + if (d->start) { + vfree(d->start); + d->start = NULL; + } +} + +/* Sizes of tx desc (including padding if needed) as function of the SKB's + * frag number + * 7 - is number of lwords in txd with one phys buffer + * 3 - is number of lwords used for every additional phys buffer + * for (i = 0; i < TN40_MAX_PBL; i++) { + * lwords = 7 + (i * 3); + * if (lwords & 1) + * lwords++; pad it with 1 lword + * tn40_txd_sizes[i].bytes = lwords << 2; + * tn40_txd_sizes[i].qwords = lwords >> 1; + * } + */ +static struct { + u16 bytes; + u16 qwords; /* qword = 64 bit */ +} tn40_txd_sizes[] = { + {0x20, 0x04}, + {0x28, 0x05}, + {0x38, 0x07}, + {0x40, 0x08}, + {0x50, 0x0a}, + {0x58, 0x0b}, + {0x68, 0x0d}, + {0x70, 0x0e}, + {0x80, 0x10}, + {0x88, 0x11}, + {0x98, 0x13}, + {0xa0, 0x14}, + {0xb0, 0x16}, + {0xb8, 0x17}, + {0xc8, 0x19}, + {0xd0, 0x1a}, + {0xe0, 0x1c}, + {0xe8, 0x1d}, + {0xf8, 0x1f}, +}; + +static void tn40_pbl_set(struct tn40_pbl *pbl, dma_addr_t dma, int len) +{ + pbl->len = cpu_to_le32(len); + pbl->pa_lo = cpu_to_le32(lower_32_bits(dma)); + pbl->pa_hi = cpu_to_le32(upper_32_bits(dma)); +} + +static void tn40_txdb_set(struct tn40_txdb *db, dma_addr_t dma, int len) +{ + db->wptr->len = len; + db->wptr->addr.dma = dma; +} + +struct tn40_mapping_info { + dma_addr_t dma; + size_t size; +}; + +/** + * tn40_tx_map_skb - create and store DMA mappings for skb's data blocks + * @priv: NIC private structure + * @skb: socket buffer to map + * @txdd: pointer to tx descriptor to be updated + * @pkt_len: pointer to unsigned long value + * + * This function creates DMA mappings for skb's data blocks and writes them to + * PBL of a new tx descriptor. It also stores them in the tx db, so they could + * be unmapped after the data has been sent. It is the responsibility of the + * caller to make sure that there is enough space in the txdb. The last + * element holds a pointer to skb itself and is marked with a zero length. + * + * Return: 0 on success and negative value on error. 
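+ *
+ * Layout sketch for an skb with a linear part and two fragments (three
+ * DMA mappings): three PBL entries are written into @txdd and three
+ * tx_map entries with a positive len and addr.dma are queued in the
+ * txdb, followed by one closing entry whose addr.skb points at the skb
+ * and whose len is the negated TXD size from tn40_txd_sizes[]. The sign
+ * flip is what tells tn40_tx_cleanup() where one packet's mappings end
+ * and which skb to free.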
+ */ +static int tn40_tx_map_skb(struct tn40_priv *priv, struct sk_buff *skb, + struct tn40_txd_desc *txdd, unsigned int *pkt_len) +{ + struct tn40_mapping_info info[TN40_MAX_PBL]; + int nr_frags = skb_shinfo(skb)->nr_frags; + struct tn40_pbl *pbl = &txdd->pbl[0]; + struct tn40_txdb *db = &priv->txdb; + unsigned int size; + int i, len, ret; + dma_addr_t dma; + + netdev_dbg(priv->ndev, "TX skb %p skbLen %d dataLen %d frags %d\n", skb, + skb->len, skb->data_len, nr_frags); + if (nr_frags > TN40_MAX_PBL - 1) { + ret = skb_linearize(skb); + if (ret) + return ret; + nr_frags = skb_shinfo(skb)->nr_frags; + } + /* initial skb */ + len = skb->len - skb->data_len; + dma = dma_map_single(&priv->pdev->dev, skb->data, len, + DMA_TO_DEVICE); + ret = dma_mapping_error(&priv->pdev->dev, dma); + if (ret) + return ret; + + tn40_txdb_set(db, dma, len); + tn40_pbl_set(pbl++, db->wptr->addr.dma, db->wptr->len); + *pkt_len = db->wptr->len; + + for (i = 0; i < nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + size = skb_frag_size(frag); + dma = skb_frag_dma_map(&priv->pdev->dev, frag, 0, + size, DMA_TO_DEVICE); + + ret = dma_mapping_error(&priv->pdev->dev, dma); + if (ret) + goto mapping_error; + info[i].dma = dma; + info[i].size = size; + } + + for (i = 0; i < nr_frags; i++) { + tn40_tx_db_inc_wptr(db); + tn40_txdb_set(db, info[i].dma, info[i].size); + tn40_pbl_set(pbl++, db->wptr->addr.dma, db->wptr->len); + *pkt_len += db->wptr->len; + } + + /* SHORT_PKT_FIX */ + if (skb->len < TN40_SHORT_PACKET_SIZE) + ++nr_frags; + + /* Add skb clean up info. */ + tn40_tx_db_inc_wptr(db); + db->wptr->len = -tn40_txd_sizes[nr_frags].bytes; + db->wptr->addr.skb = skb; + tn40_tx_db_inc_wptr(db); + + return 0; + mapping_error: + dma_unmap_page(&priv->pdev->dev, db->wptr->addr.dma, db->wptr->len, + DMA_TO_DEVICE); + for (; i > 0; i--) + dma_unmap_page(&priv->pdev->dev, info[i - 1].dma, + info[i - 1].size, DMA_TO_DEVICE); + return -ENOMEM; +} + +static int tn40_create_tx_ring(struct tn40_priv *priv) +{ + int ret; + + ret = tn40_fifo_alloc(priv, &priv->txd_fifo0.m, priv->txd_size, + TN40_REG_TXD_CFG0_0, TN40_REG_TXD_CFG1_0, + TN40_REG_TXD_RPTR_0, TN40_REG_TXD_WPTR_0); + if (ret) + return ret; + + ret = tn40_fifo_alloc(priv, &priv->txf_fifo0.m, priv->txf_size, + TN40_REG_TXF_CFG0_0, TN40_REG_TXF_CFG1_0, + TN40_REG_TXF_RPTR_0, TN40_REG_TXF_WPTR_0); + if (ret) + goto err_free_txd; + + /* The TX db has to keep mappings for all packets sent (on + * TxD) and not yet reclaimed (on TxF). + */ + ret = tn40_tx_db_init(&priv->txdb, max(priv->txd_size, priv->txf_size)); + if (ret) + goto err_free_txf; + + /* SHORT_PKT_FIX */ + priv->b0_len = 64; + priv->b0_va = dma_alloc_coherent(&priv->pdev->dev, priv->b0_len, + &priv->b0_dma, GFP_KERNEL); + if (!priv->b0_va) + goto err_free_db; + + priv->tx_level = TN40_MAX_TX_LEVEL; + priv->tx_update_mark = priv->tx_level - 1024; + return 0; +err_free_db: + tn40_tx_db_close(&priv->txdb); +err_free_txf: + tn40_fifo_free(priv, &priv->txf_fifo0.m); +err_free_txd: + tn40_fifo_free(priv, &priv->txd_fifo0.m); + return -ENOMEM; +} + +/** + * tn40_tx_space - Calculate the available space in the TX fifo. 
+ * @priv: NIC private structure + * + * Return: available space in TX fifo in bytes + */ +static int tn40_tx_space(struct tn40_priv *priv) +{ + struct tn40_txd_fifo *f = &priv->txd_fifo0; + int fsize; + + f->m.rptr = tn40_read_reg(priv, f->m.reg_rptr) & TN40_TXF_WPTR_WR_PTR; + fsize = f->m.rptr - f->m.wptr; + if (fsize <= 0) + fsize = f->m.memsz + fsize; + return fsize; +} + +#define TN40_TXD_FULL_CHECKSUM 7 + +static netdev_tx_t tn40_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct tn40_priv *priv = netdev_priv(ndev); + struct tn40_txd_fifo *f = &priv->txd_fifo0; + int txd_checksum = TN40_TXD_FULL_CHECKSUM; + struct tn40_txd_desc *txdd; + int nr_frags, len, err; + unsigned int pkt_len; + int txd_vlan_id = 0; + int txd_lgsnd = 0; + int txd_vtag = 0; + int txd_mss = 0; + + /* Build tx descriptor */ + txdd = (struct tn40_txd_desc *)(f->m.va + f->m.wptr); + err = tn40_tx_map_skb(priv, skb, txdd, &pkt_len); + if (err) { + u64_stats_update_begin(&priv->syncp); + priv->stats.tx_dropped++; + u64_stats_update_end(&priv->syncp); + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + nr_frags = skb_shinfo(skb)->nr_frags; + if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) + txd_checksum = 0; + + if (skb_shinfo(skb)->gso_size) { + txd_mss = skb_shinfo(skb)->gso_size; + txd_lgsnd = 1; + netdev_dbg(priv->ndev, "skb %p pkt len %d gso size = %d\n", skb, + pkt_len, txd_mss); + } + if (skb_vlan_tag_present(skb)) { + /* Don't cut VLAN ID to 12 bits */ + txd_vlan_id = skb_vlan_tag_get(skb); + txd_vtag = 1; + } + txdd->va_hi = 0; + txdd->va_lo = 0; + txdd->length = cpu_to_le16(pkt_len); + txdd->mss = cpu_to_le16(txd_mss); + txdd->txd_val1 = + cpu_to_le32(TN40_TXD_W1_VAL + (tn40_txd_sizes[nr_frags].qwords, txd_checksum, + txd_vtag, txd_lgsnd, txd_vlan_id)); + netdev_dbg(priv->ndev, "=== w1 qwords[%d] %d =====\n", nr_frags, + tn40_txd_sizes[nr_frags].qwords); + netdev_dbg(priv->ndev, "=== TxD desc =====================\n"); + netdev_dbg(priv->ndev, "=== w1: 0x%x ================\n", + txdd->txd_val1); + netdev_dbg(priv->ndev, "=== w2: mss 0x%x len 0x%x\n", txdd->mss, + txdd->length); + /* SHORT_PKT_FIX */ + if (pkt_len < TN40_SHORT_PACKET_SIZE) { + struct tn40_pbl *pbl = &txdd->pbl[++nr_frags]; + + txdd->length = cpu_to_le16(TN40_SHORT_PACKET_SIZE); + txdd->txd_val1 = + cpu_to_le32(TN40_TXD_W1_VAL + (tn40_txd_sizes[nr_frags].qwords, + txd_checksum, txd_vtag, txd_lgsnd, + txd_vlan_id)); + pbl->len = cpu_to_le32(TN40_SHORT_PACKET_SIZE - pkt_len); + pbl->pa_lo = cpu_to_le32(lower_32_bits(priv->b0_dma)); + pbl->pa_hi = cpu_to_le32(upper_32_bits(priv->b0_dma)); + netdev_dbg(priv->ndev, "=== SHORT_PKT_FIX ==============\n"); + netdev_dbg(priv->ndev, "=== nr_frags : %d ==============\n", + nr_frags); + } + + /* Increment TXD write pointer. In case of fifo wrapping copy + * reminder of the descriptor to the beginning. + */ + f->m.wptr += tn40_txd_sizes[nr_frags].bytes; + len = f->m.wptr - f->m.memsz; + if (unlikely(len >= 0)) { + f->m.wptr = len; + if (len > 0) + memcpy(f->m.va, f->m.va + f->m.memsz, len); + } + /* Force memory writes to complete before letting the HW know + * there are new descriptors to fetch. 
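+ *
+ * Doorbell policy, with the defaults from tn40_create_tx_ring() as a
+ * worked example: tx_level starts at TN40_MAX_TX_LEVEL (the TXD fifo
+ * size minus 16, i.e. 32752 for the 32 KiB fifo set up in tn40_probe())
+ * and tx_update_mark sits 1024 below that. While tx_level is above the
+ * mark, WPTR is written for every packet; once it falls to the mark or
+ * below, the write is deferred and only flushed after on the order of
+ * TN40_NO_UPD_PACKETS (40) deferred packets, or when tn40_tx_cleanup()
+ * next runs.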
+ */ + wmb(); + + priv->tx_level -= tn40_txd_sizes[nr_frags].bytes; + if (priv->tx_level > priv->tx_update_mark) { + tn40_write_reg(priv, f->m.reg_wptr, + f->m.wptr & TN40_TXF_WPTR_WR_PTR); + } else { + if (priv->tx_noupd++ > TN40_NO_UPD_PACKETS) { + priv->tx_noupd = 0; + tn40_write_reg(priv, f->m.reg_wptr, + f->m.wptr & TN40_TXF_WPTR_WR_PTR); + } + } + + u64_stats_update_begin(&priv->syncp); + priv->stats.tx_packets++; + priv->stats.tx_bytes += pkt_len; + u64_stats_update_end(&priv->syncp); + if (priv->tx_level < TN40_MIN_TX_LEVEL) { + netdev_dbg(priv->ndev, "TX Q STOP level %d\n", priv->tx_level); + netif_stop_queue(ndev); + } + + return NETDEV_TX_OK; +} + +static void tn40_tx_cleanup(struct tn40_priv *priv) +{ + struct tn40_txf_fifo *f = &priv->txf_fifo0; + struct tn40_txdb *db = &priv->txdb; + int tx_level = 0; + + f->m.wptr = tn40_read_reg(priv, f->m.reg_wptr) & TN40_TXF_WPTR_MASK; + + netif_tx_lock(priv->ndev); + while (f->m.wptr != f->m.rptr) { + f->m.rptr += TN40_TXF_DESC_SZ; + f->m.rptr &= f->m.size_mask; + /* Unmap all fragments */ + /* First has to come tx_maps containing DMA */ + do { + dma_addr_t addr = db->rptr->addr.dma; + size_t size = db->rptr->len; + + netif_tx_unlock(priv->ndev); + dma_unmap_page(&priv->pdev->dev, addr, + size, DMA_TO_DEVICE); + netif_tx_lock(priv->ndev); + tn40_tx_db_inc_rptr(db); + } while (db->rptr->len > 0); + tx_level -= db->rptr->len; /* '-' Because the len is negative */ + + /* Now should come skb pointer - free it */ + dev_kfree_skb_any(db->rptr->addr.skb); + netdev_dbg(priv->ndev, "dev_kfree_skb_any %p %d\n", + db->rptr->addr.skb, -db->rptr->len); + tn40_tx_db_inc_rptr(db); + } + + /* Let the HW know which TXF descriptors were cleaned */ + tn40_write_reg(priv, f->m.reg_rptr, f->m.rptr & TN40_TXF_WPTR_WR_PTR); + + /* We reclaimed resources, so in case the Q is stopped by xmit + * callback, we resume the transmission and use tx_lock to + * synchronize with xmit. + */ + priv->tx_level += tx_level; + if (priv->tx_noupd) { + priv->tx_noupd = 0; + tn40_write_reg(priv, priv->txd_fifo0.m.reg_wptr, + priv->txd_fifo0.m.wptr & TN40_TXF_WPTR_WR_PTR); + } + if (unlikely(netif_queue_stopped(priv->ndev) && + netif_carrier_ok(priv->ndev) && + (priv->tx_level >= TN40_MAX_TX_LEVEL / 2))) { + netdev_dbg(priv->ndev, "TX Q WAKE level %d\n", priv->tx_level); + netif_wake_queue(priv->ndev); + } + netif_tx_unlock(priv->ndev); +} + +static void tn40_tx_free_skbs(struct tn40_priv *priv) +{ + struct tn40_txdb *db = &priv->txdb; + + while (db->rptr != db->wptr) { + if (likely(db->rptr->len)) + dma_unmap_page(&priv->pdev->dev, db->rptr->addr.dma, + db->rptr->len, DMA_TO_DEVICE); + else + dev_kfree_skb(db->rptr->addr.skb); + tn40_tx_db_inc_rptr(db); + } +} + +static void tn40_destroy_tx_ring(struct tn40_priv *priv) +{ + tn40_tx_free_skbs(priv); + tn40_fifo_free(priv, &priv->txd_fifo0.m); + tn40_fifo_free(priv, &priv->txf_fifo0.m); + tn40_tx_db_close(&priv->txdb); + /* SHORT_PKT_FIX */ + if (priv->b0_len) { + dma_free_coherent(&priv->pdev->dev, priv->b0_len, priv->b0_va, + priv->b0_dma); + priv->b0_len = 0; + } +} + +/** + * tn40_tx_push_desc - Push a descriptor to TxD fifo. + * + * @priv: NIC private structure + * @data: desc's data + * @size: desc's size + * + * This function pushes desc to TxD fifo and overlaps it if needed. + * + * This function does not check for available space, nor does it check + * that the data size is smaller than the fifo size. Checking for + * space is the responsibility of the caller. 
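+ *
+ * The only caller is tn40_tx_push_desc_safe(), which chunks its input
+ * to the space reported by tn40_tx_space(); tn40_fw_load() relies on
+ * that pair to stream the firmware image through the TxD fifo at probe
+ * time, before the netdev is up.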
+ */ +static void tn40_tx_push_desc(struct tn40_priv *priv, void *data, int size) +{ + struct tn40_txd_fifo *f = &priv->txd_fifo0; + int i = f->m.memsz - f->m.wptr; + + if (size == 0) + return; + + if (i > size) { + memcpy(f->m.va + f->m.wptr, data, size); + f->m.wptr += size; + } else { + memcpy(f->m.va + f->m.wptr, data, i); + f->m.wptr = size - i; + memcpy(f->m.va, data + i, f->m.wptr); + } + tn40_write_reg(priv, f->m.reg_wptr, f->m.wptr & TN40_TXF_WPTR_WR_PTR); +} + +/** + * tn40_tx_push_desc_safe - push descriptor to TxD fifo in a safe way. + * + * @priv: NIC private structure + * @data: descriptor data + * @size: descriptor size + * + * This function does check for available space and, if necessary, + * waits for the NIC to read existing data before writing new data. + */ +static void tn40_tx_push_desc_safe(struct tn40_priv *priv, void *data, int size) +{ + int timer = 0; + + while (size > 0) { + /* We subtract 8 because when the fifo is full rptr == + * wptr, which also means that fifo is empty, we can + * understand the difference, but could the HW do the + * same ??? + */ + int avail = tn40_tx_space(priv) - 8; + + if (avail <= 0) { + if (timer++ > 300) /* Prevent endless loop */ + break; + /* Give the HW a chance to clean the fifo */ + usleep_range(50, 60); + continue; + } + avail = min(avail, size); + netdev_dbg(priv->ndev, + "about to push %d bytes starting %p size %d\n", + avail, data, size); + tn40_tx_push_desc(priv, data, avail); + size -= avail; + data += avail; + } +} + +int tn40_set_link_speed(struct tn40_priv *priv, u32 speed) +{ + u32 val; + int i; + + netdev_dbg(priv->ndev, "speed %d\n", speed); + switch (speed) { + case SPEED_10000: + case SPEED_5000: + case SPEED_2500: + netdev_dbg(priv->ndev, "link_speed %d\n", speed); + + tn40_write_reg(priv, 0x1010, 0x217); /*ETHSD.REFCLK_CONF */ + tn40_write_reg(priv, 0x104c, 0x4c); /*ETHSD.L0_RX_PCNT */ + tn40_write_reg(priv, 0x1050, 0x4c); /*ETHSD.L1_RX_PCNT */ + tn40_write_reg(priv, 0x1054, 0x4c); /*ETHSD.L2_RX_PCNT */ + tn40_write_reg(priv, 0x1058, 0x4c); /*ETHSD.L3_RX_PCNT */ + tn40_write_reg(priv, 0x102c, 0x434); /*ETHSD.L0_TX_PCNT */ + tn40_write_reg(priv, 0x1030, 0x434); /*ETHSD.L1_TX_PCNT */ + tn40_write_reg(priv, 0x1034, 0x434); /*ETHSD.L2_TX_PCNT */ + tn40_write_reg(priv, 0x1038, 0x434); /*ETHSD.L3_TX_PCNT */ + tn40_write_reg(priv, 0x6300, 0x0400); /*MAC.PCS_CTRL */ + + tn40_write_reg(priv, 0x1018, 0x00); /*Mike2 */ + udelay(5); + tn40_write_reg(priv, 0x1018, 0x04); /*Mike2 */ + udelay(5); + tn40_write_reg(priv, 0x1018, 0x06); /*Mike2 */ + udelay(5); + /*MikeFix1 */ + /*L0: 0x103c , L1: 0x1040 , L2: 0x1044 , L3: 0x1048 =0x81644 */ + tn40_write_reg(priv, 0x103c, 0x81644); /*ETHSD.L0_TX_DCNT */ + tn40_write_reg(priv, 0x1040, 0x81644); /*ETHSD.L1_TX_DCNT */ + tn40_write_reg(priv, 0x1044, 0x81644); /*ETHSD.L2_TX_DCNT */ + tn40_write_reg(priv, 0x1048, 0x81644); /*ETHSD.L3_TX_DCNT */ + tn40_write_reg(priv, 0x1014, 0x043); /*ETHSD.INIT_STAT */ + for (i = 1000; i; i--) { + usleep_range(50, 60); + /*ETHSD.INIT_STAT */ + val = tn40_read_reg(priv, 0x1014); + if (val & (1 << 9)) { + /*ETHSD.INIT_STAT */ + tn40_write_reg(priv, 0x1014, 0x3); + /*ETHSD.INIT_STAT */ + val = tn40_read_reg(priv, 0x1014); + + break; + } + } + if (!i) + netdev_err(priv->ndev, "MAC init timeout!\n"); + + tn40_write_reg(priv, 0x6350, 0x0); /*MAC.PCS_IF_MODE */ + tn40_write_reg(priv, TN40_REG_CTRLST, 0xC13); /*0x93//0x13 */ + tn40_write_reg(priv, 0x111c, 0x7ff); /*MAC.MAC_RST_CNT */ + usleep_range(2000, 2100); + + tn40_write_reg(priv, 0x111c, 0x0); 
/*MAC.MAC_RST_CNT */ + break; + + case SPEED_1000: + case SPEED_100: + tn40_write_reg(priv, 0x1010, 0x613); /*ETHSD.REFCLK_CONF */ + tn40_write_reg(priv, 0x104c, 0x4d); /*ETHSD.L0_RX_PCNT */ + tn40_write_reg(priv, 0x1050, 0x0); /*ETHSD.L1_RX_PCNT */ + tn40_write_reg(priv, 0x1054, 0x0); /*ETHSD.L2_RX_PCNT */ + tn40_write_reg(priv, 0x1058, 0x0); /*ETHSD.L3_RX_PCNT */ + tn40_write_reg(priv, 0x102c, 0x35); /*ETHSD.L0_TX_PCNT */ + tn40_write_reg(priv, 0x1030, 0x0); /*ETHSD.L1_TX_PCNT */ + tn40_write_reg(priv, 0x1034, 0x0); /*ETHSD.L2_TX_PCNT */ + tn40_write_reg(priv, 0x1038, 0x0); /*ETHSD.L3_TX_PCNT */ + tn40_write_reg(priv, 0x6300, 0x01140); /*MAC.PCS_CTRL */ + + tn40_write_reg(priv, 0x1014, 0x043); /*ETHSD.INIT_STAT */ + for (i = 1000; i; i--) { + usleep_range(50, 60); + val = tn40_read_reg(priv, 0x1014); /*ETHSD.INIT_STAT */ + if (val & (1 << 9)) { + /*ETHSD.INIT_STAT */ + tn40_write_reg(priv, 0x1014, 0x3); + /*ETHSD.INIT_STAT */ + val = tn40_read_reg(priv, 0x1014); + + break; + } + } + if (!i) + netdev_err(priv->ndev, "MAC init timeout!\n"); + + tn40_write_reg(priv, 0x6350, 0x2b); /*MAC.PCS_IF_MODE 1g */ + tn40_write_reg(priv, 0x6310, 0x9801); /*MAC.PCS_DEV_AB */ + + tn40_write_reg(priv, 0x6314, 0x1); /*MAC.PCS_PART_AB */ + tn40_write_reg(priv, 0x6348, 0xc8); /*MAC.PCS_LINK_LO */ + tn40_write_reg(priv, 0x634c, 0xc8); /*MAC.PCS_LINK_HI */ + usleep_range(50, 60); + tn40_write_reg(priv, TN40_REG_CTRLST, 0xC13); /*0x93//0x13 */ + tn40_write_reg(priv, 0x111c, 0x7ff); /*MAC.MAC_RST_CNT */ + usleep_range(2000, 2100); + + tn40_write_reg(priv, 0x111c, 0x0); /*MAC.MAC_RST_CNT */ + tn40_write_reg(priv, 0x6300, 0x1140); /*MAC.PCS_CTRL */ + break; + + case 0: /* Link down */ + tn40_write_reg(priv, 0x104c, 0x0); /*ETHSD.L0_RX_PCNT */ + tn40_write_reg(priv, 0x1050, 0x0); /*ETHSD.L1_RX_PCNT */ + tn40_write_reg(priv, 0x1054, 0x0); /*ETHSD.L2_RX_PCNT */ + tn40_write_reg(priv, 0x1058, 0x0); /*ETHSD.L3_RX_PCNT */ + tn40_write_reg(priv, 0x102c, 0x0); /*ETHSD.L0_TX_PCNT */ + tn40_write_reg(priv, 0x1030, 0x0); /*ETHSD.L1_TX_PCNT */ + tn40_write_reg(priv, 0x1034, 0x0); /*ETHSD.L2_TX_PCNT */ + tn40_write_reg(priv, 0x1038, 0x0); /*ETHSD.L3_TX_PCNT */ + + tn40_write_reg(priv, TN40_REG_CTRLST, 0x800); + tn40_write_reg(priv, 0x111c, 0x7ff); /*MAC.MAC_RST_CNT */ + usleep_range(2000, 2100); + + tn40_write_reg(priv, 0x111c, 0x0); /*MAC.MAC_RST_CNT */ + break; + + default: + netdev_err(priv->ndev, + "Link speed was not identified yet (%d)\n", speed); + speed = 0; + break; + } + return speed; +} + +static void tn40_link_changed(struct tn40_priv *priv) +{ + u32 link = tn40_read_reg(priv, + TN40_REG_MAC_LNK_STAT) & TN40_MAC_LINK_STAT; + + netdev_dbg(priv->ndev, "link changed %u\n", link); +} + +static void tn40_isr_extra(struct tn40_priv *priv, u32 isr) +{ + if (isr & (TN40_IR_LNKCHG0 | TN40_IR_LNKCHG1 | TN40_IR_TMR0)) { + netdev_dbg(priv->ndev, "isr = 0x%x\n", isr); + tn40_link_changed(priv); + } +} + +static irqreturn_t tn40_isr_napi(int irq, void *dev) +{ + struct tn40_priv *priv = netdev_priv((struct net_device *)dev); + u32 isr; + + isr = tn40_read_reg(priv, TN40_REG_ISR_MSK0); + + if (unlikely(!isr)) { + tn40_enable_interrupts(priv); + return IRQ_NONE; /* Not our interrupt */ + } + + if (isr & TN40_IR_EXTRA) + tn40_isr_extra(priv, isr); + + if (isr & (TN40_IR_RX_DESC_0 | TN40_IR_TX_FREE_0 | TN40_IR_TMR1)) { + if (likely(napi_schedule_prep(&priv->napi))) { + __napi_schedule(&priv->napi); + return IRQ_HANDLED; + } + /* We get here if an interrupt has slept into the + * small time window between these lines in + * 
tn40_poll: tn40_enable_interrupts(priv); return 0; + * + * Currently interrupts are disabled (since we read + * the ISR register) and we have failed to register + * the next poll. So we read the regs to trigger the + * chip and allow further interrupts. + */ + tn40_read_reg(priv, TN40_REG_TXF_WPTR_0); + tn40_read_reg(priv, TN40_REG_RXD_WPTR_0); + } + + tn40_enable_interrupts(priv); + return IRQ_HANDLED; +} + +static int tn40_poll(struct napi_struct *napi, int budget) +{ + struct tn40_priv *priv = container_of(napi, struct tn40_priv, napi); + int work_done; + + tn40_tx_cleanup(priv); + + if (!budget) + return 0; + + work_done = tn40_rx_receive(priv, budget); + if (work_done == budget) + return budget; + + if (napi_complete_done(napi, work_done)) + tn40_enable_interrupts(priv); + return work_done; +} + +static int tn40_fw_load(struct tn40_priv *priv) +{ + const struct firmware *fw = NULL; + int master, ret; + u32 val; + + ret = request_firmware(&fw, TN40_FIRMWARE_NAME, &priv->pdev->dev); + if (ret) + return ret; + + master = tn40_read_reg(priv, TN40_REG_INIT_SEMAPHORE); + if (!tn40_read_reg(priv, TN40_REG_INIT_STATUS) && master) { + netdev_dbg(priv->ndev, "Loading FW...\n"); + tn40_tx_push_desc_safe(priv, (void *)fw->data, fw->size); + msleep(100); + } + ret = read_poll_timeout(tn40_read_reg, val, val, 2000, 400000, false, + priv, TN40_REG_INIT_STATUS); + if (master) + tn40_write_reg(priv, TN40_REG_INIT_SEMAPHORE, 1); + + if (ret) { + netdev_err(priv->ndev, "firmware loading failed\n"); + netdev_dbg(priv->ndev, "VPC: 0x%x VIC: 0x%x STATUS: 0x%xd\n", + tn40_read_reg(priv, TN40_REG_VPC), + tn40_read_reg(priv, TN40_REG_VIC), + tn40_read_reg(priv, TN40_REG_INIT_STATUS)); + ret = -EIO; + } else { + netdev_dbg(priv->ndev, "firmware loading success\n"); + } + release_firmware(fw); + return ret; +} + +static void tn40_restore_mac(struct net_device *ndev, struct tn40_priv *priv) +{ + u32 val; + + netdev_dbg(priv->ndev, "mac0 =%x mac1 =%x mac2 =%x\n", + tn40_read_reg(priv, TN40_REG_UNC_MAC0_A), + tn40_read_reg(priv, TN40_REG_UNC_MAC1_A), + tn40_read_reg(priv, TN40_REG_UNC_MAC2_A)); + + val = (ndev->dev_addr[0] << 8) | (ndev->dev_addr[1]); + tn40_write_reg(priv, TN40_REG_UNC_MAC2_A, val); + val = (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]); + tn40_write_reg(priv, TN40_REG_UNC_MAC1_A, val); + val = (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]); + tn40_write_reg(priv, TN40_REG_UNC_MAC0_A, val); + + /* More then IP MAC address */ + tn40_write_reg(priv, TN40_REG_MAC_ADDR_0, + (ndev->dev_addr[3] << 24) | (ndev->dev_addr[2] << 16) | + (ndev->dev_addr[1] << 8) | (ndev->dev_addr[0])); + tn40_write_reg(priv, TN40_REG_MAC_ADDR_1, + (ndev->dev_addr[5] << 8) | (ndev->dev_addr[4])); + + netdev_dbg(priv->ndev, "mac0 =%x mac1 =%x mac2 =%x\n", + tn40_read_reg(priv, TN40_REG_UNC_MAC0_A), + tn40_read_reg(priv, TN40_REG_UNC_MAC1_A), + tn40_read_reg(priv, TN40_REG_UNC_MAC2_A)); +} + +static void tn40_hw_start(struct tn40_priv *priv) +{ + tn40_write_reg(priv, TN40_REG_FRM_LENGTH, 0X3FE0); + tn40_write_reg(priv, TN40_REG_GMAC_RXF_A, 0X10fd); + /*MikeFix1 */ + /*L0: 0x103c , L1: 0x1040 , L2: 0x1044 , L3: 0x1048 =0x81644 */ + tn40_write_reg(priv, 0x103c, 0x81644); /*ETHSD.L0_TX_DCNT */ + tn40_write_reg(priv, 0x1040, 0x81644); /*ETHSD.L1_TX_DCNT */ + tn40_write_reg(priv, 0x1044, 0x81644); /*ETHSD.L2_TX_DCNT */ + tn40_write_reg(priv, 0x1048, 0x81644); /*ETHSD.L3_TX_DCNT */ + tn40_write_reg(priv, TN40_REG_RX_FIFO_SECTION, 0x10); + tn40_write_reg(priv, TN40_REG_TX_FIFO_SECTION, 0xE00010); + tn40_write_reg(priv, 
TN40_REG_RX_FULLNESS, 0); + tn40_write_reg(priv, TN40_REG_TX_FULLNESS, 0); + + tn40_write_reg(priv, TN40_REG_VGLB, 0); + tn40_write_reg(priv, TN40_REG_MAX_FRAME_A, + priv->rxf_fifo0.m.pktsz & TN40_MAX_FRAME_AB_VAL); + tn40_write_reg(priv, TN40_REG_RDINTCM0, priv->rdintcm); + tn40_write_reg(priv, TN40_REG_RDINTCM2, 0); + + /* old val = 0x300064 */ + tn40_write_reg(priv, TN40_REG_TDINTCM0, priv->tdintcm); + + /* Enable timer interrupt once in 2 secs. */ + tn40_restore_mac(priv->ndev, priv); + + /* Pause frame */ + tn40_write_reg(priv, 0x12E0, 0x28); + tn40_write_reg(priv, TN40_REG_PAUSE_QUANT, 0xFFFF); + tn40_write_reg(priv, 0x6064, 0xF); + + tn40_write_reg(priv, TN40_REG_GMAC_RXF_A, + TN40_GMAC_RX_FILTER_OSEN | TN40_GMAC_RX_FILTER_TXFC | + TN40_GMAC_RX_FILTER_AM | TN40_GMAC_RX_FILTER_AB); + + tn40_enable_interrupts(priv); +} + +static int tn40_hw_reset(struct tn40_priv *priv) +{ + u32 val; + + /* Reset sequences: read, write 1, read, write 0 */ + val = tn40_read_reg(priv, TN40_REG_CLKPLL); + tn40_write_reg(priv, TN40_REG_CLKPLL, (val | TN40_CLKPLL_SFTRST) + 0x8); + usleep_range(50, 60); + val = tn40_read_reg(priv, TN40_REG_CLKPLL); + tn40_write_reg(priv, TN40_REG_CLKPLL, val & ~TN40_CLKPLL_SFTRST); + + /* Check that the PLLs are locked and reset ended */ + val = read_poll_timeout(tn40_read_reg, val, + (val & TN40_CLKPLL_LKD) == TN40_CLKPLL_LKD, + 10000, 700000, false, priv, TN40_REG_CLKPLL); + if (val) + return -EIO; + + usleep_range(50, 60); + /* Do any PCI-E read transaction */ + tn40_read_reg(priv, TN40_REG_RXD_CFG0_0); + return 0; +} + +static void tn40_sw_reset(struct tn40_priv *priv) +{ + int i, ret; + u32 val; + + /* 1. load MAC (obsolete) */ + /* 2. disable Rx (and Tx) */ + tn40_write_reg(priv, TN40_REG_GMAC_RXF_A, 0); + msleep(100); + /* 3. Disable port */ + tn40_write_reg(priv, TN40_REG_DIS_PORT, 1); + /* 4. Disable queue */ + tn40_write_reg(priv, TN40_REG_DIS_QU, 1); + /* 5. Wait until hw is disabled */ + ret = read_poll_timeout(tn40_read_reg, val, val & 1, 10000, 500000, + false, priv, TN40_REG_RST_PORT); + if (ret) + netdev_err(priv->ndev, "SW reset timeout. continuing anyway\n"); + + /* 6. Disable interrupts */ + tn40_write_reg(priv, TN40_REG_RDINTCM0, 0); + tn40_write_reg(priv, TN40_REG_TDINTCM0, 0); + tn40_write_reg(priv, TN40_REG_IMR, 0); + tn40_read_reg(priv, TN40_REG_ISR); + + /* 7. Reset queue */ + tn40_write_reg(priv, TN40_REG_RST_QU, 1); + /* 8. Reset port */ + tn40_write_reg(priv, TN40_REG_RST_PORT, 1); + /* 9. Zero all read and write pointers */ + for (i = TN40_REG_TXD_WPTR_0; i <= TN40_REG_TXF_RPTR_3; i += 0x10) + tn40_write_reg(priv, i, 0); + /* 10. Unset port disable */ + tn40_write_reg(priv, TN40_REG_DIS_PORT, 0); + /* 11. Unset queue disable */ + tn40_write_reg(priv, TN40_REG_DIS_QU, 0); + /* 12. Unset queue reset */ + tn40_write_reg(priv, TN40_REG_RST_QU, 0); + /* 13. Unset port reset */ + tn40_write_reg(priv, TN40_REG_RST_PORT, 0); + /* 14. Enable Rx */ + /* Skipped. 
will be done later */ +} + +static int tn40_start(struct tn40_priv *priv) +{ + int ret; + + ret = tn40_create_tx_ring(priv); + if (ret) { + netdev_err(priv->ndev, "failed to tx init %d\n", ret); + return ret; + } + + ret = tn40_create_rx_ring(priv); + if (ret) { + netdev_err(priv->ndev, "failed to rx init %d\n", ret); + goto err_tx_ring; + } + + tn40_rx_alloc_buffers(priv); + if (tn40_rxdb_available(priv->rxdb0) != 1) { + ret = -ENOMEM; + netdev_err(priv->ndev, "failed to allocate rx buffers\n"); + goto err_rx_ring; + } + + ret = request_irq(priv->pdev->irq, &tn40_isr_napi, IRQF_SHARED, + priv->ndev->name, priv->ndev); + if (ret) { + netdev_err(priv->ndev, "failed to request irq %d\n", ret); + goto err_rx_ring; + } + + tn40_hw_start(priv); + return 0; +err_rx_ring: + tn40_destroy_rx_ring(priv); +err_tx_ring: + tn40_destroy_tx_ring(priv); + return ret; +} + +static void tn40_stop(struct tn40_priv *priv) +{ + tn40_disable_interrupts(priv); + free_irq(priv->pdev->irq, priv->ndev); + tn40_sw_reset(priv); + tn40_destroy_tx_ring(priv); + tn40_destroy_rx_ring(priv); +} + +static int tn40_close(struct net_device *ndev) +{ + struct tn40_priv *priv = netdev_priv(ndev); + + phylink_stop(priv->phylink); + phylink_disconnect_phy(priv->phylink); + + napi_disable(&priv->napi); + netif_napi_del(&priv->napi); + tn40_stop(priv); + return 0; +} + +static int tn40_open(struct net_device *dev) +{ + struct tn40_priv *priv = netdev_priv(dev); + int ret; + + ret = phylink_connect_phy(priv->phylink, priv->phydev); + if (ret) { + netdev_err(dev, "failed to connect to phy %d\n", ret); + return ret; + } + tn40_sw_reset(priv); + ret = tn40_start(priv); + if (ret) { + phylink_disconnect_phy(priv->phylink); + netdev_err(dev, "failed to start %d\n", ret); + return ret; + } + napi_enable(&priv->napi); + phylink_start(priv->phylink); + netif_start_queue(priv->ndev); + return 0; +} + +static void __tn40_vlan_rx_vid(struct net_device *ndev, uint16_t vid, + int enable) +{ + struct tn40_priv *priv = netdev_priv(ndev); + u32 reg, bit, val; + + netdev_dbg(priv->ndev, "vid =%d value =%d\n", (int)vid, enable); + reg = TN40_REG_VLAN_0 + (vid / 32) * 4; + bit = 1 << vid % 32; + val = tn40_read_reg(priv, reg); + netdev_dbg(priv->ndev, "reg =%x, val =%x, bit =%d\n", reg, val, bit); + if (enable) + val |= bit; + else + val &= ~bit; + netdev_dbg(priv->ndev, "new val %x\n", val); + tn40_write_reg(priv, reg, val); +} + +static int tn40_vlan_rx_add_vid(struct net_device *ndev, + __always_unused __be16 proto, u16 vid) +{ + __tn40_vlan_rx_vid(ndev, vid, 1); + return 0; +} + +static int tn40_vlan_rx_kill_vid(struct net_device *ndev, + __always_unused __be16 proto, u16 vid) +{ + __tn40_vlan_rx_vid(ndev, vid, 0); + return 0; +} + +static void tn40_setmulti(struct net_device *ndev) +{ + u32 rxf_val = TN40_GMAC_RX_FILTER_AM | TN40_GMAC_RX_FILTER_AB | + TN40_GMAC_RX_FILTER_OSEN | TN40_GMAC_RX_FILTER_TXFC; + struct tn40_priv *priv = netdev_priv(ndev); + int i; + + /* IMF - imperfect (hash) rx multicast filter */ + /* PMF - perfect rx multicast filter */ + + /* FIXME: RXE(OFF) */ + if (ndev->flags & IFF_PROMISC) { + rxf_val |= TN40_GMAC_RX_FILTER_PRM; + } else if (ndev->flags & IFF_ALLMULTI) { + /* set IMF to accept all multicast frames */ + for (i = 0; i < TN40_MAC_MCST_HASH_NUM; i++) + tn40_write_reg(priv, + TN40_REG_RX_MCST_HASH0 + i * 4, ~0); + } else if (netdev_mc_count(ndev)) { + struct netdev_hw_addr *mclist; + u32 reg, val; + u8 hash; + + /* Set IMF to deny all multicast frames */ + for (i = 0; i < TN40_MAC_MCST_HASH_NUM; i++) + 
tn40_write_reg(priv, + TN40_REG_RX_MCST_HASH0 + i * 4, 0); + + /* Set PMF to deny all multicast frames */ + for (i = 0; i < TN40_MAC_MCST_NUM; i++) { + tn40_write_reg(priv, + TN40_REG_RX_MAC_MCST0 + i * 8, 0); + tn40_write_reg(priv, + TN40_REG_RX_MAC_MCST1 + i * 8, 0); + } + /* Use PMF to accept first MAC_MCST_NUM (15) addresses */ + + /* TBD: Sort the addresses and write them in ascending + * order into RX_MAC_MCST regs. we skip this phase now + * and accept ALL multicast frames through IMF. Accept + * the rest of addresses throw IMF. + */ + netdev_for_each_mc_addr(mclist, ndev) { + hash = 0; + for (i = 0; i < ETH_ALEN; i++) + hash ^= mclist->addr[i]; + + reg = TN40_REG_RX_MCST_HASH0 + ((hash >> 5) << 2); + val = tn40_read_reg(priv, reg); + val |= (1 << (hash % 32)); + tn40_write_reg(priv, reg, val); + } + } else { + rxf_val |= TN40_GMAC_RX_FILTER_AB; + } + tn40_write_reg(priv, TN40_REG_GMAC_RXF_A, rxf_val); + /* Enable RX */ + /* FIXME: RXE(ON) */ +} + +static int tn40_set_mac(struct net_device *ndev, void *p) +{ + struct tn40_priv *priv = netdev_priv(ndev); + struct sockaddr *addr = p; + + eth_hw_addr_set(ndev, addr->sa_data); + tn40_restore_mac(ndev, priv); + return 0; +} + +static void tn40_mac_init(struct tn40_priv *priv) +{ + u8 addr[ETH_ALEN]; + u64 val; + + val = (u64)tn40_read_reg(priv, TN40_REG_UNC_MAC0_A); + val |= (u64)tn40_read_reg(priv, TN40_REG_UNC_MAC1_A) << 16; + val |= (u64)tn40_read_reg(priv, TN40_REG_UNC_MAC2_A) << 32; + + u64_to_ether_addr(val, addr); + eth_hw_addr_set(priv->ndev, addr); +} + +static void tn40_get_stats(struct net_device *ndev, + struct rtnl_link_stats64 *stats) +{ + struct tn40_priv *priv = netdev_priv(ndev); + unsigned int start; + + do { + start = u64_stats_fetch_begin(&priv->syncp); + stats->tx_packets = priv->stats.tx_packets; + stats->tx_bytes = priv->stats.tx_bytes; + stats->tx_dropped = priv->stats.tx_dropped; + + stats->rx_packets = priv->stats.rx_packets; + stats->rx_bytes = priv->stats.rx_bytes; + stats->rx_dropped = priv->stats.rx_dropped; + stats->rx_errors = priv->stats.rx_errors; + } while (u64_stats_fetch_retry(&priv->syncp, start)); +} + +static const struct net_device_ops tn40_netdev_ops = { + .ndo_open = tn40_open, + .ndo_stop = tn40_close, + .ndo_start_xmit = tn40_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = tn40_setmulti, + .ndo_get_stats64 = tn40_get_stats, + .ndo_set_mac_address = tn40_set_mac, + .ndo_vlan_rx_add_vid = tn40_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = tn40_vlan_rx_kill_vid, +}; + +static int tn40_ethtool_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) +{ + struct tn40_priv *priv = netdev_priv(ndev); + + return phylink_ethtool_ksettings_get(priv->phylink, cmd); +} + +static const struct ethtool_ops tn40_ethtool_ops = { + .get_link = ethtool_op_get_link, + .get_link_ksettings = tn40_ethtool_get_link_ksettings, +}; + +static void tn40_get_queue_stats_rx(struct net_device *ndev, int idx, + struct netdev_queue_stats_rx *stats) +{ + struct tn40_priv *priv = netdev_priv(ndev); + unsigned int start; + + do { + start = u64_stats_fetch_begin(&priv->syncp); + + stats->packets = priv->stats.rx_packets; + stats->bytes = priv->stats.rx_bytes; + stats->alloc_fail = priv->alloc_fail; + } while (u64_stats_fetch_retry(&priv->syncp, start)); +} + +static void tn40_get_queue_stats_tx(struct net_device *ndev, int idx, + struct netdev_queue_stats_tx *stats) +{ + struct tn40_priv *priv = netdev_priv(ndev); + unsigned int start; + + do { + start = 
u64_stats_fetch_begin(&priv->syncp); + + stats->packets = priv->stats.tx_packets; + stats->bytes = priv->stats.tx_bytes; + } while (u64_stats_fetch_retry(&priv->syncp, start)); +} + +static void tn40_get_base_stats(struct net_device *ndev, + struct netdev_queue_stats_rx *rx, + struct netdev_queue_stats_tx *tx) +{ + rx->packets = 0; + rx->bytes = 0; + rx->alloc_fail = 0; + + tx->packets = 0; + tx->bytes = 0; +} + +static const struct netdev_stat_ops tn40_stat_ops = { + .get_queue_stats_rx = tn40_get_queue_stats_rx, + .get_queue_stats_tx = tn40_get_queue_stats_tx, + .get_base_stats = tn40_get_base_stats, +}; + +static int tn40_priv_init(struct tn40_priv *priv) +{ + int ret; + + tn40_set_link_speed(priv, 0); + + /* Set GPIO[9:0] to output 0 */ + tn40_write_reg(priv, 0x51E0, 0x30010006); /* GPIO_OE_ WR CMD */ + tn40_write_reg(priv, 0x51F0, 0x0); /* GPIO_OE_ DATA */ + tn40_write_reg(priv, TN40_REG_MDIO_CMD_STAT, 0x3ec8); + + /* we use tx descriptors to load a firmware. */ + ret = tn40_create_tx_ring(priv); + if (ret) + return ret; + ret = tn40_fw_load(priv); + tn40_destroy_tx_ring(priv); + return ret; +} + +static struct net_device *tn40_netdev_alloc(struct pci_dev *pdev) +{ + struct net_device *ndev; + + ndev = devm_alloc_etherdev(&pdev->dev, sizeof(struct tn40_priv)); + if (!ndev) + return NULL; + ndev->netdev_ops = &tn40_netdev_ops; + ndev->ethtool_ops = &tn40_ethtool_ops; + ndev->stat_ops = &tn40_stat_ops; + ndev->tx_queue_len = TN40_NDEV_TXQ_LEN; + ndev->mem_start = pci_resource_start(pdev, 0); + ndev->mem_end = pci_resource_end(pdev, 0); + ndev->min_mtu = ETH_ZLEN; + ndev->max_mtu = TN40_MAX_MTU; + + ndev->features = NETIF_F_IP_CSUM | + NETIF_F_SG | + NETIF_F_FRAGLIST | + NETIF_F_TSO | NETIF_F_GRO | + NETIF_F_RXCSUM | + NETIF_F_RXHASH | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; + ndev->vlan_features = NETIF_F_IP_CSUM | + NETIF_F_SG | + NETIF_F_TSO | NETIF_F_GRO | NETIF_F_RXHASH; + + if (dma_get_mask(&pdev->dev) == DMA_BIT_MASK(64)) { + ndev->features |= NETIF_F_HIGHDMA; + ndev->vlan_features |= NETIF_F_HIGHDMA; + } + ndev->hw_features |= ndev->features; + + SET_NETDEV_DEV(ndev, &pdev->dev); + netif_stop_queue(ndev); + return ndev; +} + +static int tn40_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *ndev; + struct tn40_priv *priv; + unsigned int nvec = 1; + void __iomem *regs; + int ret; + + ret = pci_enable_device(pdev); + if (ret) + return ret; + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + dev_err(&pdev->dev, "failed to set DMA mask.\n"); + goto err_disable_device; + } + + ret = pci_request_regions(pdev, TN40_DRV_NAME); + if (ret) { + dev_err(&pdev->dev, "failed to request PCI regions.\n"); + goto err_disable_device; + } + + pci_set_master(pdev); + + regs = pci_iomap(pdev, 0, TN40_REGS_SIZE); + if (!regs) { + ret = -EIO; + dev_err(&pdev->dev, "failed to map PCI bar.\n"); + goto err_free_regions; + } + + ndev = tn40_netdev_alloc(pdev); + if (!ndev) { + ret = -ENOMEM; + dev_err(&pdev->dev, "failed to allocate netdev.\n"); + goto err_iounmap; + } + + priv = netdev_priv(ndev); + pci_set_drvdata(pdev, priv); + netif_napi_add(ndev, &priv->napi, tn40_poll); + + priv->regs = regs; + priv->pdev = pdev; + priv->ndev = ndev; + /* Initialize fifo sizes. */ + priv->txd_size = 3; + priv->txf_size = 3; + priv->rxd_size = 3; + priv->rxf_size = 3; + /* Initialize the initial coalescing registers. 
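+ *
+ * TN40_INT_REG_VAL() packs (coal, coal_rc, rxf_th, pck_th) into bits
+ * 14:0, 15, 19:16 and 31:20 respectively, so the RDINTCM value below,
+ * TN40_INT_REG_VAL(0x20, 1, 4, 12), evaluates to 0x00c48020.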
*/ + priv->rdintcm = TN40_INT_REG_VAL(0x20, 1, 4, 12); + priv->tdintcm = TN40_INT_REG_VAL(0x20, 1, 0, 12); + + ret = tn40_hw_reset(priv); + if (ret) { + dev_err(&pdev->dev, "failed to reset HW.\n"); + goto err_unset_drvdata; + } + + ret = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_MSI); + if (ret < 0) { + dev_err(&pdev->dev, "failed to allocate irq.\n"); + goto err_unset_drvdata; + } + + ret = tn40_mdiobus_init(priv); + if (ret) { + dev_err(&pdev->dev, "failed to initialize mdio bus.\n"); + goto err_free_irq; + } + + priv->stats_flag = + ((tn40_read_reg(priv, TN40_FPGA_VER) & 0xFFF) != 308); + u64_stats_init(&priv->syncp); + + priv->isr_mask = TN40_IR_RX_FREE_0 | TN40_IR_LNKCHG0 | TN40_IR_PSE | + TN40_IR_TMR0 | TN40_IR_RX_DESC_0 | TN40_IR_TX_FREE_0 | + TN40_IR_TMR1; + + tn40_mac_init(priv); + ret = tn40_phy_register(priv); + if (ret) { + dev_err(&pdev->dev, "failed to set up PHY.\n"); + goto err_free_irq; + } + + ret = tn40_priv_init(priv); + if (ret) { + dev_err(&pdev->dev, "failed to initialize tn40_priv.\n"); + goto err_unregister_phydev; + } + + ret = register_netdev(ndev); + if (ret) { + dev_err(&pdev->dev, "failed to register netdev.\n"); + goto err_unregister_phydev; + } + return 0; +err_unregister_phydev: + tn40_phy_unregister(priv); +err_free_irq: + pci_free_irq_vectors(pdev); +err_unset_drvdata: + pci_set_drvdata(pdev, NULL); +err_iounmap: + iounmap(regs); +err_free_regions: + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); + return ret; +} + +static void tn40_remove(struct pci_dev *pdev) +{ + struct tn40_priv *priv = pci_get_drvdata(pdev); + struct net_device *ndev = priv->ndev; + + unregister_netdev(ndev); + + tn40_phy_unregister(priv); + pci_free_irq_vectors(priv->pdev); + pci_set_drvdata(pdev, NULL); + iounmap(priv->regs); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static const struct pci_device_id tn40_id_table[] = { + { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022, + PCI_VENDOR_ID_TEHUTI, 0x3015) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022, + PCI_VENDOR_ID_DLINK, 0x4d00) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022, + PCI_VENDOR_ID_ASUSTEK, 0x8709) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022, + PCI_VENDOR_ID_EDIMAX, 0x8103) }, + { } +}; + +static struct pci_driver tn40_driver = { + .name = TN40_DRV_NAME, + .id_table = tn40_id_table, + .probe = tn40_probe, + .remove = tn40_remove, +}; + +module_pci_driver(tn40_driver); + +MODULE_DEVICE_TABLE(pci, tn40_id_table); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(TN40_FIRMWARE_NAME); +MODULE_DESCRIPTION("Tehuti Network TN40xx Driver"); diff --git a/drivers/net/ethernet/tehuti/tn40.h b/drivers/net/ethernet/tehuti/tn40.h new file mode 100644 index 000000000000..490781fe5120 --- /dev/null +++ b/drivers/net/ethernet/tehuti/tn40.h @@ -0,0 +1,233 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) Tehuti Networks Ltd. */ + +#ifndef _TN40_H_ +#define _TN40_H_ + +#include "tn40_regs.h" + +#define TN40_DRV_NAME "tn40xx" + +#define TN40_MDIO_SPEED_1MHZ (1) +#define TN40_MDIO_SPEED_6MHZ (6) + +/* netdev tx queue len for Luxor. The default value is 1000. + * ifconfig eth1 txqueuelen 3000 - to change it at runtime. 
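+ * (with iproute2: ip link set dev eth1 txqueuelen 3000).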
+ */ +#define TN40_NDEV_TXQ_LEN 1000 + +#define TN40_FIFO_SIZE 4096 +#define TN40_FIFO_EXTRA_SPACE 1024 + +#define TN40_TXF_DESC_SZ 16 +#define TN40_MAX_TX_LEVEL (priv->txd_fifo0.m.memsz - 16) +#define TN40_MIN_TX_LEVEL 256 +#define TN40_NO_UPD_PACKETS 40 +#define TN40_MAX_MTU BIT(14) + +#define TN40_PCK_TH_MULT 128 +#define TN40_INT_COAL_MULT 2 + +#define TN40_INT_REG_VAL(coal, coal_rc, rxf_th, pck_th) ( \ + FIELD_PREP(GENMASK(14, 0), (coal)) | \ + FIELD_PREP(BIT(15), (coal_rc)) | \ + FIELD_PREP(GENMASK(19, 16), (rxf_th)) | \ + FIELD_PREP(GENMASK(31, 20), (pck_th)) \ + ) + +struct tn40_fifo { + dma_addr_t da; /* Physical address of fifo (used by HW) */ + char *va; /* Virtual address of fifo (used by SW) */ + u32 rptr, wptr; + /* Cached values of RPTR and WPTR registers, + * they're 32 bits on both 32 and 64 archs. + */ + u16 reg_cfg0; + u16 reg_cfg1; + u16 reg_rptr; + u16 reg_wptr; + u16 memsz; /* Memory size allocated for fifo */ + u16 size_mask; + u16 pktsz; /* Skb packet size to allocate */ + u16 rcvno; /* Number of buffers that come from this RXF */ +}; + +struct tn40_txf_fifo { + struct tn40_fifo m; /* The minimal set of variables used by all fifos */ +}; + +struct tn40_txd_fifo { + struct tn40_fifo m; /* The minimal set of variables used by all fifos */ +}; + +struct tn40_rxf_fifo { + struct tn40_fifo m; /* The minimal set of variables used by all fifos */ +}; + +struct tn40_rxd_fifo { + struct tn40_fifo m; /* The minimal set of variables used by all fifos */ +}; + +struct tn40_rx_map { + struct page *page; +}; + +struct tn40_rxdb { + unsigned int *stack; + struct tn40_rx_map *elems; + unsigned int nelem; + unsigned int top; +}; + +union tn40_tx_dma_addr { + dma_addr_t dma; + struct sk_buff *skb; +}; + +/* Entry in the db. + * if len == 0 addr is dma + * if len != 0 addr is skb + */ +struct tn40_tx_map { + union tn40_tx_dma_addr addr; + int len; +}; + +/* tx database - implemented as circular fifo buffer */ +struct tn40_txdb { + struct tn40_tx_map *start; /* Points to the first element */ + struct tn40_tx_map *end; /* Points just AFTER the last element */ + struct tn40_tx_map *rptr; /* Points to the next element to read */ + struct tn40_tx_map *wptr; /* Points to the next element to write */ + int size; /* Number of elements in the db */ +}; + +struct tn40_priv { + struct net_device *ndev; + struct pci_dev *pdev; + + struct napi_struct napi; + /* RX FIFOs: 1 for data (full) descs, and 2 for free descs */ + struct tn40_rxd_fifo rxd_fifo0; + struct tn40_rxf_fifo rxf_fifo0; + struct tn40_rxdb *rxdb0; /* Rx dbs to store skb pointers */ + struct page_pool *page_pool; + + /* Tx FIFOs: 1 for data desc, 1 for empty (acks) desc */ + struct tn40_txd_fifo txd_fifo0; + struct tn40_txf_fifo txf_fifo0; + struct tn40_txdb txdb; + int tx_level; + int tx_update_mark; + int tx_noupd; + + int stats_flag; + struct rtnl_link_stats64 stats; + u64 alloc_fail; + struct u64_stats_sync syncp; + + u8 txd_size; + u8 txf_size; + u8 rxd_size; + u8 rxf_size; + u32 rdintcm; + u32 tdintcm; + + u32 isr_mask; + + void __iomem *regs; + + /* SHORT_PKT_FIX */ + u32 b0_len; + dma_addr_t b0_dma; /* Physical address of buffer */ + char *b0_va; /* Virtual address of buffer */ + + struct mii_bus *mdio; + struct phy_device *phydev; + struct phylink *phylink; + struct phylink_config phylink_config; +}; + +/* RX FREE descriptor - 64bit */ +struct tn40_rxf_desc { + __le32 info; /* Buffer Count + Info - described below */ + __le32 va_lo; /* VAdr[31:0] */ + __le32 va_hi; /* VAdr[63:32] */ + __le32 pa_lo; /* PAdr[31:0] */ + __le32 
pa_hi; /* PAdr[63:32] */ + __le32 len; /* Buffer Length */ +}; + +#define TN40_GET_RXD_BC(x) FIELD_GET(GENMASK(4, 0), (x)) +#define TN40_GET_RXD_ERR(x) FIELD_GET(GENMASK(26, 21), (x)) +#define TN40_GET_RXD_PKT_ID(x) FIELD_GET(GENMASK(30, 28), (x)) +#define TN40_GET_RXD_VTAG(x) FIELD_GET(BIT(31), (x)) +#define TN40_GET_RXD_VLAN_TCI(x) FIELD_GET(GENMASK(15, 0), (x)) + +struct tn40_rxd_desc { + __le32 rxd_val1; + __le16 len; + __le16 rxd_vlan; + __le32 va_lo; + __le32 va_hi; + __le32 rss_lo; + __le32 rss_hash; +}; + +#define TN40_MAX_PBL (19) +/* PBL describes each virtual buffer to be transmitted from the host. */ +struct tn40_pbl { + __le32 pa_lo; + __le32 pa_hi; + __le32 len; +}; + +/* First word for TXD descriptor. It means: type = 3 for regular Tx packet, + * hw_csum = 7 for IP+UDP+TCP HW checksums. + */ +#define TN40_TXD_W1_VAL(bc, checksum, vtag, lgsnd, vlan_id) ( \ + GENMASK(17, 16) | \ + FIELD_PREP(GENMASK(4, 0), (bc)) | \ + FIELD_PREP(GENMASK(7, 5), (checksum)) | \ + FIELD_PREP(BIT(8), (vtag)) | \ + FIELD_PREP(GENMASK(12, 9), (lgsnd)) | \ + FIELD_PREP(GENMASK(15, 13), \ + FIELD_GET(GENMASK(15, 13), (vlan_id))) | \ + FIELD_PREP(GENMASK(31, 20), \ + FIELD_GET(GENMASK(11, 0), (vlan_id))) \ + ) + +struct tn40_txd_desc { + __le32 txd_val1; + __le16 mss; + __le16 length; + __le32 va_lo; + __le32 va_hi; + struct tn40_pbl pbl[]; /* Fragments */ +}; + +struct tn40_txf_desc { + u32 status; + u32 va_lo; /* VAdr[31:0] */ + u32 va_hi; /* VAdr[63:32] */ + u32 pad; +}; + +static inline u32 tn40_read_reg(struct tn40_priv *priv, u32 reg) +{ + return readl(priv->regs + reg); +} + +static inline void tn40_write_reg(struct tn40_priv *priv, u32 reg, u32 val) +{ + writel(val, priv->regs + reg); +} + +int tn40_set_link_speed(struct tn40_priv *priv, u32 speed); + +int tn40_mdiobus_init(struct tn40_priv *priv); + +int tn40_phy_register(struct tn40_priv *priv); +void tn40_phy_unregister(struct tn40_priv *priv); + +#endif /* _TN40XX_H */ diff --git a/drivers/net/ethernet/tehuti/tn40_mdio.c b/drivers/net/ethernet/tehuti/tn40_mdio.c new file mode 100644 index 000000000000..af18615d64a8 --- /dev/null +++ b/drivers/net/ethernet/tehuti/tn40_mdio.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) Tehuti Networks Ltd. 
*/ + +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/phylink.h> + +#include "tn40.h" + +#define TN40_MDIO_DEVAD_MASK GENMASK(4, 0) +#define TN40_MDIO_PRTAD_MASK GENMASK(9, 5) +#define TN40_MDIO_CMD_VAL(device, port) \ + (FIELD_PREP(TN40_MDIO_DEVAD_MASK, (device)) | \ + (FIELD_PREP(TN40_MDIO_PRTAD_MASK, (port)))) +#define TN40_MDIO_CMD_READ BIT(15) + +static void tn40_mdio_set_speed(struct tn40_priv *priv, u32 speed) +{ + void __iomem *regs = priv->regs; + int mdio_cfg; + + if (speed == TN40_MDIO_SPEED_1MHZ) + mdio_cfg = (0x7d << 7) | 0x08; /* 1MHz */ + else + mdio_cfg = 0xA08; /* 6MHz */ + mdio_cfg |= (1 << 6); + writel(mdio_cfg, regs + TN40_REG_MDIO_CMD_STAT); + msleep(100); +} + +static u32 tn40_mdio_stat(struct tn40_priv *priv) +{ + void __iomem *regs = priv->regs; + + return readl(regs + TN40_REG_MDIO_CMD_STAT); +} + +static int tn40_mdio_wait_nobusy(struct tn40_priv *priv, u32 *val) +{ + u32 stat; + int ret; + + ret = readx_poll_timeout_atomic(tn40_mdio_stat, priv, stat, + TN40_GET_MDIO_BUSY(stat) == 0, 10, + 10000); + if (val) + *val = stat; + return ret; +} + +static int tn40_mdio_read(struct tn40_priv *priv, int port, int device, + u16 regnum) +{ + void __iomem *regs = priv->regs; + u32 i; + + /* wait until MDIO is not busy */ + if (tn40_mdio_wait_nobusy(priv, NULL)) + return -EIO; + + i = TN40_MDIO_CMD_VAL(device, port); + writel(i, regs + TN40_REG_MDIO_CMD); + writel((u32)regnum, regs + TN40_REG_MDIO_ADDR); + if (tn40_mdio_wait_nobusy(priv, NULL)) + return -EIO; + + writel(TN40_MDIO_CMD_READ | i, regs + TN40_REG_MDIO_CMD); + /* read CMD_STAT until not busy */ + if (tn40_mdio_wait_nobusy(priv, NULL)) + return -EIO; + + return lower_16_bits(readl(regs + TN40_REG_MDIO_DATA)); +} + +static int tn40_mdio_write(struct tn40_priv *priv, int port, int device, + u16 regnum, u16 data) +{ + void __iomem *regs = priv->regs; + u32 tmp_reg = 0; + int ret; + + /* wait until MDIO is not busy */ + if (tn40_mdio_wait_nobusy(priv, NULL)) + return -EIO; + writel(TN40_MDIO_CMD_VAL(device, port), regs + TN40_REG_MDIO_CMD); + writel((u32)regnum, regs + TN40_REG_MDIO_ADDR); + if (tn40_mdio_wait_nobusy(priv, NULL)) + return -EIO; + writel((u32)data, regs + TN40_REG_MDIO_DATA); + /* read CMD_STAT until not busy */ + ret = tn40_mdio_wait_nobusy(priv, &tmp_reg); + if (ret) + return -EIO; + + if (TN40_GET_MDIO_RD_ERR(tmp_reg)) { + dev_err(&priv->pdev->dev, "MDIO error after write command\n"); + return -EIO; + } + return 0; +} + +static int tn40_mdio_read_c45(struct mii_bus *mii_bus, int addr, int devnum, + int regnum) +{ + return tn40_mdio_read(mii_bus->priv, addr, devnum, regnum); +} + +static int tn40_mdio_write_c45(struct mii_bus *mii_bus, int addr, int devnum, + int regnum, u16 val) +{ + return tn40_mdio_write(mii_bus->priv, addr, devnum, regnum, val); +} + +int tn40_mdiobus_init(struct tn40_priv *priv) +{ + struct pci_dev *pdev = priv->pdev; + struct mii_bus *bus; + int ret; + + bus = devm_mdiobus_alloc(&pdev->dev); + if (!bus) + return -ENOMEM; + + bus->name = TN40_DRV_NAME; + bus->parent = &pdev->dev; + snprintf(bus->id, MII_BUS_ID_SIZE, "tn40xx-%x-%x", + pci_domain_nr(pdev->bus), pci_dev_id(pdev)); + bus->priv = priv; + + bus->read_c45 = tn40_mdio_read_c45; + bus->write_c45 = tn40_mdio_write_c45; + + ret = devm_mdiobus_register(&pdev->dev, bus); + if (ret) { + dev_err(&pdev->dev, "failed to register mdiobus %d %u %u\n", + ret, bus->state, MDIOBUS_UNREGISTERED); + return ret; + } + tn40_mdio_set_speed(priv, TN40_MDIO_SPEED_6MHZ); + priv->mdio = bus; + return 0; +} diff --git 
a/drivers/net/ethernet/tehuti/tn40_phy.c b/drivers/net/ethernet/tehuti/tn40_phy.c new file mode 100644 index 000000000000..39eef7ca7958 --- /dev/null +++ b/drivers/net/ethernet/tehuti/tn40_phy.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) Tehuti Networks Ltd. */ + +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/phylink.h> + +#include "tn40.h" + +static struct tn40_priv *tn40_config_to_priv(struct phylink_config *config) +{ + return container_of(config, struct tn40_priv, phylink_config); +} + +static void tn40_link_up(struct phylink_config *config, struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, bool tx_pause, bool rx_pause) +{ + struct tn40_priv *priv = tn40_config_to_priv(config); + + tn40_set_link_speed(priv, speed); + netif_wake_queue(priv->ndev); +} + +static void tn40_link_down(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct tn40_priv *priv = tn40_config_to_priv(config); + + netif_stop_queue(priv->ndev); + tn40_set_link_speed(priv, 0); +} + +static void tn40_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ +} + +static const struct phylink_mac_ops tn40_mac_ops = { + .mac_config = tn40_mac_config, + .mac_link_up = tn40_link_up, + .mac_link_down = tn40_link_down, +}; + +int tn40_phy_register(struct tn40_priv *priv) +{ + struct phylink_config *config; + struct phy_device *phydev; + struct phylink *phylink; + + phydev = phy_find_first(priv->mdio); + if (!phydev) { + dev_err(&priv->pdev->dev, "PHY isn't found\n"); + return -ENODEV; + } + + config = &priv->phylink_config; + config->dev = &priv->ndev->dev; + config->type = PHYLINK_NETDEV; + config->mac_capabilities = MAC_10000FD; + __set_bit(PHY_INTERFACE_MODE_XAUI, config->supported_interfaces); + + phylink = phylink_create(config, NULL, PHY_INTERFACE_MODE_XAUI, + &tn40_mac_ops); + if (IS_ERR(phylink)) + return PTR_ERR(phylink); + + priv->phydev = phydev; + priv->phylink = phylink; + return 0; +} + +void tn40_phy_unregister(struct tn40_priv *priv) +{ + phylink_destroy(priv->phylink); +} diff --git a/drivers/net/ethernet/tehuti/tn40_regs.h b/drivers/net/ethernet/tehuti/tn40_regs.h new file mode 100644 index 000000000000..95171aa57a9e --- /dev/null +++ b/drivers/net/ethernet/tehuti/tn40_regs.h @@ -0,0 +1,245 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) Tehuti Networks Ltd. 
*/ + +#ifndef _TN40_REGS_H_ +#define _TN40_REGS_H_ + +/* Register region size */ +#define TN40_REGS_SIZE 0x10000 + +/* Registers from 0x0000-0x00fc were remapped to 0x4000-0x40fc */ +#define TN40_REG_TXD_CFG1_0 0x4000 +#define TN40_REG_TXD_CFG1_1 0x4004 +#define TN40_REG_TXD_CFG1_2 0x4008 +#define TN40_REG_TXD_CFG1_3 0x400C + +#define TN40_REG_RXF_CFG1_0 0x4010 +#define TN40_REG_RXF_CFG1_1 0x4014 +#define TN40_REG_RXF_CFG1_2 0x4018 +#define TN40_REG_RXF_CFG1_3 0x401C + +#define TN40_REG_RXD_CFG1_0 0x4020 +#define TN40_REG_RXD_CFG1_1 0x4024 +#define TN40_REG_RXD_CFG1_2 0x4028 +#define TN40_REG_RXD_CFG1_3 0x402C + +#define TN40_REG_TXF_CFG1_0 0x4030 +#define TN40_REG_TXF_CFG1_1 0x4034 +#define TN40_REG_TXF_CFG1_2 0x4038 +#define TN40_REG_TXF_CFG1_3 0x403C + +#define TN40_REG_TXD_CFG0_0 0x4040 +#define TN40_REG_TXD_CFG0_1 0x4044 +#define TN40_REG_TXD_CFG0_2 0x4048 +#define TN40_REG_TXD_CFG0_3 0x404C + +#define TN40_REG_RXF_CFG0_0 0x4050 +#define TN40_REG_RXF_CFG0_1 0x4054 +#define TN40_REG_RXF_CFG0_2 0x4058 +#define TN40_REG_RXF_CFG0_3 0x405C + +#define TN40_REG_RXD_CFG0_0 0x4060 +#define TN40_REG_RXD_CFG0_1 0x4064 +#define TN40_REG_RXD_CFG0_2 0x4068 +#define TN40_REG_RXD_CFG0_3 0x406C + +#define TN40_REG_TXF_CFG0_0 0x4070 +#define TN40_REG_TXF_CFG0_1 0x4074 +#define TN40_REG_TXF_CFG0_2 0x4078 +#define TN40_REG_TXF_CFG0_3 0x407C + +#define TN40_REG_TXD_WPTR_0 0x4080 +#define TN40_REG_TXD_WPTR_1 0x4084 +#define TN40_REG_TXD_WPTR_2 0x4088 +#define TN40_REG_TXD_WPTR_3 0x408C + +#define TN40_REG_RXF_WPTR_0 0x4090 +#define TN40_REG_RXF_WPTR_1 0x4094 +#define TN40_REG_RXF_WPTR_2 0x4098 +#define TN40_REG_RXF_WPTR_3 0x409C + +#define TN40_REG_RXD_WPTR_0 0x40A0 +#define TN40_REG_RXD_WPTR_1 0x40A4 +#define TN40_REG_RXD_WPTR_2 0x40A8 +#define TN40_REG_RXD_WPTR_3 0x40AC + +#define TN40_REG_TXF_WPTR_0 0x40B0 +#define TN40_REG_TXF_WPTR_1 0x40B4 +#define TN40_REG_TXF_WPTR_2 0x40B8 +#define TN40_REG_TXF_WPTR_3 0x40BC + +#define TN40_REG_TXD_RPTR_0 0x40C0 +#define TN40_REG_TXD_RPTR_1 0x40C4 +#define TN40_REG_TXD_RPTR_2 0x40C8 +#define TN40_REG_TXD_RPTR_3 0x40CC + +#define TN40_REG_RXF_RPTR_0 0x40D0 +#define TN40_REG_RXF_RPTR_1 0x40D4 +#define TN40_REG_RXF_RPTR_2 0x40D8 +#define TN40_REG_RXF_RPTR_3 0x40DC + +#define TN40_REG_RXD_RPTR_0 0x40E0 +#define TN40_REG_RXD_RPTR_1 0x40E4 +#define TN40_REG_RXD_RPTR_2 0x40E8 +#define TN40_REG_RXD_RPTR_3 0x40EC + +#define TN40_REG_TXF_RPTR_0 0x40F0 +#define TN40_REG_TXF_RPTR_1 0x40F4 +#define TN40_REG_TXF_RPTR_2 0x40F8 +#define TN40_REG_TXF_RPTR_3 0x40FC + +/* Hardware versioning */ +#define TN40_FPGA_VER 0x5030 + +/* Registers from 0x0100-0x0150 were remapped to 0x5100-0x5150 */ +#define TN40_REG_ISR TN40_REG_ISR0 +#define TN40_REG_ISR0 0x5100 + +#define TN40_REG_IMR TN40_REG_IMR0 +#define TN40_REG_IMR0 0x5110 + +#define TN40_REG_RDINTCM0 0x5120 +#define TN40_REG_RDINTCM2 0x5128 + +#define TN40_REG_TDINTCM0 0x5130 + +#define TN40_REG_ISR_MSK0 0x5140 + +#define TN40_REG_INIT_SEMAPHORE 0x5170 +#define TN40_REG_INIT_STATUS 0x5180 + +#define TN40_REG_MAC_LNK_STAT 0x0200 +#define TN40_MAC_LINK_STAT 0x0004 /* Link state */ + +#define TN40_REG_BLNK_LED 0x0210 + +#define TN40_REG_GMAC_RXF_A 0x1240 + +#define TN40_REG_UNC_MAC0_A 0x1250 +#define TN40_REG_UNC_MAC1_A 0x1260 +#define TN40_REG_UNC_MAC2_A 0x1270 + +#define TN40_REG_VLAN_0 0x1800 + +#define TN40_REG_MAX_FRAME_A 0x12C0 + +#define TN40_REG_RX_MAC_MCST0 0x1A80 +#define TN40_REG_RX_MAC_MCST1 0x1A84 +#define TN40_MAC_MCST_NUM 15 +#define TN40_REG_RX_MCST_HASH0 0x1A00 +#define TN40_MAC_MCST_HASH_NUM 8 + +#define TN40_REG_VPC 
0x2300 +#define TN40_REG_VIC 0x2320 +#define TN40_REG_VGLB 0x2340 + +#define TN40_REG_CLKPLL 0x5000 + +/* MDIO interface */ + +#define TN40_REG_MDIO_CMD_STAT 0x6030 +#define TN40_REG_MDIO_CMD 0x6034 +#define TN40_REG_MDIO_DATA 0x6038 +#define TN40_REG_MDIO_ADDR 0x603C +#define TN40_GET_MDIO_BUSY(x) FIELD_GET(GENMASK(0, 0), (x)) +#define TN40_GET_MDIO_RD_ERR(x) FIELD_GET(GENMASK(1, 1), (x)) + +#define TN40_REG_REVISION 0x6000 +#define TN40_REG_SCRATCH 0x6004 +#define TN40_REG_CTRLST 0x6008 +#define TN40_REG_MAC_ADDR_0 0x600C +#define TN40_REG_MAC_ADDR_1 0x6010 +#define TN40_REG_FRM_LENGTH 0x6014 +#define TN40_REG_PAUSE_QUANT 0x6054 +#define TN40_REG_RX_FIFO_SECTION 0x601C +#define TN40_REG_TX_FIFO_SECTION 0x6020 +#define TN40_REG_RX_FULLNESS 0x6024 +#define TN40_REG_TX_FULLNESS 0x6028 +#define TN40_REG_HASHTABLE 0x602C + +#define TN40_REG_RST_PORT 0x7000 +#define TN40_REG_DIS_PORT 0x7010 +#define TN40_REG_RST_QU 0x7020 +#define TN40_REG_DIS_QU 0x7030 + +#define TN40_REG_CTRLST_TX_ENA 0x0001 +#define TN40_REG_CTRLST_RX_ENA 0x0002 +#define TN40_REG_CTRLST_PRM_ENA 0x0010 +#define TN40_REG_CTRLST_PAD_ENA 0x0020 + +#define TN40_REG_CTRLST_BASE (TN40_REG_CTRLST_PAD_ENA | REG_CTRLST_PRM_ENA) + +/* TXD TXF RXF RXD CONFIG 0x0000 --- 0x007c */ +#define TN40_TX_RX_CFG1_BASE 0xffffffff /*0-31 */ +#define TN40_TX_RX_CFG0_BASE 0xfffff000 /*31:12 */ +#define TN40_TX_RX_CFG0_RSVD 0x00000ffc /*11:2 */ +#define TN40_TX_RX_CFG0_SIZE 0x00000003 /*1:0 */ + +/* TXD TXF RXF RXD WRITE 0x0080 --- 0x00BC */ +#define TN40_TXF_WPTR_WR_PTR 0x00007ff8 /*14:3 */ + +/* TXD TXF RXF RXD READ 0x00CO --- 0x00FC */ +#define TN40_TXF_RPTR_RD_PTR 0x00007ff8 /*14:3 */ + +/* The last 4 bits are dropped size is rounded to 16 */ +#define TN40_TXF_WPTR_MASK 0x7ff0 + +/* regISR 0x0100 */ +/* regIMR 0x0110 */ +#define TN40_IMR_INPROG 0x80000000 /*31 */ +#define TN40_IR_LNKCHG1 0x10000000 /*28 */ +#define TN40_IR_LNKCHG0 0x08000000 /*27 */ +#define TN40_IR_GPIO 0x04000000 /*26 */ +#define TN40_IR_RFRSH 0x02000000 /*25 */ +#define TN40_IR_RSVD 0x01000000 /*24 */ +#define TN40_IR_SWI 0x00800000 /*23 */ +#define TN40_IR_RX_FREE_3 0x00400000 /*22 */ +#define TN40_IR_RX_FREE_2 0x00200000 /*21 */ +#define TN40_IR_RX_FREE_1 0x00100000 /*20 */ +#define TN40_IR_RX_FREE_0 0x00080000 /*19 */ +#define TN40_IR_TX_FREE_3 0x00040000 /*18 */ +#define TN40_IR_TX_FREE_2 0x00020000 /*17 */ +#define TN40_IR_TX_FREE_1 0x00010000 /*16 */ +#define TN40_IR_TX_FREE_0 0x00008000 /*15 */ +#define TN40_IR_RX_DESC_3 0x00004000 /*14 */ +#define TN40_IR_RX_DESC_2 0x00002000 /*13 */ +#define TN40_IR_RX_DESC_1 0x00001000 /*12 */ +#define TN40_IR_RX_DESC_0 0x00000800 /*11 */ +#define TN40_IR_PSE 0x00000400 /*10 */ +#define TN40_IR_TMR3 0x00000200 /* 9 */ +#define TN40_IR_TMR2 0x00000100 /* 8 */ +#define TN40_IR_TMR1 0x00000080 /* 7 */ +#define TN40_IR_TMR0 0x00000040 /* 6 */ +#define TN40_IR_VNT 0x00000020 /* 5 */ +#define TN40_IR_RxFL 0x00000010 /* 4 */ +#define TN40_IR_SDPERR 0x00000008 /* 3 */ +#define TN40_IR_TR 0x00000004 /* 2 */ +#define TN40_IR_PCIE_LINK 0x00000002 /* 1 */ +#define TN40_IR_PCIE_TOUT 0x00000001 /* 0 */ + +#define TN40_IR_EXTRA \ + (TN40_IR_RX_FREE_0 | TN40_IR_LNKCHG0 | TN40_IR_LNKCHG1 |\ + TN40_IR_PSE | TN40_IR_TMR0 | TN40_IR_PCIE_LINK | \ + TN40_IR_PCIE_TOUT) + +#define TN40_GMAC_RX_FILTER_OSEN 0x1000 /* shared OS enable */ +#define TN40_GMAC_RX_FILTER_TXFC 0x0400 /* Tx flow control */ +#define TN40_GMAC_RX_FILTER_RSV0 0x0200 /* reserved */ +#define TN40_GMAC_RX_FILTER_FDA 0x0100 /* filter out direct address */ +#define TN40_GMAC_RX_FILTER_AOF 
0x0080 /* accept over run */ +#define TN40_GMAC_RX_FILTER_ACF 0x0040 /* accept control frames */ +#define TN40_GMAC_RX_FILTER_ARUNT 0x0020 /* accept under run */ +#define TN40_GMAC_RX_FILTER_ACRC 0x0010 /* accept crc error */ +#define TN40_GMAC_RX_FILTER_AM 0x0008 /* accept multicast */ +#define TN40_GMAC_RX_FILTER_AB 0x0004 /* accept broadcast */ +#define TN40_GMAC_RX_FILTER_PRM 0x0001 /* [0:1] promiscuous mode */ + +#define TN40_MAX_FRAME_AB_VAL 0x3fff /* 13:0 */ + +#define TN40_CLKPLL_PLLLKD 0x0200 /* 9 */ +#define TN40_CLKPLL_RSTEND 0x0100 /* 8 */ +#define TN40_CLKPLL_SFTRST 0x0001 /* 0 */ + +#define TN40_CLKPLL_LKD (TN40_CLKPLL_PLLLKD | TN40_CLKPLL_RSTEND) + +#endif diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 1729eb0e0b41..0d5a862cd78a 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -188,6 +188,7 @@ config TI_ICSSG_PRUETH select TI_ICSS_IEP select TI_K3_CPPI_DESC_POOL depends on PRU_REMOTEPROC + depends on NET_SWITCHDEV depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER depends on PTP_1588_CLOCK_OPTIONAL help @@ -204,6 +205,7 @@ config TI_ICSSG_PRUETH_SR1 select TI_ICSS_IEP select TI_K3_CPPI_DESC_POOL depends on PRU_REMOTEPROC + depends on NET_SWITCHDEV depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER help Support dual Gigabit Ethernet ports over the ICSSG PRU Subsystem. diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile index 6e086b4c0384..cbcf44806924 100644 --- a/drivers/net/ethernet/ti/Makefile +++ b/drivers/net/ethernet/ti/Makefile @@ -31,21 +31,18 @@ ti-am65-cpsw-nuss-$(CONFIG_TI_AM65_CPSW_QOS) += am65-cpsw-qos.o ti-am65-cpsw-nuss-$(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV) += am65-cpsw-switchdev.o obj-$(CONFIG_TI_K3_AM65_CPTS) += am65-cpts.o -obj-$(CONFIG_TI_ICSSG_PRUETH) += icssg-prueth.o -icssg-prueth-y := icssg/icssg_prueth.o \ - icssg/icssg_common.o \ - icssg/icssg_classifier.o \ - icssg/icssg_queues.o \ - icssg/icssg_config.o \ - icssg/icssg_mii_cfg.o \ - icssg/icssg_stats.o \ - icssg/icssg_ethtool.o -obj-$(CONFIG_TI_ICSSG_PRUETH_SR1) += icssg-prueth-sr1.o -icssg-prueth-sr1-y := icssg/icssg_prueth_sr1.o \ - icssg/icssg_common.o \ - icssg/icssg_classifier.o \ - icssg/icssg_config.o \ - icssg/icssg_mii_cfg.o \ - icssg/icssg_stats.o \ - icssg/icssg_ethtool.o +obj-$(CONFIG_TI_ICSSG_PRUETH) += icssg-prueth.o icssg.o +icssg-prueth-y := icssg/icssg_prueth.o icssg/icssg_switchdev.o + +obj-$(CONFIG_TI_ICSSG_PRUETH_SR1) += icssg-prueth-sr1.o icssg.o +icssg-prueth-sr1-y := icssg/icssg_prueth_sr1.o + +icssg-y := icssg/icssg_common.o \ + icssg/icssg_classifier.o \ + icssg/icssg_queues.o \ + icssg/icssg_config.o \ + icssg/icssg_mii_cfg.o \ + icssg/icssg_stats.o \ + icssg/icssg_ethtool.o + obj-$(CONFIG_TI_ICSS_IEP) += icssg/icss_iep.o diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c index a1d0935d1ebe..b60976947da5 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c +++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c @@ -692,7 +692,7 @@ static void am65_cpsw_get_eth_mac_stats(struct net_device *ndev, }; static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct am65_cpsw_common *common = am65_ndev_to_common(ndev); unsigned int ptp_v2_filter; diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 4e50b3792888..81d9f21086ec 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ 
b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -896,7 +896,7 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev) /* mac_sl should be configured via phy-link interface */ am65_cpsw_sl_ctl_reset(port); - ret = phylink_of_phy_connect(port->slave.phylink, port->slave.phy_node, 0); + ret = phylink_of_phy_connect(port->slave.phylink, port->slave.port_np, 0); if (ret) goto error_cleanup; @@ -2424,10 +2424,10 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) rx_chn->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i); - if (rx_chn->irq <= 0) { + if (rx_chn->irq < 0) { dev_err(dev, "Failed to get rx dma irq %d\n", rx_chn->irq); - ret = -ENXIO; + ret = rx_chn->irq; goto err; } } @@ -2611,7 +2611,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) of_property_read_bool(port_np, "ti,mac-only"); /* get phy/link info */ - port->slave.phy_node = port_np; + port->slave.port_np = port_np; ret = of_get_phy_mode(port_np, &port->slave.phy_if); if (ret) { dev_err(dev, "%pOF read phy-mode err %d\n", @@ -2703,6 +2703,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx) mutex_init(&ndev_priv->mm_lock); port->qos.link_speed = SPEED_UNKNOWN; SET_NETDEV_DEV(port->ndev, dev); + port->ndev->dev.of_node = port->slave.port_np; eth_hw_addr_set(port->ndev, port->slave.mac_addr); @@ -2760,7 +2761,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx) } phylink = phylink_create(&port->slave.phylink_config, - of_node_to_fwnode(port->slave.phy_node), + of_node_to_fwnode(port->slave.port_np), port->slave.phy_if, &am65_cpsw_phylink_mac_ops); if (IS_ERR(phylink)) diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h index d8ce88dc9c89..e2ce2be320bd 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h @@ -30,7 +30,7 @@ struct am65_cpts; struct am65_cpsw_slave_data { bool mac_only; struct cpsw_sl *mac_sl; - struct device_node *phy_node; + struct device_node *port_np; phy_interface_t phy_if; struct phy *ifphy; struct phy *serdes_phy; diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c index f7b283353ba2..53ed23d68722 100644 --- a/drivers/net/ethernet/ti/cpsw_ethtool.c +++ b/drivers/net/ethernet/ti/cpsw_ethtool.c @@ -717,7 +717,7 @@ err: } #if IS_ENABLED(CONFIG_TI_CPTS) -int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info) +int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info) { struct cpsw_common *cpsw = ndev_to_cpsw(ndev); @@ -738,7 +738,7 @@ int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info) return 0; } #else -int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info) +int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info) { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h index 7efa72502c86..1f448290b9f4 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.h +++ b/drivers/net/ethernet/ti/cpsw_priv.h @@ -510,6 +510,6 @@ int cpsw_set_ringparam(struct net_device *ndev, int cpsw_set_channels_common(struct net_device *ndev, struct ethtool_channels *chs, cpdma_handler_fn rx_handler); -int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info); +int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info); #endif /* 
DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_ */ diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c index 3025e9c18970..75c294ce6fb6 100644 --- a/drivers/net/ethernet/ti/icssg/icss_iep.c +++ b/drivers/net/ethernet/ti/icssg/icss_iep.c @@ -17,6 +17,7 @@ #include <linux/timekeeping.h> #include <linux/interrupt.h> #include <linux/of_irq.h> +#include <linux/workqueue.h> #include "icss_iep.h" @@ -94,7 +95,7 @@ enum { * @flags: Flags to represent IEP properties */ struct icss_iep_plat_data { - struct regmap_config *config; + const struct regmap_config *config; u32 reg_offs[ICSS_IEP_MAX_REGS]; u32 flags; }; @@ -110,7 +111,6 @@ struct icss_iep { struct ptp_clock_info ptp_info; struct ptp_clock *ptp_clock; struct mutex ptp_clk_mutex; /* PHC access serializer */ - spinlock_t irq_lock; /* CMP IRQ vs icss_iep_ptp_enable access */ u32 def_inc; s16 slow_cmp_inc; u32 slow_cmp_count; @@ -122,6 +122,7 @@ struct icss_iep { int cap_cmp_irq; u64 period; u32 latch_enable; + struct work_struct work; }; /** @@ -192,14 +193,11 @@ static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns) */ static void icss_iep_settime(struct icss_iep *iep, u64 ns) { - unsigned long flags; - if (iep->ops && iep->ops->settime) { iep->ops->settime(iep->clockops_data, ns); return; } - spin_lock_irqsave(&iep->irq_lock, flags); if (iep->pps_enabled || iep->perout_enabled) writel(0, iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]); @@ -210,7 +208,6 @@ static void icss_iep_settime(struct icss_iep *iep, u64 ns) writel(IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN, iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]); } - spin_unlock_irqrestore(&iep->irq_lock, flags); } /** @@ -546,7 +543,6 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep, static int icss_iep_perout_enable(struct icss_iep *iep, struct ptp_perout_request *req, int on) { - unsigned long flags; int ret = 0; mutex_lock(&iep->ptp_clk_mutex); @@ -559,11 +555,9 @@ static int icss_iep_perout_enable(struct icss_iep *iep, if (iep->perout_enabled == !!on) goto exit; - spin_lock_irqsave(&iep->irq_lock, flags); ret = icss_iep_perout_enable_hw(iep, req, on); if (!ret) iep->perout_enabled = !!on; - spin_unlock_irqrestore(&iep->irq_lock, flags); exit: mutex_unlock(&iep->ptp_clk_mutex); @@ -571,11 +565,61 @@ exit: return ret; } +static void icss_iep_cap_cmp_work(struct work_struct *work) +{ + struct icss_iep *iep = container_of(work, struct icss_iep, work); + const u32 *reg_offs = iep->plat_data->reg_offs; + struct ptp_clock_event pevent; + unsigned int val; + u64 ns, ns_next; + + mutex_lock(&iep->ptp_clk_mutex); + + ns = readl(iep->base + reg_offs[ICSS_IEP_CMP1_REG0]); + if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) { + val = readl(iep->base + reg_offs[ICSS_IEP_CMP1_REG1]); + ns |= (u64)val << 32; + } + /* set next event */ + ns_next = ns + iep->period; + writel(lower_32_bits(ns_next), + iep->base + reg_offs[ICSS_IEP_CMP1_REG0]); + if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) + writel(upper_32_bits(ns_next), + iep->base + reg_offs[ICSS_IEP_CMP1_REG1]); + + pevent.pps_times.ts_real = ns_to_timespec64(ns); + pevent.type = PTP_CLOCK_PPSUSR; + pevent.index = 0; + ptp_clock_event(iep->ptp_clock, &pevent); + dev_dbg(iep->dev, "IEP:pps ts: %llu next:%llu:\n", ns, ns_next); + + mutex_unlock(&iep->ptp_clk_mutex); +} + +static irqreturn_t icss_iep_cap_cmp_irq(int irq, void *dev_id) +{ + struct icss_iep *iep = (struct icss_iep *)dev_id; + const u32 *reg_offs = 
iep->plat_data->reg_offs; + unsigned int val; + + val = readl(iep->base + reg_offs[ICSS_IEP_CMP_STAT_REG]); + /* The driver only enables CMP1 */ + if (val & BIT(1)) { + /* Clear the event */ + writel(BIT(1), iep->base + reg_offs[ICSS_IEP_CMP_STAT_REG]); + if (iep->pps_enabled || iep->perout_enabled) + schedule_work(&iep->work); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + static int icss_iep_pps_enable(struct icss_iep *iep, int on) { struct ptp_clock_request rq; struct timespec64 ts; - unsigned long flags; int ret = 0; u64 ns; @@ -589,8 +633,6 @@ static int icss_iep_pps_enable(struct icss_iep *iep, int on) if (iep->pps_enabled == !!on) goto exit; - spin_lock_irqsave(&iep->irq_lock, flags); - rq.perout.index = 0; if (on) { ns = icss_iep_gettime(iep, NULL); @@ -602,13 +644,13 @@ static int icss_iep_pps_enable(struct icss_iep *iep, int on) ret = icss_iep_perout_enable_hw(iep, &rq.perout, on); } else { ret = icss_iep_perout_enable_hw(iep, &rq.perout, on); + if (iep->cap_cmp_irq) + cancel_work_sync(&iep->work); } if (!ret) iep->pps_enabled = !!on; - spin_unlock_irqrestore(&iep->irq_lock, flags); - exit: mutex_unlock(&iep->ptp_clk_mutex); @@ -777,6 +819,8 @@ int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops, if (iep->ops && iep->ops->perout_enable) { iep->ptp_info.n_per_out = 1; iep->ptp_info.pps = 1; + } else if (iep->cap_cmp_irq) { + iep->ptp_info.pps = 1; } if (iep->ops && iep->ops->extts_enable) @@ -817,6 +861,7 @@ static int icss_iep_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct icss_iep *iep; struct clk *iep_clk; + int ret, irq; iep = devm_kzalloc(dev, sizeof(*iep), GFP_KERNEL); if (!iep) @@ -827,6 +872,22 @@ static int icss_iep_probe(struct platform_device *pdev) if (IS_ERR(iep->base)) return -ENODEV; + irq = platform_get_irq_byname_optional(pdev, "iep_cap_cmp"); + if (irq == -EPROBE_DEFER) + return irq; + + if (irq > 0) { + ret = devm_request_irq(dev, irq, icss_iep_cap_cmp_irq, + IRQF_TRIGGER_HIGH, "iep_cap_cmp", iep); + if (ret) { + dev_info(iep->dev, "cap_cmp irq request failed: %x\n", + ret); + } else { + iep->cap_cmp_irq = irq; + INIT_WORK(&iep->work, icss_iep_cap_cmp_work); + } + } + iep_clk = devm_clk_get(dev, NULL); if (IS_ERR(iep_clk)) return PTR_ERR(iep_clk); @@ -853,7 +914,6 @@ static int icss_iep_probe(struct platform_device *pdev) iep->ptp_info = icss_iep_ptp_info; mutex_init(&iep->ptp_clk_mutex); - spin_lock_init(&iep->irq_lock); dev_set_drvdata(dev, iep); icss_iep_disable(iep); @@ -892,7 +952,7 @@ static int icss_iep_regmap_read(void *context, unsigned int reg, return 0; } -static struct regmap_config am654_icss_iep_regmap_config = { +static const struct regmap_config am654_icss_iep_regmap_config = { .name = "icss iep", .reg_stride = 1, .reg_write = icss_iep_regmap_write, diff --git a/drivers/net/ethernet/ti/icssg/icssg_classifier.c b/drivers/net/ethernet/ti/icssg/icssg_classifier.c index f7d21da1a0fb..9ec504d976d6 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_classifier.c +++ b/drivers/net/ethernet/ti/icssg/icssg_classifier.c @@ -297,6 +297,7 @@ void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac) mac[2] << 16 | mac[3] << 24)); regmap_write(miig_rt, offs[slice].mac1, (u32)(mac[4] | mac[5] << 8)); } +EXPORT_SYMBOL_GPL(icssg_class_set_mac_addr); static void icssg_class_ft1_add_mcast(struct regmap *miig_rt, int slice, int slot, const u8 *addr, const u8 *mask) @@ -360,6 +361,7 @@ void icssg_class_disable(struct regmap *miig_rt, int slice) /* clear CFG2 */ regmap_write(miig_rt, 
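For the icss_iep.c rework above, the CMP1 compare is now serviced by a hard IRQ that only acknowledges the event and schedules a work item; the work item re-arms CMP1 with the next period and reports PTP_CLOCK_PPSUSR under ptp_clk_mutex instead of the removed irq_lock. That path only runs once PPS is enabled on the PHC from userspace. A minimal userspace sketch follows; /dev/ptp0 is an assumption, the IEP clock index on a given board has to be looked up under /sys/class/ptp/.

/* Enable 1PPS events on the IEP PHC (needs CAP_SYS_TIME).  The events are
 * then delivered through the kernel PPS subsystem. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
        int fd = open("/dev/ptp0", O_RDWR);

        if (fd < 0) {
                perror("open /dev/ptp0");
                return 1;
        }
        if (ioctl(fd, PTP_ENABLE_PPS, 1)) {
                perror("PTP_ENABLE_PPS");
                close(fd);
                return 1;
        }
        close(fd);
        return 0;
}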
offs[slice].rx_class_cfg2, 0); } +EXPORT_SYMBOL_GPL(icssg_class_disable); void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti, bool is_sr1) @@ -390,6 +392,7 @@ void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti, /* clear CFG2 */ regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0); } +EXPORT_SYMBOL_GPL(icssg_class_default); void icssg_class_promiscuous_sr1(struct regmap *miig_rt, int slice) { @@ -408,6 +411,7 @@ void icssg_class_promiscuous_sr1(struct regmap *miig_rt, int slice) regmap_write(miig_rt, offset, data); } } +EXPORT_SYMBOL_GPL(icssg_class_promiscuous_sr1); void icssg_class_add_mcast_sr1(struct regmap *miig_rt, int slice, struct net_device *ndev) @@ -449,6 +453,7 @@ void icssg_class_add_mcast_sr1(struct regmap *miig_rt, int slice, slot++; } } +EXPORT_SYMBOL_GPL(icssg_class_add_mcast_sr1); /* required for SAV check */ void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr) @@ -460,3 +465,4 @@ void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr) rx_class_ft1_set_da_mask(miig_rt, slice, 0, mask_addr); rx_class_ft1_cfg_set_type(miig_rt, slice, 0, FT1_CFG_TYPE_EQ); } +EXPORT_SYMBOL_GPL(icssg_ft1_set_mac_addr); diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c index 088ab8076db4..b9d8a93d1680 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_common.c +++ b/drivers/net/ethernet/ti/icssg/icssg_common.c @@ -51,6 +51,7 @@ void prueth_cleanup_rx_chns(struct prueth_emac *emac, if (rx_chn->rx_chn) k3_udma_glue_release_rx_chn(rx_chn->rx_chn); } +EXPORT_SYMBOL_GPL(prueth_cleanup_rx_chns); void prueth_cleanup_tx_chns(struct prueth_emac *emac) { @@ -71,6 +72,7 @@ void prueth_cleanup_tx_chns(struct prueth_emac *emac) memset(tx_chn, 0, sizeof(*tx_chn)); } } +EXPORT_SYMBOL_GPL(prueth_cleanup_tx_chns); void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num) { @@ -84,6 +86,7 @@ void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num) netif_napi_del(&tx_chn->napi_tx); } } +EXPORT_SYMBOL_GPL(prueth_ndev_del_tx_napi); void prueth_xmit_free(struct prueth_tx_chn *tx_chn, struct cppi5_host_desc_t *desc) @@ -120,6 +123,7 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn, k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc); } +EXPORT_SYMBOL_GPL(prueth_xmit_free); int emac_tx_complete_packets(struct prueth_emac *emac, int chn, int budget, bool *tdown) @@ -264,6 +268,7 @@ fail: prueth_ndev_del_tx_napi(emac, i); return ret; } +EXPORT_SYMBOL_GPL(prueth_ndev_add_tx_napi); int prueth_init_tx_chns(struct prueth_emac *emac) { @@ -344,6 +349,7 @@ fail: prueth_cleanup_tx_chns(emac); return ret; } +EXPORT_SYMBOL_GPL(prueth_init_tx_chns); int prueth_init_rx_chns(struct prueth_emac *emac, struct prueth_rx_chn *rx_chn, @@ -440,9 +446,7 @@ int prueth_init_rx_chns(struct prueth_emac *emac, fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn, i); ret = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i); - if (ret <= 0) { - if (!ret) - ret = -ENXIO; + if (ret < 0) { netdev_err(ndev, "Failed to get rx dma irq"); goto fail; } @@ -455,6 +459,7 @@ fail: prueth_cleanup_rx_chns(emac, rx_chn, max_rflows); return ret; } +EXPORT_SYMBOL_GPL(prueth_init_rx_chns); int prueth_dma_rx_push(struct prueth_emac *emac, struct sk_buff *skb, @@ -492,6 +497,7 @@ int prueth_dma_rx_push(struct prueth_emac *emac, return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, desc_rx, desc_dma); } +EXPORT_SYMBOL_GPL(prueth_dma_rx_push); u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns) { 
@@ -507,6 +513,7 @@ u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns) return ns; } +EXPORT_SYMBOL_GPL(icssg_ts_to_ns); void emac_rx_timestamp(struct prueth_emac *emac, struct sk_buff *skb, u32 *psdata) @@ -581,6 +588,8 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id) } else { /* send the filled skb up the n/w stack */ skb_put(skb, pkt_len); + if (emac->prueth->is_switch_mode) + skb->offload_fwd_mark = emac->offload_fwd_mark; skb->protocol = eth_type_trans(skb, ndev); napi_gro_receive(&emac->napi_rx, skb); ndev->stats.rx_bytes += pkt_len; @@ -636,7 +645,7 @@ static int prueth_tx_ts_cookie_get(struct prueth_emac *emac) } /** - * emac_ndo_start_xmit - EMAC Transmit function + * icssg_ndo_start_xmit - EMAC Transmit function * @skb: SKB pointer * @ndev: EMAC network adapter * @@ -647,7 +656,7 @@ static int prueth_tx_ts_cookie_get(struct prueth_emac *emac) * * Return: enum netdev_tx */ -enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) +enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc; struct prueth_emac *emac = netdev_priv(ndev); @@ -806,6 +815,7 @@ drop_stop_q_busy: netif_tx_stop_queue(netif_txq); return NETDEV_TX_BUSY; } +EXPORT_SYMBOL_GPL(icssg_ndo_start_xmit); static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma) { @@ -831,6 +841,7 @@ irqreturn_t prueth_rx_irq(int irq, void *dev_id) return IRQ_HANDLED; } +EXPORT_SYMBOL_GPL(prueth_rx_irq); void prueth_emac_stop(struct prueth_emac *emac) { @@ -855,6 +866,7 @@ void prueth_emac_stop(struct prueth_emac *emac) rproc_shutdown(prueth->rtu[slice]); rproc_shutdown(prueth->pru[slice]); } +EXPORT_SYMBOL_GPL(prueth_emac_stop); void prueth_cleanup_tx_ts(struct prueth_emac *emac) { @@ -867,8 +879,9 @@ void prueth_cleanup_tx_ts(struct prueth_emac *emac) } } } +EXPORT_SYMBOL_GPL(prueth_cleanup_tx_ts); -int emac_napi_rx_poll(struct napi_struct *napi_rx, int budget) +int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget) { struct prueth_emac *emac = prueth_napi_to_emac(napi_rx); int rx_flow = emac->is_sr1 ? 
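The emac_* to icssg_* renames, together with the EXPORT_SYMBOL_GPL() additions, turn these ndo callbacks into the shared API of the new common module. As an illustrative sketch of what a consumer's ops table can look like (the real tables live in icssg_prueth.c and icssg_prueth_sr1.c and keep their own variant-specific .ndo_open/.ndo_stop; the prototypes are assumed to come from icssg_prueth.h):

#include <linux/netdevice.h>
#include "icssg_prueth.h"

static const struct net_device_ops example_shared_netdev_ops = {
        .ndo_start_xmit         = icssg_ndo_start_xmit,
        .ndo_tx_timeout         = icssg_ndo_tx_timeout,
        .ndo_eth_ioctl          = icssg_ndo_ioctl,
        .ndo_get_stats64        = icssg_ndo_get_stats64,
        .ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
};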
@@ -905,6 +918,7 @@ int emac_napi_rx_poll(struct napi_struct *napi_rx, int budget) return num_rx; } +EXPORT_SYMBOL_GPL(icssg_napi_rx_poll); int prueth_prepare_rx_chan(struct prueth_emac *emac, struct prueth_rx_chn *chn, @@ -930,6 +944,7 @@ int prueth_prepare_rx_chan(struct prueth_emac *emac, return 0; } +EXPORT_SYMBOL_GPL(prueth_prepare_rx_chan); void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num, bool free_skb) @@ -944,6 +959,7 @@ void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num, k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn); } } +EXPORT_SYMBOL_GPL(prueth_reset_tx_chan); void prueth_reset_rx_chan(struct prueth_rx_chn *chn, int num_flows, bool disable) @@ -956,11 +972,13 @@ void prueth_reset_rx_chan(struct prueth_rx_chn *chn, if (disable) k3_udma_glue_disable_rx_chn(chn->rx_chn); } +EXPORT_SYMBOL_GPL(prueth_reset_rx_chan); -void emac_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue) +void icssg_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue) { ndev->stats.tx_errors++; } +EXPORT_SYMBOL_GPL(icssg_ndo_tx_timeout); static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr) { @@ -1024,7 +1042,7 @@ static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr) -EFAULT : 0; } -int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) +int icssg_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) { switch (cmd) { case SIOCGHWTSTAMP: @@ -1037,9 +1055,10 @@ int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) return phy_do_ioctl(ndev, ifr, cmd); } +EXPORT_SYMBOL_GPL(icssg_ndo_ioctl); -void emac_ndo_get_stats64(struct net_device *ndev, - struct rtnl_link_stats64 *stats) +void icssg_ndo_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *stats) { struct prueth_emac *emac = netdev_priv(ndev); @@ -1058,9 +1077,10 @@ void emac_ndo_get_stats64(struct net_device *ndev, stats->tx_errors = ndev->stats.tx_errors; stats->tx_dropped = ndev->stats.tx_dropped; } +EXPORT_SYMBOL_GPL(icssg_ndo_get_stats64); -int emac_ndo_get_phys_port_name(struct net_device *ndev, char *name, - size_t len) +int icssg_ndo_get_phys_port_name(struct net_device *ndev, char *name, + size_t len) { struct prueth_emac *emac = netdev_priv(ndev); int ret; @@ -1071,6 +1091,7 @@ int emac_ndo_get_phys_port_name(struct net_device *ndev, char *name, return 0; } +EXPORT_SYMBOL_GPL(icssg_ndo_get_phys_port_name); /* get emac_port corresponding to eth_node name */ int prueth_node_port(struct device_node *eth_node) @@ -1089,6 +1110,7 @@ int prueth_node_port(struct device_node *eth_node) else return PRUETH_PORT_INVALID; } +EXPORT_SYMBOL_GPL(prueth_node_port); /* get MAC instance corresponding to eth_node name */ int prueth_node_mac(struct device_node *eth_node) @@ -1107,6 +1129,7 @@ int prueth_node_mac(struct device_node *eth_node) else return PRUETH_MAC_INVALID; } +EXPORT_SYMBOL_GPL(prueth_node_mac); void prueth_netdev_exit(struct prueth *prueth, struct device_node *eth_node) @@ -1132,6 +1155,7 @@ void prueth_netdev_exit(struct prueth *prueth, free_netdev(emac->ndev); prueth->emac[mac] = NULL; } +EXPORT_SYMBOL_GPL(prueth_netdev_exit); int prueth_get_cores(struct prueth *prueth, int slice, bool is_sr1) { @@ -1182,6 +1206,7 @@ int prueth_get_cores(struct prueth *prueth, int slice, bool is_sr1) return 0; } +EXPORT_SYMBOL_GPL(prueth_get_cores); void prueth_put_cores(struct prueth *prueth, int slice) { @@ -1194,6 +1219,7 @@ void prueth_put_cores(struct prueth *prueth, int slice) if (prueth->pru[slice]) 
pru_rproc_put(prueth->pru[slice]); } +EXPORT_SYMBOL_GPL(prueth_put_cores); #ifdef CONFIG_PM_SLEEP static int prueth_suspend(struct device *dev) @@ -1250,3 +1276,9 @@ static int prueth_resume(struct device *dev) const struct dev_pm_ops prueth_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(prueth_suspend, prueth_resume) }; +EXPORT_SYMBOL_GPL(prueth_dev_pm_ops); + +MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>"); +MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>"); +MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver Common Module"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c index 15f2235bf90f..dae52a83a378 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_config.c +++ b/drivers/net/ethernet/ti/icssg/icssg_config.c @@ -107,28 +107,49 @@ static const struct map hwq_map[2][ICSSG_NUM_OTHER_QUEUES] = { }, }; +static void icssg_config_mii_init_switch(struct prueth_emac *emac) +{ + struct prueth *prueth = emac->prueth; + int mii = prueth_emac_slice(emac); + u32 txcfg_reg, pcnt_reg, txcfg; + struct regmap *mii_rt; + + mii_rt = prueth->mii_rt; + + txcfg_reg = (mii == ICSS_MII0) ? PRUSS_MII_RT_TXCFG0 : + PRUSS_MII_RT_TXCFG1; + pcnt_reg = (mii == ICSS_MII0) ? PRUSS_MII_RT_RX_PCNT0 : + PRUSS_MII_RT_RX_PCNT1; + + txcfg = PRUSS_MII_RT_TXCFG_TX_ENABLE | + PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE | + PRUSS_MII_RT_TXCFG_TX_IPG_WIRE_CLK_EN; + + if (emac->phy_if == PHY_INTERFACE_MODE_MII && mii == ICSS_MII1) + txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL; + else if (emac->phy_if != PHY_INTERFACE_MODE_MII && mii == ICSS_MII0) + txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL; + + regmap_write(mii_rt, txcfg_reg, txcfg); + regmap_write(mii_rt, pcnt_reg, 0x1); +} + static void icssg_config_mii_init(struct prueth_emac *emac) { - u32 rxcfg, txcfg, rxcfg_reg, txcfg_reg, pcnt_reg; struct prueth *prueth = emac->prueth; int slice = prueth_emac_slice(emac); + u32 txcfg, txcfg_reg, pcnt_reg; struct regmap *mii_rt; mii_rt = prueth->mii_rt; - rxcfg_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_RXCFG0 : - PRUSS_MII_RT_RXCFG1; txcfg_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_TXCFG0 : PRUSS_MII_RT_TXCFG1; pcnt_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_RX_PCNT0 : PRUSS_MII_RT_RX_PCNT1; - rxcfg = MII_RXCFG_DEFAULT; txcfg = MII_TXCFG_DEFAULT; - if (slice == ICSS_MII1) - rxcfg |= PRUSS_MII_RT_RXCFG_RX_MUX_SEL; - /* In MII mode TX lines swapped inside ICSSG, so TX_MUX_SEL cfg need * to be swapped also comparing to RGMII mode. 
*/ @@ -137,7 +158,6 @@ static void icssg_config_mii_init(struct prueth_emac *emac) else if (emac->phy_if != PHY_INTERFACE_MODE_MII && slice == ICSS_MII1) txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL; - regmap_write(mii_rt, rxcfg_reg, rxcfg); regmap_write(mii_rt, txcfg_reg, txcfg); regmap_write(mii_rt, pcnt_reg, 0x1); } @@ -228,6 +248,7 @@ void icssg_config_ipg(struct prueth_emac *emac) icssg_mii_update_ipg(prueth->mii_rt, slice, ipg); } +EXPORT_SYMBOL_GPL(icssg_config_ipg); static void emac_r30_cmd_init(struct prueth_emac *emac) { @@ -257,6 +278,66 @@ static int emac_r30_is_done(struct prueth_emac *emac) return 1; } +static int prueth_switch_buffer_setup(struct prueth_emac *emac) +{ + struct icssg_buffer_pool_cfg __iomem *bpool_cfg; + struct icssg_rxq_ctx __iomem *rxq_ctx; + struct prueth *prueth = emac->prueth; + int slice = prueth_emac_slice(emac); + u32 addr; + int i; + + addr = lower_32_bits(prueth->msmcram.pa); + if (slice) + addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE; + + if (addr % SZ_64K) { + dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n"); + return -EINVAL; + } + + bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET; + /* workaround for f/w bug. bpool 0 needs to be initialized */ + for (i = 0; i < PRUETH_NUM_BUF_POOLS; i++) { + writel(addr, &bpool_cfg[i].addr); + writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len); + addr += PRUETH_EMAC_BUF_POOL_SIZE; + } + + if (!slice) + addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE; + else + addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST; + + for (i = PRUETH_NUM_BUF_POOLS; + i < 2 * PRUETH_SW_NUM_BUF_POOLS_HOST + PRUETH_NUM_BUF_POOLS; + i++) { + /* The driver only uses first 4 queues per PRU so only initialize them */ + if (i % PRUETH_SW_NUM_BUF_POOLS_HOST < PRUETH_SW_NUM_BUF_POOLS_PER_PRU) { + writel(addr, &bpool_cfg[i].addr); + writel(PRUETH_SW_BUF_POOL_SIZE_HOST, &bpool_cfg[i].len); + addr += PRUETH_SW_BUF_POOL_SIZE_HOST; + } else { + writel(0, &bpool_cfg[i].addr); + writel(0, &bpool_cfg[i].len); + } + } + + if (!slice) + addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST; + else + addr += PRUETH_EMAC_RX_CTX_BUF_SIZE; + + rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET; + for (i = 0; i < 3; i++) + writel(addr, &rxq_ctx->start[i]); + + addr += PRUETH_EMAC_RX_CTX_BUF_SIZE; + writel(addr - SZ_2K, &rxq_ctx->end); + + return 0; +} + static int prueth_emac_buffer_setup(struct prueth_emac *emac) { struct icssg_buffer_pool_cfg __iomem *bpool_cfg; @@ -321,25 +402,63 @@ static void icssg_init_emac_mode(struct prueth *prueth) /* When the device is configured as a bridge and it is being brought * back to the emac mode, the host mac address has to be set as 0. 
*/ + u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET; + int i; u8 mac[ETH_ALEN] = { 0 }; if (prueth->emacs_initialized) return; - regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, - SMEM_VLAN_OFFSET_MASK, 0); - regmap_write(prueth->miig_rt, FDB_GEN_CFG2, 0); + /* Set VLAN TABLE address base */ + regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK, + addr << SMEM_VLAN_OFFSET); + /* Set enable VLAN aware mode, and FDBs for all PRUs */ + regmap_write(prueth->miig_rt, FDB_GEN_CFG2, (FDB_PRU0_EN | FDB_PRU1_EN | FDB_HOST_EN)); + prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va + + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET); + for (i = 0; i < SZ_4K - 1; i++) { + prueth->vlan_tbl[i].fid = i; + prueth->vlan_tbl[i].fid_c1 = 0; + } /* Clear host MAC address */ icssg_class_set_host_mac_addr(prueth->miig_rt, mac); } +static void icssg_init_switch_mode(struct prueth *prueth) +{ + u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET; + int i; + + if (prueth->emacs_initialized) + return; + + /* Set VLAN TABLE address base */ + regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK, + addr << SMEM_VLAN_OFFSET); + /* Set enable VLAN aware mode, and FDBs for all PRUs */ + regmap_write(prueth->miig_rt, FDB_GEN_CFG2, FDB_EN_ALL); + prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va + + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET); + for (i = 0; i < SZ_4K - 1; i++) { + prueth->vlan_tbl[i].fid = i; + prueth->vlan_tbl[i].fid_c1 = 0; + } + + if (prueth->hw_bridge_dev) + icssg_class_set_host_mac_addr(prueth->miig_rt, prueth->hw_bridge_dev->dev_addr); + icssg_set_pvid(prueth, prueth->default_vlan, PRUETH_PORT_HOST); +} + int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice) { void __iomem *config = emac->dram.va + ICSSG_CONFIG_OFFSET; struct icssg_flow_cfg __iomem *flow_cfg; int ret; - icssg_init_emac_mode(prueth); + if (prueth->is_switch_mode) + icssg_init_switch_mode(prueth); + else + icssg_init_emac_mode(prueth); memset_io(config, 0, TAS_GATE_MASK_LIST0); icssg_miig_queues_init(prueth, slice); @@ -353,7 +472,10 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice) regmap_update_bits(prueth->miig_rt, ICSSG_CFG_OFFSET, ICSSG_CFG_DEFAULT, ICSSG_CFG_DEFAULT); icssg_miig_set_interface_mode(prueth->miig_rt, slice, emac->phy_if); - icssg_config_mii_init(emac); + if (prueth->is_switch_mode) + icssg_config_mii_init_switch(emac); + else + icssg_config_mii_init(emac); icssg_config_ipg(emac); icssg_update_rgmii_cfg(prueth->miig_rt, emac); @@ -376,7 +498,10 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice) writeb(0, config + SPL_PKT_DEFAULT_PRIORITY); writeb(0, config + QUEUE_NUM_UNTAGGED); - ret = prueth_emac_buffer_setup(emac); + if (prueth->is_switch_mode) + ret = prueth_switch_buffer_setup(emac); + else + ret = prueth_emac_buffer_setup(emac); if (ret) return ret; @@ -384,6 +509,7 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice) return 0; } +EXPORT_SYMBOL_GPL(icssg_config); /* Bitmask for ICSSG r30 commands */ static const struct icssg_r30_cmd emac_r32_bitmask[] = { @@ -408,8 +534,8 @@ static const struct icssg_r30_cmd emac_r32_bitmask[] = { {{0xffef0000, EMAC_NONE, 0xffef0000, EMAC_NONE}} /* VLAN UNWARE*/ }; -int emac_set_port_state(struct prueth_emac *emac, - enum icssg_port_state_cmd cmd) +int icssg_set_port_state(struct prueth_emac *emac, + enum icssg_port_state_cmd cmd) { struct 
icssg_r30_cmd __iomem *p; int ret = -ETIMEDOUT; @@ -440,6 +566,7 @@ int emac_set_port_state(struct prueth_emac *emac, return ret; } +EXPORT_SYMBOL_GPL(icssg_set_port_state); void icssg_config_half_duplex(struct prueth_emac *emac) { @@ -451,6 +578,7 @@ void icssg_config_half_duplex(struct prueth_emac *emac) val = get_random_u32(); writel(val, emac->dram.va + HD_RAND_SEED_OFFSET); } +EXPORT_SYMBOL_GPL(icssg_config_half_duplex); void icssg_config_set_speed(struct prueth_emac *emac) { @@ -477,3 +605,180 @@ void icssg_config_set_speed(struct prueth_emac *emac) writeb(fw_speed, emac->dram.va + PORT_LINK_SPEED_OFFSET); } +EXPORT_SYMBOL_GPL(icssg_config_set_speed); + +int icssg_send_fdb_msg(struct prueth_emac *emac, struct mgmt_cmd *cmd, + struct mgmt_cmd_rsp *rsp) +{ + struct prueth *prueth = emac->prueth; + int slice = prueth_emac_slice(emac); + int addr, ret; + + addr = icssg_queue_pop(prueth, slice == 0 ? + ICSSG_CMD_POP_SLICE0 : ICSSG_CMD_POP_SLICE1); + if (addr < 0) + return addr; + + /* First 4 bytes have FW owned buffer linking info which should + * not be touched + */ + memcpy_toio(prueth->shram.va + addr + 4, cmd, sizeof(*cmd)); + icssg_queue_push(prueth, slice == 0 ? + ICSSG_CMD_PUSH_SLICE0 : ICSSG_CMD_PUSH_SLICE1, addr); + ret = read_poll_timeout(icssg_queue_pop, addr, addr >= 0, + 2000, 20000000, false, prueth, slice == 0 ? + ICSSG_RSP_POP_SLICE0 : ICSSG_RSP_POP_SLICE1); + if (ret) { + netdev_err(emac->ndev, "Timedout sending HWQ message\n"); + return ret; + } + + memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp)); + /* Return buffer back for to pool */ + icssg_queue_push(prueth, slice == 0 ? + ICSSG_RSP_PUSH_SLICE0 : ICSSG_RSP_PUSH_SLICE1, addr); + + return 0; +} +EXPORT_SYMBOL_GPL(icssg_send_fdb_msg); + +static void icssg_fdb_setup(struct prueth_emac *emac, struct mgmt_cmd *fdb_cmd, + const unsigned char *addr, u8 fid, int cmd) +{ + int slice = prueth_emac_slice(emac); + u8 mac_fid[ETH_ALEN + 2]; + u16 fdb_slot; + + ether_addr_copy(mac_fid, addr); + + /* 1-1 VID-FID mapping is already setup */ + mac_fid[ETH_ALEN] = fid; + mac_fid[ETH_ALEN + 1] = 0; + + fdb_slot = bitrev32(crc32_le(0, mac_fid, 8)) & PRUETH_SWITCH_FDB_MASK; + + fdb_cmd->header = ICSSG_FW_MGMT_CMD_HEADER; + fdb_cmd->type = ICSSG_FW_MGMT_FDB_CMD_TYPE; + fdb_cmd->seqnum = ++(emac->prueth->icssg_hwcmdseq); + fdb_cmd->param = cmd; + fdb_cmd->param |= (slice << 4); + + memcpy(&fdb_cmd->cmd_args[0], addr, 4); + memcpy(&fdb_cmd->cmd_args[1], &addr[4], 2); + fdb_cmd->cmd_args[2] = fdb_slot; + + netdev_dbg(emac->ndev, "MAC %pM slot %X FID %X\n", addr, fdb_slot, fid); +} + +int icssg_fdb_add_del(struct prueth_emac *emac, const unsigned char *addr, + u8 vid, u8 fid_c2, bool add) +{ + struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 }; + struct mgmt_cmd fdb_cmd = { 0 }; + u8 fid = vid; + int ret; + + icssg_fdb_setup(emac, &fdb_cmd, addr, fid, add ? 
ICSS_CMD_ADD_FDB : ICSS_CMD_DEL_FDB); + + fid_c2 |= ICSSG_FDB_ENTRY_VALID; + fdb_cmd.cmd_args[1] |= ((fid << 16) | (fid_c2 << 24)); + + ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp); + if (ret) + return ret; + + WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum); + if (fdb_cmd_rsp.status == 1) + return 0; + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(icssg_fdb_add_del); + +int icssg_fdb_lookup(struct prueth_emac *emac, const unsigned char *addr, + u8 vid) +{ + struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 }; + struct mgmt_cmd fdb_cmd = { 0 }; + struct prueth_fdb_slot *slot; + u8 fid = vid; + int ret, i; + + icssg_fdb_setup(emac, &fdb_cmd, addr, fid, ICSS_CMD_GET_FDB_SLOT); + + fdb_cmd.cmd_args[1] |= fid << 16; + + ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp); + if (ret) + return ret; + + WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum); + + slot = (struct prueth_fdb_slot __force *)(emac->dram.va + FDB_CMD_BUFFER); + for (i = 0; i < 4; i++) { + if (ether_addr_equal(addr, slot->mac) && vid == slot->fid) + return (slot->fid_c2 & ~ICSSG_FDB_ENTRY_VALID); + slot++; + } + + return 0; +} +EXPORT_SYMBOL_GPL(icssg_fdb_lookup); + +void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask, + u8 untag_mask, bool add) +{ + struct prueth *prueth = emac->prueth; + struct prueth_vlan_tbl *tbl; + u8 fid_c1; + + tbl = prueth->vlan_tbl; + fid_c1 = tbl[vid].fid_c1; + + /* FID_C1: bit0..2 port membership mask, + * bit3..5 tagging mask for each port + * bit6 Stream VID (not handled currently) + * bit7 MC flood (not handled currently) + */ + if (add) { + fid_c1 |= (port_mask | port_mask << 3); + fid_c1 &= ~(untag_mask << 3); + } else { + fid_c1 &= ~(port_mask | port_mask << 3); + } + + tbl[vid].fid_c1 = fid_c1; +} +EXPORT_SYMBOL_GPL(icssg_vtbl_modify); + +u16 icssg_get_pvid(struct prueth_emac *emac) +{ + struct prueth *prueth = emac->prueth; + u32 pvid; + + if (emac->port_id == PRUETH_PORT_MII0) + pvid = readl(prueth->shram.va + EMAC_ICSSG_SWITCH_PORT1_DEFAULT_VLAN_OFFSET); + else + pvid = readl(prueth->shram.va + EMAC_ICSSG_SWITCH_PORT2_DEFAULT_VLAN_OFFSET); + + pvid = pvid >> 24; + + return pvid; +} +EXPORT_SYMBOL_GPL(icssg_get_pvid); + +void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port) +{ + u32 pvid; + + /* only 256 VLANs are supported */ + pvid = (u32 __force)cpu_to_be32((ETH_P_8021Q << 16) | (vid & 0xff)); + + if (port == PRUETH_PORT_MII0) + writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT1_DEFAULT_VLAN_OFFSET); + else if (port == PRUETH_PORT_MII1) + writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT2_DEFAULT_VLAN_OFFSET); + else + writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT0_DEFAULT_VLAN_OFFSET); +} +EXPORT_SYMBOL_GPL(icssg_set_pvid); diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h index cf2ea4bd22a2..1ac60283923b 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_config.h +++ b/drivers/net/ethernet/ti/icssg/icssg_config.h @@ -35,6 +35,15 @@ struct icssg_flow_cfg { (2 * (PRUETH_EMAC_BUF_POOL_SIZE * PRUETH_NUM_BUF_POOLS + \ PRUETH_EMAC_RX_CTX_BUF_SIZE * 2)) +#define PRUETH_SW_BUF_POOL_SIZE_HOST SZ_4K +#define PRUETH_SW_NUM_BUF_POOLS_HOST 8 +#define PRUETH_SW_NUM_BUF_POOLS_PER_PRU 4 +#define MSMC_RAM_SIZE_SWITCH_MODE \ + (MSMC_RAM_SIZE + \ + (2 * PRUETH_SW_BUF_POOL_SIZE_HOST * PRUETH_SW_NUM_BUF_POOLS_HOST)) + +#define PRUETH_SWITCH_FDB_MASK ((SIZE_OF_FDB / NUMBER_OF_FDB_BUCKET_ENTRIES) - 1) + struct icssg_rxq_ctx { __le32 start[3]; __le32 end; @@ -202,6 +211,23 @@ struct icssg_setclock_desc { #define 
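icssg_fdb_setup() above derives the firmware FDB bucket from the MAC address plus FID using a bit-reversed little-endian CRC32. The sketch below redoes that computation in isolation; the bucket count of 512 is an assumption for illustration, since the real PRUETH_SWITCH_FDB_MASK is built from SIZE_OF_FDB / NUMBER_OF_FDB_BUCKET_ENTRIES, which are defined outside this excerpt.

#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/etherdevice.h>

/* Illustrative only: same hashing steps as icssg_fdb_setup() above. */
static u16 example_fdb_slot(const u8 *addr, u8 fid)
{
        u8 mac_fid[ETH_ALEN + 2];

        ether_addr_copy(mac_fid, addr);
        mac_fid[ETH_ALEN] = fid;        /* 1-1 VID-FID mapping */
        mac_fid[ETH_ALEN + 1] = 0;

        return bitrev32(crc32_le(0, mac_fid, 8)) & (512 - 1);
}

On the VLAN side, icssg_vtbl_modify() keeps port membership in FID_C1 bits 0..2 and the per-port tagging flags in bits 3..5, so adding a port whose mask is BIT(n) with untagging requested sets bit n and clears bit n + 3.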
ICSSG_TS_PUSH_SLICE0 40 #define ICSSG_TS_PUSH_SLICE1 41 +struct mgmt_cmd { + u8 param; + u8 seqnum; + u8 type; + u8 header; + u32 cmd_args[3]; +}; + +struct mgmt_cmd_rsp { + u32 reserved; + u8 status; + u8 seqnum; + u8 type; + u8 header; + u32 cmd_args[3]; +}; + /* FDB FID_C2 flag definitions */ /* Indicates host port membership.*/ #define ICSSG_FDB_ENTRY_P0_MEMBERSHIP BIT(0) diff --git a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c index c8d0f45cc5b1..5688f054cec5 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c +++ b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c @@ -110,7 +110,7 @@ static void emac_get_ethtool_stats(struct net_device *ndev, } static int emac_get_ts_info(struct net_device *ndev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct prueth_emac *emac = netdev_priv(ndev); @@ -312,3 +312,4 @@ const struct ethtool_ops icssg_ethtool_ops = { .nway_reset = emac_nway_reset, .get_rmon_stats = emac_get_rmon_stats, }; +EXPORT_SYMBOL_GPL(icssg_ethtool_ops); diff --git a/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c b/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c index 92718ae40d7e..b64955438bb2 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c +++ b/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c @@ -40,6 +40,7 @@ void icssg_mii_update_mtu(struct regmap *mii_rt, int mii, int mtu) (mtu - 1) << PRUSS_MII_RT_RX_FRMS_MAX_FRM_SHIFT); } } +EXPORT_SYMBOL_GPL(icssg_mii_update_mtu); void icssg_update_rgmii_cfg(struct regmap *miig_rt, struct prueth_emac *emac) { @@ -66,6 +67,7 @@ void icssg_update_rgmii_cfg(struct regmap *miig_rt, struct prueth_emac *emac) regmap_update_bits(miig_rt, RGMII_CFG_OFFSET, full_duplex_mask, full_duplex_val); } +EXPORT_SYMBOL_GPL(icssg_update_rgmii_cfg); void icssg_miig_set_interface_mode(struct regmap *miig_rt, int mii, phy_interface_t phy_if) { @@ -105,6 +107,7 @@ u32 icssg_rgmii_get_speed(struct regmap *miig_rt, int mii) return icssg_rgmii_cfg_get_bitfield(miig_rt, mask, shift); } +EXPORT_SYMBOL_GPL(icssg_rgmii_get_speed); u32 icssg_rgmii_get_fullduplex(struct regmap *miig_rt, int mii) { @@ -118,3 +121,4 @@ u32 icssg_rgmii_get_fullduplex(struct regmap *miig_rt, int mii) return icssg_rgmii_cfg_get_bitfield(miig_rt, mask, shift); } +EXPORT_SYMBOL_GPL(icssg_rgmii_get_fullduplex); diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index 1ea3fbd5e954..3e51b3a9b0a5 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -27,13 +27,19 @@ #include <linux/remoteproc/pruss.h> #include <linux/regmap.h> #include <linux/remoteproc.h> +#include <net/switchdev.h> #include "icssg_prueth.h" #include "icssg_mii_rt.h" +#include "icssg_switchdev.h" #include "../k3-cppi-desc-pool.h" #define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver" +#define DEFAULT_VID 1 +#define DEFAULT_PORT_MASK 1 +#define DEFAULT_UNTAG_MASK 1 + /* CTRLMMR_ICSSG_RGMII_CTRL register bits */ #define ICSSG_CTRL_RGMII_ID_MODE BIT(24) @@ -112,6 +118,19 @@ static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id) return IRQ_HANDLED; } +static struct icssg_firmwares icssg_switch_firmwares[] = { + { + .pru = "ti-pruss/am65x-sr2-pru0-prusw-fw.elf", + .rtu = "ti-pruss/am65x-sr2-rtu0-prusw-fw.elf", + .txpru = "ti-pruss/am65x-sr2-txpru0-prusw-fw.elf", + }, + { + .pru = "ti-pruss/am65x-sr2-pru1-prusw-fw.elf", + .rtu = "ti-pruss/am65x-sr2-rtu1-prusw-fw.elf", + .txpru = "ti-pruss/am65x-sr2-txpru1-prusw-fw.elf", + } 
+}; + static struct icssg_firmwares icssg_emac_firmwares[] = { { .pru = "ti-pruss/am65x-sr2-pru0-prueth-fw.elf", @@ -131,7 +150,10 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac) struct device *dev = prueth->dev; int slice, ret; - firmwares = icssg_emac_firmwares; + if (prueth->is_switch_mode) + firmwares = icssg_switch_firmwares; + else + firmwares = icssg_emac_firmwares; slice = prueth_emac_slice(emac); if (slice < 0) { @@ -227,10 +249,10 @@ static void emac_adjust_link(struct net_device *ndev) icssg_config_ipg(emac); spin_unlock_irqrestore(&emac->lock, flags); icssg_config_set_speed(emac); - emac_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD); + icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD); } else { - emac_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE); + icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE); } } @@ -417,6 +439,37 @@ const struct icss_iep_clockops prueth_iep_clockops = { .perout_enable = prueth_perout_enable, }; +static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr) +{ + struct prueth_emac *emac = netdev_priv(ndev); + int port_mask = BIT(emac->port_id); + + port_mask |= icssg_fdb_lookup(emac, addr, 0); + icssg_fdb_add_del(emac, addr, 0, port_mask, true); + icssg_vtbl_modify(emac, 0, port_mask, port_mask, true); + + return 0; +} + +static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr) +{ + struct prueth_emac *emac = netdev_priv(ndev); + int port_mask = BIT(emac->port_id); + int other_port_mask; + + other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, 0); + + icssg_fdb_add_del(emac, addr, 0, port_mask, false); + icssg_vtbl_modify(emac, 0, port_mask, port_mask, false); + + if (other_port_mask) { + icssg_fdb_add_del(emac, addr, 0, other_port_mask, true); + icssg_vtbl_modify(emac, 0, other_port_mask, other_port_mask, true); + } + + return 0; +} + /** * emac_ndo_open - EMAC device open * @ndev: network adapter device @@ -445,9 +498,8 @@ static int emac_ndo_open(struct net_device *ndev) ether_addr_copy(emac->mac_addr, ndev->dev_addr); icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr); - icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr); - icssg_class_default(prueth->miig_rt, slice, 0, false); + icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr); /* Notify the stack of the actual queue counts. 
*/ ret = netif_set_real_num_tx_queues(ndev, num_data_chn); @@ -578,6 +630,8 @@ static int emac_ndo_stop(struct net_device *ndev) icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac)); + __dev_mc_unsync(ndev, icssg_prueth_del_mcast); + atomic_set(&emac->tdown_cnt, emac->tx_ch_num); /* ensure new tdown_cnt value is visible */ smp_mb__after_atomic(); @@ -640,24 +694,21 @@ static void emac_ndo_set_rx_mode_work(struct work_struct *work) promisc = ndev->flags & IFF_PROMISC; allmulti = ndev->flags & IFF_ALLMULTI; - emac_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE); - emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE); + icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE); + icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE); if (promisc) { - emac_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE); - emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE); + icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE); + icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE); return; } if (allmulti) { - emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE); + icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE); return; } - if (!netdev_mc_empty(ndev)) { - emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE); - return; - } + __dev_mc_sync(ndev, icssg_prueth_add_mcast, icssg_prueth_del_mcast); } /** @@ -677,14 +728,14 @@ static void emac_ndo_set_rx_mode(struct net_device *ndev) static const struct net_device_ops emac_netdev_ops = { .ndo_open = emac_ndo_open, .ndo_stop = emac_ndo_stop, - .ndo_start_xmit = emac_ndo_start_xmit, + .ndo_start_xmit = icssg_ndo_start_xmit, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_tx_timeout = emac_ndo_tx_timeout, + .ndo_tx_timeout = icssg_ndo_tx_timeout, .ndo_set_rx_mode = emac_ndo_set_rx_mode, - .ndo_eth_ioctl = emac_ndo_ioctl, - .ndo_get_stats64 = emac_ndo_get_stats64, - .ndo_get_phys_port_name = emac_ndo_get_phys_port_name, + .ndo_eth_ioctl = icssg_ndo_ioctl, + .ndo_get_stats64 = icssg_ndo_get_stats64, + .ndo_get_phys_port_name = icssg_ndo_get_phys_port_name, }; static int prueth_netdev_init(struct prueth *prueth, @@ -720,7 +771,7 @@ static int prueth_netdev_init(struct prueth *prueth, } INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work); - INIT_DELAYED_WORK(&emac->stats_work, emac_stats_work_handler); + INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler); ret = pruss_request_mem_region(prueth->pruss, port == PRUETH_PORT_MII0 ? 
@@ -813,7 +864,7 @@ static int prueth_netdev_init(struct prueth *prueth, ndev->hw_features = NETIF_F_SG; ndev->features = ndev->hw_features; - netif_napi_add(ndev, &emac->napi_rx, emac_napi_rx_poll); + netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll); hrtimer_init(&emac->rx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); emac->rx_hrtimer.function = &emac_rx_timer_callback; @@ -833,6 +884,214 @@ free_ndev: return ret; } +bool prueth_dev_check(const struct net_device *ndev) +{ + if (ndev->netdev_ops == &emac_netdev_ops && netif_running(ndev)) { + struct prueth_emac *emac = netdev_priv(ndev); + + return emac->prueth->is_switch_mode; + } + + return false; +} + +static void prueth_offload_fwd_mark_update(struct prueth *prueth) +{ + int set_val = 0; + int i; + + if (prueth->br_members == (BIT(PRUETH_PORT_MII0) | BIT(PRUETH_PORT_MII1))) + set_val = 1; + + dev_dbg(prueth->dev, "set offload_fwd_mark %d\n", set_val); + + for (i = PRUETH_MAC0; i < PRUETH_NUM_MACS; i++) { + struct prueth_emac *emac = prueth->emac[i]; + + if (!emac || !emac->ndev) + continue; + + emac->offload_fwd_mark = set_val; + } +} + +static void prueth_emac_restart(struct prueth *prueth) +{ + struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0]; + struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1]; + + /* Detach the net_device for both PRUeth ports*/ + if (netif_running(emac0->ndev)) + netif_device_detach(emac0->ndev); + if (netif_running(emac1->ndev)) + netif_device_detach(emac1->ndev); + + /* Disable both PRUeth ports */ + icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE); + icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE); + + /* Stop both pru cores for both PRUeth ports*/ + prueth_emac_stop(emac0); + prueth->emacs_initialized--; + prueth_emac_stop(emac1); + prueth->emacs_initialized--; + + /* Start both pru cores for both PRUeth ports */ + prueth_emac_start(prueth, emac0); + prueth->emacs_initialized++; + prueth_emac_start(prueth, emac1); + prueth->emacs_initialized++; + + /* Enable forwarding for both PRUeth ports */ + icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD); + icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD); + + /* Attache net_device for both PRUeth ports */ + netif_device_attach(emac0->ndev); + netif_device_attach(emac1->ndev); +} + +static void icssg_enable_switch_mode(struct prueth *prueth) +{ + struct prueth_emac *emac; + int mac; + + prueth_emac_restart(prueth); + + for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) { + emac = prueth->emac[mac]; + if (netif_running(emac->ndev)) { + icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan, + ICSSG_FDB_ENTRY_P0_MEMBERSHIP | + ICSSG_FDB_ENTRY_P1_MEMBERSHIP | + ICSSG_FDB_ENTRY_P2_MEMBERSHIP | + ICSSG_FDB_ENTRY_BLOCK, + true); + icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID, + BIT(emac->port_id) | DEFAULT_PORT_MASK, + BIT(emac->port_id) | DEFAULT_UNTAG_MASK, + true); + icssg_set_pvid(prueth, emac->port_vlan, emac->port_id); + icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE); + } + } +} + +static int prueth_netdevice_port_link(struct net_device *ndev, + struct net_device *br_ndev, + struct netlink_ext_ack *extack) +{ + struct prueth_emac *emac = netdev_priv(ndev); + struct prueth *prueth = emac->prueth; + int err; + + if (!prueth->br_members) { + prueth->hw_bridge_dev = br_ndev; + } else { + /* This is adding the port to a second bridge, this is + * unsupported + */ + if (prueth->hw_bridge_dev != br_ndev) + return -EOPNOTSUPP; + } + + err = switchdev_bridge_port_offload(br_ndev, ndev, emac, + 
&prueth->prueth_switchdev_nb, + &prueth->prueth_switchdev_bl_nb, + false, extack); + if (err) + return err; + + prueth->br_members |= BIT(emac->port_id); + + if (!prueth->is_switch_mode) { + if (prueth->br_members & BIT(PRUETH_PORT_MII0) && + prueth->br_members & BIT(PRUETH_PORT_MII1)) { + prueth->is_switch_mode = true; + prueth->default_vlan = 1; + emac->port_vlan = prueth->default_vlan; + icssg_enable_switch_mode(prueth); + } + } + + prueth_offload_fwd_mark_update(prueth); + + return NOTIFY_DONE; +} + +static void prueth_netdevice_port_unlink(struct net_device *ndev) +{ + struct prueth_emac *emac = netdev_priv(ndev); + struct prueth *prueth = emac->prueth; + + prueth->br_members &= ~BIT(emac->port_id); + + if (prueth->is_switch_mode) { + prueth->is_switch_mode = false; + emac->port_vlan = 0; + prueth_emac_restart(prueth); + } + + prueth_offload_fwd_mark_update(prueth); + + if (!prueth->br_members) + prueth->hw_bridge_dev = NULL; +} + +/* netdev notifier */ +static int prueth_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr); + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); + struct netdev_notifier_changeupper_info *info; + int ret = NOTIFY_DONE; + + if (ndev->netdev_ops != &emac_netdev_ops) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_CHANGEUPPER: + info = ptr; + + if (netif_is_bridge_master(info->upper_dev)) { + if (info->linking) + ret = prueth_netdevice_port_link(ndev, info->upper_dev, extack); + else + prueth_netdevice_port_unlink(ndev); + } + break; + default: + return NOTIFY_DONE; + } + + return notifier_from_errno(ret); +} + +static int prueth_register_notifiers(struct prueth *prueth) +{ + int ret = 0; + + prueth->prueth_netdevice_nb.notifier_call = &prueth_netdevice_event; + ret = register_netdevice_notifier(&prueth->prueth_netdevice_nb); + if (ret) { + dev_err(prueth->dev, "can't register netdevice notifier\n"); + return ret; + } + + ret = prueth_switchdev_register_notifiers(prueth); + if (ret) + unregister_netdevice_notifier(&prueth->prueth_netdevice_nb); + + return ret; +} + +static void prueth_unregister_notifiers(struct prueth *prueth) +{ + prueth_switchdev_unregister_notifiers(prueth); + unregister_netdevice_notifier(&prueth->prueth_netdevice_nb); +} + static int prueth_probe(struct platform_device *pdev) { struct device_node *eth_node, *eth_ports_node; @@ -960,6 +1219,9 @@ static int prueth_probe(struct platform_device *pdev) } msmc_ram_size = MSMC_RAM_SIZE; + prueth->is_switchmode_supported = prueth->pdata.switch_mode; + if (prueth->is_switchmode_supported) + msmc_ram_size = MSMC_RAM_SIZE_SWITCH_MODE; /* NOTE: FW bug needs buffer base to be 64KB aligned */ prueth->msmcram.va = @@ -1065,6 +1327,14 @@ static int prueth_probe(struct platform_device *pdev) phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev); } + if (prueth->is_switchmode_supported) { + ret = prueth_register_notifiers(prueth); + if (ret) + goto netdev_unregister; + + sprintf(prueth->switch_id, "%s", dev_name(dev)); + } + dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n", (!eth0_node || !eth1_node) ? 
"single" : "dual"); @@ -1134,6 +1404,8 @@ static void prueth_remove(struct platform_device *pdev) struct device_node *eth_node; int i; + prueth_unregister_notifiers(prueth); + for (i = 0; i < PRUETH_NUM_MACS; i++) { if (!prueth->registered_netdevs[i]) continue; @@ -1175,10 +1447,12 @@ static void prueth_remove(struct platform_device *pdev) static const struct prueth_pdata am654_icssg_pdata = { .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, .quirk_10m_link_issue = 1, + .switch_mode = 1, }; static const struct prueth_pdata am64x_icssg_pdata = { .fdqring_mode = K3_RINGACC_RING_MODE_RING, + .switch_mode = 1, }; static const struct of_device_id prueth_dt_match[] = { diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h index a78c5eb75fb8..f678d656a3ed 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h @@ -186,6 +186,9 @@ struct prueth_emac { struct pruss_mem_region dram; + bool offload_fwd_mark; + int port_vlan; + struct delayed_work stats_work; u64 stats[ICSSG_NUM_STATS]; @@ -198,10 +201,12 @@ struct prueth_emac { * struct prueth_pdata - PRUeth platform data * @fdqring_mode: Free desc queue mode * @quirk_10m_link_issue: 10M link detect errata + * @switch_mode: switch firmware support */ struct prueth_pdata { enum k3_ring_mode fdqring_mode; u32 quirk_10m_link_issue:1; + u32 switch_mode:1; }; struct icssg_firmwares { @@ -232,6 +237,16 @@ struct icssg_firmwares { * @emacs_initialized: num of EMACs/ext ports that are up/running * @iep0: pointer to IEP0 device * @iep1: pointer to IEP1 device + * @vlan_tbl: VLAN-FID table pointer + * @hw_bridge_dev: pointer to HW bridge net device + * @br_members: bitmask of bridge member ports + * @prueth_netdevice_nb: netdevice notifier block + * @prueth_switchdev_nb: switchdev notifier block + * @prueth_switchdev_bl_nb: switchdev blocking notifier block + * @is_switch_mode: flag to indicate if device is in Switch mode + * @is_switchmode_supported: indicates platform support for switch mode + * @switch_id: ID for mapping switch ports to bridge + * @default_vlan: Default VLAN for host */ struct prueth { struct device *dev; @@ -256,6 +271,17 @@ struct prueth { int emacs_initialized; struct icss_iep *iep0; struct icss_iep *iep1; + struct prueth_vlan_tbl *vlan_tbl; + + struct net_device *hw_bridge_dev; + u8 br_members; + struct notifier_block prueth_netdevice_nb; + struct notifier_block prueth_switchdev_nb; + struct notifier_block prueth_switchdev_bl_nb; + bool is_switch_mode; + bool is_switchmode_supported; + unsigned char switch_id[MAX_PHYS_ITEM_ID_LEN]; + int default_vlan; }; struct emac_tx_ts_response { @@ -303,8 +329,8 @@ void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr); void icssg_config_ipg(struct prueth_emac *emac); int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice); -int emac_set_port_state(struct prueth_emac *emac, - enum icssg_port_state_cmd state); +int icssg_set_port_state(struct prueth_emac *emac, + enum icssg_port_state_cmd state); void icssg_config_set_speed(struct prueth_emac *emac); void icssg_config_half_duplex(struct prueth_emac *emac); @@ -313,10 +339,20 @@ int icssg_queue_pop(struct prueth *prueth, u8 queue); void icssg_queue_push(struct prueth *prueth, int queue, u16 addr); u32 icssg_queue_level(struct prueth *prueth, int queue); +int icssg_send_fdb_msg(struct prueth_emac *emac, struct mgmt_cmd *cmd, + struct mgmt_cmd_rsp *rsp); +int icssg_fdb_add_del(struct prueth_emac *emac, const 
unsigned char *addr, + u8 vid, u8 fid_c2, bool add); +int icssg_fdb_lookup(struct prueth_emac *emac, const unsigned char *addr, + u8 vid); +void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask, + u8 untag_mask, bool add); +u16 icssg_get_pvid(struct prueth_emac *emac); +void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port); #define prueth_napi_to_tx_chn(pnapi) \ container_of(pnapi, struct prueth_tx_chn, napi_tx) -void emac_stats_work_handler(struct work_struct *work); +void icssg_stats_work_handler(struct work_struct *work); void emac_update_hardware_stats(struct prueth_emac *emac); int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name); @@ -341,11 +377,11 @@ int prueth_dma_rx_push(struct prueth_emac *emac, struct prueth_rx_chn *rx_chn); void emac_rx_timestamp(struct prueth_emac *emac, struct sk_buff *skb, u32 *psdata); -enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev); +enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev); irqreturn_t prueth_rx_irq(int irq, void *dev_id); void prueth_emac_stop(struct prueth_emac *emac); void prueth_cleanup_tx_ts(struct prueth_emac *emac); -int emac_napi_rx_poll(struct napi_struct *napi_rx, int budget); +int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget); int prueth_prepare_rx_chan(struct prueth_emac *emac, struct prueth_rx_chn *chn, int buf_size); @@ -353,12 +389,12 @@ void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num, bool free_skb); void prueth_reset_rx_chan(struct prueth_rx_chn *chn, int num_flows, bool disable); -void emac_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue); -int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd); -void emac_ndo_get_stats64(struct net_device *ndev, - struct rtnl_link_stats64 *stats); -int emac_ndo_get_phys_port_name(struct net_device *ndev, char *name, - size_t len); +void icssg_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue); +int icssg_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd); +void icssg_ndo_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *stats); +int icssg_ndo_get_phys_port_name(struct net_device *ndev, char *name, + size_t len); int prueth_node_port(struct device_node *eth_node); int prueth_node_mac(struct device_node *eth_node); void prueth_netdev_exit(struct prueth *prueth, diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c index 7b3304bbd7fc..e180c1166170 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c @@ -722,14 +722,14 @@ static void emac_ndo_set_rx_mode_sr1(struct net_device *ndev) static const struct net_device_ops emac_netdev_ops = { .ndo_open = emac_ndo_open, .ndo_stop = emac_ndo_stop, - .ndo_start_xmit = emac_ndo_start_xmit, + .ndo_start_xmit = icssg_ndo_start_xmit, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_tx_timeout = emac_ndo_tx_timeout, + .ndo_tx_timeout = icssg_ndo_tx_timeout, .ndo_set_rx_mode = emac_ndo_set_rx_mode_sr1, - .ndo_eth_ioctl = emac_ndo_ioctl, - .ndo_get_stats64 = emac_ndo_get_stats64, - .ndo_get_phys_port_name = emac_ndo_get_phys_port_name, + .ndo_eth_ioctl = icssg_ndo_ioctl, + .ndo_get_stats64 = icssg_ndo_get_stats64, + .ndo_get_phys_port_name = icssg_ndo_get_phys_port_name, }; static int prueth_netdev_init(struct prueth *prueth, @@ -767,7 +767,7 @@ static int prueth_netdev_init(struct prueth *prueth, goto 
free_ndev; } - INIT_DELAYED_WORK(&emac->stats_work, emac_stats_work_handler); + INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler); ret = pruss_request_mem_region(prueth->pruss, port == PRUETH_PORT_MII0 ? @@ -854,7 +854,7 @@ static int prueth_netdev_init(struct prueth *prueth, ndev->hw_features = NETIF_F_SG; ndev->features = ndev->hw_features; - netif_napi_add(ndev, &emac->napi_rx, emac_napi_rx_poll); + netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll); prueth->emac[mac] = emac; return 0; @@ -1011,16 +1011,44 @@ static int prueth_probe(struct platform_device *pdev) dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa, prueth->msmcram.va, prueth->msmcram.size); + prueth->iep0 = icss_iep_get_idx(np, 0); + if (IS_ERR(prueth->iep0)) { + ret = dev_err_probe(dev, PTR_ERR(prueth->iep0), + "iep0 get failed\n"); + goto free_pool; + } + + prueth->iep1 = icss_iep_get_idx(np, 1); + if (IS_ERR(prueth->iep1)) { + ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), + "iep1 get failed\n"); + goto put_iep0; + } + + ret = icss_iep_init(prueth->iep0, NULL, NULL, 0); + if (ret) { + dev_err_probe(dev, ret, "failed to init iep0\n"); + goto put_iep; + } + + ret = icss_iep_init(prueth->iep1, NULL, NULL, 0); + if (ret) { + dev_err_probe(dev, ret, "failed to init iep1\n"); + goto exit_iep0; + } + if (eth0_node) { ret = prueth_netdev_init(prueth, eth0_node); if (ret) { dev_err_probe(dev, ret, "netdev init %s failed\n", eth0_node->name); - goto free_pool; + goto exit_iep; } if (of_find_property(eth0_node, "ti,half-duplex-capable", NULL)) prueth->emac[PRUETH_MAC0]->half_duplex = 1; + + prueth->emac[PRUETH_MAC0]->iep = prueth->iep0; } if (eth1_node) { @@ -1033,6 +1061,8 @@ static int prueth_probe(struct platform_device *pdev) if (of_find_property(eth1_node, "ti,half-duplex-capable", NULL)) prueth->emac[PRUETH_MAC1]->half_duplex = 1; + + prueth->emac[PRUETH_MAC1]->iep = prueth->iep1; } /* register the network devices */ @@ -1091,6 +1121,19 @@ netdev_exit: prueth_netdev_exit(prueth, eth_node); } +exit_iep: + icss_iep_exit(prueth->iep1); +exit_iep0: + icss_iep_exit(prueth->iep0); + +put_iep: + icss_iep_put(prueth->iep1); + +put_iep0: + icss_iep_put(prueth->iep0); + prueth->iep0 = NULL; + prueth->iep1 = NULL; + free_pool: gen_pool_free(prueth->sram_pool, (unsigned long)prueth->msmcram.va, msmc_ram_size); @@ -1138,6 +1181,12 @@ static void prueth_remove(struct platform_device *pdev) prueth_netdev_exit(prueth, eth_node); } + icss_iep_exit(prueth->iep1); + icss_iep_exit(prueth->iep0); + + icss_iep_put(prueth->iep1); + icss_iep_put(prueth->iep0); + gen_pool_free(prueth->sram_pool, (unsigned long)prueth->msmcram.va, MSMC_RAM_SIZE_SR1); diff --git a/drivers/net/ethernet/ti/icssg/icssg_queues.c b/drivers/net/ethernet/ti/icssg/icssg_queues.c index 3c34f61ad40b..e5052d9e7807 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_queues.c +++ b/drivers/net/ethernet/ti/icssg/icssg_queues.c @@ -28,6 +28,7 @@ int icssg_queue_pop(struct prueth *prueth, u8 queue) return val; } +EXPORT_SYMBOL_GPL(icssg_queue_pop); void icssg_queue_push(struct prueth *prueth, int queue, u16 addr) { @@ -36,6 +37,7 @@ void icssg_queue_push(struct prueth *prueth, int queue, u16 addr) regmap_write(prueth->miig_rt, ICSSG_QUEUE_OFFSET + 4 * queue, addr); } +EXPORT_SYMBOL_GPL(icssg_queue_push); u32 icssg_queue_level(struct prueth *prueth, int queue) { diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c index 3dbadddd7e35..2fb150c13078 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_stats.c 
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c @@ -42,7 +42,7 @@ void emac_update_hardware_stats(struct prueth_emac *emac) } } -void emac_stats_work_handler(struct work_struct *work) +void icssg_stats_work_handler(struct work_struct *work) { struct prueth_emac *emac = container_of(work, struct prueth_emac, stats_work.work); @@ -51,6 +51,7 @@ void emac_stats_work_handler(struct work_struct *work) queue_delayed_work(system_long_wq, &emac->stats_work, msecs_to_jiffies((STATS_TIME_LIMIT_1G_MS * 1000) / emac->speed)); } +EXPORT_SYMBOL_GPL(icssg_stats_work_handler); int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name) { diff --git a/drivers/net/ethernet/ti/icssg/icssg_switchdev.c b/drivers/net/ethernet/ti/icssg/icssg_switchdev.c new file mode 100644 index 000000000000..67e2927e176d --- /dev/null +++ b/drivers/net/ethernet/ti/icssg/icssg_switchdev.c @@ -0,0 +1,477 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Texas Instruments K3 ICSSG Ethernet Switchdev Driver + * + * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com/ + * + */ + +#include <linux/etherdevice.h> +#include <linux/if_bridge.h> +#include <linux/netdevice.h> +#include <linux/workqueue.h> +#include <net/switchdev.h> + +#include "icssg_prueth.h" +#include "icssg_switchdev.h" +#include "icssg_mii_rt.h" + +struct prueth_switchdev_event_work { + struct work_struct work; + struct switchdev_notifier_fdb_info fdb_info; + struct prueth_emac *emac; + unsigned long event; +}; + +static int prueth_switchdev_stp_state_set(struct prueth_emac *emac, + u8 state) +{ + enum icssg_port_state_cmd emac_state; + int ret = 0; + + switch (state) { + case BR_STATE_FORWARDING: + emac_state = ICSSG_EMAC_PORT_FORWARD; + break; + case BR_STATE_DISABLED: + emac_state = ICSSG_EMAC_PORT_DISABLE; + break; + case BR_STATE_LISTENING: + case BR_STATE_BLOCKING: + emac_state = ICSSG_EMAC_PORT_BLOCK; + break; + default: + return -EOPNOTSUPP; + } + + icssg_set_port_state(emac, emac_state); + netdev_dbg(emac->ndev, "STP state: %u\n", emac_state); + + return ret; +} + +static int prueth_switchdev_attr_br_flags_set(struct prueth_emac *emac, + struct net_device *orig_dev, + struct switchdev_brport_flags brport_flags) +{ + enum icssg_port_state_cmd emac_state; + + if (brport_flags.mask & BR_MCAST_FLOOD) + emac_state = ICSSG_EMAC_PORT_MC_FLOODING_ENABLE; + else + emac_state = ICSSG_EMAC_PORT_MC_FLOODING_DISABLE; + + netdev_dbg(emac->ndev, "BR_MCAST_FLOOD: %d port %u\n", + emac_state, emac->port_id); + + icssg_set_port_state(emac, emac_state); + + return 0; +} + +static int prueth_switchdev_attr_br_flags_pre_set(struct net_device *netdev, + struct switchdev_brport_flags brport_flags) +{ + if (brport_flags.mask & ~(BR_LEARNING | BR_MCAST_FLOOD)) + return -EINVAL; + + return 0; +} + +static int prueth_switchdev_attr_set(struct net_device *ndev, const void *ctx, + const struct switchdev_attr *attr, + struct netlink_ext_ack *extack) +{ + struct prueth_emac *emac = netdev_priv(ndev); + int ret; + + netdev_dbg(ndev, "attr: id %u port: %u\n", attr->id, emac->port_id); + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: + ret = prueth_switchdev_attr_br_flags_pre_set(ndev, + attr->u.brport_flags); + break; + case SWITCHDEV_ATTR_ID_PORT_STP_STATE: + ret = prueth_switchdev_stp_state_set(emac, + attr->u.stp_state); + netdev_dbg(ndev, "stp state: %u\n", attr->u.stp_state); + break; + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + ret = prueth_switchdev_attr_br_flags_set(emac, attr->orig_dev, + attr->u.brport_flags); + break; + default: + 
ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +static void prueth_switchdev_fdb_offload_notify(struct net_device *ndev, + struct switchdev_notifier_fdb_info *rcv) +{ + struct switchdev_notifier_fdb_info info; + + memset(&info, 0, sizeof(info)); + info.addr = rcv->addr; + info.vid = rcv->vid; + info.offloaded = true; + call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, + ndev, &info.info, NULL); +} + +static void prueth_switchdev_event_work(struct work_struct *work) +{ + struct prueth_switchdev_event_work *switchdev_work = + container_of(work, struct prueth_switchdev_event_work, work); + struct prueth_emac *emac = switchdev_work->emac; + struct switchdev_notifier_fdb_info *fdb; + int port_id = emac->port_id; + int ret; + + rtnl_lock(); + switch (switchdev_work->event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + fdb = &switchdev_work->fdb_info; + + netdev_dbg(emac->ndev, "prueth_fdb_add: MACID = %pM vid = %u flags = %u %u -- port %d\n", + fdb->addr, fdb->vid, fdb->added_by_user, + fdb->offloaded, port_id); + + if (!fdb->added_by_user) + break; + if (!ether_addr_equal(emac->mac_addr, fdb->addr)) + break; + + ret = icssg_fdb_add_del(emac, fdb->addr, fdb->vid, + BIT(port_id), true); + if (!ret) + prueth_switchdev_fdb_offload_notify(emac->ndev, fdb); + break; + case SWITCHDEV_FDB_DEL_TO_DEVICE: + fdb = &switchdev_work->fdb_info; + + netdev_dbg(emac->ndev, "prueth_fdb_del: MACID = %pM vid = %u flags = %u %u -- port %d\n", + fdb->addr, fdb->vid, fdb->added_by_user, + fdb->offloaded, port_id); + + if (!fdb->added_by_user) + break; + if (!ether_addr_equal(emac->mac_addr, fdb->addr)) + break; + icssg_fdb_add_del(emac, fdb->addr, fdb->vid, + BIT(port_id), false); + break; + default: + break; + } + rtnl_unlock(); + + kfree(switchdev_work->fdb_info.addr); + kfree(switchdev_work); + dev_put(emac->ndev); +} + +static int prueth_switchdev_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *ndev = switchdev_notifier_info_to_dev(ptr); + struct prueth_switchdev_event_work *switchdev_work; + struct switchdev_notifier_fdb_info *fdb_info = ptr; + struct prueth_emac *emac = netdev_priv(ndev); + int err; + + if (!prueth_dev_check(ndev)) + return NOTIFY_DONE; + + if (event == SWITCHDEV_PORT_ATTR_SET) { + err = switchdev_handle_port_attr_set(ndev, ptr, + prueth_dev_check, + prueth_switchdev_attr_set); + return notifier_from_errno(err); + } + + switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); + if (WARN_ON(!switchdev_work)) + return NOTIFY_BAD; + + INIT_WORK(&switchdev_work->work, prueth_switchdev_event_work); + switchdev_work->emac = emac; + switchdev_work->event = event; + + switch (event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + case SWITCHDEV_FDB_DEL_TO_DEVICE: + memcpy(&switchdev_work->fdb_info, ptr, + sizeof(switchdev_work->fdb_info)); + switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); + if (!switchdev_work->fdb_info.addr) + goto err_addr_alloc; + ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, + fdb_info->addr); + dev_hold(ndev); + break; + default: + kfree(switchdev_work); + return NOTIFY_DONE; + } + + queue_work(system_long_wq, &switchdev_work->work); + + return NOTIFY_DONE; + +err_addr_alloc: + kfree(switchdev_work); + return NOTIFY_BAD; +} + +static int prueth_switchdev_vlan_add(struct prueth_emac *emac, bool untag, bool pvid, + u8 vid, struct net_device *orig_dev) +{ + bool cpu_port = netif_is_bridge_master(orig_dev); + int untag_mask = 0; + int port_mask; + int ret = 0; + + if (cpu_port) + port_mask = BIT(PRUETH_PORT_HOST); + 
else + port_mask = BIT(emac->port_id); + + if (untag) + untag_mask = port_mask; + + icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true); + + netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X PVID %d\n", + vid, port_mask, untag_mask, pvid); + + if (!pvid) + return ret; + + icssg_set_pvid(emac->prueth, vid, emac->port_id); + + return ret; +} + +static int prueth_switchdev_vlan_del(struct prueth_emac *emac, u16 vid, + struct net_device *orig_dev) +{ + bool cpu_port = netif_is_bridge_master(orig_dev); + int port_mask; + int ret = 0; + + if (cpu_port) + port_mask = BIT(PRUETH_PORT_HOST); + else + port_mask = BIT(emac->port_id); + + icssg_vtbl_modify(emac, vid, port_mask, 0, false); + + if (cpu_port) + icssg_fdb_add_del(emac, emac->mac_addr, vid, + BIT(PRUETH_PORT_HOST), false); + + if (vid == icssg_get_pvid(emac)) + icssg_set_pvid(emac->prueth, 0, emac->port_id); + + netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X\n", + vid, port_mask); + + return ret; +} + +static int prueth_switchdev_vlans_add(struct prueth_emac *emac, + const struct switchdev_obj_port_vlan *vlan) +{ + bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + struct net_device *orig_dev = vlan->obj.orig_dev; + bool cpu_port = netif_is_bridge_master(orig_dev); + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + + netdev_dbg(emac->ndev, "VID add vid:%u flags:%X\n", + vlan->vid, vlan->flags); + + if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY)) + return 0; + + if (vlan->vid > 0xff) + return 0; + + return prueth_switchdev_vlan_add(emac, untag, pvid, vlan->vid, + orig_dev); +} + +static int prueth_switchdev_vlans_del(struct prueth_emac *emac, + const struct switchdev_obj_port_vlan *vlan) +{ + if (vlan->vid > 0xff) + return 0; + + return prueth_switchdev_vlan_del(emac, vlan->vid, + vlan->obj.orig_dev); +} + +static int prueth_switchdev_mdb_add(struct prueth_emac *emac, + struct switchdev_obj_port_mdb *mdb) +{ + struct net_device *orig_dev = mdb->obj.orig_dev; + u8 port_mask, fid_c2; + bool cpu_port; + int err; + + cpu_port = netif_is_bridge_master(orig_dev); + + if (cpu_port) + port_mask = BIT(PRUETH_PORT_HOST); + else + port_mask = BIT(emac->port_id); + + fid_c2 = icssg_fdb_lookup(emac, mdb->addr, mdb->vid); + + err = icssg_fdb_add_del(emac, mdb->addr, mdb->vid, fid_c2 | port_mask, true); + netdev_dbg(emac->ndev, "MDB add vid %u:%pM ports: %X\n", + mdb->vid, mdb->addr, port_mask); + + return err; +} + +static int prueth_switchdev_mdb_del(struct prueth_emac *emac, + struct switchdev_obj_port_mdb *mdb) +{ + struct net_device *orig_dev = mdb->obj.orig_dev; + int del_mask, ret, fid_c2; + bool cpu_port; + + cpu_port = netif_is_bridge_master(orig_dev); + + if (cpu_port) + del_mask = BIT(PRUETH_PORT_HOST); + else + del_mask = BIT(emac->port_id); + + fid_c2 = icssg_fdb_lookup(emac, mdb->addr, mdb->vid); + + if (fid_c2 & ~del_mask) + ret = icssg_fdb_add_del(emac, mdb->addr, mdb->vid, fid_c2 & ~del_mask, true); + else + ret = icssg_fdb_add_del(emac, mdb->addr, mdb->vid, 0, false); + + netdev_dbg(emac->ndev, "MDB del vid %u:%pM ports: %X\n", + mdb->vid, mdb->addr, del_mask); + + return ret; +} + +static int prueth_switchdev_obj_add(struct net_device *ndev, const void *ctx, + const struct switchdev_obj *obj, + struct netlink_ext_ack *extack) +{ + struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); + struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj); + struct prueth_emac *emac = netdev_priv(ndev); + int err = 0; + + netdev_dbg(ndev, "obj_add: id %u port: %u\n", obj->id, 
emac->port_id); + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = prueth_switchdev_vlans_add(emac, vlan); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + case SWITCHDEV_OBJ_ID_HOST_MDB: + err = prueth_switchdev_mdb_add(emac, mdb); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int prueth_switchdev_obj_del(struct net_device *ndev, const void *ctx, + const struct switchdev_obj *obj) +{ + struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); + struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj); + struct prueth_emac *emac = netdev_priv(ndev); + int err = 0; + + netdev_dbg(ndev, "obj_del: id %u port: %u\n", obj->id, emac->port_id); + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = prueth_switchdev_vlans_del(emac, vlan); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + case SWITCHDEV_OBJ_ID_HOST_MDB: + err = prueth_switchdev_mdb_del(emac, mdb); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int prueth_switchdev_blocking_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + int err; + + switch (event) { + case SWITCHDEV_PORT_OBJ_ADD: + err = switchdev_handle_port_obj_add(dev, ptr, + prueth_dev_check, + prueth_switchdev_obj_add); + return notifier_from_errno(err); + case SWITCHDEV_PORT_OBJ_DEL: + err = switchdev_handle_port_obj_del(dev, ptr, + prueth_dev_check, + prueth_switchdev_obj_del); + return notifier_from_errno(err); + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + prueth_dev_check, + prueth_switchdev_attr_set); + return notifier_from_errno(err); + default: + break; + } + + return NOTIFY_DONE; +} + +int prueth_switchdev_register_notifiers(struct prueth *prueth) +{ + int ret = 0; + + prueth->prueth_switchdev_nb.notifier_call = &prueth_switchdev_event; + ret = register_switchdev_notifier(&prueth->prueth_switchdev_nb); + if (ret) { + dev_err(prueth->dev, "register switchdev notifier fail ret:%d\n", + ret); + return ret; + } + + prueth->prueth_switchdev_bl_nb.notifier_call = &prueth_switchdev_blocking_event; + ret = register_switchdev_blocking_notifier(&prueth->prueth_switchdev_bl_nb); + if (ret) { + dev_err(prueth->dev, "register switchdev blocking notifier ret:%d\n", + ret); + unregister_switchdev_notifier(&prueth->prueth_switchdev_nb); + } + + return ret; +} + +void prueth_switchdev_unregister_notifiers(struct prueth *prueth) +{ + unregister_switchdev_blocking_notifier(&prueth->prueth_switchdev_bl_nb); + unregister_switchdev_notifier(&prueth->prueth_switchdev_nb); +} diff --git a/drivers/net/ethernet/ti/icssg/icssg_switchdev.h b/drivers/net/ethernet/ti/icssg/icssg_switchdev.h new file mode 100644 index 000000000000..0e64e7760a00 --- /dev/null +++ b/drivers/net/ethernet/ti/icssg/icssg_switchdev.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com/ + */ +#ifndef __NET_TI_ICSSG_SWITCHDEV_H +#define __NET_TI_ICSSG_SWITCHDEV_H + +#include "icssg_prueth.h" + +int prueth_switchdev_register_notifiers(struct prueth *prueth); +void prueth_switchdev_unregister_notifiers(struct prueth *prueth); +bool prueth_dev_check(const struct net_device *ndev); + +#endif /* __NET_TI_ICSSG_SWITCHDEV_H */ diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 02cb6474f6dc..d286709ca3b9 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ 
b/drivers/net/ethernet/ti/netcp_ethss.c @@ -1999,7 +1999,7 @@ static int keystone_set_link_ksettings(struct net_device *ndev, #if IS_ENABLED(CONFIG_TI_CPTS) static int keystone_get_ts_info(struct net_device *ndev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct netcp_intf *netcp = netdev_priv(ndev); struct gbe_intf *gbe_intf; @@ -2027,7 +2027,7 @@ static int keystone_get_ts_info(struct net_device *ndev, } #else static int keystone_get_ts_info(struct net_device *ndev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index cc3bec42ed8e..abe5921dde02 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -43,6 +43,11 @@ static const struct wx_stats wx_gstrings_stats[] = { WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), }; +static const struct wx_stats wx_gstrings_fdir_stats[] = { + WX_STAT("fdir_match", stats.fdirmatch), + WX_STAT("fdir_miss", stats.fdirmiss), +}; + /* drivers allocates num_tx_queues and num_rx_queues symmetrically so * we set the num_rx_queues to evaluate to num_tx_queues. This is * used because we do not have a good way to get the max number of @@ -55,13 +60,17 @@ static const struct wx_stats wx_gstrings_stats[] = { (WX_NUM_TX_QUEUES + WX_NUM_RX_QUEUES) * \ (sizeof(struct wx_queue_stats) / sizeof(u64))) #define WX_GLOBAL_STATS_LEN ARRAY_SIZE(wx_gstrings_stats) +#define WX_FDIR_STATS_LEN ARRAY_SIZE(wx_gstrings_fdir_stats) #define WX_STATS_LEN (WX_GLOBAL_STATS_LEN + WX_QUEUE_STATS_LEN) int wx_get_sset_count(struct net_device *netdev, int sset) { + struct wx *wx = netdev_priv(netdev); + switch (sset) { case ETH_SS_STATS: - return WX_STATS_LEN; + return (wx->mac.type == wx_mac_sp) ? + WX_STATS_LEN + WX_FDIR_STATS_LEN : WX_STATS_LEN; default: return -EOPNOTSUPP; } @@ -70,6 +79,7 @@ EXPORT_SYMBOL(wx_get_sset_count); void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { + struct wx *wx = netdev_priv(netdev); u8 *p = data; int i; @@ -77,6 +87,10 @@ void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data) case ETH_SS_STATS: for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) ethtool_puts(&p, wx_gstrings_stats[i].stat_string); + if (wx->mac.type == wx_mac_sp) { + for (i = 0; i < WX_FDIR_STATS_LEN; i++) + ethtool_puts(&p, wx_gstrings_fdir_stats[i].stat_string); + } for (i = 0; i < netdev->num_tx_queues; i++) { ethtool_sprintf(&p, "tx_queue_%u_packets", i); ethtool_sprintf(&p, "tx_queue_%u_bytes", i); @@ -96,7 +110,7 @@ void wx_get_ethtool_stats(struct net_device *netdev, struct wx *wx = netdev_priv(netdev); struct wx_ring *ring; unsigned int start; - int i, j; + int i, j, k; char *p; wx_update_stats(wx); @@ -107,6 +121,13 @@ void wx_get_ethtool_stats(struct net_device *netdev, sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } + if (wx->mac.type == wx_mac_sp) { + for (k = 0; k < WX_FDIR_STATS_LEN; k++) { + p = (char *)wx + wx_gstrings_fdir_stats[k].stat_offset; + data[i++] = *(u64 *)p; + } + } + for (j = 0; j < netdev->num_tx_queues; j++) { ring = wx->tx_ring[j]; if (!ring) { @@ -172,17 +193,21 @@ EXPORT_SYMBOL(wx_get_pause_stats); void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { + unsigned int stats_len = WX_STATS_LEN; struct wx *wx = netdev_priv(netdev); + if (wx->mac.type == wx_mac_sp) + stats_len += WX_FDIR_STATS_LEN; + strscpy(info->driver, wx->driver_name, sizeof(info->driver)); strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version)); strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info)); if (wx->num_tx_queues <= WX_NUM_TX_QUEUES) { - info->n_stats = WX_STATS_LEN - + info->n_stats = stats_len - (WX_NUM_TX_QUEUES - wx->num_tx_queues) * (sizeof(struct wx_queue_stats) / sizeof(u64)) * 2; } else { - info->n_stats = WX_STATS_LEN; + info->n_stats = stats_len; } } EXPORT_SYMBOL(wx_get_drvinfo); @@ -383,6 +408,9 @@ void wx_get_channels(struct net_device *dev, /* record RSS queues */ ch->combined_count = wx->ring_feature[RING_F_RSS].indices; + + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) + ch->combined_count = wx->ring_feature[RING_F_FDIR].indices; } EXPORT_SYMBOL(wx_get_channels); @@ -400,6 +428,9 @@ int wx_set_channels(struct net_device *dev, if (count > wx_max_channels(wx)) return -EINVAL; + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) + wx->ring_feature[RING_F_FDIR].limit = count; + wx->ring_feature[RING_F_RSS].limit = count; return 0; diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index d1b682ce9c6d..1bf9c38e4125 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -1147,8 +1147,15 @@ static void wx_enable_rx(struct wx *wx) static void wx_set_rxpba(struct wx *wx) { u32 rxpktsize, txpktsize, txpbthresh; + u32 pbsize = wx->mac.rx_pb_size; - rxpktsize = wx->mac.rx_pb_size << WX_RDB_PB_SZ_SHIFT; + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) { + if (test_bit(WX_FLAG_FDIR_HASH, wx->flags) || + test_bit(WX_FLAG_FDIR_PERFECT, wx->flags)) + pbsize -= 64; /* Default 64KB */ + } + + rxpktsize = pbsize << WX_RDB_PB_SZ_SHIFT; wr32(wx, WX_RDB_PB_SZ(0), rxpktsize); /* Only support an equally distributed Tx packet buffer strategy. */ @@ -1261,7 +1268,7 @@ static void wx_configure_port(struct wx *wx) * Stops the receive data path and waits for the HW to internally empty * the Rx security block **/ -static int wx_disable_sec_rx_path(struct wx *wx) +int wx_disable_sec_rx_path(struct wx *wx) { u32 secrx; @@ -1271,6 +1278,7 @@ static int wx_disable_sec_rx_path(struct wx *wx) return read_poll_timeout(rd32, secrx, secrx & WX_RSC_ST_RSEC_RDY, 1000, 40000, false, wx, WX_RSC_ST); } +EXPORT_SYMBOL(wx_disable_sec_rx_path); /** * wx_enable_sec_rx_path - Enables the receive data path @@ -1278,11 +1286,12 @@ static int wx_disable_sec_rx_path(struct wx *wx) * * Enables the receive data path. 
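 * (Note, not part of the patch: this helper and wx_disable_sec_rx_path() above
 *  lose their static qualifier and gain EXPORT_SYMBOL in these hunks, and the
 *  wx_hw.h hunk further down adds their prototypes, so that code outside libwx
 *  (presumably the txgbe flow-director support added elsewhere in this series)
 *  can quiesce and re-enable the Rx security path around reconfiguration.)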
**/ -static void wx_enable_sec_rx_path(struct wx *wx) +void wx_enable_sec_rx_path(struct wx *wx) { wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_RX_DIS, 0); WX_WRITE_FLUSH(wx); } +EXPORT_SYMBOL(wx_enable_sec_rx_path); static void wx_vlan_strip_control(struct wx *wx, bool enable) { @@ -1499,6 +1508,13 @@ static void wx_configure_tx_ring(struct wx *wx, txdctl |= ring->count / 128 << WX_PX_TR_CFG_TR_SIZE_SHIFT; txdctl |= 0x20 << WX_PX_TR_CFG_WTHRESH_SHIFT; + ring->atr_count = 0; + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags) && + test_bit(WX_FLAG_FDIR_HASH, wx->flags)) + ring->atr_sample_rate = wx->atr_sample_rate; + else + ring->atr_sample_rate = 0; + /* reinitialize tx_buffer_info */ memset(ring->tx_buffer_info, 0, sizeof(struct wx_tx_buffer) * ring->count); @@ -1732,7 +1748,9 @@ void wx_configure(struct wx *wx) wx_set_rx_mode(wx->netdev); wx_restore_vlan(wx); - wx_enable_sec_rx_path(wx); + + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) + wx->configure_fdir(wx); wx_configure_tx(wx); wx_configure_rx(wx); @@ -1959,6 +1977,7 @@ int wx_sw_init(struct wx *wx) } bitmap_zero(wx->state, WX_STATE_NBITS); + bitmap_zero(wx->flags, WX_PF_FLAGS_NBITS); wx->misc_irq_domain = false; return 0; @@ -2334,6 +2353,11 @@ void wx_update_stats(struct wx *wx) hwstats->b2ogprc += rd32(wx, WX_RDM_BMC2OS_CNT); hwstats->rdmdrop += rd32(wx, WX_RDM_DRP_PKT); + if (wx->mac.type == wx_mac_sp) { + hwstats->fdirmatch += rd32(wx, WX_RDB_FDIR_MATCH); + hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS); + } + for (i = 0; i < wx->mac.max_rx_queues; i++) hwstats->qmprc += rd32(wx, WX_PX_MPRC(i)); } diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h index 9e219fa717a2..11fb33349482 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h @@ -28,6 +28,8 @@ void wx_mac_set_default_filter(struct wx *wx, u8 *addr); void wx_flush_sw_mac_table(struct wx *wx); int wx_set_mac(struct net_device *netdev, void *p); void wx_disable_rx(struct wx *wx); +int wx_disable_sec_rx_path(struct wx *wx); +void wx_enable_sec_rx_path(struct wx *wx); void wx_set_rx_mode(struct net_device *netdev); int wx_change_mtu(struct net_device *netdev, int new_mtu); void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index 81bedc8ee8d4..1eecba984f3b 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -148,10 +148,11 @@ static struct wx_dec_ptype wx_ptype_lookup[256] = { [0xFD] = WX_PTT(IP, IPV6, IGMV, IPV6, SCTP, PAY4), }; -static struct wx_dec_ptype wx_decode_ptype(const u8 ptype) +struct wx_dec_ptype wx_decode_ptype(const u8 ptype) { return wx_ptype_lookup[ptype]; } +EXPORT_SYMBOL(wx_decode_ptype); /* wx_test_staterr - tests bits in Rx descriptor status and error fields */ static __le32 wx_test_staterr(union wx_rx_desc *rx_desc, @@ -1453,6 +1454,7 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb, struct wx_ring *tx_ring) { + struct wx *wx = netdev_priv(tx_ring->netdev); u16 count = TXD_USE_COUNT(skb_headlen(skb)); struct wx_tx_buffer *first; u8 hdr_len = 0, ptype; @@ -1498,6 +1500,10 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb, goto out_drop; else if (!tso) wx_tx_csum(tx_ring, first, ptype); + + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags) && tx_ring->atr_sample_rate) + wx->atr(tx_ring, first, ptype); + 
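/* Descriptive note, not part of the patch: wx->atr is the new per-driver callback
 * added to struct wx in the wx_type.h hunk further down. It is reached only when
 * the device is flow-director capable and the ring has a non-zero atr_sample_rate,
 * which wx_configure_tx_ring() above assigns only while WX_FLAG_FDIR_HASH (ATR
 * hash mode) is set; in perfect-filter (ntuple) mode the sample rate stays zero,
 * so this transmit path skips the hook entirely.
 */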
wx_tx_map(tx_ring, first, hdr_len); return NETDEV_TX_OK; @@ -1574,8 +1580,27 @@ static void wx_set_rss_queues(struct wx *wx) f = &wx->ring_feature[RING_F_RSS]; f->indices = f->limit; - wx->num_rx_queues = f->limit; - wx->num_tx_queues = f->limit; + if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))) + goto out; + + clear_bit(WX_FLAG_FDIR_HASH, wx->flags); + + /* Use Flow Director in addition to RSS to ensure the best + * distribution of flows across cores, even when an FDIR flow + * isn't matched. + */ + if (f->indices > 1) { + f = &wx->ring_feature[RING_F_FDIR]; + + f->indices = f->limit; + + if (!(test_bit(WX_FLAG_FDIR_PERFECT, wx->flags))) + set_bit(WX_FLAG_FDIR_HASH, wx->flags); + } + +out: + wx->num_rx_queues = f->indices; + wx->num_tx_queues = f->indices; } static void wx_set_num_queues(struct wx *wx) @@ -2684,6 +2709,7 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features) { netdev_features_t changed = netdev->features ^ features; struct wx *wx = netdev_priv(netdev); + bool need_reset = false; if (features & NETIF_F_RXHASH) { wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, @@ -2701,6 +2727,36 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features) else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER)) wx_set_rx_mode(netdev); + if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))) + return 0; + + /* Check if Flow Director n-tuple support was enabled or disabled. If + * the state changed, we need to reset. + */ + switch (features & NETIF_F_NTUPLE) { + case NETIF_F_NTUPLE: + /* turn off ATR, enable perfect filters and reset */ + if (!(test_and_set_bit(WX_FLAG_FDIR_PERFECT, wx->flags))) + need_reset = true; + + clear_bit(WX_FLAG_FDIR_HASH, wx->flags); + break; + default: + /* turn off perfect filters, enable ATR and reset */ + if (test_and_clear_bit(WX_FLAG_FDIR_PERFECT, wx->flags)) + need_reset = true; + + /* We cannot enable ATR if RSS is disabled */ + if (wx->ring_feature[RING_F_RSS].limit <= 1) + break; + + set_bit(WX_FLAG_FDIR_HASH, wx->flags); + break; + } + + if (need_reset) + wx->do_reset(netdev); + return 0; } EXPORT_SYMBOL(wx_set_features); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h index c41b29ea812f..fdeb0c315b75 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h @@ -7,6 +7,7 @@ #ifndef _WX_LIB_H_ #define _WX_LIB_H_ +struct wx_dec_ptype wx_decode_ptype(const u8 ptype); void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count); u16 wx_desc_unused(struct wx_ring *ring); netdev_tx_t wx_xmit_frame(struct sk_buff *skb, diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 0df7f5712b6f..1d57b047817b 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -157,6 +157,8 @@ #define WX_RDB_RA_CTL_RSS_IPV6_TCP BIT(21) #define WX_RDB_RA_CTL_RSS_IPV4_UDP BIT(22) #define WX_RDB_RA_CTL_RSS_IPV6_UDP BIT(23) +#define WX_RDB_FDIR_MATCH 0x19558 +#define WX_RDB_FDIR_MISS 0x1955C /******************************* PSR Registers *******************************/ /* psr control */ @@ -503,6 +505,34 @@ enum WX_MSCA_CMD_value { #define WX_PTYPE_TYP_TCP 0x04 #define WX_PTYPE_TYP_SCTP 0x05 +/* Packet type non-ip values */ +enum wx_l2_ptypes { + WX_PTYPE_L2_ABORTED = (WX_PTYPE_PKT_MAC), + WX_PTYPE_L2_MAC = (WX_PTYPE_PKT_MAC | WX_PTYPE_TYP_MAC), + + WX_PTYPE_L2_IPV4_FRAG = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_IPFRAG), 
+ WX_PTYPE_L2_IPV4 = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_IP), + WX_PTYPE_L2_IPV4_UDP = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_UDP), + WX_PTYPE_L2_IPV4_TCP = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_TCP), + WX_PTYPE_L2_IPV4_SCTP = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_SCTP), + WX_PTYPE_L2_IPV6_FRAG = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 | + WX_PTYPE_TYP_IPFRAG), + WX_PTYPE_L2_IPV6 = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 | + WX_PTYPE_TYP_IP), + WX_PTYPE_L2_IPV6_UDP = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 | + WX_PTYPE_TYP_UDP), + WX_PTYPE_L2_IPV6_TCP = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 | + WX_PTYPE_TYP_TCP), + WX_PTYPE_L2_IPV6_SCTP = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 | + WX_PTYPE_TYP_SCTP), + + WX_PTYPE_L2_TUN4_MAC = (WX_PTYPE_TUN_IPV4 | WX_PTYPE_PKT_IGM), + WX_PTYPE_L2_TUN6_MAC = (WX_PTYPE_TUN_IPV6 | WX_PTYPE_PKT_IGM), +}; + +#define WX_PTYPE_PKT(_pt) ((_pt) & 0x30) +#define WX_PTYPE_TYPL4(_pt) ((_pt) & 0x07) + #define WX_RXD_PKTTYPE(_rxd) \ ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF) #define WX_RXD_IPV6EX(_rxd) \ @@ -552,6 +582,9 @@ enum wx_tx_flags { WX_TX_FLAGS_OUTER_IPV4 = 0x100, WX_TX_FLAGS_LINKSEC = 0x200, WX_TX_FLAGS_IPSEC = 0x400, + + /* software defined flags */ + WX_TX_FLAGS_SW_VLAN = 0x40, }; /* VLAN info */ @@ -900,7 +933,13 @@ struct wx_ring { */ u16 next_to_use; u16 next_to_clean; - u16 next_to_alloc; + union { + u16 next_to_alloc; + struct { + u8 atr_sample_rate; + u8 atr_count; + }; + }; struct wx_queue_stats stats; struct u64_stats_sync syncp; @@ -939,6 +978,7 @@ struct wx_ring_feature { enum wx_ring_f_enum { RING_F_NONE = 0, RING_F_RSS, + RING_F_FDIR, RING_F_ARRAY_SIZE /* must be last in enum set */ }; @@ -980,15 +1020,26 @@ struct wx_hw_stats { u64 crcerrs; u64 rlec; u64 qmprc; + u64 fdirmatch; + u64 fdirmiss; }; enum wx_state { WX_STATE_RESETTING, WX_STATE_NBITS, /* must be last */ }; + +enum wx_pf_flags { + WX_FLAG_FDIR_CAPABLE, + WX_FLAG_FDIR_HASH, + WX_FLAG_FDIR_PERFECT, + WX_PF_FLAGS_NBITS /* must be last */ +}; + struct wx { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; DECLARE_BITMAP(state, WX_STATE_NBITS); + DECLARE_BITMAP(flags, WX_PF_FLAGS_NBITS); void *priv; u8 __iomem *hw_addr; @@ -1078,6 +1129,9 @@ struct wx { u64 hw_csum_rx_error; u64 alloc_rx_buff_failed; + u32 atr_sample_rate; + void (*atr)(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype); + void (*configure_fdir)(struct wx *wx); void (*do_reset)(struct net_device *netdev); }; diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index 46a5a3e95202..e868f7ef4920 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -37,9 +37,9 @@ static int ngbe_set_wol(struct net_device *netdev, wx->wol = 0; if (wol->wolopts & WAKE_MAGIC) wx->wol = WX_PSR_WKUP_CTL_MAG; - netdev->wol_enabled = !!(wx->wol); + netdev->ethtool->wol_enabled = !!(wx->wol); wr32(wx, WX_PSR_WKUP_CTL, wx->wol); - device_set_wakeup_enable(&pdev->dev, netdev->wol_enabled); + device_set_wakeup_enable(&pdev->dev, netdev->ethtool->wol_enabled); return 0; } diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index af30ca0312b8..53aeae2f884b 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -652,7 +652,7 @@ static int ngbe_probe(struct pci_dev *pdev, if (wx->wol_hw_supported) wx->wol = NGBE_PSR_WKUP_CTL_MAG; - netdev->wol_enabled = !!(wx->wol); + netdev->ethtool->wol_enabled = !!(wx->wol); wr32(wx, 
NGBE_PSR_WKUP_CTL, wx->wol); device_set_wakeup_enable(&pdev->dev, wx->wol); diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile index 42718875277c..f74576fe7062 100644 --- a/drivers/net/ethernet/wangxun/txgbe/Makefile +++ b/drivers/net/ethernet/wangxun/txgbe/Makefile @@ -10,4 +10,5 @@ txgbe-objs := txgbe_main.o \ txgbe_hw.o \ txgbe_phy.o \ txgbe_irq.o \ + txgbe_fdir.o \ txgbe_ethtool.o diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index 31fde3fa7c6b..d98314b26c19 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -9,6 +9,7 @@ #include "../libwx/wx_type.h" #include "../libwx/wx_lib.h" #include "txgbe_type.h" +#include "txgbe_fdir.h" #include "txgbe_ethtool.h" static int txgbe_set_ringparam(struct net_device *netdev, @@ -79,6 +80,430 @@ static int txgbe_set_channels(struct net_device *dev, return txgbe_setup_tc(dev, netdev_get_num_tc(dev)); } +static int txgbe_get_ethtool_fdir_entry(struct txgbe *txgbe, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + union txgbe_atr_input *mask = &txgbe->fdir_mask; + struct txgbe_fdir_filter *rule = NULL; + struct hlist_node *node; + + /* report total rule count */ + cmd->data = (1024 << TXGBE_FDIR_PBALLOC_64K) - 2; + + hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list, + fdir_node) { + if (fsp->location <= rule->sw_idx) + break; + } + + if (!rule || fsp->location != rule->sw_idx) + return -EINVAL; + + /* set flow type field */ + switch (rule->filter.formatted.flow_type) { + case TXGBE_ATR_FLOW_TYPE_TCPV4: + fsp->flow_type = TCP_V4_FLOW; + break; + case TXGBE_ATR_FLOW_TYPE_UDPV4: + fsp->flow_type = UDP_V4_FLOW; + break; + case TXGBE_ATR_FLOW_TYPE_SCTPV4: + fsp->flow_type = SCTP_V4_FLOW; + break; + case TXGBE_ATR_FLOW_TYPE_IPV4: + fsp->flow_type = IP_USER_FLOW; + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + fsp->h_u.usr_ip4_spec.proto = 0; + fsp->m_u.usr_ip4_spec.proto = 0; + break; + default: + return -EINVAL; + } + + fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port; + fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port; + fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port; + fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port; + fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0]; + fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0]; + fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0]; + fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0]; + fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes; + fsp->m_ext.vlan_etype = mask->formatted.flex_bytes; + fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool); + fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool); + fsp->flow_type |= FLOW_EXT; + + /* record action */ + if (rule->action == TXGBE_RDB_FDIR_DROP_QUEUE) + fsp->ring_cookie = RX_CLS_FLOW_DISC; + else + fsp->ring_cookie = rule->action; + + return 0; +} + +static int txgbe_get_ethtool_fdir_all(struct txgbe *txgbe, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct txgbe_fdir_filter *rule; + struct hlist_node *node; + int cnt = 0; + + /* report total rule count */ + cmd->data = (1024 << TXGBE_FDIR_PBALLOC_64K) - 2; + + hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list, + fdir_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = rule->sw_idx; + cnt++; + } + + 
cmd->rule_cnt = cnt; + + return 0; +} + +static int txgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct wx *wx = netdev_priv(dev); + struct txgbe *txgbe = wx->priv; + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = wx->num_rx_queues; + ret = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = txgbe->fdir_filter_count; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = txgbe_get_ethtool_fdir_entry(txgbe, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = txgbe_get_ethtool_fdir_all(txgbe, cmd, (u32 *)rule_locs); + break; + default: + break; + } + + return ret; +} + +static int txgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp, + u8 *flow_type) +{ + switch (fsp->flow_type & ~FLOW_EXT) { + case TCP_V4_FLOW: + *flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4; + break; + case UDP_V4_FLOW: + *flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4; + break; + case SCTP_V4_FLOW: + *flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4; + break; + case IP_USER_FLOW: + switch (fsp->h_u.usr_ip4_spec.proto) { + case IPPROTO_TCP: + *flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4; + break; + case IPPROTO_UDP: + *flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4; + break; + case IPPROTO_SCTP: + *flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4; + break; + case 0: + if (!fsp->m_u.usr_ip4_spec.proto) { + *flow_type = TXGBE_ATR_FLOW_TYPE_IPV4; + break; + } + fallthrough; + default: + return -EINVAL; + } + break; + default: + return -EINVAL; + } + + return 0; +} + +static bool txgbe_match_ethtool_fdir_entry(struct txgbe *txgbe, + struct txgbe_fdir_filter *input) +{ + struct txgbe_fdir_filter *rule = NULL; + struct hlist_node *node2; + + hlist_for_each_entry_safe(rule, node2, &txgbe->fdir_filter_list, + fdir_node) { + if (rule->filter.formatted.bkt_hash == + input->filter.formatted.bkt_hash && + rule->action == input->action) { + wx_dbg(txgbe->wx, "FDIR entry already exist\n"); + return true; + } + } + return false; +} + +static int txgbe_update_ethtool_fdir_entry(struct txgbe *txgbe, + struct txgbe_fdir_filter *input, + u16 sw_idx) +{ + struct hlist_node *node = NULL, *parent = NULL; + struct txgbe_fdir_filter *rule; + struct wx *wx = txgbe->wx; + bool deleted = false; + int err; + + hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list, + fdir_node) { + /* hash found, or no matching entry */ + if (rule->sw_idx >= sw_idx) + break; + parent = node; + } + + /* if there is an old rule occupying our place remove it */ + if (rule && rule->sw_idx == sw_idx) { + /* hardware filters are only configured when interface is up, + * and we should not issue filter commands while the interface + * is down + */ + if (netif_running(wx->netdev) && + (!input || rule->filter.formatted.bkt_hash != + input->filter.formatted.bkt_hash)) { + err = txgbe_fdir_erase_perfect_filter(wx, + &rule->filter, + sw_idx); + if (err) + return -EINVAL; + } + + hlist_del(&rule->fdir_node); + kfree(rule); + txgbe->fdir_filter_count--; + deleted = true; + } + + /* If we weren't given an input, then this was a request to delete a + * filter. We should return -EINVAL if the filter wasn't found, but + * return 0 if the rule was successfully deleted. + */ + if (!input) + return deleted ? 
0 : -EINVAL; + + /* initialize node and set software index */ + INIT_HLIST_NODE(&input->fdir_node); + + /* add filter to the list */ + if (parent) + hlist_add_behind(&input->fdir_node, parent); + else + hlist_add_head(&input->fdir_node, + &txgbe->fdir_filter_list); + + /* update counts */ + txgbe->fdir_filter_count++; + + return 0; +} + +static int txgbe_add_ethtool_fdir_entry(struct txgbe *txgbe, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct txgbe_fdir_filter *input; + union txgbe_atr_input mask; + struct wx *wx = txgbe->wx; + int err = -EINVAL; + u16 ptype = 0; + u8 queue; + + if (!(test_bit(WX_FLAG_FDIR_PERFECT, wx->flags))) + return -EOPNOTSUPP; + + /* ring_cookie is a masked into a set of queues and txgbe pools or + * we use drop index + */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { + queue = TXGBE_RDB_FDIR_DROP_QUEUE; + } else { + u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); + + if (ring >= wx->num_rx_queues) + return -EINVAL; + + /* Map the ring onto the absolute queue index */ + queue = wx->rx_ring[ring]->reg_idx; + } + + /* Don't allow indexes to exist outside of available space */ + if (fsp->location >= ((1024 << TXGBE_FDIR_PBALLOC_64K) - 2)) { + wx_err(wx, "Location out of range\n"); + return -EINVAL; + } + + input = kzalloc(sizeof(*input), GFP_ATOMIC); + if (!input) + return -ENOMEM; + + memset(&mask, 0, sizeof(union txgbe_atr_input)); + + /* set SW index */ + input->sw_idx = fsp->location; + + /* record flow type */ + if (txgbe_flowspec_to_flow_type(fsp, + &input->filter.formatted.flow_type)) { + wx_err(wx, "Unrecognized flow type\n"); + goto err_out; + } + + mask.formatted.flow_type = TXGBE_ATR_L4TYPE_IPV6_MASK | + TXGBE_ATR_L4TYPE_MASK; + + if (input->filter.formatted.flow_type == TXGBE_ATR_FLOW_TYPE_IPV4) + mask.formatted.flow_type &= TXGBE_ATR_L4TYPE_IPV6_MASK; + + /* Copy input into formatted structures */ + input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; + mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; + input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; + mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; + input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc; + mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc; + input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst; + mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst; + + if (fsp->flow_type & FLOW_EXT) { + input->filter.formatted.vm_pool = + (unsigned char)ntohl(fsp->h_ext.data[1]); + mask.formatted.vm_pool = + (unsigned char)ntohl(fsp->m_ext.data[1]); + input->filter.formatted.flex_bytes = + fsp->h_ext.vlan_etype; + mask.formatted.flex_bytes = fsp->m_ext.vlan_etype; + } + + switch (input->filter.formatted.flow_type) { + case TXGBE_ATR_FLOW_TYPE_TCPV4: + ptype = WX_PTYPE_L2_IPV4_TCP; + break; + case TXGBE_ATR_FLOW_TYPE_UDPV4: + ptype = WX_PTYPE_L2_IPV4_UDP; + break; + case TXGBE_ATR_FLOW_TYPE_SCTPV4: + ptype = WX_PTYPE_L2_IPV4_SCTP; + break; + case TXGBE_ATR_FLOW_TYPE_IPV4: + ptype = WX_PTYPE_L2_IPV4; + break; + default: + break; + } + + input->filter.formatted.vlan_id = htons(ptype); + if (mask.formatted.flow_type & TXGBE_ATR_L4TYPE_MASK) + mask.formatted.vlan_id = htons(0xFFFF); + else + mask.formatted.vlan_id = htons(0xFFF8); + + /* determine if we need to drop or route the packet */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) + input->action = TXGBE_RDB_FDIR_DROP_QUEUE; + else + input->action = fsp->ring_cookie; + + spin_lock(&txgbe->fdir_perfect_lock); + 
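/* Descriptive note, not part of the patch: the section below runs under
 * fdir_perfect_lock because the rule list and the single per-port input mask are
 * shared state. The first rule to be added programs the hardware input mask via
 * txgbe_fdir_set_input_mask(); any later rule must carry an identical mask, and
 * the memcmp() branch below rejects it otherwise, which is why the error text
 * tells the user to delete all rules before changing the mask.
 */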
+ if (hlist_empty(&txgbe->fdir_filter_list)) { + /* save mask and program input mask into HW */ + memcpy(&txgbe->fdir_mask, &mask, sizeof(mask)); + err = txgbe_fdir_set_input_mask(wx, &mask); + if (err) + goto err_unlock; + } else if (memcmp(&txgbe->fdir_mask, &mask, sizeof(mask))) { + wx_err(wx, "Hardware only supports one mask per port. To change the mask you must first delete all the rules.\n"); + goto err_unlock; + } + + /* apply mask and compute/store hash */ + txgbe_atr_compute_perfect_hash(&input->filter, &mask); + + /* check if new entry does not exist on filter list */ + if (txgbe_match_ethtool_fdir_entry(txgbe, input)) + goto err_unlock; + + /* only program filters to hardware if the net device is running, as + * we store the filters in the Rx buffer which is not allocated when + * the device is down + */ + if (netif_running(wx->netdev)) { + err = txgbe_fdir_write_perfect_filter(wx, &input->filter, + input->sw_idx, queue); + if (err) + goto err_unlock; + } + + txgbe_update_ethtool_fdir_entry(txgbe, input, input->sw_idx); + + spin_unlock(&txgbe->fdir_perfect_lock); + + return 0; +err_unlock: + spin_unlock(&txgbe->fdir_perfect_lock); +err_out: + kfree(input); + return err; +} + +static int txgbe_del_ethtool_fdir_entry(struct txgbe *txgbe, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + int err = 0; + + spin_lock(&txgbe->fdir_perfect_lock); + err = txgbe_update_ethtool_fdir_entry(txgbe, NULL, fsp->location); + spin_unlock(&txgbe->fdir_perfect_lock); + + return err; +} + +static int txgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct wx *wx = netdev_priv(dev); + struct txgbe *txgbe = wx->priv; + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = txgbe_add_ethtool_fdir_entry(txgbe, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = txgbe_del_ethtool_fdir_entry(txgbe, cmd); + break; + default: + break; + } + + return ret; +} + static const struct ethtool_ops txgbe_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ, @@ -100,6 +525,8 @@ static const struct ethtool_ops txgbe_ethtool_ops = { .set_coalesce = wx_set_coalesce, .get_channels = wx_get_channels, .set_channels = txgbe_set_channels, + .get_rxnfc = txgbe_get_rxnfc, + .set_rxnfc = txgbe_set_rxnfc, .get_msglevel = wx_get_msglevel, .set_msglevel = wx_set_msglevel, }; diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c new file mode 100644 index 000000000000..ef50efbaec0f --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c @@ -0,0 +1,643 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. 
*/ + +#include <linux/string.h> +#include <linux/types.h> +#include <linux/pci.h> + +#include "../libwx/wx_type.h" +#include "../libwx/wx_lib.h" +#include "../libwx/wx_hw.h" +#include "txgbe_type.h" +#include "txgbe_fdir.h" + +/* These defines allow us to quickly generate all of the necessary instructions + * in the function below by simply calling out TXGBE_COMPUTE_SIG_HASH_ITERATION + * for values 0 through 15 + */ +#define TXGBE_ATR_COMMON_HASH_KEY \ + (TXGBE_ATR_BUCKET_HASH_KEY & TXGBE_ATR_SIGNATURE_HASH_KEY) +#define TXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ +do { \ + u32 n = (_n); \ + if (TXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ + common_hash ^= lo_hash_dword >> n; \ + else if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + bucket_hash ^= lo_hash_dword >> n; \ + else if (TXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ + sig_hash ^= lo_hash_dword << (16 - n); \ + if (TXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ + common_hash ^= hi_hash_dword >> n; \ + else if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + bucket_hash ^= hi_hash_dword >> n; \ + else if (TXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ + sig_hash ^= hi_hash_dword << (16 - n); \ +} while (0) + +/** + * txgbe_atr_compute_sig_hash - Compute the signature hash + * @input: input bitstream to compute the hash on + * @common: compressed common input dword + * @hash: pointer to the computed hash + * + * This function is almost identical to the function above but contains + * several optimizations such as unwinding all of the loops, letting the + * compiler work out all of the conditional ifs since the keys are static + * defines, and computing two keys at once since the hashed dword stream + * will be the same for both keys. + **/ +static void txgbe_atr_compute_sig_hash(union txgbe_atr_hash_dword input, + union txgbe_atr_hash_dword common, + u32 *hash) +{ + u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 i; + + /* record the flow_vm_vlan bits as they are a key part to the hash */ + flow_vm_vlan = ntohl(input.dword); + + /* generate common hash dword */ + hi_hash_dword = ntohl(common.dword); + + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + + /* apply flow ID/VM pool/VLAN ID bits to hash words */ + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); + + /* Process bits 0 and 16 */ + TXGBE_COMPUTE_SIG_HASH_ITERATION(0); + + /* apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed + * so we do not add the VLAN until after bit 0 was processed + */ + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + + /* Process remaining 30 bit of the key */ + for (i = 1; i <= 15; i++) + TXGBE_COMPUTE_SIG_HASH_ITERATION(i); + + /* combine common_hash result with signature and bucket hashes */ + bucket_hash ^= common_hash; + bucket_hash &= TXGBE_ATR_HASH_MASK; + + sig_hash ^= common_hash << 16; + sig_hash &= TXGBE_ATR_HASH_MASK << 16; + + /* return completed signature hash */ + *hash = sig_hash ^ bucket_hash; +} + +#define TXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ +do { \ + u32 n = (_n); \ + if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + bucket_hash ^= lo_hash_dword >> n; \ + if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + bucket_hash ^= hi_hash_dword >> n; \ +} while (0) + +/** + * txgbe_atr_compute_perfect_hash - Compute the perfect filter hash + * @input: input bitstream to compute the hash on + * @input_mask: mask for 
the input bitstream + * + * This function serves two main purposes. First it applies the input_mask + * to the atr_input resulting in a cleaned up atr_input data stream. + * Secondly it computes the hash and stores it in the bkt_hash field at + * the end of the input byte stream. This way it will be available for + * future use without needing to recompute the hash. + **/ +void txgbe_atr_compute_perfect_hash(union txgbe_atr_input *input, + union txgbe_atr_input *input_mask) +{ + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 bucket_hash = 0; + __be32 hi_dword = 0; + u32 i = 0; + + /* Apply masks to input data */ + for (i = 0; i < 11; i++) + input->dword_stream[i] &= input_mask->dword_stream[i]; + + /* record the flow_vm_vlan bits as they are a key part to the hash */ + flow_vm_vlan = ntohl(input->dword_stream[0]); + + /* generate common hash dword */ + for (i = 1; i <= 10; i++) + hi_dword ^= input->dword_stream[i]; + hi_hash_dword = ntohl(hi_dword); + + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + + /* apply flow ID/VM pool/VLAN ID bits to hash words */ + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); + + /* Process bits 0 and 16 */ + TXGBE_COMPUTE_BKT_HASH_ITERATION(0); + + /* apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed + * so we do not add the VLAN until after bit 0 was processed + */ + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + + /* Process remaining 30 bit of the key */ + for (i = 1; i <= 15; i++) + TXGBE_COMPUTE_BKT_HASH_ITERATION(i); + + /* Limit hash to 13 bits since max bucket count is 8K. + * Store result at the end of the input stream. + */ + input->formatted.bkt_hash = (__force __be16)(bucket_hash & 0x1FFF); +} + +static int txgbe_fdir_check_cmd_complete(struct wx *wx) +{ + u32 val; + + return read_poll_timeout_atomic(rd32, val, + !(val & TXGBE_RDB_FDIR_CMD_CMD_MASK), + 10, 100, false, + wx, TXGBE_RDB_FDIR_CMD); +} + +/** + * txgbe_fdir_add_signature_filter - Adds a signature hash filter + * @wx: pointer to hardware structure + * @input: unique input dword + * @common: compressed common input dword + * @queue: queue index to direct traffic to + * + * @return: 0 on success and negative on failure + **/ +static int txgbe_fdir_add_signature_filter(struct wx *wx, + union txgbe_atr_hash_dword input, + union txgbe_atr_hash_dword common, + u8 queue) +{ + u32 fdirhashcmd, fdircmd; + u8 flow_type; + int err; + + /* Get the flow_type in order to program FDIRCMD properly + * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 + * fifth is FDIRCMD.TUNNEL_FILTER + */ + flow_type = input.formatted.flow_type; + switch (flow_type) { + case TXGBE_ATR_FLOW_TYPE_TCPV4: + case TXGBE_ATR_FLOW_TYPE_UDPV4: + case TXGBE_ATR_FLOW_TYPE_SCTPV4: + case TXGBE_ATR_FLOW_TYPE_TCPV6: + case TXGBE_ATR_FLOW_TYPE_UDPV6: + case TXGBE_ATR_FLOW_TYPE_SCTPV6: + break; + default: + wx_err(wx, "Error on flow type input\n"); + return -EINVAL; + } + + /* configure FDIRCMD register */ + fdircmd = TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW | + TXGBE_RDB_FDIR_CMD_FILTER_UPDATE | + TXGBE_RDB_FDIR_CMD_LAST | TXGBE_RDB_FDIR_CMD_QUEUE_EN; + fdircmd |= TXGBE_RDB_FDIR_CMD_FLOW_TYPE(flow_type); + fdircmd |= TXGBE_RDB_FDIR_CMD_RX_QUEUE(queue); + + txgbe_atr_compute_sig_hash(input, common, &fdirhashcmd); + fdirhashcmd |= TXGBE_RDB_FDIR_HASH_BUCKET_VALID; + wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhashcmd); + wr32(wx, TXGBE_RDB_FDIR_CMD, fdircmd); + + 
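Editorial note, not part of the patch: txgbe_atr_compute_perfect_hash() and the signature variant above fold the (masked) 11-dword input stream into bucket/signature hashes, consuming two key bits per iteration and clamping the bucket hash to 13 bits (8K buckets). A standalone restatement of the bucket-hash fold; it assumes the stream has already been ANDed with the mask as in the function above, and all names are stand-ins:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define BUCKET_HASH_KEY 0x3DAD14E2u   /* TXGBE_ATR_BUCKET_HASH_KEY */

/* Standalone restatement of the bucket-hash fold used by
 * txgbe_atr_compute_perfect_hash(); 'stream' stands in for the 11-dword
 * masked input stream (dword 0 holds vm_pool/flow_type/vlan_id).
 */
static uint16_t bucket_hash13(const uint32_t stream[11])
{
        uint32_t flow_vm_vlan = ntohl(stream[0]);
        uint32_t hi_dword = 0, hi, lo, bucket = 0;
        int n;

        for (n = 1; n <= 10; n++)          /* XOR-compress dwords 1..10 */
                hi_dword ^= stream[n];
        hi = ntohl(hi_dword);
        lo = (hi >> 16) | (hi << 16);      /* word-swapped copy */

        hi ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

        /* bit 0 of the key is processed before the VLAN word is folded in */
        if (BUCKET_HASH_KEY & 0x1)
                bucket ^= lo;
        if (BUCKET_HASH_KEY & (0x1u << 16))
                bucket ^= hi;

        lo ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

        for (n = 1; n <= 15; n++) {        /* remaining key bits, two per step */
                if (BUCKET_HASH_KEY & (0x1u << n))
                        bucket ^= lo >> n;
                if (BUCKET_HASH_KEY & (0x1u << (n + 16)))
                        bucket ^= hi >> n;
        }

        return bucket & 0x1FFF;            /* 13 bits: max 8K buckets */
}

int main(void)
{
        uint32_t stream[11] = { htonl(0x00020000) };  /* dword 0: arbitrary demo value */

        stream[9] = htonl(80);             /* arbitrary masked dword, demo only */
        printf("bucket hash = 0x%04x\n", bucket_hash13(stream));
        return 0;
}

Feeding the same masked stream through this fold and through the driver should land a perfect-filter rule in the same bucket, which is why the patch stores the result in bkt_hash for reuse when the filter is written or erased.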
wx_dbg(wx, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); + + err = txgbe_fdir_check_cmd_complete(wx); + if (err) + wx_err(wx, "Flow Director command did not complete!\n"); + + return err; +} + +void txgbe_atr(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype) +{ + union txgbe_atr_hash_dword common = { .dword = 0 }; + union txgbe_atr_hash_dword input = { .dword = 0 }; + struct wx_q_vector *q_vector = ring->q_vector; + struct wx_dec_ptype dptype; + union network_header { + struct ipv6hdr *ipv6; + struct iphdr *ipv4; + void *raw; + } hdr; + struct tcphdr *th; + + /* if ring doesn't have a interrupt vector, cannot perform ATR */ + if (!q_vector) + return; + + ring->atr_count++; + dptype = wx_decode_ptype(ptype); + if (dptype.etype) { + if (WX_PTYPE_TYPL4(ptype) != WX_PTYPE_TYP_TCP) + return; + hdr.raw = (void *)skb_inner_network_header(first->skb); + th = inner_tcp_hdr(first->skb); + } else { + if (WX_PTYPE_PKT(ptype) != WX_PTYPE_PKT_IP || + WX_PTYPE_TYPL4(ptype) != WX_PTYPE_TYP_TCP) + return; + hdr.raw = (void *)skb_network_header(first->skb); + th = tcp_hdr(first->skb); + } + + /* skip this packet since it is invalid or the socket is closing */ + if (!th || th->fin) + return; + + /* sample on all syn packets or once every atr sample count */ + if (!th->syn && ring->atr_count < ring->atr_sample_rate) + return; + + /* reset sample count */ + ring->atr_count = 0; + + /* src and dst are inverted, think how the receiver sees them + * + * The input is broken into two sections, a non-compressed section + * containing vm_pool, vlan_id, and flow_type. The rest of the data + * is XORed together and stored in the compressed dword. + */ + input.formatted.vlan_id = htons((u16)ptype); + + /* since src port and flex bytes occupy the same word XOR them together + * and write the value to source port portion of compressed dword + */ + if (first->tx_flags & WX_TX_FLAGS_SW_VLAN) + common.port.src ^= th->dest ^ first->skb->protocol; + else if (first->tx_flags & WX_TX_FLAGS_HW_VLAN) + common.port.src ^= th->dest ^ first->skb->vlan_proto; + else + common.port.src ^= th->dest ^ first->protocol; + common.port.dst ^= th->source; + + if (WX_PTYPE_PKT_IPV6 & WX_PTYPE_PKT(ptype)) { + input.formatted.flow_type = TXGBE_ATR_FLOW_TYPE_TCPV6; + common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ + hdr.ipv6->saddr.s6_addr32[1] ^ + hdr.ipv6->saddr.s6_addr32[2] ^ + hdr.ipv6->saddr.s6_addr32[3] ^ + hdr.ipv6->daddr.s6_addr32[0] ^ + hdr.ipv6->daddr.s6_addr32[1] ^ + hdr.ipv6->daddr.s6_addr32[2] ^ + hdr.ipv6->daddr.s6_addr32[3]; + } else { + input.formatted.flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4; + common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; + } + + /* This assumes the Rx queue and Tx queue are bound to the same CPU */ + txgbe_fdir_add_signature_filter(q_vector->wx, input, common, + ring->queue_index); +} + +int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask) +{ + u32 fdirm = 0, fdirtcpm = 0, flex = 0; + + /* Program the relevant mask registers. If src/dst_port or src/dst_addr + * are zero, then assume a full mask for that field. Also assume that + * a VLAN of 0 is unspecified, so mask that out as well. L4type + * cannot be masked out in this implementation. + * + * This also assumes IPv4 only. IPv6 masking isn't supported at this + * point in time. 
+ */ + + /* verify bucket hash is cleared on hash generation */ + if (input_mask->formatted.bkt_hash) + wx_dbg(wx, "bucket hash should always be 0 in mask\n"); + + /* Program FDIRM and verify partial masks */ + switch (input_mask->formatted.vm_pool & 0x7F) { + case 0x0: + fdirm |= TXGBE_RDB_FDIR_OTHER_MSK_POOL; + break; + case 0x7F: + break; + default: + wx_err(wx, "Error on vm pool mask\n"); + return -EINVAL; + } + + switch (input_mask->formatted.flow_type & TXGBE_ATR_L4TYPE_MASK) { + case 0x0: + fdirm |= TXGBE_RDB_FDIR_OTHER_MSK_L4P; + if (input_mask->formatted.dst_port || + input_mask->formatted.src_port) { + wx_err(wx, "Error on src/dst port mask\n"); + return -EINVAL; + } + break; + case TXGBE_ATR_L4TYPE_MASK: + break; + default: + wx_err(wx, "Error on flow type mask\n"); + return -EINVAL; + } + + /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ + wr32(wx, TXGBE_RDB_FDIR_OTHER_MSK, fdirm); + + flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0)); + flex &= ~TXGBE_RDB_FDIR_FLEX_CFG_FIELD0; + flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC | + TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6)); + + switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) { + case 0x0000: + /* Mask Flex Bytes */ + flex |= TXGBE_RDB_FDIR_FLEX_CFG_MSK; + break; + case 0xFFFF: + break; + default: + wx_err(wx, "Error on flexible byte mask\n"); + return -EINVAL; + } + wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0), flex); + + /* store the TCP/UDP port masks, bit reversed from port layout */ + fdirtcpm = ntohs(input_mask->formatted.dst_port); + fdirtcpm <<= TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT; + fdirtcpm |= ntohs(input_mask->formatted.src_port); + + /* write both the same so that UDP and TCP use the same mask */ + wr32(wx, TXGBE_RDB_FDIR_TCP_MSK, ~fdirtcpm); + wr32(wx, TXGBE_RDB_FDIR_UDP_MSK, ~fdirtcpm); + wr32(wx, TXGBE_RDB_FDIR_SCTP_MSK, ~fdirtcpm); + + /* store source and destination IP masks (little-enian) */ + wr32(wx, TXGBE_RDB_FDIR_SA4_MSK, + ntohl(~input_mask->formatted.src_ip[0])); + wr32(wx, TXGBE_RDB_FDIR_DA4_MSK, + ntohl(~input_mask->formatted.dst_ip[0])); + + return 0; +} + +int txgbe_fdir_write_perfect_filter(struct wx *wx, + union txgbe_atr_input *input, + u16 soft_id, u8 queue) +{ + u32 fdirport, fdirvlan, fdirhash, fdircmd; + int err = 0; + + /* currently IPv6 is not supported, must be programmed with 0 */ + wr32(wx, TXGBE_RDB_FDIR_IP6(2), ntohl(input->formatted.src_ip[0])); + wr32(wx, TXGBE_RDB_FDIR_IP6(1), ntohl(input->formatted.src_ip[1])); + wr32(wx, TXGBE_RDB_FDIR_IP6(0), ntohl(input->formatted.src_ip[2])); + + /* record the source address (little-endian) */ + wr32(wx, TXGBE_RDB_FDIR_SA, ntohl(input->formatted.src_ip[0])); + + /* record the first 32 bits of the destination address + * (little-endian) + */ + wr32(wx, TXGBE_RDB_FDIR_DA, ntohl(input->formatted.dst_ip[0])); + + /* record source and destination port (little-endian)*/ + fdirport = ntohs(input->formatted.dst_port); + fdirport <<= TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT; + fdirport |= ntohs(input->formatted.src_port); + wr32(wx, TXGBE_RDB_FDIR_PORT, fdirport); + + /* record packet type and flex_bytes (little-endian) */ + fdirvlan = ntohs(input->formatted.flex_bytes); + fdirvlan <<= TXGBE_RDB_FDIR_FLEX_FLEX_SHIFT; + fdirvlan |= ntohs(input->formatted.vlan_id); + wr32(wx, TXGBE_RDB_FDIR_FLEX, fdirvlan); + + /* configure FDIRHASH register */ + fdirhash = (__force u32)input->formatted.bkt_hash | + TXGBE_RDB_FDIR_HASH_BUCKET_VALID | + TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX(soft_id); + wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhash); + + /* flush all previous writes to make 
certain registers are + * programmed prior to issuing the command + */ + WX_WRITE_FLUSH(wx); + + /* configure FDIRCMD register */ + fdircmd = TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW | + TXGBE_RDB_FDIR_CMD_FILTER_UPDATE | + TXGBE_RDB_FDIR_CMD_LAST | TXGBE_RDB_FDIR_CMD_QUEUE_EN; + if (queue == TXGBE_RDB_FDIR_DROP_QUEUE) + fdircmd |= TXGBE_RDB_FDIR_CMD_DROP; + fdircmd |= TXGBE_RDB_FDIR_CMD_FLOW_TYPE(input->formatted.flow_type); + fdircmd |= TXGBE_RDB_FDIR_CMD_RX_QUEUE(queue); + fdircmd |= TXGBE_RDB_FDIR_CMD_VT_POOL(input->formatted.vm_pool); + + wr32(wx, TXGBE_RDB_FDIR_CMD, fdircmd); + err = txgbe_fdir_check_cmd_complete(wx); + if (err) + wx_err(wx, "Flow Director command did not complete!\n"); + + return err; +} + +int txgbe_fdir_erase_perfect_filter(struct wx *wx, + union txgbe_atr_input *input, + u16 soft_id) +{ + u32 fdirhash, fdircmd; + int err = 0; + + /* configure FDIRHASH register */ + fdirhash = (__force u32)input->formatted.bkt_hash; + fdirhash |= TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX(soft_id); + wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhash); + + /* flush hash to HW */ + WX_WRITE_FLUSH(wx); + + /* Query if filter is present */ + wr32(wx, TXGBE_RDB_FDIR_CMD, TXGBE_RDB_FDIR_CMD_CMD_QUERY_REM_FILT); + + err = txgbe_fdir_check_cmd_complete(wx); + if (err) { + wx_err(wx, "Flow Director command did not complete!\n"); + return err; + } + + fdircmd = rd32(wx, TXGBE_RDB_FDIR_CMD); + /* if filter exists in hardware then remove it */ + if (fdircmd & TXGBE_RDB_FDIR_CMD_FILTER_VALID) { + wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhash); + WX_WRITE_FLUSH(wx); + wr32(wx, TXGBE_RDB_FDIR_CMD, + TXGBE_RDB_FDIR_CMD_CMD_REMOVE_FLOW); + } + + return 0; +} + +/** + * txgbe_fdir_enable - Initialize Flow Director control registers + * @wx: pointer to hardware structure + * @fdirctrl: value to write to flow director control register + **/ +static void txgbe_fdir_enable(struct wx *wx, u32 fdirctrl) +{ + u32 val; + int ret; + + /* Prime the keys for hashing */ + wr32(wx, TXGBE_RDB_FDIR_HKEY, TXGBE_ATR_BUCKET_HASH_KEY); + wr32(wx, TXGBE_RDB_FDIR_SKEY, TXGBE_ATR_SIGNATURE_HASH_KEY); + + wr32(wx, TXGBE_RDB_FDIR_CTL, fdirctrl); + WX_WRITE_FLUSH(wx); + ret = read_poll_timeout(rd32, val, val & TXGBE_RDB_FDIR_CTL_INIT_DONE, + 1000, 10000, false, wx, TXGBE_RDB_FDIR_CTL); + + if (ret < 0) + wx_dbg(wx, "Flow Director poll time exceeded!\n"); +} + +/** + * txgbe_init_fdir_signature -Initialize Flow Director sig filters + * @wx: pointer to hardware structure + **/ +static void txgbe_init_fdir_signature(struct wx *wx) +{ + u32 fdirctrl = TXGBE_FDIR_PBALLOC_64K; + u32 flex = 0; + + flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0)); + flex &= ~TXGBE_RDB_FDIR_FLEX_CFG_FIELD0; + + flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC | + TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6)); + wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0), flex); + + /* Continue setup of fdirctrl register bits: + * Move the flexible bytes to use the ethertype - shift 6 words + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 filters are left + */ + fdirctrl |= TXGBE_RDB_FDIR_CTL_HASH_BITS(0xF) | + TXGBE_RDB_FDIR_CTL_MAX_LENGTH(0xA) | + TXGBE_RDB_FDIR_CTL_FULL_THRESH(4); + + /* write hashes and fdirctrl register, poll for completion */ + txgbe_fdir_enable(wx, fdirctrl); +} + +/** + * txgbe_init_fdir_perfect - Initialize Flow Director perfect filters + * @wx: pointer to hardware structure + **/ +static void txgbe_init_fdir_perfect(struct wx *wx) +{ + u32 fdirctrl = TXGBE_FDIR_PBALLOC_64K; + + /* Continue setup of fdirctrl register bits: + * Turn perfect match filtering on + * Report 
hash in RSS field of Rx wb descriptor + * Initialize the drop queue + * Move the flexible bytes to use the ethertype - shift 6 words + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 (0x4 * 16) filters are left + */ + fdirctrl |= TXGBE_RDB_FDIR_CTL_PERFECT_MATCH | + TXGBE_RDB_FDIR_CTL_DROP_Q(TXGBE_RDB_FDIR_DROP_QUEUE) | + TXGBE_RDB_FDIR_CTL_HASH_BITS(0xF) | + TXGBE_RDB_FDIR_CTL_MAX_LENGTH(0xA) | + TXGBE_RDB_FDIR_CTL_FULL_THRESH(4); + + /* write hashes and fdirctrl register, poll for completion */ + txgbe_fdir_enable(wx, fdirctrl); +} + +static void txgbe_fdir_filter_restore(struct wx *wx) +{ + struct txgbe_fdir_filter *filter; + struct txgbe *txgbe = wx->priv; + struct hlist_node *node; + u8 queue = 0; + int ret = 0; + + spin_lock(&txgbe->fdir_perfect_lock); + + if (!hlist_empty(&txgbe->fdir_filter_list)) + ret = txgbe_fdir_set_input_mask(wx, &txgbe->fdir_mask); + + if (ret) + goto unlock; + + hlist_for_each_entry_safe(filter, node, + &txgbe->fdir_filter_list, fdir_node) { + if (filter->action == TXGBE_RDB_FDIR_DROP_QUEUE) { + queue = TXGBE_RDB_FDIR_DROP_QUEUE; + } else { + u32 ring = ethtool_get_flow_spec_ring(filter->action); + + if (ring >= wx->num_rx_queues) { + wx_err(wx, "FDIR restore failed, ring:%u\n", + ring); + continue; + } + + /* Map the ring onto the absolute queue index */ + queue = wx->rx_ring[ring]->reg_idx; + } + + ret = txgbe_fdir_write_perfect_filter(wx, + &filter->filter, + filter->sw_idx, + queue); + if (ret) + wx_err(wx, "FDIR restore failed, index:%u\n", + filter->sw_idx); + } + +unlock: + spin_unlock(&txgbe->fdir_perfect_lock); +} + +void txgbe_configure_fdir(struct wx *wx) +{ + wx_disable_sec_rx_path(wx); + + if (test_bit(WX_FLAG_FDIR_HASH, wx->flags)) { + txgbe_init_fdir_signature(wx); + } else if (test_bit(WX_FLAG_FDIR_PERFECT, wx->flags)) { + txgbe_init_fdir_perfect(wx); + txgbe_fdir_filter_restore(wx); + } + + wx_enable_sec_rx_path(wx); +} + +void txgbe_fdir_filter_exit(struct wx *wx) +{ + struct txgbe_fdir_filter *filter; + struct txgbe *txgbe = wx->priv; + struct hlist_node *node; + + spin_lock(&txgbe->fdir_perfect_lock); + + hlist_for_each_entry_safe(filter, node, + &txgbe->fdir_filter_list, fdir_node) { + hlist_del(&filter->fdir_node); + kfree(filter); + } + txgbe->fdir_filter_count = 0; + + spin_unlock(&txgbe->fdir_perfect_lock); +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.h new file mode 100644 index 000000000000..1f44ce60becb --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. 
*/ + +#ifndef _TXGBE_FDIR_H_ +#define _TXGBE_FDIR_H_ + +void txgbe_atr_compute_perfect_hash(union txgbe_atr_input *input, + union txgbe_atr_input *input_mask); +void txgbe_atr(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype); +int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask); +int txgbe_fdir_write_perfect_filter(struct wx *wx, + union txgbe_atr_input *input, + u16 soft_id, u8 queue); +int txgbe_fdir_erase_perfect_filter(struct wx *wx, + union txgbe_atr_input *input, + u16 soft_id); +void txgbe_configure_fdir(struct wx *wx); +void txgbe_fdir_filter_exit(struct wx *wx); + +#endif /* _TXGBE_FDIR_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index ca74d9422065..93180225a6f1 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -18,6 +18,7 @@ #include "txgbe_hw.h" #include "txgbe_phy.h" #include "txgbe_irq.h" +#include "txgbe_fdir.h" #include "txgbe_ethtool.h" char txgbe_driver_name[] = "txgbe"; @@ -257,6 +258,14 @@ static int txgbe_sw_init(struct wx *wx) num_online_cpus()); wx->rss_enabled = true; + wx->ring_feature[RING_F_FDIR].limit = min_t(int, TXGBE_MAX_FDIR_INDICES, + num_online_cpus()); + set_bit(WX_FLAG_FDIR_CAPABLE, wx->flags); + set_bit(WX_FLAG_FDIR_HASH, wx->flags); + wx->atr_sample_rate = TXGBE_DEFAULT_ATR_SAMPLE_RATE; + wx->atr = txgbe_atr; + wx->configure_fdir = txgbe_configure_fdir; + /* enable itr by default in dynamic mode */ wx->rx_itr_setting = 1; wx->tx_itr_setting = 1; @@ -274,6 +283,12 @@ static int txgbe_sw_init(struct wx *wx) return 0; } +static void txgbe_init_fdir(struct txgbe *txgbe) +{ + txgbe->fdir_filter_count = 0; + spin_lock_init(&txgbe->fdir_perfect_lock); +} + /** * txgbe_open - Called when a network interface is made active * @netdev: network interface device structure @@ -352,6 +367,7 @@ static int txgbe_close(struct net_device *netdev) txgbe_down(wx); wx_free_irq(wx); wx_free_resources(wx); + txgbe_fdir_filter_exit(wx); wx_control_hw(wx, false); return 0; @@ -660,6 +676,8 @@ static int txgbe_probe(struct pci_dev *pdev, txgbe->wx = wx; wx->priv = txgbe; + txgbe_init_fdir(txgbe); + err = txgbe_setup_misc_irq(txgbe); if (err) goto err_release_hw; diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index f434a7865cb7..959102c4c379 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -89,6 +89,55 @@ #define TXGBE_XPCS_IDA_ADDR 0x13000 #define TXGBE_XPCS_IDA_DATA 0x13004 +/********************************* Flow Director *****************************/ +#define TXGBE_RDB_FDIR_DROP_QUEUE 127 +#define TXGBE_RDB_FDIR_CTL 0x19500 +#define TXGBE_RDB_FDIR_CTL_INIT_DONE BIT(3) +#define TXGBE_RDB_FDIR_CTL_PERFECT_MATCH BIT(4) +#define TXGBE_RDB_FDIR_CTL_DROP_Q(v) FIELD_PREP(GENMASK(14, 8), v) +#define TXGBE_RDB_FDIR_CTL_HASH_BITS(v) FIELD_PREP(GENMASK(23, 20), v) +#define TXGBE_RDB_FDIR_CTL_MAX_LENGTH(v) FIELD_PREP(GENMASK(27, 24), v) +#define TXGBE_RDB_FDIR_CTL_FULL_THRESH(v) FIELD_PREP(GENMASK(31, 28), v) +#define TXGBE_RDB_FDIR_IP6(_i) (0x1950C + ((_i) * 4)) /* 0-2 */ +#define TXGBE_RDB_FDIR_SA 0x19518 +#define TXGBE_RDB_FDIR_DA 0x1951C +#define TXGBE_RDB_FDIR_PORT 0x19520 +#define TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT 16 +#define TXGBE_RDB_FDIR_FLEX 0x19524 +#define TXGBE_RDB_FDIR_FLEX_FLEX_SHIFT 16 +#define TXGBE_RDB_FDIR_HASH 0x19528 +#define 
TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX(v) FIELD_PREP(GENMASK(31, 16), v) +#define TXGBE_RDB_FDIR_HASH_BUCKET_VALID BIT(15) +#define TXGBE_RDB_FDIR_CMD 0x1952C +#define TXGBE_RDB_FDIR_CMD_CMD_MASK GENMASK(1, 0) +#define TXGBE_RDB_FDIR_CMD_CMD(v) FIELD_PREP(GENMASK(1, 0), v) +#define TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW TXGBE_RDB_FDIR_CMD_CMD(1) +#define TXGBE_RDB_FDIR_CMD_CMD_REMOVE_FLOW TXGBE_RDB_FDIR_CMD_CMD(2) +#define TXGBE_RDB_FDIR_CMD_CMD_QUERY_REM_FILT TXGBE_RDB_FDIR_CMD_CMD(3) +#define TXGBE_RDB_FDIR_CMD_FILTER_VALID BIT(2) +#define TXGBE_RDB_FDIR_CMD_FILTER_UPDATE BIT(3) +#define TXGBE_RDB_FDIR_CMD_FLOW_TYPE(v) FIELD_PREP(GENMASK(6, 5), v) +#define TXGBE_RDB_FDIR_CMD_DROP BIT(9) +#define TXGBE_RDB_FDIR_CMD_LAST BIT(11) +#define TXGBE_RDB_FDIR_CMD_QUEUE_EN BIT(15) +#define TXGBE_RDB_FDIR_CMD_RX_QUEUE(v) FIELD_PREP(GENMASK(22, 16), v) +#define TXGBE_RDB_FDIR_CMD_VT_POOL(v) FIELD_PREP(GENMASK(29, 24), v) +#define TXGBE_RDB_FDIR_DA4_MSK 0x1953C +#define TXGBE_RDB_FDIR_SA4_MSK 0x19540 +#define TXGBE_RDB_FDIR_TCP_MSK 0x19544 +#define TXGBE_RDB_FDIR_UDP_MSK 0x19548 +#define TXGBE_RDB_FDIR_SCTP_MSK 0x19560 +#define TXGBE_RDB_FDIR_HKEY 0x19568 +#define TXGBE_RDB_FDIR_SKEY 0x1956C +#define TXGBE_RDB_FDIR_OTHER_MSK 0x19570 +#define TXGBE_RDB_FDIR_OTHER_MSK_POOL BIT(2) +#define TXGBE_RDB_FDIR_OTHER_MSK_L4P BIT(3) +#define TXGBE_RDB_FDIR_FLEX_CFG(_i) (0x19580 + ((_i) * 4)) +#define TXGBE_RDB_FDIR_FLEX_CFG_FIELD0 GENMASK(7, 0) +#define TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC FIELD_PREP(GENMASK(1, 0), 0) +#define TXGBE_RDB_FDIR_FLEX_CFG_MSK BIT(2) +#define TXGBE_RDB_FDIR_FLEX_CFG_OFST(v) FIELD_PREP(GENMASK(7, 3), v) + /* Checksum and EEPROM pointers */ #define TXGBE_EEPROM_LAST_WORD 0x800 #define TXGBE_EEPROM_CHECKSUM 0x2F @@ -112,6 +161,98 @@ #define TXGBE_SP_RX_PB_SIZE 512 #define TXGBE_SP_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */ +#define TXGBE_DEFAULT_ATR_SAMPLE_RATE 20 + +/* Software ATR hash keys */ +#define TXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 +#define TXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 + +/* Software ATR input stream values and masks */ +#define TXGBE_ATR_HASH_MASK 0x7fff +#define TXGBE_ATR_L4TYPE_MASK 0x3 +#define TXGBE_ATR_L4TYPE_UDP 0x1 +#define TXGBE_ATR_L4TYPE_TCP 0x2 +#define TXGBE_ATR_L4TYPE_SCTP 0x3 +#define TXGBE_ATR_L4TYPE_IPV6_MASK 0x4 +#define TXGBE_ATR_L4TYPE_TUNNEL_MASK 0x10 + +enum txgbe_atr_flow_type { + TXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, + TXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, + TXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, + TXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, + TXGBE_ATR_FLOW_TYPE_IPV6 = 0x4, + TXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, + TXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, + TXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, + TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10, + TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11, + TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12, + TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13, + TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14, + TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15, + TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16, + TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17, +}; + +/* Flow Director ATR input struct. 
*/ +union txgbe_atr_input { + /* Byte layout in order, all values with MSB first: + * + * vm_pool - 1 byte + * flow_type - 1 byte + * vlan_id - 2 bytes + * dst_ip - 16 bytes + * src_ip - 16 bytes + * src_port - 2 bytes + * dst_port - 2 bytes + * flex_bytes - 2 bytes + * bkt_hash - 2 bytes + */ + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + __be32 dst_ip[4]; + __be32 src_ip[4]; + __be16 src_port; + __be16 dst_port; + __be16 flex_bytes; + __be16 bkt_hash; + } formatted; + __be32 dword_stream[11]; +}; + +/* Flow Director compressed ATR hash input struct */ +union txgbe_atr_hash_dword { + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + } formatted; + __be32 ip; + struct { + __be16 src; + __be16 dst; + } port; + __be16 flex_bytes; + __be32 dword; +}; + +enum txgbe_fdir_pballoc_type { + TXGBE_FDIR_PBALLOC_NONE = 0, + TXGBE_FDIR_PBALLOC_64K = 1, + TXGBE_FDIR_PBALLOC_128K = 2, + TXGBE_FDIR_PBALLOC_256K = 3, +}; + +struct txgbe_fdir_filter { + struct hlist_node fdir_node; + union txgbe_atr_input filter; + u16 sw_idx; + u16 action; +}; + /* TX/RX descriptor defines */ #define TXGBE_DEFAULT_TXD 512 #define TXGBE_DEFAULT_TX_WORK 256 @@ -196,6 +337,12 @@ struct txgbe { struct gpio_chip *gpio; unsigned int gpio_irq; unsigned int link_irq; + + /* flow director */ + struct hlist_head fdir_filter_list; + union txgbe_atr_input fdir_mask; + int fdir_filter_count; + spinlock_t fdir_perfect_lock; /* spinlock for FDIR */ }; #endif /* _TXGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index c29809cd9201..e342f387c3dd 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1945,9 +1945,9 @@ axienet_ethtools_set_coalesce(struct net_device *ndev, struct axienet_local *lp = netdev_priv(ndev); if (netif_running(ndev)) { - netdev_err(ndev, - "Please stop netif before applying configuration\n"); - return -EFAULT; + NL_SET_ERR_MSG(extack, + "Please stop netif before applying configuration"); + return -EBUSY; } if (ecoalesce->rx_max_coalesced_frames) @@ -2254,7 +2254,6 @@ static int axienet_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ndev); SET_NETDEV_DEV(ndev, &pdev->dev); - ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ ndev->features = NETIF_F_SG; ndev->ethtool_ops = &axienet_ethtool_ops; diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 8aff6a73ca0a..56df37f8d50a 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -1015,7 +1015,7 @@ static void ixp4xx_get_drvinfo(struct net_device *dev, } static int ixp4xx_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct port *port = netdev_priv(dev); |
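Editorial note, not part of any patch above: the union txgbe_atr_input added in the txgbe_type.h hunk overlays the formatted filter with an 11-dword stream so the flow director code can mask and hash the whole filter dword-by-dword. A small standalone check of that 44-byte invariant, using plain fixed-width types in place of the kernel's __be16/__be32 (illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Stand-in mirror of union txgbe_atr_input's formatted view. */
union atr_input {
        struct {
                uint8_t  vm_pool;
                uint8_t  flow_type;
                uint16_t vlan_id;
                uint32_t dst_ip[4];
                uint32_t src_ip[4];
                uint16_t src_port;
                uint16_t dst_port;
                uint16_t flex_bytes;
                uint16_t bkt_hash;
        } formatted;
        uint32_t dword_stream[11];
};

/* The dword_stream overlay only works if the formatted view is exactly
 * 11 x 32 bits with no padding.
 */
_Static_assert(sizeof(union atr_input) == 11 * sizeof(uint32_t),
               "formatted filter must be exactly 11 dwords");

int main(void)
{
        union atr_input in = { .formatted = { .flow_type = 0x2 /* TCPV4 */,
                                              .dst_port = 80 /* demo value */ } };

        /* masking the stream dword-by-dword touches the same storage */
        printf("dword_stream[0] = 0x%08x (holds vm_pool/flow_type/vlan_id)\n",
               in.dword_stream[0]);
        return 0;
}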