Diffstat (limited to 'drivers/net/ethernet/meta')
-rw-r--r--  drivers/net/ethernet/meta/Kconfig                     31
-rw-r--r--  drivers/net/ethernet/meta/Makefile                     6
-rw-r--r--  drivers/net/ethernet/meta/fbnic/Makefile              19
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic.h              144
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_csr.h          838
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_devlink.c       88
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_drvinfo.h        5
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_fw.c           791
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_fw.h           124
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_irq.c          208
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_mac.c          666
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_mac.h           86
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_netdev.c       488
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_netdev.h        63
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_pci.c          564
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_phylink.c      161
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_rpc.c          651
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_rpc.h          189
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_tlv.c          529
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_tlv.h          175
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_txrx.c        1913
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_txrx.h         127
22 files changed, 7866 insertions, 0 deletions
diff --git a/drivers/net/ethernet/meta/Kconfig b/drivers/net/ethernet/meta/Kconfig
new file mode 100644
index 000000000000..d8f5e9f9bb33
--- /dev/null
+++ b/drivers/net/ethernet/meta/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Meta Platforms network device configuration
+#
+
+config NET_VENDOR_META
+ bool "Meta Platforms devices"
+ default y
+ help
+ If you have a network (Ethernet) card designed by Meta, say Y.
+ That's Meta as in the parent company of Facebook.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Meta cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_META
+
+config FBNIC
+ tristate "Meta Platforms Host Network Interface"
+ depends on X86_64 || COMPILE_TEST
+ depends on PCI_MSI
+ select PHYLINK
+ help
+ This driver supports Meta Platforms Host Network Interface.
+
+ To compile this driver as a module, choose M here. The module
+ will be called fbnic. MSI-X interrupt support is required.
+
+endif # NET_VENDOR_META
diff --git a/drivers/net/ethernet/meta/Makefile b/drivers/net/ethernet/meta/Makefile
new file mode 100644
index 000000000000..88804f3de963
--- /dev/null
+++ b/drivers/net/ethernet/meta/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Meta Platforms network device drivers.
+#
+
+obj-$(CONFIG_FBNIC) += fbnic/
diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile
new file mode 100644
index 000000000000..9373b558fdc9
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+
+#
+# Makefile for the Meta(R) Host Network Interface
+#
+
+obj-$(CONFIG_FBNIC) += fbnic.o
+
+fbnic-y := fbnic_devlink.o \
+ fbnic_fw.o \
+ fbnic_irq.o \
+ fbnic_mac.o \
+ fbnic_netdev.o \
+ fbnic_pci.o \
+ fbnic_phylink.o \
+ fbnic_rpc.o \
+ fbnic_tlv.o \
+ fbnic_txrx.o
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
new file mode 100644
index 000000000000..ad2689bfd6cb
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_H_
+#define _FBNIC_H_
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "fbnic_csr.h"
+#include "fbnic_fw.h"
+#include "fbnic_mac.h"
+#include "fbnic_rpc.h"
+
+struct fbnic_dev {
+ struct device *dev;
+ struct net_device *netdev;
+
+ u32 __iomem *uc_addr0;
+ u32 __iomem *uc_addr4;
+ const struct fbnic_mac *mac;
+ unsigned int fw_msix_vector;
+ unsigned int pcs_msix_vector;
+ unsigned short num_irqs;
+
+ struct delayed_work service_task;
+
+ struct fbnic_fw_mbx mbx[FBNIC_IPC_MBX_INDICES];
+ struct fbnic_fw_cap fw_cap;
+ /* Lock protecting Tx Mailbox queue to prevent possible races */
+ spinlock_t fw_tx_lock;
+
+ unsigned long last_heartbeat_request;
+ unsigned long last_heartbeat_response;
+ u8 fw_heartbeat_enabled;
+
+ u64 dsn;
+ u32 mps;
+ u32 readrq;
+
+ /* Local copy of the device's TCAM */
+ struct fbnic_act_tcam act_tcam[FBNIC_RPC_TCAM_ACT_NUM_ENTRIES];
+ struct fbnic_mac_addr mac_addr[FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES];
+ u8 mac_addr_boundary;
+
+ /* Number of TCQs/RCQs available on hardware */
+ u16 max_num_queues;
+};
+
+/* Reserve entry 0 in the MSI-X "others" array until we have filled all
+ * 32 of the possible interrupt slots. By doing this we can avoid any
+ * potential conflicts should we need to enable one of the debug interrupt
+ * causes later.
+ */
+enum {
+ FBNIC_FW_MSIX_ENTRY,
+ FBNIC_PCS_MSIX_ENTRY,
+ FBNIC_NON_NAPI_VECTORS
+};
+
+static inline bool fbnic_present(struct fbnic_dev *fbd)
+{
+ return !!READ_ONCE(fbd->uc_addr0);
+}
+
+static inline void fbnic_wr32(struct fbnic_dev *fbd, u32 reg, u32 val)
+{
+ u32 __iomem *csr = READ_ONCE(fbd->uc_addr0);
+
+ if (csr)
+ writel(val, csr + reg);
+}
+
+u32 fbnic_rd32(struct fbnic_dev *fbd, u32 reg);
+
+static inline void fbnic_wrfl(struct fbnic_dev *fbd)
+{
+ fbnic_rd32(fbd, FBNIC_MASTER_SPARE_0);
+}
+
+static inline void
+fbnic_rmw32(struct fbnic_dev *fbd, u32 reg, u32 mask, u32 val)
+{
+ u32 v;
+
+ v = fbnic_rd32(fbd, reg);
+ v &= ~mask;
+ v |= val;
+ fbnic_wr32(fbd, reg, v);
+}
+
+#define wr32(_f, _r, _v) fbnic_wr32(_f, _r, _v)
+#define rd32(_f, _r) fbnic_rd32(_f, _r)
+#define wrfl(_f) fbnic_wrfl(_f)
+
+bool fbnic_fw_present(struct fbnic_dev *fbd);
+u32 fbnic_fw_rd32(struct fbnic_dev *fbd, u32 reg);
+void fbnic_fw_wr32(struct fbnic_dev *fbd, u32 reg, u32 val);
+
+#define fw_rd32(_f, _r) fbnic_fw_rd32(_f, _r)
+#define fw_wr32(_f, _r, _v) fbnic_fw_wr32(_f, _r, _v)
+#define fw_wrfl(_f) fbnic_fw_rd32(_f, FBNIC_FW_ZERO_REG)
+
+static inline bool fbnic_bmc_present(struct fbnic_dev *fbd)
+{
+ return fbd->fw_cap.bmc_present;
+}
+
+static inline bool fbnic_init_failure(struct fbnic_dev *fbd)
+{
+ return !fbd->netdev;
+}
+
+extern char fbnic_driver_name[];
+
+void fbnic_devlink_free(struct fbnic_dev *fbd);
+struct fbnic_dev *fbnic_devlink_alloc(struct pci_dev *pdev);
+void fbnic_devlink_register(struct fbnic_dev *fbd);
+void fbnic_devlink_unregister(struct fbnic_dev *fbd);
+
+int fbnic_fw_enable_mbx(struct fbnic_dev *fbd);
+void fbnic_fw_disable_mbx(struct fbnic_dev *fbd);
+
+int fbnic_pcs_irq_enable(struct fbnic_dev *fbd);
+void fbnic_pcs_irq_disable(struct fbnic_dev *fbd);
+
+int fbnic_request_irq(struct fbnic_dev *dev, int nr, irq_handler_t handler,
+ unsigned long flags, const char *name, void *data);
+void fbnic_free_irq(struct fbnic_dev *dev, int nr, void *data);
+void fbnic_free_irqs(struct fbnic_dev *fbd);
+int fbnic_alloc_irqs(struct fbnic_dev *fbd);
+
+enum fbnic_boards {
+ fbnic_board_asic
+};
+
+struct fbnic_info {
+ unsigned int max_num_queues;
+ unsigned int bar_mask;
+};
+
+#endif /* _FBNIC_H_ */
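The accessors above follow the usual masked read-modify-write pattern for
CSRs. A minimal sketch of a caller updating a single field (the helper name
is hypothetical; the register and field mask come from fbnic_csr.h below,
and FIELD_PREP() is from <linux/bitfield.h>):

        static void example_set_prefetch_thresh(struct fbnic_dev *fbd, u32 thresh)
        {
                /* Replace only the prefetch-threshold bits, keep the rest */
                fbnic_rmw32(fbd, FBNIC_QM_TQS_CTL0,
                            FBNIC_QM_TQS_CTL0_PREFETCH_THRESH,
                            FIELD_PREP(FBNIC_QM_TQS_CTL0_PREFETCH_THRESH, thresh));
                fbnic_wrfl(fbd); /* post the write with a harmless read back */
        }

Note that fbnic_wr32() silently drops the write once uc_addr0 has been
cleared, so access to a surprise-removed device degrades to a no-op rather
than a fault.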
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
new file mode 100644
index 000000000000..a64360de0552
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
@@ -0,0 +1,838 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_CSR_H_
+#define _FBNIC_CSR_H_
+
+#include <linux/bitops.h>
+
+#define CSR_BIT(nr) (1u << (nr))
+#define CSR_GENMASK(h, l) GENMASK(h, l)
+
+#define DESC_BIT(nr) BIT_ULL(nr)
+#define DESC_GENMASK(h, l) GENMASK_ULL(h, l)
+
+/* Defines the minimum firmware version required by the driver */
+#define MIN_FW_MAJOR_VERSION 0
+#define MIN_FW_MINOR_VERSION 10
+#define MIN_FW_BUILD_VERSION 6
+#define MIN_FW_VERSION_CODE (MIN_FW_MAJOR_VERSION * (1u << 24) + \
+ MIN_FW_MINOR_VERSION * (1u << 16) + \
+ MIN_FW_BUILD_VERSION)
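+/* e.g. the 00.10.06 minimum above encodes as 10 * (1u << 16) + 6 = 0xa0006 */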
+
+#define PCI_DEVICE_ID_META_FBNIC_ASIC 0x0013
+
+#define FBNIC_CLOCK_FREQ (600 * (1000 * 1000))
+
+/* Transmit Work Descriptor Format */
+/* Length, Type, Offset Masks and Shifts */
+#define FBNIC_TWD_L2_HLEN_MASK DESC_GENMASK(5, 0)
+
+#define FBNIC_TWD_L3_TYPE_MASK DESC_GENMASK(7, 6)
+enum {
+ FBNIC_TWD_L3_TYPE_OTHER = 0,
+ FBNIC_TWD_L3_TYPE_IPV4 = 1,
+ FBNIC_TWD_L3_TYPE_IPV6 = 2,
+ FBNIC_TWD_L3_TYPE_V6V6 = 3,
+};
+
+#define FBNIC_TWD_L3_OHLEN_MASK DESC_GENMASK(15, 8)
+#define FBNIC_TWD_L3_IHLEN_MASK DESC_GENMASK(23, 16)
+
+enum {
+ FBNIC_TWD_L4_TYPE_OTHER = 0,
+ FBNIC_TWD_L4_TYPE_TCP = 1,
+ FBNIC_TWD_L4_TYPE_UDP = 2,
+};
+
+#define FBNIC_TWD_CSUM_OFFSET_MASK DESC_GENMASK(27, 24)
+#define FBNIC_TWD_L4_HLEN_MASK DESC_GENMASK(31, 28)
+
+/* Flags and Type */
+#define FBNIC_TWD_L4_TYPE_MASK DESC_GENMASK(33, 32)
+#define FBNIC_TWD_FLAG_REQ_TS DESC_BIT(34)
+#define FBNIC_TWD_FLAG_REQ_LSO DESC_BIT(35)
+#define FBNIC_TWD_FLAG_REQ_CSO DESC_BIT(36)
+#define FBNIC_TWD_FLAG_REQ_COMPLETION DESC_BIT(37)
+#define FBNIC_TWD_FLAG_DEST_MAC DESC_BIT(43)
+#define FBNIC_TWD_FLAG_DEST_BMC DESC_BIT(44)
+#define FBNIC_TWD_FLAG_DEST_FW DESC_BIT(45)
+#define FBNIC_TWD_TYPE_MASK DESC_GENMASK(47, 46)
+enum {
+ FBNIC_TWD_TYPE_META = 0,
+ FBNIC_TWD_TYPE_OPT_META = 1,
+ FBNIC_TWD_TYPE_AL = 2,
+ FBNIC_TWD_TYPE_LAST_AL = 3,
+};
+
+/* MSS and Completion Req */
+#define FBNIC_TWD_MSS_MASK DESC_GENMASK(61, 48)
+
+#define FBNIC_TWD_TS_MASK DESC_GENMASK(39, 0)
+#define FBNIC_TWD_ADDR_MASK DESC_GENMASK(45, 0)
+#define FBNIC_TWD_LEN_MASK DESC_GENMASK(63, 48)
+
+/* Tx Completion Descriptor Format */
+#define FBNIC_TCD_TYPE0_HEAD0_MASK DESC_GENMASK(15, 0)
+#define FBNIC_TCD_TYPE0_HEAD1_MASK DESC_GENMASK(31, 16)
+
+#define FBNIC_TCD_TYPE1_TS_MASK DESC_GENMASK(39, 0)
+
+#define FBNIC_TCD_STATUS_MASK DESC_GENMASK(59, 48)
+#define FBNIC_TCD_STATUS_TS_INVALID DESC_BIT(48)
+#define FBNIC_TCD_STATUS_ILLEGAL_TS_REQ DESC_BIT(49)
+#define FBNIC_TCD_TWQ1 DESC_BIT(60)
+#define FBNIC_TCD_TYPE_MASK DESC_GENMASK(62, 61)
+enum {
+ FBNIC_TCD_TYPE_0 = 0,
+ FBNIC_TCD_TYPE_1 = 1,
+};
+
+#define FBNIC_TCD_DONE DESC_BIT(63)
+
+/* Rx Buffer Descriptor Format
+ *
+ * The layout of this can vary depending on the page size of the system.
+ *
+ * If the page size is 4K then the layout simply consists of the ID in
+ * the 16 most significant bits, with the lower 46 bits holding the page
+ * address; the lowest 12 bits are reserved as 0 since a page is
+ * naturally aligned.
+ *
+ * If the page size is larger than 4K then the lower n bits of the ID and
+ * page address are reserved for the fragment ID. Each fragment is 4K in
+ * size and indexes both the DMA address and the ID by the same amount.
+ */
+#define FBNIC_BD_DESC_ADDR_MASK DESC_GENMASK(45, 12)
+#define FBNIC_BD_DESC_ID_MASK DESC_GENMASK(63, 48)
+#define FBNIC_BD_FRAG_SIZE \
+ (FBNIC_BD_DESC_ADDR_MASK & ~(FBNIC_BD_DESC_ADDR_MASK - 1))
+#define FBNIC_BD_FRAG_COUNT \
+ (PAGE_SIZE / FBNIC_BD_FRAG_SIZE)
+#define FBNIC_BD_FRAG_ADDR_MASK \
+ (FBNIC_BD_DESC_ADDR_MASK & \
+ ~(FBNIC_BD_DESC_ADDR_MASK * FBNIC_BD_FRAG_COUNT))
+#define FBNIC_BD_FRAG_ID_MASK \
+ (FBNIC_BD_DESC_ID_MASK & \
+ ~(FBNIC_BD_DESC_ID_MASK * FBNIC_BD_FRAG_COUNT))
+#define FBNIC_BD_PAGE_ADDR_MASK \
+ (FBNIC_BD_DESC_ADDR_MASK & ~FBNIC_BD_FRAG_ADDR_MASK)
+#define FBNIC_BD_PAGE_ID_MASK \
+ (FBNIC_BD_DESC_ID_MASK & ~FBNIC_BD_FRAG_ID_MASK)
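+
+/* Worked example: with 64 KiB pages the macros above evaluate to
+ *   FBNIC_BD_FRAG_SIZE      = 1 << 12 (lowest set bit of the ADDR mask)
+ *   FBNIC_BD_FRAG_COUNT     = 65536 / 4096 = 16
+ *   FBNIC_BD_FRAG_ADDR_MASK = DESC_GENMASK(15, 12)
+ *   FBNIC_BD_PAGE_ADDR_MASK = DESC_GENMASK(45, 16)
+ * With 4 KiB pages FRAG_COUNT is 1, both FRAG masks collapse to 0, and the
+ * full ADDR/ID masks describe the page itself.
+ */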
+
+/* Rx Completion Queue Descriptors */
+#define FBNIC_RCD_TYPE_MASK DESC_GENMASK(62, 61)
+enum {
+ FBNIC_RCD_TYPE_HDR_AL = 0,
+ FBNIC_RCD_TYPE_PAY_AL = 1,
+ FBNIC_RCD_TYPE_OPT_META = 2,
+ FBNIC_RCD_TYPE_META = 3,
+};
+
+#define FBNIC_RCD_DONE DESC_BIT(63)
+
+/* Address/Length Completion Descriptors */
+#define FBNIC_RCD_AL_BUFF_ID_MASK DESC_GENMASK(15, 0)
+#define FBNIC_RCD_AL_BUFF_FRAG_MASK (FBNIC_BD_FRAG_COUNT - 1)
+#define FBNIC_RCD_AL_BUFF_PAGE_MASK \
+ (FBNIC_RCD_AL_BUFF_ID_MASK & ~FBNIC_RCD_AL_BUFF_FRAG_MASK)
+#define FBNIC_RCD_AL_BUFF_LEN_MASK DESC_GENMASK(28, 16)
+#define FBNIC_RCD_AL_BUFF_OFF_MASK DESC_GENMASK(43, 32)
+#define FBNIC_RCD_AL_PAGE_FIN DESC_BIT(60)
+
+/* Header AL specific values */
+#define FBNIC_RCD_HDR_AL_OVERFLOW DESC_BIT(53)
+#define FBNIC_RCD_HDR_AL_DMA_HINT_MASK DESC_GENMASK(59, 54)
+enum {
+ FBNIC_RCD_HDR_AL_DMA_HINT_NONE = 0,
+ FBNIC_RCD_HDR_AL_DMA_HINT_L2 = 1,
+ FBNIC_RCD_HDR_AL_DMA_HINT_L3 = 2,
+ FBNIC_RCD_HDR_AL_DMA_HINT_L4 = 4,
+};
+
+/* Optional Metadata Completion Descriptors */
+#define FBNIC_RCD_OPT_META_TS_MASK DESC_GENMASK(39, 0)
+#define FBNIC_RCD_OPT_META_ACTION_MASK DESC_GENMASK(45, 40)
+#define FBNIC_RCD_OPT_META_ACTION DESC_BIT(57)
+#define FBNIC_RCD_OPT_META_TS DESC_BIT(58)
+#define FBNIC_RCD_OPT_META_TYPE_MASK DESC_GENMASK(60, 59)
+
+/* Metadata Completion Descriptors */
+#define FBNIC_RCD_META_RSS_HASH_MASK DESC_GENMASK(31, 0)
+#define FBNIC_RCD_META_L2_CSUM_MASK DESC_GENMASK(47, 32)
+#define FBNIC_RCD_META_L3_TYPE_MASK DESC_GENMASK(49, 48)
+enum {
+ FBNIC_RCD_META_L3_TYPE_OTHER = 0,
+ FBNIC_RCD_META_L3_TYPE_IPV4 = 1,
+ FBNIC_RCD_META_L3_TYPE_IPV6 = 2,
+ FBNIC_RCD_META_L3_TYPE_V6V6 = 3,
+};
+
+#define FBNIC_RCD_META_L4_TYPE_MASK DESC_GENMASK(51, 50)
+enum {
+ FBNIC_RCD_META_L4_TYPE_OTHER = 0,
+ FBNIC_RCD_META_L4_TYPE_TCP = 1,
+ FBNIC_RCD_META_L4_TYPE_UDP = 2,
+};
+
+#define FBNIC_RCD_META_L4_CSUM_UNNECESSARY DESC_BIT(52)
+#define FBNIC_RCD_META_ERR_MAC_EOP DESC_BIT(53)
+#define FBNIC_RCD_META_ERR_TRUNCATED_FRAME DESC_BIT(54)
+#define FBNIC_RCD_META_ERR_PARSER DESC_BIT(55)
+#define FBNIC_RCD_META_UNCORRECTABLE_ERR_MASK \
+ (FBNIC_RCD_META_ERR_MAC_EOP | FBNIC_RCD_META_ERR_TRUNCATED_FRAME)
+#define FBNIC_RCD_META_ECN DESC_BIT(60)
+
+/* Register Definitions
+ *
+ * The registers are laid out as indexes into an le32 array. As such the
+ * actual address is 4 times the index value. Below, each register is
+ * defined by three fields: name, index, and address.
+ *
+ * Name Index Address
+ *************************************************************************/
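+/* Worked example: FBNIC_INTR_MASK_SET(0) below is index 0x00030, so its
+ * byte address is 4 * 0x30 = 0xc0, matching the 0x000c0 annotation.
+ */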
+/* Interrupt Registers */
+#define FBNIC_CSR_START_INTR 0x00000 /* CSR section delimiter */
+#define FBNIC_INTR_STATUS(n) (0x00000 + (n)) /* 0x00000 + 4*n */
+#define FBNIC_INTR_STATUS_CNT 8
+#define FBNIC_INTR_MASK(n) (0x00008 + (n)) /* 0x00020 + 4*n */
+#define FBNIC_INTR_MASK_CNT 8
+#define FBNIC_INTR_SET(n) (0x00010 + (n)) /* 0x00040 + 4*n */
+#define FBNIC_INTR_SET_CNT 8
+#define FBNIC_INTR_CLEAR(n) (0x00018 + (n)) /* 0x00060 + 4*n */
+#define FBNIC_INTR_CLEAR_CNT 8
+#define FBNIC_INTR_SW_STATUS(n) (0x00020 + (n)) /* 0x00080 + 4*n */
+#define FBNIC_INTR_SW_STATUS_CNT 8
+#define FBNIC_INTR_SW_AC_MODE(n) (0x00028 + (n)) /* 0x000a0 + 4*n */
+#define FBNIC_INTR_SW_AC_MODE_CNT 8
+#define FBNIC_INTR_MASK_SET(n) (0x00030 + (n)) /* 0x000c0 + 4*n */
+#define FBNIC_INTR_MASK_SET_CNT 8
+#define FBNIC_INTR_MASK_CLEAR(n) (0x00038 + (n)) /* 0x000e0 + 4*n */
+#define FBNIC_INTR_MASK_CLEAR_CNT 8
+#define FBNIC_MAX_MSIX_VECS 256U
+#define FBNIC_INTR_MSIX_CTRL(n) (0x00040 + (n)) /* 0x00100 + 4*n */
+#define FBNIC_INTR_MSIX_CTRL_VECTOR_MASK CSR_GENMASK(7, 0)
+#define FBNIC_INTR_MSIX_CTRL_ENABLE CSR_BIT(31)
+enum {
+ FBNIC_INTR_MSIX_CTRL_PCS_IDX = 34,
+};
+
+#define FBNIC_CSR_END_INTR 0x0005f /* CSR section delimiter */
+
+/* Interrupt MSIX Registers */
+#define FBNIC_CSR_START_INTR_CQ 0x00400 /* CSR section delimiter */
+#define FBNIC_INTR_CQ_REARM(n) \
+ (0x00400 + 4 * (n)) /* 0x01000 + 16*n */
+#define FBNIC_INTR_CQ_REARM_CNT 256
+#define FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT CSR_GENMASK(13, 0)
+#define FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT_UPD_EN CSR_BIT(14)
+#define FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT CSR_GENMASK(28, 15)
+#define FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT_UPD_EN CSR_BIT(29)
+#define FBNIC_INTR_CQ_REARM_INTR_RELOAD CSR_BIT(30)
+#define FBNIC_INTR_CQ_REARM_INTR_UNMASK CSR_BIT(31)
+
+#define FBNIC_INTR_RCQ_TIMEOUT(n) \
+ (0x00401 + 4 * (n)) /* 0x01004 + 16*n */
+#define FBNIC_INTR_RCQ_TIMEOUT_CNT 256
+#define FBNIC_INTR_TCQ_TIMEOUT(n) \
+ (0x00402 + 4 * (n)) /* 0x01008 + 16*n */
+#define FBNIC_INTR_TCQ_TIMEOUT_CNT 256
+#define FBNIC_CSR_END_INTR_CQ 0x007fe /* CSR section delimiter */
+
+/* Global QM Tx registers */
+#define FBNIC_CSR_START_QM_TX 0x00800 /* CSR section delimiter */
+#define FBNIC_QM_TWQ_IDLE(n) (0x00800 + (n)) /* 0x02000 + 4*n */
+#define FBNIC_QM_TWQ_IDLE_CNT 8
+#define FBNIC_QM_TWQ_DEFAULT_META_L 0x00818 /* 0x02060 */
+#define FBNIC_QM_TWQ_DEFAULT_META_H 0x00819 /* 0x02064 */
+
+#define FBNIC_QM_TQS_CTL0 0x0081b /* 0x0206c */
+#define FBNIC_QM_TQS_CTL0_LSO_TS_MASK CSR_BIT(0)
+enum {
+ FBNIC_QM_TQS_CTL0_LSO_TS_FIRST = 0,
+ FBNIC_QM_TQS_CTL0_LSO_TS_LAST = 1,
+};
+
+#define FBNIC_QM_TQS_CTL0_PREFETCH_THRESH CSR_GENMASK(7, 1)
+enum {
+ FBNIC_QM_TQS_CTL0_PREFETCH_THRESH_MIN = 16,
+};
+
+#define FBNIC_QM_TQS_CTL1 0x0081c /* 0x02070 */
+#define FBNIC_QM_TQS_CTL1_MC_MAX_CREDITS CSR_GENMASK(7, 0)
+#define FBNIC_QM_TQS_CTL1_BULK_MAX_CREDITS CSR_GENMASK(15, 8)
+#define FBNIC_QM_TQS_MTU_CTL0 0x0081d /* 0x02074 */
+#define FBNIC_QM_TQS_MTU_CTL1 0x0081e /* 0x02078 */
+#define FBNIC_QM_TQS_MTU_CTL1_BULK CSR_GENMASK(13, 0)
+#define FBNIC_QM_TCQ_IDLE(n) (0x00821 + (n)) /* 0x02084 + 4*n */
+#define FBNIC_QM_TCQ_IDLE_CNT 4
+#define FBNIC_QM_TCQ_CTL0 0x0082d /* 0x020b4 */
+#define FBNIC_QM_TCQ_CTL0_COAL_WAIT CSR_GENMASK(15, 0)
+#define FBNIC_QM_TCQ_CTL0_TICK_CYCLES CSR_GENMASK(26, 16)
+#define FBNIC_QM_TQS_IDLE(n) (0x00830 + (n)) /* 0x020c0 + 4*n */
+#define FBNIC_QM_TQS_IDLE_CNT 8
+#define FBNIC_QM_TQS_EDT_TS_RANGE 0x00849 /* 0x2124 */
+#define FBNIC_QM_TDE_IDLE(n) (0x00853 + (n)) /* 0x0214c + 4*n */
+#define FBNIC_QM_TDE_IDLE_CNT 8
+#define FBNIC_QM_TNI_TDF_CTL 0x0086c /* 0x021b0 */
+#define FBNIC_QM_TNI_TDF_CTL_MRRS CSR_GENMASK(1, 0)
+#define FBNIC_QM_TNI_TDF_CTL_CLS CSR_GENMASK(3, 2)
+#define FBNIC_QM_TNI_TDF_CTL_MAX_OT CSR_GENMASK(11, 4)
+#define FBNIC_QM_TNI_TDF_CTL_MAX_OB CSR_GENMASK(23, 12)
+#define FBNIC_QM_TNI_TDE_CTL 0x0086d /* 0x021b4 */
+#define FBNIC_QM_TNI_TDE_CTL_MRRS CSR_GENMASK(1, 0)
+#define FBNIC_QM_TNI_TDE_CTL_CLS CSR_GENMASK(3, 2)
+#define FBNIC_QM_TNI_TDE_CTL_MAX_OT CSR_GENMASK(11, 4)
+#define FBNIC_QM_TNI_TDE_CTL_MAX_OB CSR_GENMASK(24, 12)
+#define FBNIC_QM_TNI_TDE_CTL_MRRS_1K CSR_BIT(25)
+#define FBNIC_QM_TNI_TCM_CTL 0x0086e /* 0x021b8 */
+#define FBNIC_QM_TNI_TCM_CTL_MPS CSR_GENMASK(1, 0)
+#define FBNIC_QM_TNI_TCM_CTL_CLS CSR_GENMASK(3, 2)
+#define FBNIC_QM_TNI_TCM_CTL_MAX_OT CSR_GENMASK(11, 4)
+#define FBNIC_QM_TNI_TCM_CTL_MAX_OB CSR_GENMASK(23, 12)
+#define FBNIC_CSR_END_QM_TX 0x00873 /* CSR section delimiter */
+
+/* Global QM Rx registers */
+#define FBNIC_CSR_START_QM_RX 0x00c00 /* CSR section delimiter */
+#define FBNIC_QM_RCQ_IDLE(n) (0x00c00 + (n)) /* 0x03000 + 4*n */
+#define FBNIC_QM_RCQ_IDLE_CNT 4
+#define FBNIC_QM_RCQ_CTL0 0x00c0c /* 0x03030 */
+#define FBNIC_QM_RCQ_CTL0_COAL_WAIT CSR_GENMASK(15, 0)
+#define FBNIC_QM_RCQ_CTL0_TICK_CYCLES CSR_GENMASK(26, 16)
+#define FBNIC_QM_HPQ_IDLE(n) (0x00c0f + (n)) /* 0x0303c + 4*n */
+#define FBNIC_QM_HPQ_IDLE_CNT 4
+#define FBNIC_QM_PPQ_IDLE(n) (0x00c13 + (n)) /* 0x0304c + 4*n */
+#define FBNIC_QM_PPQ_IDLE_CNT 4
+#define FBNIC_QM_RNI_RBP_CTL 0x00c2d /* 0x030b4 */
+#define FBNIC_QM_RNI_RBP_CTL_MRRS CSR_GENMASK(1, 0)
+#define FBNIC_QM_RNI_RBP_CTL_CLS CSR_GENMASK(3, 2)
+#define FBNIC_QM_RNI_RBP_CTL_MAX_OT CSR_GENMASK(11, 4)
+#define FBNIC_QM_RNI_RBP_CTL_MAX_OB CSR_GENMASK(23, 12)
+#define FBNIC_QM_RNI_RDE_CTL 0x00c2e /* 0x030b8 */
+#define FBNIC_QM_RNI_RDE_CTL_MPS CSR_GENMASK(1, 0)
+#define FBNIC_QM_RNI_RDE_CTL_CLS CSR_GENMASK(3, 2)
+#define FBNIC_QM_RNI_RDE_CTL_MAX_OT CSR_GENMASK(11, 4)
+#define FBNIC_QM_RNI_RDE_CTL_MAX_OB CSR_GENMASK(23, 12)
+#define FBNIC_QM_RNI_RCM_CTL 0x00c2f /* 0x030bc */
+#define FBNIC_QM_RNI_RCM_CTL_MPS CSR_GENMASK(1, 0)
+#define FBNIC_QM_RNI_RCM_CTL_CLS CSR_GENMASK(3, 2)
+#define FBNIC_QM_RNI_RCM_CTL_MAX_OT CSR_GENMASK(11, 4)
+#define FBNIC_QM_RNI_RCM_CTL_MAX_OB CSR_GENMASK(23, 12)
+#define FBNIC_CSR_END_QM_RX 0x00c34 /* CSR section delimiter */
+
+/* TCE registers */
+#define FBNIC_CSR_START_TCE 0x04000 /* CSR section delimiter */
+#define FBNIC_TCE_REG_BASE 0x04000 /* 0x10000 */
+
+#define FBNIC_TCE_LSO_CTRL 0x04000 /* 0x10000 */
+#define FBNIC_TCE_LSO_CTRL_TCPF_CLR_1ST CSR_GENMASK(8, 0)
+#define FBNIC_TCE_LSO_CTRL_TCPF_CLR_MID CSR_GENMASK(17, 9)
+#define FBNIC_TCE_LSO_CTRL_TCPF_CLR_END CSR_GENMASK(26, 18)
+#define FBNIC_TCE_LSO_CTRL_IPID_MODE_INC CSR_BIT(27)
+
+#define FBNIC_TCE_CSO_CTRL 0x04001 /* 0x10004 */
+#define FBNIC_TCE_CSO_CTRL_TCP_ZERO_CSUM CSR_BIT(0)
+
+#define FBNIC_TCE_TXB_CTRL 0x04002 /* 0x10008 */
+#define FBNIC_TCE_TXB_CTRL_LOAD CSR_BIT(0)
+#define FBNIC_TCE_TXB_CTRL_TCAM_ENABLE CSR_BIT(1)
+#define FBNIC_TCE_TXB_CTRL_DISABLE CSR_BIT(2)
+
+#define FBNIC_TCE_TXB_ENQ_WRR_CTRL 0x04003 /* 0x1000c */
+#define FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT0 CSR_GENMASK(7, 0)
+#define FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT1 CSR_GENMASK(15, 8)
+#define FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT2 CSR_GENMASK(23, 16)
+
+#define FBNIC_TCE_TXB_TEI_Q0_CTRL 0x04004 /* 0x10010 */
+#define FBNIC_TCE_TXB_TEI_Q1_CTRL 0x04005 /* 0x10014 */
+#define FBNIC_TCE_TXB_MC_Q_CTRL 0x04006 /* 0x10018 */
+#define FBNIC_TCE_TXB_RX_TEI_Q_CTRL 0x04007 /* 0x1001c */
+#define FBNIC_TCE_TXB_RX_BMC_Q_CTRL 0x04008 /* 0x10020 */
+#define FBNIC_TCE_TXB_Q_CTRL_START CSR_GENMASK(10, 0)
+#define FBNIC_TCE_TXB_Q_CTRL_SIZE CSR_GENMASK(22, 11)
+
+#define FBNIC_TCE_TXB_TEI_DWRR_CTRL 0x04009 /* 0x10024 */
+#define FBNIC_TCE_TXB_TEI_DWRR_CTRL_QUANTUM0 CSR_GENMASK(7, 0)
+#define FBNIC_TCE_TXB_TEI_DWRR_CTRL_QUANTUM1 CSR_GENMASK(15, 8)
+#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL 0x0400a /* 0x10028 */
+#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM0 CSR_GENMASK(7, 0)
+#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM1 CSR_GENMASK(15, 8)
+#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM2 CSR_GENMASK(23, 16)
+
+#define FBNIC_TCE_TXB_CLDR_CFG 0x0400b /* 0x1002c */
+#define FBNIC_TCE_TXB_CLDR_CFG_NUM_SLOT CSR_GENMASK(5, 0)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG(n) (0x0400c + (n)) /* 0x10030 + 4*n */
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_CNT 16
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_0_0 CSR_GENMASK(1, 0)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_0_1 CSR_GENMASK(3, 2)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_0_2 CSR_GENMASK(5, 4)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_0_3 CSR_GENMASK(7, 6)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_1_0 CSR_GENMASK(9, 8)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_1_1 CSR_GENMASK(11, 10)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_1_2 CSR_GENMASK(13, 12)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_1_3 CSR_GENMASK(15, 14)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_2_0 CSR_GENMASK(17, 16)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_2_1 CSR_GENMASK(19, 18)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_2_2 CSR_GENMASK(21, 20)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_2_3 CSR_GENMASK(23, 22)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_3_0 CSR_GENMASK(25, 24)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_3_1 CSR_GENMASK(27, 26)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_3_2 CSR_GENMASK(29, 28)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_3_3 CSR_GENMASK(31, 30)
+
+#define FBNIC_TCE_BMC_MAX_PKTSZ 0x0403a /* 0x100e8 */
+#define FBNIC_TCE_BMC_MAX_PKTSZ_TX CSR_GENMASK(13, 0)
+#define FBNIC_TCE_BMC_MAX_PKTSZ_RX CSR_GENMASK(27, 14)
+#define FBNIC_TCE_MC_MAX_PKTSZ 0x0403b /* 0x100ec */
+#define FBNIC_TCE_MC_MAX_PKTSZ_TMI CSR_GENMASK(13, 0)
+
+#define FBNIC_TCE_SOP_PROT_CTRL 0x0403c /* 0x100f0 */
+#define FBNIC_TCE_SOP_PROT_CTRL_TBI CSR_GENMASK(7, 0)
+#define FBNIC_TCE_SOP_PROT_CTRL_TTI_FRM CSR_GENMASK(14, 8)
+#define FBNIC_TCE_SOP_PROT_CTRL_TTI_CM CSR_GENMASK(18, 15)
+
+#define FBNIC_TCE_DROP_CTRL 0x0403d /* 0x100f4 */
+#define FBNIC_TCE_DROP_CTRL_TTI_CM_DROP_EN CSR_BIT(0)
+#define FBNIC_TCE_DROP_CTRL_TTI_FRM_DROP_EN CSR_BIT(1)
+#define FBNIC_TCE_DROP_CTRL_TTI_TBI_DROP_EN CSR_BIT(2)
+
+#define FBNIC_TCE_TXB_TX_BMC_Q_CTRL 0x0404B /* 0x1012c */
+#define FBNIC_TCE_TXB_BMC_DWRR_CTRL 0x0404C /* 0x10130 */
+#define FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM0 CSR_GENMASK(7, 0)
+#define FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM1 CSR_GENMASK(15, 8)
+#define FBNIC_TCE_TXB_TEI_DWRR_CTRL_EXT 0x0404D /* 0x10134 */
+#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_EXT \
+ 0x0404E /* 0x10138 */
+#define FBNIC_TCE_TXB_BMC_DWRR_CTRL_EXT 0x0404F /* 0x1013c */
+#define FBNIC_CSR_END_TCE 0x04050 /* CSR section delimiter */
+
+/* TMI registers */
+#define FBNIC_CSR_START_TMI 0x04400 /* CSR section delimiter */
+#define FBNIC_TMI_SOP_PROT_CTRL 0x04400 /* 0x11000 */
+#define FBNIC_TMI_DROP_CTRL 0x04401 /* 0x11004 */
+#define FBNIC_TMI_DROP_CTRL_EN CSR_BIT(0)
+#define FBNIC_CSR_END_TMI 0x0443f /* CSR section delimiter */
+/* Rx Buffer Registers */
+#define FBNIC_CSR_START_RXB 0x08000 /* CSR section delimiter */
+enum {
+ FBNIC_RXB_FIFO_MC = 0,
+ /* Unused */
+ /* Unused */
+ FBNIC_RXB_FIFO_NET_TO_BMC = 3,
+ FBNIC_RXB_FIFO_HOST = 4,
+ /* Unused */
+ FBNIC_RXB_FIFO_BMC_TO_HOST = 6,
+ /* Unused */
+ FBNIC_RXB_FIFO_INDICES = 8
+};
+
+#define FBNIC_RXB_CT_SIZE(n) (0x08000 + (n)) /* 0x20000 + 4*n */
+#define FBNIC_RXB_CT_SIZE_CNT 8
+#define FBNIC_RXB_CT_SIZE_HEADER CSR_GENMASK(5, 0)
+#define FBNIC_RXB_CT_SIZE_PAYLOAD CSR_GENMASK(11, 6)
+#define FBNIC_RXB_CT_SIZE_ENABLE CSR_BIT(12)
+#define FBNIC_RXB_PAUSE_DROP_CTRL 0x08008 /* 0x20020 */
+#define FBNIC_RXB_PAUSE_DROP_CTRL_DROP_ENABLE CSR_GENMASK(7, 0)
+#define FBNIC_RXB_PAUSE_DROP_CTRL_PAUSE_ENABLE CSR_GENMASK(15, 8)
+#define FBNIC_RXB_PAUSE_DROP_CTRL_ECN_ENABLE CSR_GENMASK(23, 16)
+#define FBNIC_RXB_PAUSE_DROP_CTRL_PS_ENABLE CSR_GENMASK(27, 24)
+#define FBNIC_RXB_PAUSE_THLD(n) (0x08009 + (n)) /* 0x20024 + 4*n */
+#define FBNIC_RXB_PAUSE_THLD_CNT 8
+#define FBNIC_RXB_PAUSE_THLD_ON CSR_GENMASK(12, 0)
+#define FBNIC_RXB_PAUSE_THLD_OFF CSR_GENMASK(25, 13)
+#define FBNIC_RXB_DROP_THLD(n) (0x08011 + (n)) /* 0x20044 + 4*n */
+#define FBNIC_RXB_DROP_THLD_CNT 8
+#define FBNIC_RXB_DROP_THLD_ON CSR_GENMASK(12, 0)
+#define FBNIC_RXB_DROP_THLD_OFF CSR_GENMASK(25, 13)
+#define FBNIC_RXB_ECN_THLD(n) (0x0801e + (n)) /* 0x20078 + 4*n */
+#define FBNIC_RXB_ECN_THLD_CNT 8
+#define FBNIC_RXB_ECN_THLD_ON CSR_GENMASK(12, 0)
+#define FBNIC_RXB_ECN_THLD_OFF CSR_GENMASK(25, 13)
+#define FBNIC_RXB_PBUF_CFG(n) (0x08027 + (n)) /* 0x2009c + 4*n */
+#define FBNIC_RXB_PBUF_CFG_CNT 8
+#define FBNIC_RXB_PBUF_BASE_ADDR CSR_GENMASK(12, 0)
+#define FBNIC_RXB_PBUF_SIZE CSR_GENMASK(21, 13)
+#define FBNIC_RXB_DWRR_RDE_WEIGHT0 0x0802f /* 0x200bc */
+#define FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM0 CSR_GENMASK(7, 0)
+#define FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM1 CSR_GENMASK(15, 8)
+#define FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM2 CSR_GENMASK(23, 16)
+#define FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM3 CSR_GENMASK(31, 24)
+#define FBNIC_RXB_DWRR_RDE_WEIGHT1 0x08030 /* 0x200c0 */
+#define FBNIC_RXB_DWRR_RDE_WEIGHT1_QUANTUM4 CSR_GENMASK(7, 0)
+#define FBNIC_RXB_DWRR_BMC_WEIGHT 0x08031 /* 0x200c4 */
+#define FBNIC_RXB_CLDR_PRIO_CFG(n) (0x8034 + (n)) /* 0x200d0 + 4*n */
+#define FBNIC_RXB_CLDR_PRIO_CFG_CNT 16
+#define FBNIC_RXB_ENDIAN_FCS 0x08044 /* 0x20110 */
+enum {
+ /* Unused */
+ /* Unused */
+ FBNIC_RXB_DEQUEUE_BMC = 2,
+ FBNIC_RXB_DEQUEUE_HOST = 3,
+ FBNIC_RXB_DEQUEUE_INDICES = 4
+};
+
+#define FBNIC_RXB_PBUF_CREDIT(n) (0x08047 + (n)) /* 0x2011C + 4*n */
+#define FBNIC_RXB_PBUF_CREDIT_CNT 8
+#define FBNIC_RXB_PBUF_CREDIT_MASK CSR_GENMASK(13, 0)
+#define FBNIC_RXB_INTF_CREDIT 0x0804f /* 0x2013C */
+#define FBNIC_RXB_INTF_CREDIT_MASK0 CSR_GENMASK(3, 0)
+#define FBNIC_RXB_INTF_CREDIT_MASK1 CSR_GENMASK(7, 4)
+#define FBNIC_RXB_INTF_CREDIT_MASK2 CSR_GENMASK(11, 8)
+#define FBNIC_RXB_INTF_CREDIT_MASK3 CSR_GENMASK(15, 12)
+
+#define FBNIC_RXB_PAUSE_EVENT_CNT(n) (0x08053 + (n)) /* 0x2014c + 4*n */
+#define FBNIC_RXB_DROP_FRMS_STS(n) (0x08057 + (n)) /* 0x2015c + 4*n */
+#define FBNIC_RXB_DROP_BYTES_STS_L(n) \
+ (0x08080 + 2 * (n)) /* 0x20200 + 8*n */
+#define FBNIC_RXB_DROP_BYTES_STS_H(n) \
+ (0x08081 + 2 * (n)) /* 0x20204 + 8*n */
+#define FBNIC_RXB_TRUN_FRMS_STS(n) (0x08091 + (n)) /* 0x20244 + 4*n */
+#define FBNIC_RXB_TRUN_BYTES_STS_L(n) \
+ (0x080c0 + 2 * (n)) /* 0x20300 + 8*n */
+#define FBNIC_RXB_TRUN_BYTES_STS_H(n) \
+ (0x080c1 + 2 * (n)) /* 0x20304 + 8*n */
+#define FBNIC_RXB_TRANS_PAUSE_STS(n) (0x080d1 + (n)) /* 0x20344 + 4*n */
+#define FBNIC_RXB_TRANS_DROP_STS(n) (0x080d9 + (n)) /* 0x20364 + 4*n */
+#define FBNIC_RXB_TRANS_ECN_STS(n) (0x080e1 + (n)) /* 0x20384 + 4*n */
+enum {
+ FBNIC_RXB_ENQUEUE_NET = 0,
+ FBNIC_RXB_ENQUEUE_BMC = 1,
+ /* Unused */
+ /* Unused */
+ FBNIC_RXB_ENQUEUE_INDICES = 4
+};
+
+#define FBNIC_RXB_DRBO_FRM_CNT_SRC(n) (0x080f9 + (n)) /* 0x203e4 + 4*n */
+#define FBNIC_RXB_DRBO_BYTE_CNT_SRC_L(n) \
+ (0x080fd + (n)) /* 0x203f4 + 4*n */
+#define FBNIC_RXB_DRBO_BYTE_CNT_SRC_H(n) \
+ (0x08101 + (n)) /* 0x20404 + 4*n */
+#define FBNIC_RXB_INTF_FRM_CNT_DST(n) (0x08105 + (n)) /* 0x20414 + 4*n */
+#define FBNIC_RXB_INTF_BYTE_CNT_DST_L(n) \
+ (0x08109 + (n)) /* 0x20424 + 4*n */
+#define FBNIC_RXB_INTF_BYTE_CNT_DST_H(n) \
+ (0x0810d + (n)) /* 0x20434 + 4*n */
+#define FBNIC_RXB_PBUF_FRM_CNT_DST(n) (0x08111 + (n)) /* 0x20444 + 4*n */
+#define FBNIC_RXB_PBUF_BYTE_CNT_DST_L(n) \
+ (0x08115 + (n)) /* 0x20454 + 4*n */
+#define FBNIC_RXB_PBUF_BYTE_CNT_DST_H(n) \
+ (0x08119 + (n)) /* 0x20464 + 4*n */
+
+#define FBNIC_RXB_PBUF_FIFO_LEVEL(n) (0x0811d + (n)) /* 0x20474 + 4*n */
+
+#define FBNIC_RXB_INTEGRITY_ERR(n) (0x0812f + (n)) /* 0x204bc + 4*n */
+#define FBNIC_RXB_MAC_ERR(n) (0x08133 + (n)) /* 0x204cc + 4*n */
+#define FBNIC_RXB_PARSER_ERR(n) (0x08137 + (n)) /* 0x204dc + 4*n */
+#define FBNIC_RXB_FRM_ERR(n) (0x0813b + (n)) /* 0x204ec + 4*n */
+
+#define FBNIC_RXB_DWRR_RDE_WEIGHT0_EXT 0x08143 /* 0x2050c */
+#define FBNIC_RXB_DWRR_RDE_WEIGHT1_EXT 0x08144 /* 0x20510 */
+#define FBNIC_CSR_END_RXB 0x081b1 /* CSR section delimiter */
+
+/* Rx Parser and Classifier Registers */
+#define FBNIC_CSR_START_RPC 0x08400 /* CSR section delimiter */
+#define FBNIC_RPC_RMI_CONFIG 0x08400 /* 0x21000 */
+#define FBNIC_RPC_RMI_CONFIG_OH_BYTES CSR_GENMASK(4, 0)
+#define FBNIC_RPC_RMI_CONFIG_FCS_PRESENT CSR_BIT(8)
+#define FBNIC_RPC_RMI_CONFIG_ENABLE CSR_BIT(12)
+#define FBNIC_RPC_RMI_CONFIG_MTU CSR_GENMASK(31, 16)
+
+#define FBNIC_RPC_ACT_TBL0_DEFAULT 0x0840a /* 0x21028 */
+#define FBNIC_RPC_ACT_TBL0_DROP CSR_BIT(0)
+#define FBNIC_RPC_ACT_TBL0_DEST_MASK CSR_GENMASK(3, 1)
+enum {
+ FBNIC_RPC_ACT_TBL0_DEST_HOST = 1,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC = 2,
+ FBNIC_RPC_ACT_TBL0_DEST_EI = 4,
+};
+
+#define FBNIC_RPC_ACT_TBL0_DMA_HINT CSR_GENMASK(24, 16)
+#define FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID CSR_BIT(30)
+
+#define FBNIC_RPC_ACT_TBL1_DEFAULT 0x0840b /* 0x2102c */
+#define FBNIC_RPC_ACT_TBL1_RSS_ENA_MASK CSR_GENMASK(15, 0)
+enum {
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_IP_SRC = 1,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_IP_DST = 2,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_L4_SRC = 4,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_L4_DST = 8,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_L2_DA = 16,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_L4_RSS_BYTE = 32,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_IV6_FL_LBL = 64,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_OV6_FL_LBL = 128,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_DSCP = 256,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_L3_PROT = 512,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_L4_PROT = 1024,
+};
+
+#define FBNIC_RPC_RSS_KEY(n) (0x0840c + (n)) /* 0x21030 + 4*n */
+#define FBNIC_RPC_RSS_KEY_BIT_LEN 425
+#define FBNIC_RPC_RSS_KEY_BYTE_LEN \
+ DIV_ROUND_UP(FBNIC_RPC_RSS_KEY_BIT_LEN, 8)
+#define FBNIC_RPC_RSS_KEY_DWORD_LEN \
+ DIV_ROUND_UP(FBNIC_RPC_RSS_KEY_BIT_LEN, 32)
+#define FBNIC_RPC_RSS_KEY_LAST_IDX \
+ (FBNIC_RPC_RSS_KEY_DWORD_LEN - 1)
+#define FBNIC_RPC_RSS_KEY_LAST_MASK \
+ CSR_GENMASK(31, \
+ FBNIC_RPC_RSS_KEY_DWORD_LEN * 32 - \
+ FBNIC_RPC_RSS_KEY_BIT_LEN)
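+
+/* Worked example: 425 key bits occupy DIV_ROUND_UP(425, 32) = 14 dwords,
+ * so LAST_IDX is 13 and the final dword uses only its top
+ * 425 - 13 * 32 = 9 bits, making LAST_MASK equal to CSR_GENMASK(31, 23).
+ */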
+
+#define FBNIC_RPC_TCAM_MACDA_VALIDATE 0x0852d /* 0x214b4 */
+#define FBNIC_CSR_END_RPC 0x0856b /* CSR section delimiter */
+
+/* RPC RAM Registers */
+
+#define FBNIC_CSR_START_RPC_RAM 0x08800 /* CSR section delimiter */
+#define FBNIC_RPC_ACT_TBL0(n) (0x08800 + (n)) /* 0x22000 + 4*n */
+#define FBNIC_RPC_ACT_TBL1(n) (0x08840 + (n)) /* 0x22100 + 4*n */
+#define FBNIC_RPC_ACT_TBL_NUM_ENTRIES 64
+
+/* TCAM Tables */
+#define FBNIC_RPC_TCAM_VALIDATE CSR_BIT(31)
+
+/* 64 Action TCAM Entries, 12 registers
+ * 3 mixed, src port, dst port, 6 L4 words, and Validate
+ */
+#define FBNIC_RPC_TCAM_ACT(m, n) \
+ (0x08880 + 0x40 * (n) + (m)) /* 0x22200 + 256*n + 4*m */
+
+#define FBNIC_RPC_TCAM_ACT_VALUE CSR_GENMASK(15, 0)
+#define FBNIC_RPC_TCAM_ACT_MASK CSR_GENMASK(31, 16)
+
+#define FBNIC_RPC_TCAM_MACDA(m, n) \
+ (0x08b80 + 0x20 * (n) + (m)) /* 0x022e00 + 128*n + 4*m */
+#define FBNIC_RPC_TCAM_MACDA_VALUE CSR_GENMASK(15, 0)
+#define FBNIC_RPC_TCAM_MACDA_MASK CSR_GENMASK(31, 16)
+
+#define FBNIC_RPC_RSS_TBL(n, m) \
+ (0x08d20 + 0x100 * (n) + (m)) /* 0x023480 + 1024*n + 4*m */
+#define FBNIC_RPC_RSS_TBL_COUNT 2
+#define FBNIC_RPC_RSS_TBL_SIZE 256
+#define FBNIC_CSR_END_RPC_RAM 0x08f1f /* CSR section delimiter */
+
+/* Fab Registers */
+#define FBNIC_CSR_START_FAB 0x0C000 /* CSR section delimiter */
+#define FBNIC_FAB_AXI4_AR_SPACER_2_CFG 0x0C005 /* 0x30014 */
+#define FBNIC_FAB_AXI4_AR_SPACER_MASK CSR_BIT(16)
+#define FBNIC_FAB_AXI4_AR_SPACER_THREADSHOLD CSR_GENMASK(15, 0)
+#define FBNIC_CSR_END_FAB 0x0C020 /* CSR section delimiter */
+
+/* Master Registers */
+#define FBNIC_CSR_START_MASTER 0x0C400 /* CSR section delimiter */
+#define FBNIC_MASTER_SPARE_0 0x0C41B /* 0x3106c */
+#define FBNIC_CSR_END_MASTER 0x0C452 /* CSR section delimiter */
+
+/* MAC MAC registers (ASIC only) */
+#define FBNIC_CSR_START_MAC_MAC 0x11000 /* CSR section delimiter */
+#define FBNIC_MAC_COMMAND_CONFIG 0x11002 /* 0x44008 */
+#define FBNIC_MAC_COMMAND_CONFIG_RX_PAUSE_DIS CSR_BIT(29)
+#define FBNIC_MAC_COMMAND_CONFIG_TX_PAUSE_DIS CSR_BIT(28)
+#define FBNIC_MAC_COMMAND_CONFIG_FLT_HDL_DIS CSR_BIT(27)
+#define FBNIC_MAC_COMMAND_CONFIG_TX_PAD_EN CSR_BIT(11)
+#define FBNIC_MAC_COMMAND_CONFIG_LOOPBACK_EN CSR_BIT(10)
+#define FBNIC_MAC_COMMAND_CONFIG_PROMISC_EN CSR_BIT(4)
+#define FBNIC_MAC_COMMAND_CONFIG_RX_ENA CSR_BIT(1)
+#define FBNIC_MAC_COMMAND_CONFIG_TX_ENA CSR_BIT(0)
+#define FBNIC_MAC_CL01_PAUSE_QUANTA 0x11015 /* 0x44054 */
+#define FBNIC_MAC_CL01_QUANTA_THRESH 0x11019 /* 0x44064 */
+#define FBNIC_CSR_END_MAC_MAC 0x11028 /* CSR section delimiter */
+
+/* Signals from MAC, AN, PCS, and LED CSR registers (ASIC only) */
+#define FBNIC_CSR_START_SIG 0x11800 /* CSR section delimiter */
+#define FBNIC_SIG_MAC_IN0 0x11800 /* 0x46000 */
+#define FBNIC_SIG_MAC_IN0_RESET_FF_TX_CLK CSR_BIT(14)
+#define FBNIC_SIG_MAC_IN0_RESET_FF_RX_CLK CSR_BIT(13)
+#define FBNIC_SIG_MAC_IN0_RESET_TX_CLK CSR_BIT(12)
+#define FBNIC_SIG_MAC_IN0_RESET_RX_CLK CSR_BIT(11)
+#define FBNIC_SIG_MAC_IN0_TX_CRC CSR_BIT(8)
+#define FBNIC_SIG_MAC_IN0_CFG_MODE128 CSR_BIT(10)
+#define FBNIC_SIG_PCS_OUT0 0x11808 /* 0x46020 */
+#define FBNIC_SIG_PCS_OUT0_LINK CSR_BIT(27)
+#define FBNIC_SIG_PCS_OUT0_BLOCK_LOCK CSR_GENMASK(24, 5)
+#define FBNIC_SIG_PCS_OUT0_AMPS_LOCK CSR_GENMASK(4, 1)
+#define FBNIC_SIG_PCS_OUT1 0x11809 /* 0x46024 */
+#define FBNIC_SIG_PCS_OUT1_FCFEC_LOCK CSR_GENMASK(11, 8)
+#define FBNIC_SIG_PCS_INTR_STS 0x11814 /* 0x46050 */
+#define FBNIC_SIG_PCS_INTR_LINK_DOWN CSR_BIT(1)
+#define FBNIC_SIG_PCS_INTR_LINK_UP CSR_BIT(0)
+#define FBNIC_SIG_PCS_INTR_MASK 0x11816 /* 0x46058 */
+#define FBNIC_CSR_END_SIG 0x1184e /* CSR section delimiter */
+
+/* PUL User Registers */
+#define FBNIC_CSR_START_PUL_USER 0x31000 /* CSR section delimiter */
+#define FBNIC_PUL_OB_TLP_HDR_AW_CFG 0x3103d /* 0xc40f4 */
+#define FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME CSR_BIT(18)
+#define FBNIC_PUL_OB_TLP_HDR_AR_CFG 0x3103e /* 0xc40f8 */
+#define FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME CSR_BIT(18)
+#define FBNIC_CSR_END_PUL_USER 0x31080 /* CSR section delimiter */
+
+/* Queue Registers
+ *
+ * The queue register offsets are relative to a given queue grouping. To
+ * find the actual register offset, combine FBNIC_QUEUE(n) with the
+ * register like so:
+ * FBNIC_QUEUE_TWQ0_CTL(n) == FBNIC_QUEUE(n) + FBNIC_QUEUE_TWQ0_CTL
+ */
+#define FBNIC_CSR_START_QUEUE 0x40000 /* CSR section delimiter */
+#define FBNIC_QUEUE_STRIDE 0x400 /* 0x1000 */
+#define FBNIC_QUEUE(n)\
+ (0x40000 + FBNIC_QUEUE_STRIDE * (n)) /* 0x100000 + 4096*n */
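+
+/* Worked example: the TCQ head register (defined below) for queue 2 is
+ * FBNIC_QUEUE(2) + FBNIC_QUEUE_TCQ_HEAD = 0x40000 + 0x800 + 0x081 =
+ * 0x40881, i.e. byte address 4 * 0x40881 = 0x102204.
+ */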
+
+#define FBNIC_QUEUE_TWQ0_CTL 0x000 /* 0x000 */
+#define FBNIC_QUEUE_TWQ1_CTL 0x001 /* 0x004 */
+#define FBNIC_QUEUE_TWQ_CTL_RESET CSR_BIT(0)
+#define FBNIC_QUEUE_TWQ_CTL_ENABLE CSR_BIT(1)
+#define FBNIC_QUEUE_TWQ0_TAIL 0x002 /* 0x008 */
+#define FBNIC_QUEUE_TWQ1_TAIL 0x003 /* 0x00c */
+
+#define FBNIC_QUEUE_TWQ0_SIZE 0x00a /* 0x028 */
+#define FBNIC_QUEUE_TWQ1_SIZE 0x00b /* 0x02c */
+#define FBNIC_QUEUE_TWQ_SIZE_MASK CSR_GENMASK(3, 0)
+
+#define FBNIC_QUEUE_TWQ0_BAL 0x020 /* 0x080 */
+#define FBNIC_QUEUE_BAL_MASK CSR_GENMASK(31, 7)
+#define FBNIC_QUEUE_TWQ0_BAH 0x021 /* 0x084 */
+#define FBNIC_QUEUE_TWQ1_BAL 0x022 /* 0x088 */
+#define FBNIC_QUEUE_TWQ1_BAH 0x023 /* 0x08c */
+
+/* Tx Completion Queue Registers */
+#define FBNIC_QUEUE_TCQ_CTL 0x080 /* 0x200 */
+#define FBNIC_QUEUE_TCQ_CTL_RESET CSR_BIT(0)
+#define FBNIC_QUEUE_TCQ_CTL_ENABLE CSR_BIT(1)
+
+#define FBNIC_QUEUE_TCQ_HEAD 0x081 /* 0x204 */
+
+#define FBNIC_QUEUE_TCQ_SIZE 0x084 /* 0x210 */
+#define FBNIC_QUEUE_TCQ_SIZE_MASK CSR_GENMASK(3, 0)
+
+#define FBNIC_QUEUE_TCQ_BAL 0x0a0 /* 0x280 */
+#define FBNIC_QUEUE_TCQ_BAH 0x0a1 /* 0x284 */
+
+/* Tx Interrupt Manager Registers */
+#define FBNIC_QUEUE_TIM_CTL 0x0c0 /* 0x300 */
+#define FBNIC_QUEUE_TIM_CTL_MSIX_MASK CSR_GENMASK(7, 0)
+
+#define FBNIC_QUEUE_TIM_THRESHOLD 0x0c1 /* 0x304 */
+#define FBNIC_QUEUE_TIM_THRESHOLD_TWD_MASK CSR_GENMASK(14, 0)
+
+#define FBNIC_QUEUE_TIM_CLEAR 0x0c2 /* 0x308 */
+#define FBNIC_QUEUE_TIM_CLEAR_MASK CSR_BIT(0)
+#define FBNIC_QUEUE_TIM_SET 0x0c3 /* 0x30c */
+#define FBNIC_QUEUE_TIM_SET_MASK CSR_BIT(0)
+#define FBNIC_QUEUE_TIM_MASK 0x0c4 /* 0x310 */
+#define FBNIC_QUEUE_TIM_MASK_MASK CSR_BIT(0)
+
+#define FBNIC_QUEUE_TIM_TIMER 0x0c5 /* 0x314 */
+
+#define FBNIC_QUEUE_TIM_COUNTS 0x0c6 /* 0x318 */
+#define FBNIC_QUEUE_TIM_COUNTS_CNT1_MASK CSR_GENMASK(30, 16)
+#define FBNIC_QUEUE_TIM_COUNTS_CNT0_MASK CSR_GENMASK(14, 0)
+
+/* Rx Completion Queue Registers */
+#define FBNIC_QUEUE_RCQ_CTL 0x200 /* 0x800 */
+#define FBNIC_QUEUE_RCQ_CTL_RESET CSR_BIT(0)
+#define FBNIC_QUEUE_RCQ_CTL_ENABLE CSR_BIT(1)
+
+#define FBNIC_QUEUE_RCQ_HEAD 0x201 /* 0x804 */
+
+#define FBNIC_QUEUE_RCQ_SIZE 0x204 /* 0x810 */
+#define FBNIC_QUEUE_RCQ_SIZE_MASK CSR_GENMASK(3, 0)
+
+#define FBNIC_QUEUE_RCQ_BAL 0x220 /* 0x880 */
+#define FBNIC_QUEUE_RCQ_BAH 0x221 /* 0x884 */
+
+/* Rx Buffer Descriptor Queue Registers */
+#define FBNIC_QUEUE_BDQ_CTL 0x240 /* 0x900 */
+#define FBNIC_QUEUE_BDQ_CTL_RESET CSR_BIT(0)
+#define FBNIC_QUEUE_BDQ_CTL_ENABLE CSR_BIT(1)
+#define FBNIC_QUEUE_BDQ_CTL_PPQ_ENABLE CSR_BIT(30)
+
+#define FBNIC_QUEUE_BDQ_HPQ_TAIL 0x241 /* 0x904 */
+#define FBNIC_QUEUE_BDQ_PPQ_TAIL 0x242 /* 0x908 */
+
+#define FBNIC_QUEUE_BDQ_HPQ_SIZE 0x247 /* 0x91c */
+#define FBNIC_QUEUE_BDQ_PPQ_SIZE 0x248 /* 0x920 */
+#define FBNIC_QUEUE_BDQ_SIZE_MASK CSR_GENMASK(3, 0)
+
+#define FBNIC_QUEUE_BDQ_HPQ_BAL 0x260 /* 0x980 */
+#define FBNIC_QUEUE_BDQ_HPQ_BAH 0x261 /* 0x984 */
+#define FBNIC_QUEUE_BDQ_PPQ_BAL 0x262 /* 0x988 */
+#define FBNIC_QUEUE_BDQ_PPQ_BAH 0x263 /* 0x98c */
+
+/* Rx DMA Engine Configuration */
+#define FBNIC_QUEUE_RDE_CTL0 0x2a0 /* 0xa80 */
+#define FBNIC_QUEUE_RDE_CTL0_EN_HDR_SPLIT CSR_BIT(31)
+#define FBNIC_QUEUE_RDE_CTL0_DROP_MODE_MASK CSR_GENMASK(30, 29)
+enum {
+ FBNIC_QUEUE_RDE_CTL0_DROP_IMMEDIATE = 0,
+ FBNIC_QUEUE_RDE_CTL0_DROP_WAIT = 1,
+ FBNIC_QUEUE_RDE_CTL0_DROP_NEVER = 2,
+};
+
+#define FBNIC_QUEUE_RDE_CTL0_MIN_HROOM_MASK CSR_GENMASK(28, 20)
+#define FBNIC_QUEUE_RDE_CTL0_MIN_TROOM_MASK CSR_GENMASK(19, 11)
+
+#define FBNIC_QUEUE_RDE_CTL1 0x2a1 /* 0xa84 */
+#define FBNIC_QUEUE_RDE_CTL1_MAX_HDR_MASK CSR_GENMASK(24, 12)
+#define FBNIC_QUEUE_RDE_CTL1_PAYLD_OFF_MASK CSR_GENMASK(11, 9)
+#define FBNIC_QUEUE_RDE_CTL1_PAYLD_PG_CL_MASK CSR_GENMASK(8, 6)
+#define FBNIC_QUEUE_RDE_CTL1_PADLEN_MASK CSR_GENMASK(5, 2)
+#define FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_MASK CSR_GENMASK(1, 0)
+enum {
+ FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_NONE = 0,
+ FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_ALL = 1,
+ FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_RSS = 2,
+};
+
+/* Rx Interrupt Manager Registers */
+#define FBNIC_QUEUE_RIM_CTL 0x2c0 /* 0xb00 */
+#define FBNIC_QUEUE_RIM_CTL_MSIX_MASK CSR_GENMASK(7, 0)
+
+#define FBNIC_QUEUE_RIM_THRESHOLD 0x2c1 /* 0xb04 */
+#define FBNIC_QUEUE_RIM_THRESHOLD_RCD_MASK CSR_GENMASK(14, 0)
+
+#define FBNIC_QUEUE_RIM_CLEAR 0x2c2 /* 0xb08 */
+#define FBNIC_QUEUE_RIM_CLEAR_MASK CSR_BIT(0)
+#define FBNIC_QUEUE_RIM_SET 0x2c3 /* 0xb0c */
+#define FBNIC_QUEUE_RIM_SET_MASK CSR_BIT(0)
+#define FBNIC_QUEUE_RIM_MASK 0x2c4 /* 0xb10 */
+#define FBNIC_QUEUE_RIM_MASK_MASK CSR_BIT(0)
+
+#define FBNIC_QUEUE_RIM_COAL_STATUS 0x2c5 /* 0xb14 */
+#define FBNIC_QUEUE_RIM_RCD_COUNT_MASK CSR_GENMASK(30, 16)
+#define FBNIC_QUEUE_RIM_TIMER_MASK CSR_GENMASK(13, 0)
+#define FBNIC_MAX_QUEUES 128
+#define FBNIC_CSR_END_QUEUE (0x40000 + 0x400 * FBNIC_MAX_QUEUES - 1)
+
+/* BAR 4 CSRs */
+
+/* The IPC mailbox consists of 32 mailboxes, each made up of 32 4-byte
+ * registers. We use 2 registers per descriptor, so the effective length
+ * of each mailbox is reduced to 16 descriptors.
+ *
+ * Currently the mailbox region starts at offset 0x6000 on BAR 4, so the
+ * offset of a descriptor is computed from the mailbox direction and the
+ * index inside that mailbox.
+ */
+#define FBNIC_IPC_MBX_DESC_LEN 16
+#define FBNIC_IPC_MBX(mbx_idx, desc_idx) \
+ ((((mbx_idx) * FBNIC_IPC_MBX_DESC_LEN + (desc_idx)) * 2) + 0x6000)
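+
+/* Worked example: descriptor 3 of the Tx mailbox (index 1 per the enum
+ * below) starts at ((1 * 16 + 3) * 2) + 0x6000 = 0x6026, with the upper
+ * half of the descriptor in the adjacent register at 0x6027 (see
+ * __fbnic_mbx_wr_desc in fbnic_fw.c).
+ */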
+
+/* Use first register in mailbox to flush writes */
+#define FBNIC_FW_ZERO_REG FBNIC_IPC_MBX(0, 0)
+
+enum {
+ FBNIC_IPC_MBX_RX_IDX,
+ FBNIC_IPC_MBX_TX_IDX,
+ FBNIC_IPC_MBX_INDICES,
+};
+
+#define FBNIC_IPC_MBX_DESC_LEN_MASK DESC_GENMASK(63, 48)
+#define FBNIC_IPC_MBX_DESC_EOM DESC_BIT(46)
+#define FBNIC_IPC_MBX_DESC_ADDR_MASK DESC_GENMASK(45, 3)
+#define FBNIC_IPC_MBX_DESC_FW_CMPL DESC_BIT(1)
+#define FBNIC_IPC_MBX_DESC_HOST_CMPL DESC_BIT(0)
+
+#endif /* _FBNIC_CSR_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
new file mode 100644
index 000000000000..e87049dfd223
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <asm/unaligned.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <net/devlink.h>
+
+#include "fbnic.h"
+
+#define FBNIC_SN_STR_LEN 24
+
+static int fbnic_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_dev *fbd = devlink_priv(devlink);
+ int err;
+
+ if (fbd->dsn) {
+ unsigned char serial[FBNIC_SN_STR_LEN];
+ u8 dsn[8];
+
+ put_unaligned_be64(fbd->dsn, dsn);
+ err = snprintf(serial, FBNIC_SN_STR_LEN, "%8phD", dsn);
+ if (err < 0)
+ return err;
+
+ err = devlink_info_serial_number_put(req, serial);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct devlink_ops fbnic_devlink_ops = {
+ .info_get = fbnic_devlink_info_get,
+};
+
+void fbnic_devlink_free(struct fbnic_dev *fbd)
+{
+ struct devlink *devlink = priv_to_devlink(fbd);
+
+ devlink_free(devlink);
+}
+
+struct fbnic_dev *fbnic_devlink_alloc(struct pci_dev *pdev)
+{
+ void __iomem * const *iomap_table;
+ struct devlink *devlink;
+ struct fbnic_dev *fbd;
+
+ devlink = devlink_alloc(&fbnic_devlink_ops, sizeof(struct fbnic_dev),
+ &pdev->dev);
+ if (!devlink)
+ return NULL;
+
+ fbd = devlink_priv(devlink);
+ pci_set_drvdata(pdev, fbd);
+ fbd->dev = &pdev->dev;
+
+ iomap_table = pcim_iomap_table(pdev);
+ fbd->uc_addr0 = iomap_table[0];
+ fbd->uc_addr4 = iomap_table[4];
+
+ fbd->dsn = pci_get_dsn(pdev);
+ fbd->mps = pcie_get_mps(pdev);
+ fbd->readrq = pcie_get_readrq(pdev);
+
+ fbd->mac_addr_boundary = FBNIC_RPC_TCAM_MACDA_DEFAULT_BOUNDARY;
+
+ return fbd;
+}
+
+void fbnic_devlink_register(struct fbnic_dev *fbd)
+{
+ struct devlink *devlink = priv_to_devlink(fbd);
+
+ devlink_register(devlink);
+}
+
+void fbnic_devlink_unregister(struct fbnic_dev *fbd)
+{
+ struct devlink *devlink = priv_to_devlink(fbd);
+
+ devlink_unregister(devlink);
+}
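For reference, the "%8phD" specifier used in fbnic_devlink_info_get() above
prints the eight device serial number bytes as dash-separated hex. A small
sketch of what it produces (byte values hypothetical):

        u8 dsn[8] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
        char serial[FBNIC_SN_STR_LEN];

        /* Yields "00-11-22-33-44-55-66-77": 23 characters plus the NUL
         * terminator, which is why FBNIC_SN_STR_LEN is 24.
         */
        snprintf(serial, sizeof(serial), "%8phD", dsn);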
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_drvinfo.h b/drivers/net/ethernet/meta/fbnic/fbnic_drvinfo.h
new file mode 100644
index 000000000000..809ba6729442
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_drvinfo.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#define DRV_NAME "fbnic"
+#define DRV_SUMMARY "Meta(R) Host Network Interface Driver"
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
new file mode 100644
index 000000000000..0c6e1b4c119b
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -0,0 +1,791 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bitfield.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+
+#include "fbnic.h"
+#include "fbnic_tlv.h"
+
+static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx,
+ int desc_idx, u64 desc)
+{
+ u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
+
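+ /* Write the upper 32b and flush before writing the lower 32b so that
+ * the valid/completion bits in the low dword are the last thing the
+ * firmware can observe for this descriptor.
+ */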
+ fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc));
+ fw_wrfl(fbd);
+ fw_wr32(fbd, desc_offset, lower_32_bits(desc));
+}
+
+static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
+{
+ u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
+ u64 desc;
+
+ desc = fw_rd32(fbd, desc_offset);
+ desc |= (u64)fw_rd32(fbd, desc_offset + 1) << 32;
+
+ return desc;
+}
+
+static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+{
+ int desc_idx;
+
+ /* Initialize first descriptor to all 0s. Doing this gives us a
+ * solid stop for the firmware to hit when it is done looping
+ * through the ring.
+ */
+ __fbnic_mbx_wr_desc(fbd, mbx_idx, 0, 0);
+
+ fw_wrfl(fbd);
+
+ /* We then fill the rest of the ring, starting at the end and moving
+ * back toward descriptor 0, with skip descriptors that have neither
+ * length nor address, telling the firmware it can skip past them to
+ * the one we initialized to 0.
+ */
+ for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;) {
+ __fbnic_mbx_wr_desc(fbd, mbx_idx, desc_idx,
+ FBNIC_IPC_MBX_DESC_FW_CMPL |
+ FBNIC_IPC_MBX_DESC_HOST_CMPL);
+ fw_wrfl(fbd);
+ }
+}
+
+void fbnic_mbx_init(struct fbnic_dev *fbd)
+{
+ int i;
+
+ /* Initialize lock to protect Tx ring */
+ spin_lock_init(&fbd->fw_tx_lock);
+
+ /* Reinitialize mailbox memory */
+ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+ memset(&fbd->mbx[i], 0, sizeof(struct fbnic_fw_mbx));
+
+ /* Do not auto-clear the FW mailbox interrupt, let SW clear it */
+ wr32(fbd, FBNIC_INTR_SW_AC_MODE(0), ~(1u << FBNIC_FW_MSIX_ENTRY));
+
+ /* Clear any stale causes in vector 0 as that is used for doorbell */
+ wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+ fbnic_mbx_init_desc_ring(fbd, i);
+}
+
+static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
+ struct fbnic_tlv_msg *msg, u16 length, u8 eom)
+{
+ struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
+ u8 tail = mbx->tail;
+ dma_addr_t addr;
+ int direction;
+
+ if (!mbx->ready || !fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE;
+
+ if (mbx->head == ((tail + 1) % FBNIC_IPC_MBX_DESC_LEN))
+ return -EBUSY;
+
+ addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, direction);
+ if (dma_mapping_error(fbd->dev, addr)) {
+ free_page((unsigned long)msg);
+
+ return -ENOSPC;
+ }
+
+ mbx->buf_info[tail].msg = msg;
+ mbx->buf_info[tail].addr = addr;
+
+ mbx->tail = (tail + 1) % FBNIC_IPC_MBX_DESC_LEN;
+
+ fw_wr32(fbd, FBNIC_IPC_MBX(mbx_idx, mbx->tail), 0);
+
+ __fbnic_mbx_wr_desc(fbd, mbx_idx, tail,
+ FIELD_PREP(FBNIC_IPC_MBX_DESC_LEN_MASK, length) |
+ (addr & FBNIC_IPC_MBX_DESC_ADDR_MASK) |
+ (eom ? FBNIC_IPC_MBX_DESC_EOM : 0) |
+ FBNIC_IPC_MBX_DESC_HOST_CMPL);
+
+ return 0;
+}
+
+static void fbnic_mbx_unmap_and_free_msg(struct fbnic_dev *fbd, int mbx_idx,
+ int desc_idx)
+{
+ struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
+ int direction;
+
+ if (!mbx->buf_info[desc_idx].msg)
+ return;
+
+ direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE;
+ dma_unmap_single(fbd->dev, mbx->buf_info[desc_idx].addr,
+ PAGE_SIZE, direction);
+
+ free_page((unsigned long)mbx->buf_info[desc_idx].msg);
+ mbx->buf_info[desc_idx].msg = NULL;
+}
+
+static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+{
+ int i;
+
+ fbnic_mbx_init_desc_ring(fbd, mbx_idx);
+
+ for (i = FBNIC_IPC_MBX_DESC_LEN; i--;)
+ fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i);
+}
+
+void fbnic_mbx_clean(struct fbnic_dev *fbd)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+ fbnic_mbx_clean_desc_ring(fbd, i);
+}
+
+#define FBNIC_MBX_MAX_PAGE_SIZE FIELD_MAX(FBNIC_IPC_MBX_DESC_LEN_MASK)
+#define FBNIC_RX_PAGE_SIZE min_t(int, PAGE_SIZE, FBNIC_MBX_MAX_PAGE_SIZE)
+
+static int fbnic_mbx_alloc_rx_msgs(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
+ u8 tail = rx_mbx->tail, head = rx_mbx->head, count;
+ int err = 0;
+
+ /* Do nothing if mailbox is not ready, or we already have pages on
+ * the ring that can be used by the firmware
+ */
+ if (!rx_mbx->ready)
+ return -ENODEV;
+
+ /* Fill all but one of the unused descriptors in the Rx queue. */
+ count = (head - tail - 1) % FBNIC_IPC_MBX_DESC_LEN;
+ while (!err && count--) {
+ struct fbnic_tlv_msg *msg;
+
+ msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_ATOMIC |
+ __GFP_NOWARN);
+ if (!msg) {
+ err = -ENOMEM;
+ break;
+ }
+
+ err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_RX_IDX, msg,
+ FBNIC_RX_PAGE_SIZE, 0);
+ if (err)
+ free_page((unsigned long)msg);
+ }
+
+ return err;
+}
+
+static int fbnic_mbx_map_tlv_msg(struct fbnic_dev *fbd,
+ struct fbnic_tlv_msg *msg)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+
+ err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
+ le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);
+
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+
+ return err;
+}
+
+static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+ u8 head = tx_mbx->head;
+ u64 desc;
+
+ while (head != tx_mbx->tail) {
+ desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_TX_IDX, head);
+ if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
+ break;
+
+ fbnic_mbx_unmap_and_free_msg(fbd, FBNIC_IPC_MBX_TX_IDX, head);
+
+ head++;
+ head %= FBNIC_IPC_MBX_DESC_LEN;
+ }
+
+ /* Record head for next interrupt */
+ tx_mbx->head = head;
+}
+
+/**
+ * fbnic_fw_xmit_simple_msg - Transmit a simple single TLV message w/o data
+ * @fbd: FBNIC device structure
+ * @msg_type: ENUM value indicating message type to send
+ *
+ * Return:
+ * One of the following values:
+ * -EOPNOTSUPP: Not an ASIC, so the mailbox is not supported
+ * -ENODEV: Device I/O error
+ * -ENOMEM: Failed to allocate message
+ * -EBUSY: No space in mailbox
+ * -ENOSPC: DMA mapping failed
+ *
+ * This function sends a single TLV header indicating the host wants to take
+ * some action. However, there are no other side effects, which means that any
+ * response will need to be caught via a completion if this action is
+ * expected to kick off a resultant action.
+ */
+static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type)
+{
+ struct fbnic_tlv_msg *msg;
+ int err = 0;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ msg = fbnic_tlv_msg_alloc(msg_type);
+ if (!msg)
+ return -ENOMEM;
+
+ err = fbnic_mbx_map_tlv_msg(fbd, msg);
+ if (err)
+ free_page((unsigned long)msg);
+
+ return err;
+}
+
+/**
+ * fbnic_fw_xmit_cap_msg - Allocate and populate a FW capabilities message
+ * @fbd: FBNIC device structure
+ *
+ * Return: zero on success or when the mailbox is not supported, otherwise
+ * a negative error code from fbnic_fw_xmit_simple_msg().
+ *
+ * Sends a single TLV header indicating the host wants the firmware to
+ * confirm the capabilities and version.
+ */
+static int fbnic_fw_xmit_cap_msg(struct fbnic_dev *fbd)
+{
+ int err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
+
+ /* Return 0 if we are not calling this on ASIC */
+ return (err == -EOPNOTSUPP) ? 0 : err;
+}
+
+static void fbnic_mbx_postinit_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+{
+ struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
+
+ /* This is a one-time init, so just exit if it is completed */
+ if (mbx->ready)
+ return;
+
+ mbx->ready = true;
+
+ switch (mbx_idx) {
+ case FBNIC_IPC_MBX_RX_IDX:
+ /* Make sure we have a page for the FW to write to */
+ fbnic_mbx_alloc_rx_msgs(fbd);
+ break;
+ case FBNIC_IPC_MBX_TX_IDX:
+ /* Force version to 1 if we successfully requested an update
+ * from the firmware. This should be overwritten once we get
+ * the actual version from the firmware in the capabilities
+ * request message.
+ */
+ if (!fbnic_fw_xmit_cap_msg(fbd) &&
+ !fbd->fw_cap.running.mgmt.version)
+ fbd->fw_cap.running.mgmt.version = 1;
+ break;
+ }
+}
+
+static void fbnic_mbx_postinit(struct fbnic_dev *fbd)
+{
+ int i;
+
+ /* We only need to do this on the first interrupt following init.
+ * This primes the mailbox so that we will have cleared all the
+ * skip descriptors.
+ */
+ if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY)))
+ return;
+
+ wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+ fbnic_mbx_postinit_desc_ring(fbd, i);
+}
+
+/**
+ * fbnic_fw_xmit_ownership_msg - Create and transmit a host ownership message
+ * to FW mailbox
+ *
+ * @fbd: FBNIC device structure
+ * @take_ownership: take/release the ownership
+ *
+ * Return: zero on success, negative value on failure
+ *
+ * Notifies the firmware that the driver either takes ownership of the NIC
+ * (when @take_ownership is true) or releases it.
+ */
+int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership)
+{
+ unsigned long req_time = jiffies;
+ struct fbnic_tlv_msg *msg;
+ int err = 0;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_OWNERSHIP_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ if (take_ownership) {
+ err = fbnic_tlv_attr_put_flag(msg, FBNIC_FW_OWNERSHIP_FLAG);
+ if (err)
+ goto free_message;
+ }
+
+ err = fbnic_mbx_map_tlv_msg(fbd, msg);
+ if (err)
+ goto free_message;
+
+ /* Initialize heartbeat, set last response to 1 second in the past
+ * so that we will trigger a timeout if the firmware doesn't respond
+ */
+ fbd->last_heartbeat_response = req_time - HZ;
+
+ fbd->last_heartbeat_request = req_time;
+
+ /* Set heartbeat detection based on whether we are taking ownership */
+ fbd->fw_heartbeat_enabled = take_ownership;
+
+ return err;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_fw_cap_resp_index[] = {
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_VERSION),
+ FBNIC_TLV_ATTR_FLAG(FBNIC_FW_CAP_RESP_BMC_PRESENT),
+ FBNIC_TLV_ATTR_MAC_ADDR(FBNIC_FW_CAP_RESP_BMC_MAC_ADDR),
+ FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_VERSION),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT),
+ FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_SPEED),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_FEC),
+ FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_CMRT_VERSION),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION),
+ FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
+ FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_UEFI_VERSION),
+ FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_bmc_addrs(u8 bmc_mac_addr[][ETH_ALEN],
+ struct fbnic_tlv_msg *attr, int len)
+{
+ int attr_len = le16_to_cpu(attr->hdr.len) / sizeof(u32) - 1;
+ struct fbnic_tlv_msg *mac_results[8];
+ int err, i = 0;
+
+ /* Make sure we have enough room to process all the MAC addresses */
+ if (len > 8)
+ return -ENOSPC;
+
+ /* Parse the array */
+ err = fbnic_tlv_attr_parse_array(&attr[1], attr_len, mac_results,
+ fbnic_fw_cap_resp_index,
+ FBNIC_FW_CAP_RESP_BMC_MAC_ADDR, len);
+ if (err)
+ return err;
+
+ /* Copy results into MAC addr array */
+ for (i = 0; i < len && mac_results[i]; i++)
+ fbnic_tlv_attr_addr_copy(bmc_mac_addr[i], mac_results[i]);
+
+ /* Zero remaining unused addresses */
+ while (i < len)
+ eth_zero_addr(bmc_mac_addr[i++]);
+
+ return 0;
+}
+
+static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
+{
+ u32 active_slot = 0, all_multi = 0;
+ struct fbnic_dev *fbd = opaque;
+ u32 speed = 0, fec = 0;
+ size_t commit_size = 0;
+ bool bmc_present;
+ int err;
+
+ get_unsigned_result(FBNIC_FW_CAP_RESP_VERSION,
+ fbd->fw_cap.running.mgmt.version);
+
+ if (!fbd->fw_cap.running.mgmt.version)
+ return -EINVAL;
+
+ if (fbd->fw_cap.running.mgmt.version < MIN_FW_VERSION_CODE) {
+ char running_ver[FBNIC_FW_VER_MAX_SIZE];
+
+ fbnic_mk_fw_ver_str(fbd->fw_cap.running.mgmt.version,
+ running_ver);
+ dev_err(fbd->dev, "Device firmware version (%s) is older than minimum required version (%02d.%02d.%02d)\n",
+ running_ver,
+ MIN_FW_MAJOR_VERSION,
+ MIN_FW_MINOR_VERSION,
+ MIN_FW_BUILD_VERSION);
+ /* Disable TX mailbox to prevent card use until firmware is
+ * updated.
+ */
+ fbd->mbx[FBNIC_IPC_MBX_TX_IDX].ready = false;
+ return -EINVAL;
+ }
+
+ get_string_result(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR, commit_size,
+ fbd->fw_cap.running.mgmt.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+ if (!commit_size)
+ dev_warn(fbd->dev, "Firmware did not send mgmt commit!\n");
+
+ get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_VERSION,
+ fbd->fw_cap.stored.mgmt.version);
+ get_string_result(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR, commit_size,
+ fbd->fw_cap.stored.mgmt.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ get_unsigned_result(FBNIC_FW_CAP_RESP_CMRT_VERSION,
+ fbd->fw_cap.running.bootloader.version);
+ get_string_result(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR, commit_size,
+ fbd->fw_cap.running.bootloader.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION,
+ fbd->fw_cap.stored.bootloader.version);
+ get_string_result(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR, commit_size,
+ fbd->fw_cap.stored.bootloader.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ get_unsigned_result(FBNIC_FW_CAP_RESP_UEFI_VERSION,
+ fbd->fw_cap.stored.undi.version);
+ get_string_result(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR, commit_size,
+ fbd->fw_cap.stored.undi.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ get_unsigned_result(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT, active_slot);
+ fbd->fw_cap.active_slot = active_slot;
+
+ get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_SPEED, speed);
+ get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_FEC, fec);
+ fbd->fw_cap.link_speed = speed;
+ fbd->fw_cap.link_fec = fec;
+
+ bmc_present = !!results[FBNIC_FW_CAP_RESP_BMC_PRESENT];
+ if (bmc_present) {
+ struct fbnic_tlv_msg *attr;
+
+ attr = results[FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY];
+ if (!attr)
+ return -EINVAL;
+
+ err = fbnic_fw_parse_bmc_addrs(fbd->fw_cap.bmc_mac_addr,
+ attr, 4);
+ if (err)
+ return err;
+
+ get_unsigned_result(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI, all_multi);
+ } else {
+ memset(fbd->fw_cap.bmc_mac_addr, 0,
+ sizeof(fbd->fw_cap.bmc_mac_addr));
+ }
+
+ fbd->fw_cap.bmc_present = bmc_present;
+
+ if (results[FBNIC_FW_CAP_RESP_BMC_ALL_MULTI] || !bmc_present)
+ fbd->fw_cap.all_multi = all_multi;
+
+ return 0;
+}
+
+static const struct fbnic_tlv_index fbnic_ownership_resp_index[] = {
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_ownership_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;
+
+ /* Count the ownership response as a heartbeat reply */
+ fbd->last_heartbeat_response = jiffies;
+
+ return 0;
+}
+
+static const struct fbnic_tlv_index fbnic_heartbeat_resp_index[] = {
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_heartbeat_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;
+
+ fbd->last_heartbeat_response = jiffies;
+
+ return 0;
+}
+
+static int fbnic_fw_xmit_heartbeat_message(struct fbnic_dev *fbd)
+{
+ unsigned long req_time = jiffies;
+ struct fbnic_tlv_msg *msg;
+ int err = 0;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_HEARTBEAT_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ err = fbnic_mbx_map_tlv_msg(fbd, msg);
+ if (err)
+ goto free_message;
+
+ fbd->last_heartbeat_request = req_time;
+
+ return err;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static bool fbnic_fw_heartbeat_current(struct fbnic_dev *fbd)
+{
+ unsigned long last_response = fbd->last_heartbeat_response;
+ unsigned long last_request = fbd->last_heartbeat_request;
+
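+ /* time_before() is wraparound-safe, so this stays correct across a
+ * jiffies rollover.
+ */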
+ return !time_before(last_response, last_request);
+}
+
+int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll)
+{
+ int err = -ETIMEDOUT;
+ int attempts = 50;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
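+ /* Poll for up to 10 seconds (50 attempts of 200ms) for the firmware
+ * heartbeat to become current.
+ */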
+ while (attempts--) {
+ msleep(200);
+ if (poll)
+ fbnic_mbx_poll(fbd);
+
+ if (!fbnic_fw_heartbeat_current(fbd))
+ continue;
+
+ /* Place new message on mailbox to elicit a response */
+ err = fbnic_fw_xmit_heartbeat_message(fbd);
+ if (err)
+ dev_warn(fbd->dev,
+ "Failed to send heartbeat message: %d\n",
+ err);
+ break;
+ }
+
+ return err;
+}
+
+void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
+{
+ unsigned long last_request = fbd->last_heartbeat_request;
+ int err;
+
+ /* Do not check heartbeat or send another request until current
+ * period has expired. Otherwise we might start spamming requests.
+ */
+ if (time_is_after_jiffies(last_request + FW_HEARTBEAT_PERIOD))
+ return;
+
+ /* We already reported no mailbox. Wait for it to come back */
+ if (!fbd->fw_heartbeat_enabled)
+ return;
+
+ /* Was the last heartbeat response a long time ago? */
+ if (!fbnic_fw_heartbeat_current(fbd)) {
+ dev_warn(fbd->dev,
+ "Firmware did not respond to heartbeat message\n");
+ fbd->fw_heartbeat_enabled = false;
+ }
+
+ /* Place new message on mailbox to elicit a response */
+ err = fbnic_fw_xmit_heartbeat_message(fbd);
+ if (err)
+ dev_warn(fbd->dev, "Failed to send heartbeat message\n");
+}
+
+static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
+ FBNIC_TLV_PARSER(FW_CAP_RESP, fbnic_fw_cap_resp_index,
+ fbnic_fw_parse_cap_resp),
+ FBNIC_TLV_PARSER(OWNERSHIP_RESP, fbnic_ownership_resp_index,
+ fbnic_fw_parse_ownership_resp),
+ FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index,
+ fbnic_fw_parse_heartbeat_resp),
+ FBNIC_TLV_MSG_ERROR
+};
+
+static void fbnic_mbx_process_rx_msgs(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
+ u8 head = rx_mbx->head;
+ u64 desc, length;
+
+ while (head != rx_mbx->tail) {
+ struct fbnic_tlv_msg *msg;
+ int err;
+
+ desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_RX_IDX, head);
+ if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
+ break;
+
+ dma_unmap_single(fbd->dev, rx_mbx->buf_info[head].addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ msg = rx_mbx->buf_info[head].msg;
+
+ length = FIELD_GET(FBNIC_IPC_MBX_DESC_LEN_MASK, desc);
+
+ /* Ignore NULL mailbox descriptors */
+ if (!length)
+ goto next_page;
+
+ /* Report descriptors with length greater than page size */
+ if (length > PAGE_SIZE) {
+ dev_warn(fbd->dev,
+ "Invalid mailbox descriptor length: %lld\n",
+ length);
+ goto next_page;
+ }
+
+ if (le16_to_cpu(msg->hdr.len) * sizeof(u32) > length)
+ dev_warn(fbd->dev, "Mailbox message length mismatch\n");
+
+ /* If parsing fails dump contents of message to dmesg */
+ err = fbnic_tlv_msg_parse(fbd, msg, fbnic_fw_tlv_parser);
+ if (err) {
+ dev_warn(fbd->dev, "Unable to process message: %d\n",
+ err);
+ print_hex_dump(KERN_WARNING, "fbnic:",
+ DUMP_PREFIX_OFFSET, 16, 2,
+ msg, length, true);
+ }
+
+ dev_dbg(fbd->dev, "Parsed msg type %d\n", msg->hdr.type);
+next_page:
+ free_page((unsigned long)rx_mbx->buf_info[head].msg);
+ rx_mbx->buf_info[head].msg = NULL;
+
+ head++;
+ head %= FBNIC_IPC_MBX_DESC_LEN;
+ }
+
+ /* Record head for next interrupt */
+ rx_mbx->head = head;
+
+ /* Make sure we have at least one page for the FW to write to */
+ fbnic_mbx_alloc_rx_msgs(fbd);
+}
+
+void fbnic_mbx_poll(struct fbnic_dev *fbd)
+{
+ fbnic_mbx_postinit(fbd);
+
+ fbnic_mbx_process_tx_msgs(fbd);
+ fbnic_mbx_process_rx_msgs(fbd);
+}
+
+int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_mbx *tx_mbx;
+ int attempts = 50;
+
+ /* Immediate fail if BAR4 isn't there */
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
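+
+ /* Wait up to 10 seconds (50 attempts of 200ms) for the firmware to
+ * mark the Tx mailbox ready.
+ */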
+ while (!tx_mbx->ready && --attempts) {
+ /* Force the firmware to trigger an interrupt response to
+ * avoid the mailbox getting stuck closed if the interrupt
+ * is reset.
+ */
+ fbnic_mbx_init_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);
+
+ msleep(200);
+
+ fbnic_mbx_poll(fbd);
+ }
+
+ return attempts ? 0 : -ETIMEDOUT;
+}
+
+void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_mbx *tx_mbx;
+ int attempts = 50;
+ u8 count = 0;
+
+ /* Nothing to do if there is no mailbox */
+ if (!fbnic_fw_present(fbd))
+ return;
+
+ /* Grab the Tx mailbox */
+ tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+
+ /* Nothing to do if mailbox never got to ready */
+ if (!tx_mbx->ready)
+ return;
+
+ /* Give the firmware time to process the packets; we will wait up
+ * to 10 seconds, which is 50 waits of 200ms.
+ */
+ do {
+ u8 head = tx_mbx->head;
+
+ if (head == tx_mbx->tail)
+ break;
+
+ msleep(200);
+ fbnic_mbx_process_tx_msgs(fbd);
+
+ count += (tx_mbx->head - head) % FBNIC_IPC_MBX_DESC_LEN;
+ } while (count < FBNIC_IPC_MBX_DESC_LEN && --attempts);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
new file mode 100644
index 000000000000..c65bca613665
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_FW_H_
+#define _FBNIC_FW_H_
+
+#include <linux/if_ether.h>
+#include <linux/types.h>
+
+struct fbnic_dev;
+struct fbnic_tlv_msg;
+
+struct fbnic_fw_mbx {
+ u8 ready, head, tail;
+ struct {
+ struct fbnic_tlv_msg *msg;
+ dma_addr_t addr;
+ } buf_info[FBNIC_IPC_MBX_DESC_LEN];
+};
+
+/* FW_VER_MAX_SIZE must match ETHTOOL_FWVERS_LEN */
+#define FBNIC_FW_VER_MAX_SIZE 32
+/* Formatted version is in the format XX.YY.ZZ_RRR_COMMIT */
+#define FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE (FBNIC_FW_VER_MAX_SIZE - 13)
+#define FBNIC_FW_LOG_MAX_SIZE 256
+
+struct fbnic_fw_ver {
+ u32 version;
+ char commit[FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE];
+};
+
+struct fbnic_fw_cap {
+ struct {
+ struct fbnic_fw_ver mgmt, bootloader;
+ } running;
+ struct {
+ struct fbnic_fw_ver mgmt, bootloader, undi;
+ } stored;
+ u8 active_slot;
+ u8 bmc_mac_addr[4][ETH_ALEN];
+ u8 bmc_present : 1;
+ u8 all_multi : 1;
+ u8 link_speed;
+ u8 link_fec;
+};
+
+void fbnic_mbx_init(struct fbnic_dev *fbd);
+void fbnic_mbx_clean(struct fbnic_dev *fbd);
+void fbnic_mbx_poll(struct fbnic_dev *fbd);
+int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd);
+void fbnic_mbx_flush_tx(struct fbnic_dev *fbd);
+int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership);
+int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll);
+void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd);
+
+#define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str) \
+do { \
+ const u32 __rev_id = _rev_id; \
+ snprintf(_str, sizeof(_str), "%02lu.%02lu.%02lu-%03lu%s%s", \
+ FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_MAJOR, __rev_id), \
+ FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_MINOR, __rev_id), \
+ FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_PATCH, __rev_id), \
+ FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_BUILD, __rev_id), \
+ _delim, _commit); \
+} while (0)
+
+#define fbnic_mk_fw_ver_str(_rev_id, _str) \
+ fbnic_mk_full_fw_ver_str(_rev_id, "", "", _str)
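+
+/* Worked example (values are illustrative): rev_id 0x0a0b0c01 formats
+ * as "10.11.12-001" via fbnic_mk_fw_ver_str(); with delim "_" and
+ * commit "abcdef" the full form is "10.11.12-001_abcdef". The 13 bytes
+ * reserved in FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE cover the
+ * "XX.YY.ZZ-RRR" prefix plus the delimiter.
+ */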
+
+#define FW_HEARTBEAT_PERIOD (10 * HZ)
+
+enum {
+ FBNIC_TLV_MSG_ID_HOST_CAP_REQ = 0x10,
+ FBNIC_TLV_MSG_ID_FW_CAP_RESP = 0x11,
+ FBNIC_TLV_MSG_ID_OWNERSHIP_REQ = 0x12,
+ FBNIC_TLV_MSG_ID_OWNERSHIP_RESP = 0x13,
+ FBNIC_TLV_MSG_ID_HEARTBEAT_REQ = 0x14,
+ FBNIC_TLV_MSG_ID_HEARTBEAT_RESP = 0x15,
+};
+
+#define FBNIC_FW_CAP_RESP_VERSION_MAJOR CSR_GENMASK(31, 24)
+#define FBNIC_FW_CAP_RESP_VERSION_MINOR CSR_GENMASK(23, 16)
+#define FBNIC_FW_CAP_RESP_VERSION_PATCH CSR_GENMASK(15, 8)
+#define FBNIC_FW_CAP_RESP_VERSION_BUILD CSR_GENMASK(7, 0)
+enum {
+ FBNIC_FW_CAP_RESP_VERSION = 0x0,
+ FBNIC_FW_CAP_RESP_BMC_PRESENT = 0x1,
+ FBNIC_FW_CAP_RESP_BMC_MAC_ADDR = 0x2,
+ FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY = 0x3,
+ FBNIC_FW_CAP_RESP_STORED_VERSION = 0x4,
+ FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT = 0x5,
+ FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR = 0x6,
+ FBNIC_FW_CAP_RESP_BMC_ALL_MULTI = 0x8,
+ FBNIC_FW_CAP_RESP_FW_STATE = 0x9,
+ FBNIC_FW_CAP_RESP_FW_LINK_SPEED = 0xa,
+ FBNIC_FW_CAP_RESP_FW_LINK_FEC = 0xb,
+ FBNIC_FW_CAP_RESP_STORED_COMMIT_STR = 0xc,
+ FBNIC_FW_CAP_RESP_CMRT_VERSION = 0xd,
+ FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION = 0xe,
+ FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR = 0xf,
+ FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR = 0x10,
+ FBNIC_FW_CAP_RESP_UEFI_VERSION = 0x11,
+ FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR = 0x12,
+ FBNIC_FW_CAP_RESP_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_LINK_SPEED_25R1 = 1,
+ FBNIC_FW_LINK_SPEED_50R2 = 2,
+ FBNIC_FW_LINK_SPEED_50R1 = 3,
+ FBNIC_FW_LINK_SPEED_100R2 = 4,
+};
+
+enum {
+ FBNIC_FW_LINK_FEC_NONE = 1,
+ FBNIC_FW_LINK_FEC_RS = 2,
+ FBNIC_FW_LINK_FEC_BASER = 3,
+};
+
+enum {
+ FBNIC_FW_OWNERSHIP_FLAG = 0x0,
+ FBNIC_FW_OWNERSHIP_MSG_MAX
+};
+#endif /* _FBNIC_FW_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
new file mode 100644
index 000000000000..914362195920
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include "fbnic.h"
+#include "fbnic_netdev.h"
+#include "fbnic_txrx.h"
+
+static irqreturn_t fbnic_fw_msix_intr(int __always_unused irq, void *data)
+{
+ struct fbnic_dev *fbd = (struct fbnic_dev *)data;
+
+ fbnic_mbx_poll(fbd);
+
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * fbnic_fw_enable_mbx - Configure and initialize Firmware Mailbox
+ * @fbd: Pointer to device to initialize
+ *
+ * This function will initialize the firmware mailbox rings, enable the IRQ
+ * and initialize the communication between the Firmware and the host. The
+ * firmware is expected to respond to the initialization by sending an
+ * interrupt essentially notifying the host that it has seen the
+ * initialization and is now synced up.
+ *
+ * Return: non-zero on failure.
+ **/
+int fbnic_fw_enable_mbx(struct fbnic_dev *fbd)
+{
+ u32 vector = fbd->fw_msix_vector;
+ int err;
+
+ /* Request the IRQ for FW Mailbox vector. */
+ err = request_threaded_irq(vector, NULL, &fbnic_fw_msix_intr,
+ IRQF_ONESHOT, dev_name(fbd->dev), fbd);
+ if (err)
+ return err;
+
+ /* Initialize mailbox and attempt to poll it into ready state */
+ fbnic_mbx_init(fbd);
+ err = fbnic_mbx_poll_tx_ready(fbd);
+ if (err) {
+ dev_warn(fbd->dev, "FW mailbox did not enter ready state\n");
+ free_irq(vector, fbd);
+ return err;
+ }
+
+ /* Enable interrupts */
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ return 0;
+}
+
+/**
+ * fbnic_fw_disable_mbx - Disable mailbox and place it in standby state
+ * @fbd: Pointer to device to disable
+ *
+ * This function will disable the mailbox interrupt, free any messages still
+ * in the mailbox and place it into a standby state. The firmware is
+ * expected to see the update and assume that the host is in the reset state.
+ **/
+void fbnic_fw_disable_mbx(struct fbnic_dev *fbd)
+{
+ /* Disable interrupt and free vector */
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ /* Free the vector */
+ free_irq(fbd->fw_msix_vector, fbd);
+
+ /* Make sure any pending Tx messages are flushed; this must be done
+ * here as the interrupt is no longer running to complete them.
+ */
+ fbnic_mbx_flush_tx(fbd);
+
+ /* Reset the mailboxes to the initialized state */
+ fbnic_mbx_clean(fbd);
+}
+
+static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data)
+{
+ struct fbnic_dev *fbd = data;
+ struct fbnic_net *fbn;
+
+ if (fbd->mac->pcs_get_link_event(fbd) == FBNIC_LINK_EVENT_NONE) {
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0),
+ 1u << FBNIC_PCS_MSIX_ENTRY);
+ return IRQ_HANDLED;
+ }
+
+ fbn = netdev_priv(fbd->netdev);
+
+ phylink_pcs_change(&fbn->phylink_pcs, false);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * fbnic_pcs_irq_enable - Configure the MAC to enable it to advertise link
+ * @fbd: Pointer to device to initialize
+ *
+ * This function provides basic bringup for the MAC/PCS IRQ. For now the IRQ
+ * will remain disabled until we start the MAC/PCS/PHY logic via phylink.
+ *
+ * Return: non-zero on failure.
+ **/
+int fbnic_pcs_irq_enable(struct fbnic_dev *fbd)
+{
+ u32 vector = fbd->pcs_msix_vector;
+ int err;
+
+ /* Request the IRQ for MAC link vector.
+ * Map MAC cause to it, and unmask it
+ */
+ err = request_irq(vector, &fbnic_pcs_msix_intr, 0,
+ fbd->netdev->name, fbd);
+ if (err)
+ return err;
+
+ fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX),
+ FBNIC_PCS_MSIX_ENTRY | FBNIC_INTR_MSIX_CTRL_ENABLE);
+
+ return 0;
+}
+
+/**
+ * fbnic_pcs_irq_disable - Teardown the MAC IRQ to prepare for stopping
+ * @fbd: Pointer to device that is stopping
+ *
+ * This function undoes the work done in fbnic_pcs_irq_enable and prepares
+ * the device to no longer receive traffic on the host interface.
+ **/
+void fbnic_pcs_irq_disable(struct fbnic_dev *fbd)
+{
+ /* Disable interrupt */
+ fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX),
+ FBNIC_PCS_MSIX_ENTRY);
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_PCS_MSIX_ENTRY);
+
+ /* Free the vector */
+ free_irq(fbd->pcs_msix_vector, fbd);
+}
+
+int fbnic_request_irq(struct fbnic_dev *fbd, int nr, irq_handler_t handler,
+ unsigned long flags, const char *name, void *data)
+{
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int irq = pci_irq_vector(pdev, nr);
+
+ if (irq < 0)
+ return irq;
+
+ return request_irq(irq, handler, flags, name, data);
+}
+
+void fbnic_free_irq(struct fbnic_dev *fbd, int nr, void *data)
+{
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int irq = pci_irq_vector(pdev, nr);
+
+ if (irq < 0)
+ return;
+
+ free_irq(irq, data);
+}
+
+void fbnic_free_irqs(struct fbnic_dev *fbd)
+{
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+
+ fbd->pcs_msix_vector = 0;
+ fbd->fw_msix_vector = 0;
+
+ fbd->num_irqs = 0;
+
+ pci_free_irq_vectors(pdev);
+}
+
+int fbnic_alloc_irqs(struct fbnic_dev *fbd)
+{
+ unsigned int wanted_irqs = FBNIC_NON_NAPI_VECTORS;
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int num_irqs;
+
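+ /* Request one vector per NAPI instance, capped by the Rx queue
+ * count, on top of the non-NAPI (FW/PCS) vectors counted above.
+ */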
+ wanted_irqs += min_t(unsigned int, num_online_cpus(), FBNIC_MAX_RXQS);
+ num_irqs = pci_alloc_irq_vectors(pdev, FBNIC_NON_NAPI_VECTORS + 1,
+ wanted_irqs, PCI_IRQ_MSIX);
+ if (num_irqs < 0) {
+ dev_err(fbd->dev, "Failed to allocate MSI-X entries\n");
+ return num_irqs;
+ }
+
+ if (num_irqs < wanted_irqs)
+ dev_warn(fbd->dev, "Allocated %d IRQs, expected %d\n",
+ num_irqs, wanted_irqs);
+
+ fbd->num_irqs = num_irqs;
+
+ fbd->pcs_msix_vector = pci_irq_vector(pdev, FBNIC_PCS_MSIX_ENTRY);
+ fbd->fw_msix_vector = pci_irq_vector(pdev, FBNIC_FW_MSIX_ENTRY);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
new file mode 100644
index 000000000000..7920e7af82d9
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bitfield.h>
+#include <net/tcp.h>
+
+#include "fbnic.h"
+#include "fbnic_mac.h"
+#include "fbnic_netdev.h"
+
+static void fbnic_init_readrq(struct fbnic_dev *fbd, unsigned int offset,
+ unsigned int cls, unsigned int readrq)
+{
+ u32 val = rd32(fbd, offset);
+
+ /* The TDF_CTL masks are a superset of the RNI_RBP ones. So we can
+ * use them when setting either the TDF_CTL or RNI_RBP registers.
+ */
+ val &= FBNIC_QM_TNI_TDF_CTL_MAX_OT | FBNIC_QM_TNI_TDF_CTL_MAX_OB;
+
+ val |= FIELD_PREP(FBNIC_QM_TNI_TDF_CTL_MRRS, readrq) |
+ FIELD_PREP(FBNIC_QM_TNI_TDF_CTL_CLS, cls);
+
+ wr32(fbd, offset, val);
+}
+
+static void fbnic_init_mps(struct fbnic_dev *fbd, unsigned int offset,
+ unsigned int cls, unsigned int mps)
+{
+ u32 val = rd32(fbd, offset);
+
+ /* Currently all MPS masks are identical so just use the first one */
+ val &= ~(FBNIC_QM_TNI_TCM_CTL_MPS | FBNIC_QM_TNI_TCM_CTL_CLS);
+
+ val |= FIELD_PREP(FBNIC_QM_TNI_TCM_CTL_MPS, mps) |
+ FIELD_PREP(FBNIC_QM_TNI_TCM_CTL_CLS, cls);
+
+ wr32(fbd, offset, val);
+}
+
+static void fbnic_mac_init_axi(struct fbnic_dev *fbd)
+{
+ bool override_1k = false;
+ int readrq, mps, cls;
+
+ /* All of the values are based on being a power of 2 starting
+ * with 64 == 0. Therefore we can either divide by 64 in the
+ * case of constants, or just subtract 6 from the log2 of the value
+ * in order to get the value we will be programming into the
+ * registers.
+ */
+ readrq = ilog2(fbd->readrq) - 6;
+ if (readrq > 3)
+ override_1k = true;
+ readrq = clamp(readrq, 0, 3);
+
+ mps = ilog2(fbd->mps) - 6;
+ mps = clamp(mps, 0, 3);
+
+ cls = ilog2(L1_CACHE_BYTES) - 6;
+ cls = clamp(cls, 0, 3);
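+
+ /* Worked example (values are illustrative): an MRRS of 512B encodes
+ * as ilog2(512) - 6 = 3, the largest value these fields can hold. An
+ * MRRS of 1024B or more exceeds that, so it is clamped to 3 and
+ * signaled instead via the MRRS_1K override in the TDE control.
+ */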
+
+ /* Configure Tx/Rx AXI Paths w/ Read Request and Max Payload sizes */
+ fbnic_init_readrq(fbd, FBNIC_QM_TNI_TDF_CTL, cls, readrq);
+ fbnic_init_mps(fbd, FBNIC_QM_TNI_TCM_CTL, cls, mps);
+
+ /* Configure QM TNI TDE:
+ * - Max outstanding AXI beats to 704 (768 - 64) - guarantees 8% of
+ * buffer capacity to descriptors.
+ * - Max outstanding transactions to 128
+ */
+ wr32(fbd, FBNIC_QM_TNI_TDE_CTL,
+ FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MRRS_1K, override_1k ? 1 : 0) |
+ FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MAX_OB, 704) |
+ FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MAX_OT, 128) |
+ FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MRRS, readrq) |
+ FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_CLS, cls));
+
+ fbnic_init_readrq(fbd, FBNIC_QM_RNI_RBP_CTL, cls, readrq);
+ fbnic_init_mps(fbd, FBNIC_QM_RNI_RDE_CTL, cls, mps);
+ fbnic_init_mps(fbd, FBNIC_QM_RNI_RCM_CTL, cls, mps);
+
+ /* Enable XALI AR/AW outbound */
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
+}
+
+static void fbnic_mac_init_qm(struct fbnic_dev *fbd)
+{
+ u32 clock_freq;
+
+ /* Configure TSO behavior */
+ wr32(fbd, FBNIC_QM_TQS_CTL0,
+ FIELD_PREP(FBNIC_QM_TQS_CTL0_LSO_TS_MASK,
+ FBNIC_QM_TQS_CTL0_LSO_TS_LAST) |
+ FIELD_PREP(FBNIC_QM_TQS_CTL0_PREFETCH_THRESH,
+ FBNIC_QM_TQS_CTL0_PREFETCH_THRESH_MIN));
+
+ /* Limit EDT to INT_MAX as this is the limit of the EDT Qdisc */
+ wr32(fbd, FBNIC_QM_TQS_EDT_TS_RANGE, INT_MAX);
+
+ /* Configure MTU
+ * Due to a known HW issue we cannot set the MTU to within 16 octets
+ * of a 64 octet aligned boundary. So we will set the TQS_MTU(s) to
+ * MTU + 1.
+ */
+ wr32(fbd, FBNIC_QM_TQS_MTU_CTL0, FBNIC_MAX_JUMBO_FRAME_SIZE + 1);
+ wr32(fbd, FBNIC_QM_TQS_MTU_CTL1,
+ FIELD_PREP(FBNIC_QM_TQS_MTU_CTL1_BULK,
+ FBNIC_MAX_JUMBO_FRAME_SIZE + 1));
+
+ clock_freq = FBNIC_CLOCK_FREQ;
+
+ /* Be aggressive on the timings. We will have the interrupt
+ * threshold timer tick once every 1 usec and coalesce writes for
+ * up to 80 usecs.
+ */
+ wr32(fbd, FBNIC_QM_TCQ_CTL0,
+ FIELD_PREP(FBNIC_QM_TCQ_CTL0_TICK_CYCLES,
+ clock_freq / 1000000) |
+ FIELD_PREP(FBNIC_QM_TCQ_CTL0_COAL_WAIT,
+ clock_freq / 12500));
+
+ /* We will have the interrupt threshold timer tick once every
+ * 1 usec and coalesce writes for up to 2 usecs.
+ */
+ wr32(fbd, FBNIC_QM_RCQ_CTL0,
+ FIELD_PREP(FBNIC_QM_RCQ_CTL0_TICK_CYCLES,
+ clock_freq / 1000000) |
+ FIELD_PREP(FBNIC_QM_RCQ_CTL0_COAL_WAIT,
+ clock_freq / 500000));
+
+ /* Configure spacer control to 64 beats. */
+ wr32(fbd, FBNIC_FAB_AXI4_AR_SPACER_2_CFG,
+ FBNIC_FAB_AXI4_AR_SPACER_MASK |
+ FIELD_PREP(FBNIC_FAB_AXI4_AR_SPACER_THREADSHOLD, 2));
+}
+
+#define FBNIC_DROP_EN_MASK 0x7d
+#define FBNIC_PAUSE_EN_MASK 0x14
+#define FBNIC_ECN_EN_MASK 0x10
+
+struct fbnic_fifo_config {
+ unsigned int addr;
+ unsigned int size;
+};
+
+/* Rx FIFO Configuration
+ * The table consists of 8 entries, of which only 4 are currently used
+ * The starting addr is in units of 64B and the size is in 2KB units
+ * Below is the human readable version of the table defined below:
+ * Function Addr Size
+ * ----------------------------------
+ * Network to Host/BMC 384K 64K
+ * Unused
+ * Unused
+ * Network to BMC 448K 32K
+ * Network to Host 0 384K
+ * Unused
+ * BMC to Host 480K 32K
+ * Unused
+ */
+static const struct fbnic_fifo_config fifo_config[] = {
+ { .addr = 0x1800, .size = 0x20 }, /* Network to Host/BMC */
+ { }, /* Unused */
+ { }, /* Unused */
+ { .addr = 0x1c00, .size = 0x10 }, /* Network to BMC */
+ { .addr = 0x0000, .size = 0xc0 }, /* Network to Host */
+ { }, /* Unused */
+ { .addr = 0x1e00, .size = 0x10 }, /* BMC to Host */
+ { } /* Unused */
+};
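+
+/* Sanity check on the units: entry 0 starts at 0x1800 * 64B = 384K and
+ * spans 0x20 * 2KB = 64K, matching the "Network to Host/BMC" row above.
+ */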
+
+static void fbnic_mac_init_rxb(struct fbnic_dev *fbd)
+{
+ bool rx_enable;
+ int i;
+
+ rx_enable = !!(rd32(fbd, FBNIC_RPC_RMI_CONFIG) &
+ FBNIC_RPC_RMI_CONFIG_ENABLE);
+
+ for (i = 0; i < 8; i++) {
+ unsigned int size = fifo_config[i].size;
+
+ /* If we are coming up on a system that already has the
+ * Rx data path enabled we don't need to reconfigure the
+ * FIFOs. Instead we can check to verify the values are
+ * large enough to meet our needs, and use the values to
+ * populate the flow control, ECN, and drop thresholds.
+ */
+ if (rx_enable) {
+ size = FIELD_GET(FBNIC_RXB_PBUF_SIZE,
+ rd32(fbd, FBNIC_RXB_PBUF_CFG(i)));
+ if (size < fifo_config[i].size)
+ dev_warn(fbd->dev,
+ "fifo%d size of %d smaller than expected value of %d\n",
+ i, size << 11,
+ fifo_config[i].size << 11);
+ } else {
+ /* Program RXB cut-through */
+ wr32(fbd, FBNIC_RXB_CT_SIZE(i),
+ FIELD_PREP(FBNIC_RXB_CT_SIZE_HEADER, 4) |
+ FIELD_PREP(FBNIC_RXB_CT_SIZE_PAYLOAD, 2));
+
+ /* The packet buffer size is programmed in 2KB granularity
+ * while the packet buffer base address is programmed in
+ * 64B granularity.
+ */
+ wr32(fbd, FBNIC_RXB_PBUF_CFG(i),
+ FIELD_PREP(FBNIC_RXB_PBUF_BASE_ADDR,
+ fifo_config[i].addr) |
+ FIELD_PREP(FBNIC_RXB_PBUF_SIZE, size));
+
+ /* The granularity for the credits is 64B. This is
+ * based on RXB_PBUF_SIZE * 32 + 4.
+ */
+ wr32(fbd, FBNIC_RXB_PBUF_CREDIT(i),
+ FIELD_PREP(FBNIC_RXB_PBUF_CREDIT_MASK,
+ size ? size * 32 + 4 : 0));
+ }
+
+ if (!size)
+ continue;
+
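+ /* Threshold units below are 64B credits: each 2KB of buffer is 32
+ * credits, so the 0x380 credit skid equals 56KB.
+ */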
+ /* Pause is size of FIFO with 56KB skid to start/stop */
+ wr32(fbd, FBNIC_RXB_PAUSE_THLD(i),
+ !(FBNIC_PAUSE_EN_MASK & (1u << i)) ? 0x1fff :
+ FIELD_PREP(FBNIC_RXB_PAUSE_THLD_ON,
+ size * 32 - 0x380) |
+ FIELD_PREP(FBNIC_RXB_PAUSE_THLD_OFF, 0x380));
+
+ /* Enable Drop when only one packet is left in the FIFO */
+ wr32(fbd, FBNIC_RXB_DROP_THLD(i),
+ !(FBNIC_DROP_EN_MASK & (1u << i)) ? 0x1fff :
+ FIELD_PREP(FBNIC_RXB_DROP_THLD_ON,
+ size * 32 -
+ FBNIC_MAX_JUMBO_FRAME_SIZE / 64) |
+ FIELD_PREP(FBNIC_RXB_DROP_THLD_OFF,
+ size * 32 -
+ FBNIC_MAX_JUMBO_FRAME_SIZE / 64));
+
+ /* Set the ECN bit once 1/4 of the RXB is filled, using one
+ * full jumbo frame's worth of buffer as the floor for the
+ * threshold.
+ */
+ wr32(fbd, FBNIC_RXB_ECN_THLD(i),
+ !(FBNIC_ECN_EN_MASK & (1u << i)) ? 0x1fff :
+ FIELD_PREP(FBNIC_RXB_ECN_THLD_ON,
+ max_t(unsigned int,
+ size * 32 / 4,
+ FBNIC_MAX_JUMBO_FRAME_SIZE / 64)) |
+ FIELD_PREP(FBNIC_RXB_ECN_THLD_OFF,
+ max_t(unsigned int,
+ size * 32 / 4,
+ FBNIC_MAX_JUMBO_FRAME_SIZE / 64)));
+ }
+
+ /* For now only enable drop and ECN. We need to add driver/kernel
+ * interfaces for configuring pause.
+ */
+ wr32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL,
+ FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_DROP_ENABLE,
+ FBNIC_DROP_EN_MASK) |
+ FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_ECN_ENABLE,
+ FBNIC_ECN_EN_MASK));
+
+ /* Program INTF credits */
+ wr32(fbd, FBNIC_RXB_INTF_CREDIT,
+ FBNIC_RXB_INTF_CREDIT_MASK0 |
+ FBNIC_RXB_INTF_CREDIT_MASK1 |
+ FBNIC_RXB_INTF_CREDIT_MASK2 |
+ FIELD_PREP(FBNIC_RXB_INTF_CREDIT_MASK3, 8));
+
+ /* Configure calendar slots.
+ * Rx: 0 - 62 RDE 1st, BMC 2nd
+ * 63 BMC 1st, RDE 2nd
+ */
+ for (i = 0; i < 16; i++) {
+ u32 calendar_val = (i == 15) ? 0x1e1b1b1b : 0x1b1b1b1b;
+
+ wr32(fbd, FBNIC_RXB_CLDR_PRIO_CFG(i), calendar_val);
+ }
+
+ /* Split the credits for the DRR up as follows:
+ * Quantum0: 8000 Network to Host
+ * Quantum1: 0 Not used
+ * Quantum2: 80 BMC to Host
+ * Quantum3: 0 Not used
+ * Quantum4: 8000 Multicast to Host and BMC
+ */
+ wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT0,
+ FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM0, 0x40) |
+ FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM2, 0x50));
+ wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT0_EXT,
+ FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM0, 0x1f));
+ wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT1,
+ FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT1_QUANTUM4, 0x40));
+ wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT1_EXT,
+ FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT1_QUANTUM4, 0x1f));
+
+ /* Program RXB FCS Endian register */
+ wr32(fbd, FBNIC_RXB_ENDIAN_FCS, 0x0aaaaaa0);
+}
+
+static void fbnic_mac_init_txb(struct fbnic_dev *fbd)
+{
+ int i;
+
+ wr32(fbd, FBNIC_TCE_TXB_CTRL, 0);
+
+ /* Configure Tx QM Credits */
+ wr32(fbd, FBNIC_QM_TQS_CTL1,
+ FIELD_PREP(FBNIC_QM_TQS_CTL1_MC_MAX_CREDITS, 0x40) |
+ FIELD_PREP(FBNIC_QM_TQS_CTL1_BULK_MAX_CREDITS, 0x20));
+
+ /* Initialize internal Tx queues */
+ wr32(fbd, FBNIC_TCE_TXB_TEI_Q0_CTRL, 0);
+ wr32(fbd, FBNIC_TCE_TXB_TEI_Q1_CTRL, 0);
+ wr32(fbd, FBNIC_TCE_TXB_MC_Q_CTRL,
+ FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x400) |
+ FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x000));
+ wr32(fbd, FBNIC_TCE_TXB_RX_TEI_Q_CTRL, 0);
+ wr32(fbd, FBNIC_TCE_TXB_TX_BMC_Q_CTRL,
+ FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x200) |
+ FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x400));
+ wr32(fbd, FBNIC_TCE_TXB_RX_BMC_Q_CTRL,
+ FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x200) |
+ FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x600));
+
+ wr32(fbd, FBNIC_TCE_LSO_CTRL,
+ FBNIC_TCE_LSO_CTRL_IPID_MODE_INC |
+ FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_1ST, TCPHDR_PSH |
+ TCPHDR_FIN) |
+ FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_MID, TCPHDR_PSH |
+ TCPHDR_CWR |
+ TCPHDR_FIN) |
+ FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_END, TCPHDR_CWR));
+ wr32(fbd, FBNIC_TCE_CSO_CTRL, 0);
+
+ wr32(fbd, FBNIC_TCE_BMC_MAX_PKTSZ,
+ FIELD_PREP(FBNIC_TCE_BMC_MAX_PKTSZ_TX,
+ FBNIC_MAX_JUMBO_FRAME_SIZE) |
+ FIELD_PREP(FBNIC_TCE_BMC_MAX_PKTSZ_RX,
+ FBNIC_MAX_JUMBO_FRAME_SIZE));
+ wr32(fbd, FBNIC_TCE_MC_MAX_PKTSZ,
+ FIELD_PREP(FBNIC_TCE_MC_MAX_PKTSZ_TMI,
+ FBNIC_MAX_JUMBO_FRAME_SIZE));
+
+ /* Configure calendar slots.
+ * Tx: 0 - 62 TMI 1st, BMC 2nd
+ * 63 BMC 1st, TMI 2nd
+ */
+ for (i = 0; i < 16; i++) {
+ u32 calendar_val = (i == 15) ? 0x1e1b1b1b : 0x1b1b1b1b;
+
+ wr32(fbd, FBNIC_TCE_TXB_CLDR_SLOT_CFG(i), calendar_val);
+ }
+
+ /* Configure DWRR */
+ wr32(fbd, FBNIC_TCE_TXB_ENQ_WRR_CTRL,
+ FIELD_PREP(FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT0, 0x64) |
+ FIELD_PREP(FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT2, 0x04));
+ wr32(fbd, FBNIC_TCE_TXB_TEI_DWRR_CTRL, 0);
+ wr32(fbd, FBNIC_TCE_TXB_TEI_DWRR_CTRL_EXT, 0);
+ wr32(fbd, FBNIC_TCE_TXB_BMC_DWRR_CTRL,
+ FIELD_PREP(FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM0, 0x50) |
+ FIELD_PREP(FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM1, 0x82));
+ wr32(fbd, FBNIC_TCE_TXB_BMC_DWRR_CTRL_EXT, 0);
+ wr32(fbd, FBNIC_TCE_TXB_NTWRK_DWRR_CTRL,
+ FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM1, 0x50) |
+ FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM2, 0x20));
+ wr32(fbd, FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_EXT,
+ FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM2, 0x03));
+
+ /* Configure SOP protocol protection */
+ wr32(fbd, FBNIC_TCE_SOP_PROT_CTRL,
+ FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TBI, 0x78) |
+ FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TTI_FRM, 0x40) |
+ FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TTI_CM, 0x0c));
+
+ /* Conservative configuration on MAC interface Start of Packet
+ * protection FIFO. This sets the minimum depth of the FIFO before
+ * we start sending packets to the MAC measured in 64B units and
+ * up to 160 entries deep.
+ *
+ * For the ASIC the clock is fast enough that we will likely fill
+ * the SOP FIFO before the MAC can drain it. So just use a minimum
+ * value of 8.
+ */
+ wr32(fbd, FBNIC_TMI_SOP_PROT_CTRL, 8);
+
+ wrfl(fbd);
+ wr32(fbd, FBNIC_TCE_TXB_CTRL, FBNIC_TCE_TXB_CTRL_TCAM_ENABLE |
+ FBNIC_TCE_TXB_CTRL_LOAD);
+}
+
+static void fbnic_mac_init_regs(struct fbnic_dev *fbd)
+{
+ fbnic_mac_init_axi(fbd);
+ fbnic_mac_init_qm(fbd);
+ fbnic_mac_init_rxb(fbd);
+ fbnic_mac_init_txb(fbd);
+}
+
+static void fbnic_mac_tx_pause_config(struct fbnic_dev *fbd, bool tx_pause)
+{
+ u32 rxb_pause_ctrl;
+
+ /* Enable generation of pause frames if Tx pause is enabled */
+ rxb_pause_ctrl = rd32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL);
+ rxb_pause_ctrl &= ~FBNIC_RXB_PAUSE_DROP_CTRL_PAUSE_ENABLE;
+ if (tx_pause)
+ rxb_pause_ctrl |=
+ FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_PAUSE_ENABLE,
+ FBNIC_PAUSE_EN_MASK);
+ wr32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL, rxb_pause_ctrl);
+}
+
+static int fbnic_pcs_get_link_event_asic(struct fbnic_dev *fbd)
+{
+ u32 pcs_intr_mask = rd32(fbd, FBNIC_SIG_PCS_INTR_STS);
+
+ if (pcs_intr_mask & FBNIC_SIG_PCS_INTR_LINK_DOWN)
+ return FBNIC_LINK_EVENT_DOWN;
+
+ return (pcs_intr_mask & FBNIC_SIG_PCS_INTR_LINK_UP) ?
+ FBNIC_LINK_EVENT_UP : FBNIC_LINK_EVENT_NONE;
+}
+
+static u32 __fbnic_mac_cmd_config_asic(struct fbnic_dev *fbd,
+ bool tx_pause, bool rx_pause)
+{
+ /* Enable MAC Promiscuous mode and Tx padding */
+ u32 command_config = FBNIC_MAC_COMMAND_CONFIG_TX_PAD_EN |
+ FBNIC_MAC_COMMAND_CONFIG_PROMISC_EN;
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+
+ /* Disable pause frames if not enabled */
+ if (!tx_pause)
+ command_config |= FBNIC_MAC_COMMAND_CONFIG_TX_PAUSE_DIS;
+ if (!rx_pause)
+ command_config |= FBNIC_MAC_COMMAND_CONFIG_RX_PAUSE_DIS;
+
+ /* Disable fault handling if no FEC is requested */
+ if ((fbn->fec & FBNIC_FEC_MODE_MASK) == FBNIC_FEC_OFF)
+ command_config |= FBNIC_MAC_COMMAND_CONFIG_FLT_HDL_DIS;
+
+ return command_config;
+}
+
+static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd)
+{
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+ u32 pcs_status, lane_mask = ~0;
+
+ pcs_status = rd32(fbd, FBNIC_SIG_PCS_OUT0);
+ if (!(pcs_status & FBNIC_SIG_PCS_OUT0_LINK))
+ return false;
+
+ /* Define the expected lane mask for the status bits we need to check */
+ switch (fbn->link_mode & FBNIC_LINK_MODE_MASK) {
+ case FBNIC_LINK_100R2:
+ lane_mask = 0xf;
+ break;
+ case FBNIC_LINK_50R1:
+ lane_mask = 3;
+ break;
+ case FBNIC_LINK_50R2:
+ switch (fbn->fec & FBNIC_FEC_MODE_MASK) {
+ case FBNIC_FEC_OFF:
+ lane_mask = 0x63;
+ break;
+ case FBNIC_FEC_RS:
+ lane_mask = 5;
+ break;
+ case FBNIC_FEC_BASER:
+ lane_mask = 0xf;
+ break;
+ }
+ break;
+ case FBNIC_LINK_25R1:
+ lane_mask = 1;
+ break;
+ }
+
+ /* Use an XOR to remove the bits we expect to see set */
+ switch (fbn->fec & FBNIC_FEC_MODE_MASK) {
+ case FBNIC_FEC_OFF:
+ lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT0_BLOCK_LOCK,
+ pcs_status);
+ break;
+ case FBNIC_FEC_RS:
+ lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT0_AMPS_LOCK,
+ pcs_status);
+ break;
+ case FBNIC_FEC_BASER:
+ lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT1_FCFEC_LOCK,
+ rd32(fbd, FBNIC_SIG_PCS_OUT1));
+ break;
+ }
+
+ /* If all lane bits cancelled out then we have a lock on all lanes */
+ return !lane_mask;
+}
+
+static bool fbnic_pcs_get_link_asic(struct fbnic_dev *fbd)
+{
+ bool link;
+
+ /* Flush status bits to clear possible stale data; the bits
+ * should reset themselves back to 1 if the link is truly up
+ */
+ wr32(fbd, FBNIC_SIG_PCS_OUT0, FBNIC_SIG_PCS_OUT0_LINK |
+ FBNIC_SIG_PCS_OUT0_BLOCK_LOCK |
+ FBNIC_SIG_PCS_OUT0_AMPS_LOCK);
+ wr32(fbd, FBNIC_SIG_PCS_OUT1, FBNIC_SIG_PCS_OUT1_FCFEC_LOCK);
+ wrfl(fbd);
+
+ /* Clear interrupt state due to recent changes. */
+ wr32(fbd, FBNIC_SIG_PCS_INTR_STS,
+ FBNIC_SIG_PCS_INTR_LINK_DOWN | FBNIC_SIG_PCS_INTR_LINK_UP);
+
+ link = fbnic_mac_get_pcs_link_status(fbd);
+
+ /* Enable interrupt to only capture changes in link state */
+ wr32(fbd, FBNIC_SIG_PCS_INTR_MASK,
+ ~FBNIC_SIG_PCS_INTR_LINK_DOWN & ~FBNIC_SIG_PCS_INTR_LINK_UP);
+ wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_PCS_MSIX_ENTRY);
+
+ return link;
+}
+
+static void fbnic_pcs_get_fw_settings(struct fbnic_dev *fbd)
+{
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+ u8 link_mode = fbn->link_mode;
+ u8 fec = fbn->fec;
+
+ /* Update FEC first to reflect FW current mode */
+ if (fbn->fec & FBNIC_FEC_AUTO) {
+ switch (fbd->fw_cap.link_fec) {
+ case FBNIC_FW_LINK_FEC_NONE:
+ fec = FBNIC_FEC_OFF;
+ break;
+ case FBNIC_FW_LINK_FEC_RS:
+ fec = FBNIC_FEC_RS;
+ break;
+ case FBNIC_FW_LINK_FEC_BASER:
+ fec = FBNIC_FEC_BASER;
+ break;
+ default:
+ return;
+ }
+
+ fbn->fec = fec;
+ }
+
+ /* Do nothing if AUTO mode is not engaged */
+ if (fbn->link_mode & FBNIC_LINK_AUTO) {
+ switch (fbd->fw_cap.link_speed) {
+ case FBNIC_FW_LINK_SPEED_25R1:
+ link_mode = FBNIC_LINK_25R1;
+ break;
+ case FBNIC_FW_LINK_SPEED_50R2:
+ link_mode = FBNIC_LINK_50R2;
+ break;
+ case FBNIC_FW_LINK_SPEED_50R1:
+ link_mode = FBNIC_LINK_50R1;
+ fec = FBNIC_FEC_RS;
+ break;
+ case FBNIC_FW_LINK_SPEED_100R2:
+ link_mode = FBNIC_LINK_100R2;
+ fec = FBNIC_FEC_RS;
+ break;
+ default:
+ return;
+ }
+
+ fbn->link_mode = link_mode;
+ }
+}
+
+static int fbnic_pcs_enable_asic(struct fbnic_dev *fbd)
+{
+ /* Mask and clear the PCS interrupt, will be enabled by link handler */
+ wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0);
+ wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0);
+
+ /* Pull in settings from FW */
+ fbnic_pcs_get_fw_settings(fbd);
+
+ return 0;
+}
+
+static void fbnic_pcs_disable_asic(struct fbnic_dev *fbd)
+{
+ /* Mask and clear the PCS interrupt */
+ wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0);
+ wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0);
+}
+
+static void fbnic_mac_link_down_asic(struct fbnic_dev *fbd)
+{
+ u32 cmd_cfg, mac_ctrl;
+
+ cmd_cfg = __fbnic_mac_cmd_config_asic(fbd, false, false);
+ mac_ctrl = rd32(fbd, FBNIC_SIG_MAC_IN0);
+
+ mac_ctrl |= FBNIC_SIG_MAC_IN0_RESET_FF_TX_CLK |
+ FBNIC_SIG_MAC_IN0_RESET_TX_CLK |
+ FBNIC_SIG_MAC_IN0_RESET_FF_RX_CLK |
+ FBNIC_SIG_MAC_IN0_RESET_RX_CLK;
+
+ wr32(fbd, FBNIC_SIG_MAC_IN0, mac_ctrl);
+ wr32(fbd, FBNIC_MAC_COMMAND_CONFIG, cmd_cfg);
+}
+
+static void fbnic_mac_link_up_asic(struct fbnic_dev *fbd,
+ bool tx_pause, bool rx_pause)
+{
+ u32 cmd_cfg, mac_ctrl;
+
+ fbnic_mac_tx_pause_config(fbd, tx_pause);
+
+ cmd_cfg = __fbnic_mac_cmd_config_asic(fbd, tx_pause, rx_pause);
+ mac_ctrl = rd32(fbd, FBNIC_SIG_MAC_IN0);
+
+ mac_ctrl &= ~(FBNIC_SIG_MAC_IN0_RESET_FF_TX_CLK |
+ FBNIC_SIG_MAC_IN0_RESET_TX_CLK |
+ FBNIC_SIG_MAC_IN0_RESET_FF_RX_CLK |
+ FBNIC_SIG_MAC_IN0_RESET_RX_CLK);
+ cmd_cfg |= FBNIC_MAC_COMMAND_CONFIG_RX_ENA |
+ FBNIC_MAC_COMMAND_CONFIG_TX_ENA;
+
+ wr32(fbd, FBNIC_SIG_MAC_IN0, mac_ctrl);
+ wr32(fbd, FBNIC_MAC_COMMAND_CONFIG, cmd_cfg);
+}
+
+static const struct fbnic_mac fbnic_mac_asic = {
+ .init_regs = fbnic_mac_init_regs,
+ .pcs_enable = fbnic_pcs_enable_asic,
+ .pcs_disable = fbnic_pcs_disable_asic,
+ .pcs_get_link = fbnic_pcs_get_link_asic,
+ .pcs_get_link_event = fbnic_pcs_get_link_event_asic,
+ .link_down = fbnic_mac_link_down_asic,
+ .link_up = fbnic_mac_link_up_asic,
+};
+
+/**
+ * fbnic_mac_init - Assign a MAC type and initialize the fbnic device
+ * @fbd: Device pointer to device to initialize
+ *
+ * Return: zero on success, negative on failure
+ *
+ * Initialize the MAC function pointers and the MAC of the device.
+ **/
+int fbnic_mac_init(struct fbnic_dev *fbd)
+{
+ fbd->mac = &fbnic_mac_asic;
+
+ fbd->mac->init_regs(fbd);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
new file mode 100644
index 000000000000..f53be6e6aef9
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_MAC_H_
+#define _FBNIC_MAC_H_
+
+#include <linux/types.h>
+
+struct fbnic_dev;
+
+#define FBNIC_MAX_JUMBO_FRAME_SIZE 9742
+
+enum {
+ FBNIC_LINK_EVENT_NONE = 0,
+ FBNIC_LINK_EVENT_UP = 1,
+ FBNIC_LINK_EVENT_DOWN = 2,
+};
+
+/* Treat the FEC bits as a bitmask laid out as follows:
+ * Bit 0: RS Enabled
+ * Bit 1: BASER(Firecode) Enabled
+ * Bit 2: Retrieve FEC from FW
+ */
+enum {
+ FBNIC_FEC_OFF = 0,
+ FBNIC_FEC_RS = 1,
+ FBNIC_FEC_BASER = 2,
+ FBNIC_FEC_AUTO = 4,
+};
+
+#define FBNIC_FEC_MODE_MASK (FBNIC_FEC_AUTO - 1)
+
+/* Treat the link modes as a set of modulation/lanes bitmask:
+ * Bit 0: Lane Count, 0 = R1, 1 = R2
+ * Bit 1: Modulation, 0 = NRZ, 1 = PAM4
+ * Bit 2: Retrieve link mode from FW
+ */
+enum {
+ FBNIC_LINK_25R1 = 0,
+ FBNIC_LINK_50R2 = 1,
+ FBNIC_LINK_50R1 = 2,
+ FBNIC_LINK_100R2 = 3,
+ FBNIC_LINK_AUTO = 4,
+};
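+
+/* Example: FBNIC_LINK_100R2 = 3 sets bit 0 (two lanes, R2) and bit 1
+ * (PAM4), while FBNIC_LINK_25R1 = 0 is a single NRZ lane.
+ */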
+
+#define FBNIC_LINK_MODE_R2 (FBNIC_LINK_50R2)
+#define FBNIC_LINK_MODE_PAM4 (FBNIC_LINK_50R1)
+#define FBNIC_LINK_MODE_MASK (FBNIC_LINK_AUTO - 1)
+
+/* This structure defines the interface hooks for the MAC. The MAC hooks
+ * will be configured as a const struct provided with a set of function
+ * pointers.
+ *
+ * void (*init_regs)(struct fbnic_dev *fbd);
+ * Initialize MAC registers to enable Tx/Rx paths and FIFOs.
+ *
+ * void (*pcs_enable)(struct fbnic_dev *fbd);
+ * Configure and enable PCS to enable link if not already enabled
+ * void (*pcs_disable)(struct fbnic_dev *fbd);
+ * Shut down the link if we are the only consumer of it.
+ * bool (*pcs_get_link)(struct fbnic_dev *fbd);
+ * Check PCS link status
+ * int (*pcs_get_link_event)(struct fbnic_dev *fbd);
+ * Get the current link event status, returning FBNIC_LINK_EVENT_NONE
+ * when unchanged, or FBNIC_LINK_EVENT_UP/FBNIC_LINK_EVENT_DOWN when
+ * the link has changed
+ *
+ * void (*link_down)(struct fbnic_dev *fbd);
+ * Configure MAC for link down event
+ * void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause);
+ * Configure MAC for link up event
+ *
+ */
+struct fbnic_mac {
+ void (*init_regs)(struct fbnic_dev *fbd);
+
+ int (*pcs_enable)(struct fbnic_dev *fbd);
+ void (*pcs_disable)(struct fbnic_dev *fbd);
+ bool (*pcs_get_link)(struct fbnic_dev *fbd);
+ int (*pcs_get_link_event)(struct fbnic_dev *fbd);
+
+ void (*link_down)(struct fbnic_dev *fbd);
+ void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause);
+};
+
+int fbnic_mac_init(struct fbnic_dev *fbd);
+#endif /* _FBNIC_MAC_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
new file mode 100644
index 000000000000..b7ce6da68543
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/etherdevice.h>
+#include <linux/ipv6.h>
+#include <linux/types.h>
+
+#include "fbnic.h"
+#include "fbnic_netdev.h"
+#include "fbnic_txrx.h"
+
+int __fbnic_open(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int err;
+
+ err = fbnic_alloc_napi_vectors(fbn);
+ if (err)
+ return err;
+
+ err = fbnic_alloc_resources(fbn);
+ if (err)
+ goto free_napi_vectors;
+
+ err = netif_set_real_num_tx_queues(fbn->netdev,
+ fbn->num_tx_queues);
+ if (err)
+ goto free_resources;
+
+ err = netif_set_real_num_rx_queues(fbn->netdev,
+ fbn->num_rx_queues);
+ if (err)
+ goto free_resources;
+
+ /* Send ownership message and flush to verify FW has seen it */
+ err = fbnic_fw_xmit_ownership_msg(fbd, true);
+ if (err) {
+ dev_warn(fbd->dev,
+ "Error %d sending host ownership message to the firmware\n",
+ err);
+ goto free_resources;
+ }
+
+ err = fbnic_fw_init_heartbeat(fbd, false);
+ if (err)
+ goto release_ownership;
+
+ err = fbnic_pcs_irq_enable(fbd);
+ if (err)
+ goto release_ownership;
+ /* Pull the BMC config and initialize the RPC */
+ fbnic_bmc_rpc_init(fbd);
+ fbnic_rss_reinit(fbd, fbn);
+
+ return 0;
+release_ownership:
+ fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
+free_resources:
+ fbnic_free_resources(fbn);
+free_napi_vectors:
+ fbnic_free_napi_vectors(fbn);
+ return err;
+}
+
+static int fbnic_open(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int err;
+
+ err = __fbnic_open(fbn);
+ if (!err)
+ fbnic_up(fbn);
+
+ return err;
+}
+
+static int fbnic_stop(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ fbnic_down(fbn);
+ fbnic_pcs_irq_disable(fbn->fbd);
+
+ fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
+
+ fbnic_free_resources(fbn);
+ fbnic_free_napi_vectors(fbn);
+
+ return 0;
+}
+
+static int fbnic_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_mac_addr *avail_addr;
+
+ if (WARN_ON(!is_valid_ether_addr(addr)))
+ return -EADDRNOTAVAIL;
+
+ avail_addr = __fbnic_uc_sync(fbn->fbd, addr);
+ if (!avail_addr)
+ return -ENOSPC;
+
+ /* Add type flag indicating this address is in use by the host */
+ set_bit(FBNIC_MAC_ADDR_T_UNICAST, avail_addr->act_tcam);
+
+ return 0;
+}
+
+static int fbnic_uc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i, ret;
+
+ /* Scan from middle of list to bottom, filling bottom up.
+ * Skip the first entry which is reserved for dev_addr and
+ * leave the last entry to use for promiscuous filtering.
+ */
+ for (i = fbd->mac_addr_boundary, ret = -ENOENT;
+ i < FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX && ret; i++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (!ether_addr_equal(mac_addr->value.addr8, addr))
+ continue;
+
+ ret = __fbnic_uc_unsync(mac_addr);
+ }
+
+ return ret;
+}
+
+static int fbnic_mc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_mac_addr *avail_addr;
+
+ if (WARN_ON(!is_multicast_ether_addr(addr)))
+ return -EADDRNOTAVAIL;
+
+ avail_addr = __fbnic_mc_sync(fbn->fbd, addr);
+ if (!avail_addr)
+ return -ENOSPC;
+
+ /* Add type flag indicating this address is in use by the host */
+ set_bit(FBNIC_MAC_ADDR_T_MULTICAST, avail_addr->act_tcam);
+
+ return 0;
+}
+
+static int fbnic_mc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i, ret;
+
+ /* Scan from middle of list to top, filling top down.
+ * Skip over the address reserved for the BMC MAC and
+ * exclude index 0 as that belongs to the broadcast address
+ */
+ for (i = fbd->mac_addr_boundary, ret = -ENOENT;
+ --i > FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX && ret;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (!ether_addr_equal(mac_addr->value.addr8, addr))
+ continue;
+
+ ret = __fbnic_mc_unsync(mac_addr);
+ }
+
+ return ret;
+}
+
+void __fbnic_set_rx_mode(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ bool uc_promisc = false, mc_promisc = false;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_mac_addr *mac_addr;
+ int err;
+
+ /* Populate host address from dev_addr */
+ mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX];
+ if (!ether_addr_equal(mac_addr->value.addr8, netdev->dev_addr) ||
+ mac_addr->state != FBNIC_TCAM_S_VALID) {
+ ether_addr_copy(mac_addr->value.addr8, netdev->dev_addr);
+ mac_addr->state = FBNIC_TCAM_S_UPDATE;
+ set_bit(FBNIC_MAC_ADDR_T_UNICAST, mac_addr->act_tcam);
+ }
+
+ /* Populate broadcast address if broadcast is enabled */
+ mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX];
+ if (netdev->flags & IFF_BROADCAST) {
+ if (!is_broadcast_ether_addr(mac_addr->value.addr8) ||
+ mac_addr->state != FBNIC_TCAM_S_VALID) {
+ eth_broadcast_addr(mac_addr->value.addr8);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ set_bit(FBNIC_MAC_ADDR_T_BROADCAST, mac_addr->act_tcam);
+ } else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
+ __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_BROADCAST);
+ }
+
+ /* Synchronize unicast and multicast address lists */
+ err = __dev_uc_sync(netdev, fbnic_uc_sync, fbnic_uc_unsync);
+ if (err == -ENOSPC)
+ uc_promisc = true;
+ err = __dev_mc_sync(netdev, fbnic_mc_sync, fbnic_mc_unsync);
+ if (err == -ENOSPC)
+ mc_promisc = true;
+
+ uc_promisc |= !!(netdev->flags & IFF_PROMISC);
+ mc_promisc |= !!(netdev->flags & IFF_ALLMULTI) || uc_promisc;
+
+ /* Populate last TCAM entry with promiscuous entry and 0/1 bit mask */
+ mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_PROMISC_IDX];
+ if (uc_promisc) {
+ if (!is_zero_ether_addr(mac_addr->value.addr8) ||
+ mac_addr->state != FBNIC_TCAM_S_VALID) {
+ eth_zero_addr(mac_addr->value.addr8);
+ eth_broadcast_addr(mac_addr->mask.addr8);
+ clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ set_bit(FBNIC_MAC_ADDR_T_PROMISC,
+ mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ } else if (mc_promisc &&
+ (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi)) {
+ /* Multicast needs special handling as the BMC may already
+ * have an all-multi rule in place. In that case adding a
+ * rule of our own won't do any good, so we instead modify
+ * the ALL MULTI rules below.
+ */
+ if (!is_multicast_ether_addr(mac_addr->value.addr8) ||
+ mac_addr->state != FBNIC_TCAM_S_VALID) {
+ eth_zero_addr(mac_addr->value.addr8);
+ eth_broadcast_addr(mac_addr->mask.addr8);
+ mac_addr->value.addr8[0] ^= 1;
+ mac_addr->mask.addr8[0] ^= 1;
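+ /* The result is value 01:00:00:00:00:00 with mask
+ * fe:ff:ff:ff:ff:ff, so only the multicast group bit of
+ * the first octet is significant and every multicast
+ * address hits this entry.
+ */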
+ set_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
+ mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ } else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
+ if (test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) {
+ clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
+ mac_addr->act_tcam);
+ } else {
+ mac_addr->state = FBNIC_TCAM_S_DELETE;
+ }
+ }
+
+ /* Add rules for BMC all multicast if it is enabled */
+ fbnic_bmc_rpc_all_multi_config(fbd, mc_promisc);
+
+ /* Sift out any unshared BMC rules and place them in BMC only section */
+ fbnic_sift_macda(fbd);
+
+ /* Write updates to hardware */
+ fbnic_write_rules(fbd);
+ fbnic_write_macda(fbd);
+}
+
+static void fbnic_set_rx_mode(struct net_device *netdev)
+{
+ /* No need to update the hardware if we are not running */
+ if (netif_running(netdev))
+ __fbnic_set_rx_mode(netdev);
+}
+
+static int fbnic_set_mac(struct net_device *netdev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(netdev, addr->sa_data);
+
+ fbnic_set_rx_mode(netdev);
+
+ return 0;
+}
+
+void fbnic_clear_rx_mode(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
+
+ if (mac_addr->state != FBNIC_TCAM_S_VALID)
+ continue;
+
+ bitmap_clear(mac_addr->act_tcam,
+ FBNIC_MAC_ADDR_T_HOST_START,
+ FBNIC_MAC_ADDR_T_HOST_LEN);
+
+ if (bitmap_empty(mac_addr->act_tcam,
+ FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
+ mac_addr->state = FBNIC_TCAM_S_DELETE;
+ }
+
+ /* Write updates to hardware */
+ fbnic_write_macda(fbd);
+
+ __dev_uc_unsync(netdev, NULL);
+ __dev_mc_unsync(netdev, NULL);
+}
+
+static const struct net_device_ops fbnic_netdev_ops = {
+ .ndo_open = fbnic_open,
+ .ndo_stop = fbnic_stop,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_start_xmit = fbnic_xmit_frame,
+ .ndo_features_check = fbnic_features_check,
+ .ndo_set_mac_address = fbnic_set_mac,
+ .ndo_set_rx_mode = fbnic_set_rx_mode,
+};
+
+void fbnic_reset_queues(struct fbnic_net *fbn,
+ unsigned int tx, unsigned int rx)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ unsigned int max_napis;
+
+ max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;
+
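+ /* Tx and Rx queues share NAPI vectors, so the NAPI count is the
+ * larger of the two queue counts, each capped at max_napis.
+ */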
+ tx = min(tx, max_napis);
+ fbn->num_tx_queues = tx;
+
+ rx = min(rx, max_napis);
+ fbn->num_rx_queues = rx;
+
+ fbn->num_napi = max(tx, rx);
+}
+
+/**
+ * fbnic_netdev_free - Free the netdev associated with fbnic
+ * @fbd: Driver specific structure to free netdev from
+ *
+ * Destroy the phylink instance if present, free the netdev, and clear
+ * the device's reference to it.
+ **/
+void fbnic_netdev_free(struct fbnic_dev *fbd)
+{
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+
+ if (fbn->phylink)
+ phylink_destroy(fbn->phylink);
+
+ free_netdev(fbd->netdev);
+ fbd->netdev = NULL;
+}
+
+/**
+ * fbnic_netdev_alloc - Allocate a netdev and associate with fbnic
+ * @fbd: Driver specific structure to associate netdev with
+ *
+ * Allocate and initialize the netdev and netdev private structure. Bind
+ * together the hardware, netdev, and pci data structures.
+ *
+ * Return: Pointer to the allocated netdev on success, NULL on failure
+ **/
+struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
+{
+ struct net_device *netdev;
+ struct fbnic_net *fbn;
+ int default_queues;
+
+ netdev = alloc_etherdev_mq(sizeof(*fbn), FBNIC_MAX_RXQS);
+ if (!netdev)
+ return NULL;
+
+ SET_NETDEV_DEV(netdev, fbd->dev);
+ fbd->netdev = netdev;
+
+ netdev->netdev_ops = &fbnic_netdev_ops;
+
+ fbn = netdev_priv(netdev);
+
+ fbn->netdev = netdev;
+ fbn->fbd = fbd;
+ INIT_LIST_HEAD(&fbn->napis);
+
+ fbn->txq_size = FBNIC_TXQ_SIZE_DEFAULT;
+ fbn->hpq_size = FBNIC_HPQ_SIZE_DEFAULT;
+ fbn->ppq_size = FBNIC_PPQ_SIZE_DEFAULT;
+ fbn->rcq_size = FBNIC_RCQ_SIZE_DEFAULT;
+
+ default_queues = netif_get_num_default_rss_queues();
+ if (default_queues > fbd->max_num_queues)
+ default_queues = fbd->max_num_queues;
+
+ fbnic_reset_queues(fbn, default_queues, default_queues);
+
+ fbnic_reset_indir_tbl(fbn);
+ fbnic_rss_key_fill(fbn->rss_key);
+ fbnic_rss_init_en_mask(fbn);
+
+ netdev->features |=
+ NETIF_F_RXHASH |
+ NETIF_F_SG |
+ NETIF_F_HW_CSUM |
+ NETIF_F_RXCSUM;
+
+ netdev->hw_features |= netdev->features;
+ netdev->vlan_features |= netdev->features;
+ netdev->hw_enc_features |= netdev->features;
+
+ netdev->min_mtu = IPV6_MIN_MTU;
+ netdev->max_mtu = FBNIC_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;
+
+ /* TBD: This is a workaround for the BMC, as phylink doesn't have
+ * support for leaving the link enabled if a BMC is present.
+ */
+ netdev->ethtool->wol_enabled = true;
+
+ fbn->fec = FBNIC_FEC_AUTO | FBNIC_FEC_RS;
+ fbn->link_mode = FBNIC_LINK_AUTO | FBNIC_LINK_50R2;
+ netif_carrier_off(netdev);
+
+ netif_tx_stop_all_queues(netdev);
+
+ if (fbnic_phylink_init(netdev)) {
+ fbnic_netdev_free(fbd);
+ return NULL;
+ }
+
+ return netdev;
+}
+
+static int fbnic_dsn_to_mac_addr(u64 dsn, char *addr)
+{
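+ /* The DSN is an EUI-64 style identifier: the upper three bytes
+ * carry the OUI and the lower three the serial number, so the two
+ * stuffing bytes in bits 39:24 are skipped when rebuilding the 48b
+ * MAC address.
+ */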
+ addr[0] = (dsn >> 56) & 0xFF;
+ addr[1] = (dsn >> 48) & 0xFF;
+ addr[2] = (dsn >> 40) & 0xFF;
+ addr[3] = (dsn >> 16) & 0xFF;
+ addr[4] = (dsn >> 8) & 0xFF;
+ addr[5] = dsn & 0xFF;
+
+ return is_valid_ether_addr(addr) ? 0 : -EINVAL;
+}
+
+/**
+ * fbnic_netdev_register - Initialize general software structures
+ * @netdev: Netdev containing structure to initialize and register
+ *
+ * Initialize the MAC address for the netdev and register it.
+ *
+ * Return: 0 on success, negative on failure
+ **/
+int fbnic_netdev_register(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+ u64 dsn = fbd->dsn;
+ u8 addr[ETH_ALEN];
+ int err;
+
+ err = fbnic_dsn_to_mac_addr(dsn, addr);
+ if (!err) {
+ ether_addr_copy(netdev->perm_addr, addr);
+ eth_hw_addr_set(netdev, addr);
+ } else {
+ /* A randomly assigned MAC address will cause provisioning
+ * issues so instead just fail to spawn the netdev and
+ * avoid any confusion.
+ */
+ dev_err(fbd->dev, "MAC addr %pM invalid\n", addr);
+ return err;
+ }
+
+ return register_netdev(netdev);
+}
+
+void fbnic_netdev_unregister(struct net_device *netdev)
+{
+ unregister_netdev(netdev);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
new file mode 100644
index 000000000000..6bc0ebeb8182
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_NETDEV_H_
+#define _FBNIC_NETDEV_H_
+
+#include <linux/types.h>
+#include <linux/phylink.h>
+
+#include "fbnic_csr.h"
+#include "fbnic_rpc.h"
+#include "fbnic_txrx.h"
+
+struct fbnic_net {
+ struct fbnic_ring *tx[FBNIC_MAX_TXQS];
+ struct fbnic_ring *rx[FBNIC_MAX_RXQS];
+
+ struct net_device *netdev;
+ struct fbnic_dev *fbd;
+
+ u32 txq_size;
+ u32 hpq_size;
+ u32 ppq_size;
+ u32 rcq_size;
+
+ u16 num_napi;
+
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+ struct phylink_pcs phylink_pcs;
+
+ /* TBD: Remove these when phylink supports FEC and lane config */
+ u8 fec;
+ u8 link_mode;
+
+ u16 num_tx_queues;
+ u16 num_rx_queues;
+
+ u8 indir_tbl[FBNIC_RPC_RSS_TBL_COUNT][FBNIC_RPC_RSS_TBL_SIZE];
+ u32 rss_key[FBNIC_RPC_RSS_KEY_DWORD_LEN];
+ u32 rss_flow_hash[FBNIC_NUM_HASH_OPT];
+
+ u64 link_down_events;
+
+ struct list_head napis;
+};
+
+int __fbnic_open(struct fbnic_net *fbn);
+void fbnic_up(struct fbnic_net *fbn);
+void fbnic_down(struct fbnic_net *fbn);
+
+struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd);
+void fbnic_netdev_free(struct fbnic_dev *fbd);
+int fbnic_netdev_register(struct net_device *netdev);
+void fbnic_netdev_unregister(struct net_device *netdev);
+void fbnic_reset_queues(struct fbnic_net *fbn,
+ unsigned int tx, unsigned int rx);
+
+void __fbnic_set_rx_mode(struct net_device *netdev);
+void fbnic_clear_rx_mode(struct net_device *netdev);
+
+int fbnic_phylink_init(struct net_device *netdev);
+#endif /* _FBNIC_NETDEV_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
new file mode 100644
index 000000000000..a4809fe0fc24
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include <linux/types.h>
+
+#include "fbnic.h"
+#include "fbnic_drvinfo.h"
+#include "fbnic_netdev.h"
+
+char fbnic_driver_name[] = DRV_NAME;
+
+MODULE_DESCRIPTION(DRV_SUMMARY);
+MODULE_LICENSE("GPL");
+
+static const struct fbnic_info fbnic_asic_info = {
+ .max_num_queues = FBNIC_MAX_QUEUES,
+ .bar_mask = BIT(0) | BIT(4)
+};
+
+static const struct fbnic_info *fbnic_info_tbl[] = {
+ [fbnic_board_asic] = &fbnic_asic_info,
+};
+
+static const struct pci_device_id fbnic_pci_tbl[] = {
+ { PCI_DEVICE_DATA(META, FBNIC_ASIC, fbnic_board_asic) },
+ /* Required last entry */
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, fbnic_pci_tbl);
+
+u32 fbnic_rd32(struct fbnic_dev *fbd, u32 reg)
+{
+ u32 __iomem *csr = READ_ONCE(fbd->uc_addr0);
+ u32 value;
+
+ if (!csr)
+ return ~0U;
+
+ value = readl(csr + reg);
+
+ /* If any bits are 0, the value should be valid */
+ if (~value)
+ return value;
+
+ /* All 1's may be valid if ZEROs register still works */
+ if (reg != FBNIC_MASTER_SPARE_0 && ~readl(csr + FBNIC_MASTER_SPARE_0))
+ return value;
+
+ /* Hardware is giving us all 1's reads, assume it is gone */
+ WRITE_ONCE(fbd->uc_addr0, NULL);
+ WRITE_ONCE(fbd->uc_addr4, NULL);
+
+ dev_err(fbd->dev,
+ "Failed read (idx 0x%x AKA addr 0x%x), disabled CSR access, awaiting reset\n",
+ reg, reg << 2);
+
+ /* Notify stack that device has lost (PCIe) link */
+ if (!fbnic_init_failure(fbd))
+ netif_device_detach(fbd->netdev);
+
+ return ~0U;
+}
+
+bool fbnic_fw_present(struct fbnic_dev *fbd)
+{
+ return !!READ_ONCE(fbd->uc_addr4);
+}
+
+void fbnic_fw_wr32(struct fbnic_dev *fbd, u32 reg, u32 val)
+{
+ u32 __iomem *csr = READ_ONCE(fbd->uc_addr4);
+
+ if (csr)
+ writel(val, csr + reg);
+}
+
+u32 fbnic_fw_rd32(struct fbnic_dev *fbd, u32 reg)
+{
+ u32 __iomem *csr = READ_ONCE(fbd->uc_addr4);
+ u32 value;
+
+ if (!csr)
+ return ~0U;
+
+ value = readl(csr + reg);
+
+ /* If any bits are 0, the value should be valid */
+ if (~value)
+ return value;
+
+ /* All 1's may be valid if ZEROs register still works */
+ if (reg != FBNIC_FW_ZERO_REG && ~readl(csr + FBNIC_FW_ZERO_REG))
+ return value;
+
+ /* Hardware is giving us all 1's reads, assume it is gone */
+ WRITE_ONCE(fbd->uc_addr0, NULL);
+ WRITE_ONCE(fbd->uc_addr4, NULL);
+
+ dev_err(fbd->dev,
+ "Failed read (idx 0x%x AKA addr 0x%x), disabled CSR access, awaiting reset\n",
+ reg, reg << 2);
+
+ /* Notify stack that device has lost (PCIe) link */
+ if (!fbnic_init_failure(fbd))
+ netif_device_detach(fbd->netdev);
+
+ return ~0U;
+}
+
+static void fbnic_service_task_start(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ schedule_delayed_work(&fbd->service_task, HZ);
+ phylink_resume(fbn->phylink);
+}
+
+static void fbnic_service_task_stop(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ phylink_suspend(fbn->phylink, fbnic_bmc_present(fbd));
+ cancel_delayed_work(&fbd->service_task);
+}
+
+void fbnic_up(struct fbnic_net *fbn)
+{
+ fbnic_enable(fbn);
+
+ fbnic_fill(fbn);
+
+ fbnic_rss_reinit_hw(fbn->fbd, fbn);
+
+ __fbnic_set_rx_mode(fbn->netdev);
+
+ /* Enable Tx/Rx processing */
+ fbnic_napi_enable(fbn);
+ netif_tx_start_all_queues(fbn->netdev);
+
+ fbnic_service_task_start(fbn);
+}
+
+static void fbnic_down_noidle(struct fbnic_net *fbn)
+{
+ fbnic_service_task_stop(fbn);
+
+ /* Disable Tx/Rx Processing */
+ fbnic_napi_disable(fbn);
+ netif_tx_disable(fbn->netdev);
+
+ fbnic_clear_rx_mode(fbn->netdev);
+ fbnic_clear_rules(fbn->fbd);
+ fbnic_rss_disable_hw(fbn->fbd);
+ fbnic_disable(fbn);
+}
+
+void fbnic_down(struct fbnic_net *fbn)
+{
+ fbnic_down_noidle(fbn);
+
+ fbnic_wait_all_queues_idle(fbn->fbd, false);
+
+ fbnic_flush(fbn);
+}
+
+static void fbnic_health_check(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+
+ /* As long as the heart is beating, the FW is healthy */
+ if (fbd->fw_heartbeat_enabled)
+ return;
+
+ /* If the Tx mailbox still has messages sitting in it then there likely
+ * isn't anything we can do. We will wait until the mailbox is empty to
+ * report the fault so we can collect the crashlog.
+ */
+ if (tx_mbx->head != tx_mbx->tail)
+ return;
+
+ /* TBD: Need to add a more thorough recovery here.
+ * Specifically we need to verify what the firmware may have changed
+ * between our original setup and its reboot. It may be enough to just
+ * perform a down/up. For now we will simply reclaim ownership so the
+ * heartbeat can catch the next fault.
+ */
+ fbnic_fw_xmit_ownership_msg(fbd, true);
+}
+
+static void fbnic_service_task(struct work_struct *work)
+{
+ struct fbnic_dev *fbd = container_of(to_delayed_work(work),
+ struct fbnic_dev, service_task);
+
+ rtnl_lock();
+
+ fbnic_fw_check_heartbeat(fbd);
+
+ fbnic_health_check(fbd);
+
+ if (netif_carrier_ok(fbd->netdev))
+ fbnic_napi_depletion_check(fbd->netdev);
+
+ if (netif_running(fbd->netdev))
+ schedule_delayed_work(&fbd->service_task, HZ);
+
+ rtnl_unlock();
+}
+
+/**
+ * fbnic_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in fbnic_pci_tbl
+ *
+ * Initializes a PCI device identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ *
+ * Return: 0 on success, negative on failure
+ **/
+static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ const struct fbnic_info *info = fbnic_info_tbl[ent->driver_data];
+ struct net_device *netdev;
+ struct fbnic_dev *fbd;
+ int err;
+
+ if (pdev->error_state != pci_channel_io_normal) {
+ dev_err(&pdev->dev,
+ "PCI device still in an error state. Unable to load...\n");
+ return -EIO;
+ }
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "PCI enable device failed: %d\n", err);
+ return err;
+ }
+
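+ /* Prefer the device's 46b DMA addressing, falling back to 32b if the
+ * platform can't provide it.
+ */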
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(46));
+ if (err)
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "DMA configuration failed: %d\n", err);
+ return err;
+ }
+
+ err = pcim_iomap_regions(pdev, info->bar_mask, fbnic_driver_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed: %d\n", err);
+ return err;
+ }
+
+ fbd = fbnic_devlink_alloc(pdev);
+ if (!fbd) {
+ dev_err(&pdev->dev, "Devlink allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* Populate driver with hardware-specific info and handlers */
+ fbd->max_num_queues = info->max_num_queues;
+
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+
+ INIT_DELAYED_WORK(&fbd->service_task, fbnic_service_task);
+
+ err = fbnic_alloc_irqs(fbd);
+ if (err)
+ goto free_fbd;
+
+ err = fbnic_mac_init(fbd);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize MAC: %d\n", err);
+ goto free_irqs;
+ }
+
+ err = fbnic_fw_enable_mbx(fbd);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Firmware mailbox initialization failure\n");
+ goto free_irqs;
+ }
+
+ fbnic_devlink_register(fbd);
+
+ if (!fbd->dsn) {
+ dev_warn(&pdev->dev, "Reading serial number failed\n");
+ goto init_failure_mode;
+ }
+
+ netdev = fbnic_netdev_alloc(fbd);
+ if (!netdev) {
+ dev_err(&pdev->dev, "Netdev allocation failed\n");
+ goto init_failure_mode;
+ }
+
+ err = fbnic_netdev_register(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "Netdev registration failed: %d\n", err);
+ goto ifm_free_netdev;
+ }
+
+ return 0;
+
+ifm_free_netdev:
+ fbnic_netdev_free(fbd);
+init_failure_mode:
+ dev_warn(&pdev->dev, "Probe error encountered, entering init failure mode. Normal networking functionality will not be available.\n");
+ /* Always return 0 even on error so devlink is registered to allow
+ * firmware updates for fixes.
+ */
+ return 0;
+free_irqs:
+ fbnic_free_irqs(fbd);
+free_fbd:
+ pci_disable_device(pdev);
+ fbnic_devlink_free(fbd);
+
+ return err;
+}
+
+/**
+ * fbnic_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * Called by the PCI subsystem to alert the driver that it should release
+ * a PCI device. This could be caused by a hot-plug event, or because the
+ * driver is going to be removed from memory.
+ **/
+static void fbnic_remove(struct pci_dev *pdev)
+{
+ struct fbnic_dev *fbd = pci_get_drvdata(pdev);
+
+ if (!fbnic_init_failure(fbd)) {
+ struct net_device *netdev = fbd->netdev;
+
+ fbnic_netdev_unregister(netdev);
+ cancel_delayed_work_sync(&fbd->service_task);
+ fbnic_netdev_free(fbd);
+ }
+
+ fbnic_devlink_unregister(fbd);
+ fbnic_fw_disable_mbx(fbd);
+ fbnic_free_irqs(fbd);
+
+ pci_disable_device(pdev);
+ fbnic_devlink_free(fbd);
+}
+
+static int fbnic_pm_suspend(struct device *dev)
+{
+ struct fbnic_dev *fbd = dev_get_drvdata(dev);
+ struct net_device *netdev = fbd->netdev;
+
+ if (fbnic_init_failure(fbd))
+ goto null_uc_addr;
+
+ rtnl_lock();
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ netdev->netdev_ops->ndo_stop(netdev);
+
+ rtnl_unlock();
+
+null_uc_addr:
+ fbnic_fw_disable_mbx(fbd);
+
+ /* Free the IRQs so they aren't trying to occupy sleeping CPUs */
+ fbnic_free_irqs(fbd);
+
+ /* Hardware is about to go away, so switch off MMIO access internally */
+ WRITE_ONCE(fbd->uc_addr0, NULL);
+ WRITE_ONCE(fbd->uc_addr4, NULL);
+
+ return 0;
+}
+
+static int __fbnic_pm_resume(struct device *dev)
+{
+ struct fbnic_dev *fbd = dev_get_drvdata(dev);
+ struct net_device *netdev = fbd->netdev;
+ void __iomem * const *iomap_table;
+ struct fbnic_net *fbn;
+ int err;
+
+ /* Restore MMIO access */
+ iomap_table = pcim_iomap_table(to_pci_dev(dev));
+ fbd->uc_addr0 = iomap_table[0];
+ fbd->uc_addr4 = iomap_table[4];
+
+ /* Rerequest the IRQs */
+ err = fbnic_alloc_irqs(fbd);
+ if (err)
+ goto err_invalidate_uc_addr;
+
+ fbd->mac->init_regs(fbd);
+
+ /* Re-enable mailbox */
+ err = fbnic_fw_enable_mbx(fbd);
+ if (err)
+ goto err_free_irqs;
+
+ /* No netdev means there isn't a network interface to bring up */
+ if (fbnic_init_failure(fbd))
+ return 0;
+
+ fbn = netdev_priv(netdev);
+
+ /* Reset the queues if needed */
+ fbnic_reset_queues(fbn, fbn->num_tx_queues, fbn->num_rx_queues);
+
+ rtnl_lock();
+
+ if (netif_running(netdev)) {
+ err = __fbnic_open(fbn);
+ if (err)
+ goto err_disable_mbx;
+ }
+
+ rtnl_unlock();
+
+ return 0;
+err_disable_mbx:
+ rtnl_unlock();
+ fbnic_fw_disable_mbx(fbd);
+err_free_irqs:
+ fbnic_free_irqs(fbd);
+err_invalidate_uc_addr:
+ WRITE_ONCE(fbd->uc_addr0, NULL);
+ WRITE_ONCE(fbd->uc_addr4, NULL);
+ return err;
+}
+
+static void __fbnic_pm_attach(struct device *dev)
+{
+ struct fbnic_dev *fbd = dev_get_drvdata(dev);
+ struct net_device *netdev = fbd->netdev;
+ struct fbnic_net *fbn;
+
+ if (fbnic_init_failure(fbd))
+ return;
+
+ fbn = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ fbnic_up(fbn);
+
+ netif_device_attach(netdev);
+}
+
+static int __maybe_unused fbnic_pm_resume(struct device *dev)
+{
+ int err;
+
+ err = __fbnic_pm_resume(dev);
+ if (!err)
+ __fbnic_pm_attach(dev);
+
+ return err;
+}
+
+static const struct dev_pm_ops fbnic_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fbnic_pm_suspend, fbnic_pm_resume)
+};
+
+static void fbnic_shutdown(struct pci_dev *pdev)
+{
+ fbnic_pm_suspend(&pdev->dev);
+}
+
+static pci_ers_result_t fbnic_err_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ /* Disconnect device if failure is not recoverable via reset */
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ fbnic_pm_suspend(&pdev->dev);
+
+ /* Request a slot reset */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t fbnic_err_slot_reset(struct pci_dev *pdev)
+{
+ int err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ /* Restore device to previous state */
+ err = __fbnic_pm_resume(&pdev->dev);
+
+ return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+}
+
+static void fbnic_err_resume(struct pci_dev *pdev)
+{
+ __fbnic_pm_attach(&pdev->dev);
+}
+
+static const struct pci_error_handlers fbnic_err_handler = {
+ .error_detected = fbnic_err_error_detected,
+ .slot_reset = fbnic_err_slot_reset,
+ .resume = fbnic_err_resume,
+};
+
+static struct pci_driver fbnic_driver = {
+ .name = fbnic_driver_name,
+ .id_table = fbnic_pci_tbl,
+ .probe = fbnic_probe,
+ .remove = fbnic_remove,
+ .driver.pm = &fbnic_pm_ops,
+ .shutdown = fbnic_shutdown,
+ .err_handler = &fbnic_err_handler,
+};
+
+/**
+ * fbnic_init_module - Driver Registration Routine
+ *
+ * The first routine called when the driver is loaded. All it does is
+ * register with the PCI subsystem.
+ *
+ * Return: 0 on success, negative on failure
+ **/
+static int __init fbnic_init_module(void)
+{
+ int err;
+
+ err = pci_register_driver(&fbnic_driver);
+ if (err)
+ goto out;
+
+ pr_info(DRV_SUMMARY " (%s)\n", fbnic_driver.name);
+out:
+ return err;
+}
+module_init(fbnic_init_module);
+
+/**
+ * fbnic_exit_module - Driver Exit Cleanup Routine
+ *
+ * Called just before the driver is removed from memory.
+ **/
+static void __exit fbnic_exit_module(void)
+{
+ pci_unregister_driver(&fbnic_driver);
+}
+module_exit(fbnic_exit_module);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
new file mode 100644
index 000000000000..1a5e1e719b30
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/phy.h>
+#include <linux/phylink.h>
+
+#include "fbnic.h"
+#include "fbnic_mac.h"
+#include "fbnic_netdev.h"
+
+static struct fbnic_net *
+fbnic_pcs_to_net(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct fbnic_net, phylink_pcs);
+}
+
+static void
+fbnic_phylink_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct fbnic_net *fbn = fbnic_pcs_to_net(pcs);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ /* For now we use hard-coded defaults and FW config to determine
+ * the current values. In future patches we will add support for
+ * reconfiguring these values and changing link settings.
+ */
+ switch (fbd->fw_cap.link_speed) {
+ case FBNIC_FW_LINK_SPEED_25R1:
+ state->speed = SPEED_25000;
+ break;
+ case FBNIC_FW_LINK_SPEED_50R2:
+ state->speed = SPEED_50000;
+ break;
+ case FBNIC_FW_LINK_SPEED_100R2:
+ state->speed = SPEED_100000;
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ state->duplex = DUPLEX_FULL;
+
+ state->link = fbd->mac->pcs_get_link(fbd);
+}
+
+static int
+fbnic_phylink_pcs_enable(struct phylink_pcs *pcs)
+{
+ struct fbnic_net *fbn = fbnic_pcs_to_net(pcs);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ return fbd->mac->pcs_enable(fbd);
+}
+
+static void
+fbnic_phylink_pcs_disable(struct phylink_pcs *pcs)
+{
+ struct fbnic_net *fbn = fbnic_pcs_to_net(pcs);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ fbd->mac->pcs_disable(fbd);
+}
+
+static int
+fbnic_phylink_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
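+ /* Nothing to program here for now; the link configuration is fixed
+ * by firmware defaults (see fbnic_phylink_pcs_get_state above).
+ */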
+ return 0;
+}
+
+static const struct phylink_pcs_ops fbnic_phylink_pcs_ops = {
+ .pcs_config = fbnic_phylink_pcs_config,
+ .pcs_enable = fbnic_phylink_pcs_enable,
+ .pcs_disable = fbnic_phylink_pcs_disable,
+ .pcs_get_state = fbnic_phylink_pcs_get_state,
+};
+
+static struct phylink_pcs *
+fbnic_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ return &fbn->phylink_pcs;
+}
+
+static void
+fbnic_phylink_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void
+fbnic_phylink_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ fbd->mac->link_down(fbd);
+
+ fbn->link_down_events++;
+}
+
+static void
+fbnic_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ fbd->mac->link_up(fbd, tx_pause, rx_pause);
+}
+
+static const struct phylink_mac_ops fbnic_phylink_mac_ops = {
+ .mac_select_pcs = fbnic_phylink_mac_select_pcs,
+ .mac_config = fbnic_phylink_mac_config,
+ .mac_link_down = fbnic_phylink_mac_link_down,
+ .mac_link_up = fbnic_phylink_mac_link_up,
+};
+
+int fbnic_phylink_init(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct phylink *phylink;
+
+ fbn->phylink_pcs.neg_mode = true;
+ fbn->phylink_pcs.ops = &fbnic_phylink_pcs_ops;
+
+ fbn->phylink_config.dev = &netdev->dev;
+ fbn->phylink_config.type = PHYLINK_NETDEV;
+ fbn->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10000FD | MAC_25000FD |
+ MAC_40000FD | MAC_50000FD |
+ MAC_100000FD;
+ fbn->phylink_config.default_an_inband = true;
+
+ __set_bit(PHY_INTERFACE_MODE_XGMII,
+ fbn->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_XLGMII,
+ fbn->phylink_config.supported_interfaces);
+
+ phylink = phylink_create(&fbn->phylink_config, NULL,
+ PHY_INTERFACE_MODE_XLGMII,
+ &fbnic_phylink_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ fbn->phylink = phylink;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
new file mode 100644
index 000000000000..c8aa29fc052b
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
@@ -0,0 +1,651 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+
+#include "fbnic.h"
+#include "fbnic_netdev.h"
+#include "fbnic_rpc.h"
+
+void fbnic_reset_indir_tbl(struct fbnic_net *fbn)
+{
+ unsigned int num_rx = fbn->num_rx_queues;
+ unsigned int i;
+
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
+ fbn->indir_tbl[0][i] = ethtool_rxfh_indir_default(i, num_rx);
+ fbn->indir_tbl[1][i] = ethtool_rxfh_indir_default(i, num_rx);
+ }
+}
+
+void fbnic_rss_key_fill(u32 *buffer)
+{
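+ /* Use a single static key so that every fbnic device in the system
+ * shares the same RSS key; net_get_random_once() only generates it on
+ * first use.
+ */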
+ static u32 rss_key[FBNIC_RPC_RSS_KEY_DWORD_LEN];
+
+ net_get_random_once(rss_key, sizeof(rss_key));
+ rss_key[FBNIC_RPC_RSS_KEY_LAST_IDX] &= FBNIC_RPC_RSS_KEY_LAST_MASK;
+
+ memcpy(buffer, rss_key, sizeof(rss_key));
+}
+
+#define RX_HASH_OPT_L4 \
+ (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
+#define RX_HASH_OPT_L3 \
+ (RXH_IP_SRC | RXH_IP_DST)
+#define RX_HASH_OPT_L2 RXH_L2DA
+
+void fbnic_rss_init_en_mask(struct fbnic_net *fbn)
+{
+ fbn->rss_flow_hash[FBNIC_TCP4_HASH_OPT] = RX_HASH_OPT_L4;
+ fbn->rss_flow_hash[FBNIC_TCP6_HASH_OPT] = RX_HASH_OPT_L4;
+
+ fbn->rss_flow_hash[FBNIC_UDP4_HASH_OPT] = RX_HASH_OPT_L3;
+ fbn->rss_flow_hash[FBNIC_UDP6_HASH_OPT] = RX_HASH_OPT_L3;
+ fbn->rss_flow_hash[FBNIC_IPV4_HASH_OPT] = RX_HASH_OPT_L3;
+ fbn->rss_flow_hash[FBNIC_IPV6_HASH_OPT] = RX_HASH_OPT_L3;
+
+ fbn->rss_flow_hash[FBNIC_ETHER_HASH_OPT] = RX_HASH_OPT_L2;
+}
+
+void fbnic_rss_disable_hw(struct fbnic_dev *fbd)
+{
+ /* Disable RPC by clearing enable bit and configuration */
+ if (!fbnic_bmc_present(fbd))
+ wr32(fbd, FBNIC_RPC_RMI_CONFIG,
+ FIELD_PREP(FBNIC_RPC_RMI_CONFIG_OH_BYTES, 20));
+}
+
+#define FBNIC_FH_2_RSSEM_BIT(_fh, _rssem, _val) \
+ FIELD_PREP(FBNIC_RPC_ACT_TBL1_RSS_ENA_##_rssem, \
+ FIELD_GET(RXH_##_fh, _val))
+static u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type)
+{
+ u32 flow_hash = fbn->rss_flow_hash[flow_type];
+ u32 rss_en_mask = 0;
+
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L2DA, L2_DA, flow_hash);
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP_SRC, IP_SRC, flow_hash);
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP_DST, IP_DST, flow_hash);
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L4_B_0_1, L4_SRC, flow_hash);
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L4_B_2_3, L4_DST, flow_hash);
+
+ return rss_en_mask;
+}
+
+void fbnic_rss_reinit_hw(struct fbnic_dev *fbd, struct fbnic_net *fbn)
+{
+ unsigned int i;
+
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
+ wr32(fbd, FBNIC_RPC_RSS_TBL(0, i), fbn->indir_tbl[0][i]);
+ wr32(fbd, FBNIC_RPC_RSS_TBL(1, i), fbn->indir_tbl[1][i]);
+ }
+
+ for (i = 0; i < FBNIC_RPC_RSS_KEY_DWORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_RSS_KEY(i), fbn->rss_key[i]);
+
+ /* Default action for this is to drop with no destination */
+ wr32(fbd, FBNIC_RPC_ACT_TBL0_DEFAULT, FBNIC_RPC_ACT_TBL0_DROP);
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_RPC_ACT_TBL1_DEFAULT, 0);
+
+ /* If it isn't already enabled, set the RMI Config value to enable RPC */
+ wr32(fbd, FBNIC_RPC_RMI_CONFIG,
+ FIELD_PREP(FBNIC_RPC_RMI_CONFIG_MTU, FBNIC_MAX_JUMBO_FRAME_SIZE) |
+ FIELD_PREP(FBNIC_RPC_RMI_CONFIG_OH_BYTES, 20) |
+ FBNIC_RPC_RMI_CONFIG_ENABLE);
+}
+
+void fbnic_bmc_rpc_all_multi_config(struct fbnic_dev *fbd,
+ bool enable_host)
+{
+ struct fbnic_act_tcam *act_tcam;
+ struct fbnic_mac_addr *mac_addr;
+ int j;
+
+ /* We need to add the all multicast filter at the end of the
+ * multicast address list. This way if there are any that are
+ * shared between the host and the BMC they can be directed to
+ * both. Otherwise the remainder just get sent directly to the
+ * BMC.
+ */
+ mac_addr = &fbd->mac_addr[fbd->mac_addr_boundary - 1];
+ if (fbnic_bmc_present(fbd) && fbd->fw_cap.all_multi) {
+ if (mac_addr->state != FBNIC_TCAM_S_VALID) {
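+ /* Build a wildcard match for any address with the multicast group
+ * bit set: only bit 0 of the first octet is compared (value 01,
+ * mask fe), with set mask bits treated as don't-care.
+ */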
+ eth_zero_addr(mac_addr->value.addr8);
+ eth_broadcast_addr(mac_addr->mask.addr8);
+ mac_addr->value.addr8[0] ^= 1;
+ mac_addr->mask.addr8[0] ^= 1;
+ set_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ if (enable_host)
+ set_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ else
+ clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ } else if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam) &&
+ !is_zero_ether_addr(mac_addr->mask.addr8) &&
+ mac_addr->state == FBNIC_TCAM_S_VALID) {
+ clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI, mac_addr->act_tcam);
+ clear_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_DELETE;
+ }
+
+ /* We have to add a special handler for multicast as the
+ * BMC may have an all-multi rule already in place. In that
+ * case adding a rule ourselves won't do any good, so we
+ * have to modify the ALL MULTI rule below instead.
+ */
+ act_tcam = &fbd->act_tcam[FBNIC_RPC_ACT_TBL_BMC_ALL_MULTI_OFFSET];
+
+ /* If we are not enabling the rule just delete it. We will fall
+ * back to the RSS rules that support the multicast addresses.
+ */
+ if (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi || enable_host) {
+ if (act_tcam->state == FBNIC_TCAM_S_VALID)
+ act_tcam->state = FBNIC_TCAM_S_DELETE;
+ return;
+ }
+
+ /* Rewrite ACT TCAM rule 1 to handle BMC all-multi traffic */
+ act_tcam->dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC);
+ act_tcam->mask.tcam[0] = 0xffff;
+
+ /* MACDA 0 - 3 are reserved for the BMC MAC addresses */
+ act_tcam->value.tcam[1] =
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ fbd->mac_addr_boundary - 1) |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ act_tcam->mask.tcam[1] = 0xffff &
+ ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX &
+ ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+
+ for (j = 2; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
+ act_tcam->mask.tcam[j] = 0xffff;
+
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+}
+
+void fbnic_bmc_rpc_init(struct fbnic_dev *fbd)
+{
+ int i = FBNIC_RPC_TCAM_MACDA_BMC_ADDR_IDX;
+ struct fbnic_act_tcam *act_tcam;
+ struct fbnic_mac_addr *mac_addr;
+ int j;
+
+ /* Check if BMC is present */
+ if (!fbnic_bmc_present(fbd))
+ return;
+
+ /* Fetch BMC MAC addresses from firmware capabilities */
+ for (j = 0; j < 4; j++) {
+ u8 *bmc_mac = fbd->fw_cap.bmc_mac_addr[j];
+
+ /* Validate BMC MAC addresses */
+ if (is_zero_ether_addr(bmc_mac))
+ continue;
+
+ if (is_multicast_ether_addr(bmc_mac))
+ mac_addr = __fbnic_mc_sync(fbd, bmc_mac);
+ else
+ mac_addr = &fbd->mac_addr[i++];
+
+ if (!mac_addr) {
+ netdev_err(fbd->netdev,
+ "No slot for BMC MAC address[%d]\n", j);
+ continue;
+ }
+
+ ether_addr_copy(mac_addr->value.addr8, bmc_mac);
+ eth_zero_addr(mac_addr->mask.addr8);
+
+ set_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ /* Validate that broadcast is also present; record it and tag it */
+ mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX];
+ eth_broadcast_addr(mac_addr->value.addr8);
+ set_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+
+ /* Rewrite TCAM rule 0 if it isn't present to relocate BMC rules */
+ act_tcam = &fbd->act_tcam[FBNIC_RPC_ACT_TBL_BMC_OFFSET];
+ act_tcam->dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC);
+ act_tcam->mask.tcam[0] = 0xffff;
+
+ /* MACDA 0 - 3 are reserved for the BMC MAC addresses.
+ * To account for that we have to mask out the lower 2 bits
+ * of the MACDA index by performing an &= with 0x1c.
+ */
+ act_tcam->value.tcam[1] = FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ act_tcam->mask.tcam[1] = 0xffff &
+ ~FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX, 0x1c) &
+ ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+
+ for (j = 2; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
+ act_tcam->mask.tcam[j] = 0xffff;
+
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+
+ fbnic_bmc_rpc_all_multi_config(fbd, false);
+}
+
+#define FBNIC_ACT1_INIT(_l4, _udp, _ip, _v6) \
+ (((_l4) ? FBNIC_RPC_TCAM_ACT1_L4_VALID : 0) | \
+ ((_udp) ? FBNIC_RPC_TCAM_ACT1_L4_IS_UDP : 0) | \
+ ((_ip) ? FBNIC_RPC_TCAM_ACT1_IP_VALID : 0) | \
+ ((_v6) ? FBNIC_RPC_TCAM_ACT1_IP_IS_V6 : 0))
+
+void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn)
+{
+ static const u32 act1_value[FBNIC_NUM_HASH_OPT] = {
+ FBNIC_ACT1_INIT(1, 1, 1, 1), /* UDP6 */
+ FBNIC_ACT1_INIT(1, 1, 1, 0), /* UDP4 */
+ FBNIC_ACT1_INIT(1, 0, 1, 1), /* TCP6 */
+ FBNIC_ACT1_INIT(1, 0, 1, 0), /* TCP4 */
+ FBNIC_ACT1_INIT(0, 0, 1, 1), /* IP6 */
+ FBNIC_ACT1_INIT(0, 0, 1, 0), /* IP4 */
+ 0 /* Ether */
+ };
+ unsigned int i;
+
+ /* To support scenarios where a BMC is present we must write the
+ * rules twice, once for the unicast cases, and once again for
+ * the broadcast/multicast cases as we have to support 2 destinations.
+ */
+ BUILD_BUG_ON(FBNIC_RSS_EN_NUM_UNICAST * 2 != FBNIC_RSS_EN_NUM_ENTRIES);
+ BUILD_BUG_ON(ARRAY_SIZE(act1_value) != FBNIC_NUM_HASH_OPT);
+
+ /* Program RSS hash enable mask for host in action TCAM/table. */
+ for (i = fbnic_bmc_present(fbd) ? 0 : FBNIC_RSS_EN_NUM_UNICAST;
+ i < FBNIC_RSS_EN_NUM_ENTRIES; i++) {
+ unsigned int idx = i + FBNIC_RPC_ACT_TBL_RSS_OFFSET;
+ struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[idx];
+ u32 flow_hash, dest, rss_en_mask;
+ int flow_type, j;
+ u16 value = 0;
+
+ flow_type = i % FBNIC_RSS_EN_NUM_UNICAST;
+ flow_hash = fbn->rss_flow_hash[flow_type];
+
+ /* Set DEST_HOST based on absence of RXH_DISCARD */
+ dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ !(RXH_DISCARD & flow_hash) ?
+ FBNIC_RPC_ACT_TBL0_DEST_HOST : 0);
+
+ if (i >= FBNIC_RSS_EN_NUM_UNICAST && fbnic_bmc_present(fbd))
+ dest |= FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC);
+
+ if (!dest)
+ dest = FBNIC_RPC_ACT_TBL0_DROP;
+
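+ /* For L4 flow types hint to the DMA engine that headers run through
+ * the L4 header, presumably so any header/payload split lands after
+ * it.
+ */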
+ if (act1_value[flow_type] & FBNIC_RPC_TCAM_ACT1_L4_VALID)
+ dest |= FIELD_PREP(FBNIC_RPC_ACT_TBL0_DMA_HINT,
+ FBNIC_RCD_HDR_AL_DMA_HINT_L4);
+
+ rss_en_mask = fbnic_flow_hash_2_rss_en_mask(fbn, flow_type);
+
+ act_tcam->dest = dest;
+ act_tcam->rss_en_mask = rss_en_mask;
+
+ act_tcam->mask.tcam[0] = 0xffff;
+
+ /* We reserve the upper 8 MACDA TCAM entries for host
+ * unicast. So we set the value to 24 and mask the
+ * lower bits so that the lower entries can be used as
+ * multicast or BMC addresses.
+ */
+ if (i < FBNIC_RSS_EN_NUM_UNICAST)
+ value = FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ fbd->mac_addr_boundary);
+ value |= FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+
+ value |= act1_value[flow_type];
+
+ act_tcam->value.tcam[1] = value;
+ act_tcam->mask.tcam[1] = ~value;
+
+ for (j = 2; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
+ act_tcam->mask.tcam[j] = 0xffff;
+
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+ }
+}
+
+struct fbnic_mac_addr *__fbnic_uc_sync(struct fbnic_dev *fbd,
+ const unsigned char *addr)
+{
+ struct fbnic_mac_addr *avail_addr = NULL;
+ unsigned int i;
+
+ /* Scan from middle of list to bottom, filling bottom up.
+ * Skip the first entry which is reserved for dev_addr and
+ * leave the last entry to use for promiscuous filtering.
+ */
+ for (i = fbd->mac_addr_boundary - 1;
+ i < FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX; i++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (mac_addr->state == FBNIC_TCAM_S_DISABLED) {
+ avail_addr = mac_addr;
+ } else if (ether_addr_equal(mac_addr->value.addr8, addr)) {
+ avail_addr = mac_addr;
+ break;
+ }
+ }
+
+ if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
+ ether_addr_copy(avail_addr->value.addr8, addr);
+ eth_zero_addr(avail_addr->mask.addr8);
+ avail_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ return avail_addr;
+}
+
+struct fbnic_mac_addr *__fbnic_mc_sync(struct fbnic_dev *fbd,
+ const unsigned char *addr)
+{
+ struct fbnic_mac_addr *avail_addr = NULL;
+ unsigned int i;
+
+ /* Scan from middle of list to top, filling top down.
+ * Stop above the broadcast entry, which also keeps the
+ * indices reserved for the BMC MAC addresses out of reach.
+ */
+ for (i = fbd->mac_addr_boundary;
+ --i > FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (mac_addr->state == FBNIC_TCAM_S_DISABLED) {
+ avail_addr = mac_addr;
+ } else if (ether_addr_equal(mac_addr->value.addr8, addr)) {
+ avail_addr = mac_addr;
+ break;
+ }
+ }
+
+ /* Scan the BMC addresses to see if it may have already
+ * reserved the address.
+ */
+ while (--i) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (!is_zero_ether_addr(mac_addr->mask.addr8))
+ continue;
+
+ /* Only move on if we find a match */
+ if (!ether_addr_equal(mac_addr->value.addr8, addr))
+ continue;
+
+ /* We need to pull this address to the shared area */
+ if (avail_addr) {
+ memcpy(avail_addr, mac_addr, sizeof(*mac_addr));
+ mac_addr->state = FBNIC_TCAM_S_DELETE;
+ avail_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ break;
+ }
+
+ if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
+ ether_addr_copy(avail_addr->value.addr8, addr);
+ eth_zero_addr(avail_addr->mask.addr8);
+ avail_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ return avail_addr;
+}
+
+int __fbnic_xc_unsync(struct fbnic_mac_addr *mac_addr, unsigned int tcam_idx)
+{
+ if (!test_and_clear_bit(tcam_idx, mac_addr->act_tcam))
+ return -ENOENT;
+
+ if (bitmap_empty(mac_addr->act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
+ mac_addr->state = FBNIC_TCAM_S_DELETE;
+
+ return 0;
+}
+
+void fbnic_sift_macda(struct fbnic_dev *fbd)
+{
+ int dest, src;
+
+ /* Move BMC only addresses back into BMC region */
+ for (dest = FBNIC_RPC_TCAM_MACDA_BMC_ADDR_IDX,
+ src = FBNIC_RPC_TCAM_MACDA_MULTICAST_IDX;
+ ++dest < FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX &&
+ src < fbd->mac_addr_boundary;) {
+ struct fbnic_mac_addr *dest_addr = &fbd->mac_addr[dest];
+
+ if (dest_addr->state != FBNIC_TCAM_S_DISABLED)
+ continue;
+
+ while (src < fbd->mac_addr_boundary) {
+ struct fbnic_mac_addr *src_addr = &fbd->mac_addr[src++];
+
+ /* Verify BMC bit is set */
+ if (!test_bit(FBNIC_MAC_ADDR_T_BMC, src_addr->act_tcam))
+ continue;
+
+ /* Verify filter isn't already disabled */
+ if (src_addr->state == FBNIC_TCAM_S_DISABLED ||
+ src_addr->state == FBNIC_TCAM_S_DELETE)
+ continue;
+
+ /* Verify only BMC bit is set */
+ if (bitmap_weight(src_addr->act_tcam,
+ FBNIC_RPC_TCAM_ACT_NUM_ENTRIES) != 1)
+ continue;
+
+ /* Verify we are not moving a wildcard address */
+ if (!is_zero_ether_addr(src_addr->mask.addr8))
+ continue;
+
+ memcpy(dest_addr, src_addr, sizeof(*src_addr));
+ src_addr->state = FBNIC_TCAM_S_DELETE;
+ dest_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ }
+}
+
+static void fbnic_clear_macda_entry(struct fbnic_dev *fbd, unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
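+ /* The <= bound also clears the validate word following the 3 DA words */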
+ for (i = 0; i <= FBNIC_RPC_TCAM_MACDA_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_MACDA(idx, i), 0);
+}
+
+static void fbnic_clear_macda(struct fbnic_dev *fbd)
+{
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
+
+ if (mac_addr->state == FBNIC_TCAM_S_DISABLED)
+ continue;
+
+ if (test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) {
+ if (fbnic_bmc_present(fbd))
+ continue;
+ dev_warn_once(fbd->dev,
+ "Found BMC MAC address w/ BMC not present\n");
+ }
+
+ fbnic_clear_macda_entry(fbd, idx);
+
+ /* If rule was already destined for deletion just wipe it now */
+ if (mac_addr->state == FBNIC_TCAM_S_DELETE) {
+ memset(mac_addr, 0, sizeof(*mac_addr));
+ continue;
+ }
+
+ /* Change state to update so that we will rewrite
+ * this tcam the next time fbnic_write_macda is called.
+ */
+ mac_addr->state = FBNIC_TCAM_S_UPDATE;
+ }
+}
+
+static void fbnic_write_macda_entry(struct fbnic_dev *fbd, unsigned int idx,
+ struct fbnic_mac_addr *mac_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &mac_addr->mask.addr16[FBNIC_RPC_TCAM_MACDA_WORD_LEN - 1];
+ value = &mac_addr->value.addr16[FBNIC_RPC_TCAM_MACDA_WORD_LEN - 1];
+
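+ /* The address is stored big endian while register word 0 holds the
+ * final 16 bits, so walk the mask/value arrays from the end back.
+ */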
+ for (i = 0; i < FBNIC_RPC_TCAM_MACDA_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_MACDA(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_MACDA_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_MACDA_VALUE, ntohs(*value--)));
+
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_RPC_TCAM_MACDA(idx, i), FBNIC_RPC_TCAM_VALIDATE);
+}
+
+void fbnic_write_macda(struct fbnic_dev *fbd)
+{
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
+
+ /* Skip the entry if the update flag is not set */
+ if (!(mac_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ /* Clear by writing 0s. */
+ if (mac_addr->state == FBNIC_TCAM_S_DELETE) {
+ /* Invalidate entry and clear addr state info */
+ fbnic_clear_macda_entry(fbd, idx);
+ memset(mac_addr, 0, sizeof(*mac_addr));
+
+ continue;
+ }
+
+ fbnic_write_macda_entry(fbd, idx, mac_addr);
+
+ mac_addr->state = FBNIC_TCAM_S_VALID;
+ }
+}
+
+static void fbnic_clear_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_ACT_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i), 0);
+}
+
+void fbnic_clear_rules(struct fbnic_dev *fbd)
+{
+ u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC);
+ int i = FBNIC_RPC_TCAM_ACT_NUM_ENTRIES - 1;
+ struct fbnic_act_tcam *act_tcam;
+
+ /* Clear MAC rules */
+ fbnic_clear_macda(fbd);
+
+ /* If BMC is present we need to preserve the last rule which
+ * will be used to route traffic to the BMC if it is received.
+ *
+ * At this point it should be the only MAC address in the MACDA
+ * so any unicast or multicast traffic received should be routed
+ * to it. So leave the last rule in place.
+ *
+ * It will be rewritten to add the host again when we bring
+ * the interface back up.
+ */
+ if (fbnic_bmc_present(fbd)) {
+ act_tcam = &fbd->act_tcam[i];
+
+ if (act_tcam->state == FBNIC_TCAM_S_VALID &&
+ (act_tcam->dest & dest)) {
+ wr32(fbd, FBNIC_RPC_ACT_TBL0(i), dest);
+ wr32(fbd, FBNIC_RPC_ACT_TBL1(i), 0);
+
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+
+ i--;
+ }
+ }
+
+ /* Work from the bottom up deleting all other rules from hardware */
+ do {
+ act_tcam = &fbd->act_tcam[i];
+
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ continue;
+
+ fbnic_clear_act_tcam(fbd, i);
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+ } while (i--);
+}
+
+static void fbnic_delete_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
+{
+ fbnic_clear_act_tcam(fbd, idx);
+ memset(&fbd->act_tcam[idx], 0, sizeof(struct fbnic_act_tcam));
+}
+
+static void fbnic_update_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
+{
+ struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[idx];
+ int i;
+
+ /* Update entry by writing the destination and RSS mask */
+ wr32(fbd, FBNIC_RPC_ACT_TBL0(idx), act_tcam->dest);
+ wr32(fbd, FBNIC_RPC_ACT_TBL1(idx), act_tcam->rss_en_mask);
+
+ /* Write new TCAM rule to hardware */
+ for (i = 0; i < FBNIC_RPC_TCAM_ACT_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT_MASK,
+ act_tcam->mask.tcam[i]) |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT_VALUE,
+ act_tcam->value.tcam[i]));
+
+ wrfl(fbd);
+
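+ /* Only set the validate bit once all other words have been posted */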
+ wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i), FBNIC_RPC_TCAM_VALIDATE);
+ act_tcam->state = FBNIC_TCAM_S_VALID;
+}
+
+void fbnic_write_rules(struct fbnic_dev *fbd)
+{
+ int i;
+
+ /* Flush any pending action table rules */
+ for (i = 0; i < FBNIC_RPC_ACT_TBL_NUM_ENTRIES; i++) {
+ struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[i];
+
+ /* Skip the entry if the update flag is not set */
+ if (!(act_tcam->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ if (act_tcam->state == FBNIC_TCAM_S_DELETE)
+ fbnic_delete_act_tcam(fbd, i);
+ else
+ fbnic_update_act_tcam(fbd, i);
+ }
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
new file mode 100644
index 000000000000..d62935f722a2
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_RPC_H_
+#define _FBNIC_RPC_H_
+
+#include <uapi/linux/in6.h>
+#include <linux/bitfield.h>
+
+/* The TCAM state definitions follow an expected ordering.
+ * They start out disabled, then move through the following states:
+ * Disabled 0 -> Add 2
+ * Add 2 -> Valid 1
+ *
+ * Valid 1 -> Add/Update 2
+ * Add 2 -> Valid 1
+ *
+ * Valid 1 -> Delete 3
+ * Delete 3 -> Disabled 0
+ */
+enum {
+ FBNIC_TCAM_S_DISABLED = 0,
+ FBNIC_TCAM_S_VALID = 1,
+ FBNIC_TCAM_S_ADD = 2,
+ FBNIC_TCAM_S_UPDATE = FBNIC_TCAM_S_ADD,
+ FBNIC_TCAM_S_DELETE = 3,
+};
+
+/* 32 MAC Destination Address TCAM Entries
+ * 4 registers DA[1:0], DA[3:2], DA[5:4], Validate
+ */
+#define FBNIC_RPC_TCAM_MACDA_WORD_LEN 3
+#define FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES 32
+
+#define FBNIC_RPC_TCAM_ACT_WORD_LEN 11
+#define FBNIC_RPC_TCAM_ACT_NUM_ENTRIES 64
+
+struct fbnic_mac_addr {
+ union {
+ unsigned char addr8[ETH_ALEN];
+ __be16 addr16[FBNIC_RPC_TCAM_MACDA_WORD_LEN];
+ } mask, value;
+ unsigned char state;
+ DECLARE_BITMAP(act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES);
+};
+
+struct fbnic_act_tcam {
+ struct {
+ u16 tcam[FBNIC_RPC_TCAM_ACT_WORD_LEN];
+ } mask, value;
+ unsigned char state;
+ u16 rss_en_mask;
+ u32 dest;
+};
+
+enum {
+ FBNIC_RSS_EN_HOST_UDP6,
+ FBNIC_RSS_EN_HOST_UDP4,
+ FBNIC_RSS_EN_HOST_TCP6,
+ FBNIC_RSS_EN_HOST_TCP4,
+ FBNIC_RSS_EN_HOST_IP6,
+ FBNIC_RSS_EN_HOST_IP4,
+ FBNIC_RSS_EN_HOST_ETHER,
+ FBNIC_RSS_EN_XCAST_UDP6,
+#define FBNIC_RSS_EN_NUM_UNICAST FBNIC_RSS_EN_XCAST_UDP6
+ FBNIC_RSS_EN_XCAST_UDP4,
+ FBNIC_RSS_EN_XCAST_TCP6,
+ FBNIC_RSS_EN_XCAST_TCP4,
+ FBNIC_RSS_EN_XCAST_IP6,
+ FBNIC_RSS_EN_XCAST_IP4,
+ FBNIC_RSS_EN_XCAST_ETHER,
+ FBNIC_RSS_EN_NUM_ENTRIES
+};
+
+/* Reserve the first 2 entries for use by the BMC so that we can
+ * avoid allowing rules to get in the way of BMC unicast traffic.
+ */
+#define FBNIC_RPC_ACT_TBL_BMC_OFFSET 0
+#define FBNIC_RPC_ACT_TBL_BMC_ALL_MULTI_OFFSET 1
+
+/* We reserve the last 14 entries for RSS rules on the host. The BMC
+ * unicast rule will need to be populated above these and is expected to
+ * use MACDA TCAM entry 23 to store the BMC MAC address.
+ */
+#define FBNIC_RPC_ACT_TBL_RSS_OFFSET \
+ (FBNIC_RPC_ACT_TBL_NUM_ENTRIES - FBNIC_RSS_EN_NUM_ENTRIES)
+
+/* Flags used to identify the owner for this MAC filter. Note that any
+ * flags set for Broadcast through Promisc indicate that the rule belongs
+ * to the RSS filters for the host.
+ */
+enum {
+ FBNIC_MAC_ADDR_T_BMC = 0,
+ FBNIC_MAC_ADDR_T_BROADCAST = FBNIC_RPC_ACT_TBL_RSS_OFFSET,
+#define FBNIC_MAC_ADDR_T_HOST_START FBNIC_MAC_ADDR_T_BROADCAST
+ FBNIC_MAC_ADDR_T_MULTICAST,
+ FBNIC_MAC_ADDR_T_UNICAST,
+ FBNIC_MAC_ADDR_T_ALLMULTI, /* BROADCAST ... MULTICAST */
+ FBNIC_MAC_ADDR_T_PROMISC, /* BROADCAST ... UNICAST */
+ FBNIC_MAC_ADDR_T_HOST_LAST
+};
+
+#define FBNIC_MAC_ADDR_T_HOST_LEN \
+ (FBNIC_MAC_ADDR_T_HOST_LAST - FBNIC_MAC_ADDR_T_HOST_START)
+
+#define FBNIC_RPC_TCAM_ACT0_IPSRC_IDX CSR_GENMASK(2, 0)
+#define FBNIC_RPC_TCAM_ACT0_IPSRC_VALID CSR_BIT(3)
+#define FBNIC_RPC_TCAM_ACT0_IPDST_IDX CSR_GENMASK(6, 4)
+#define FBNIC_RPC_TCAM_ACT0_IPDST_VALID CSR_BIT(7)
+#define FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX CSR_GENMASK(10, 8)
+#define FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID CSR_BIT(11)
+#define FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX CSR_GENMASK(14, 12)
+#define FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID CSR_BIT(15)
+
+#define FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX CSR_GENMASK(9, 5)
+#define FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID CSR_BIT(10)
+#define FBNIC_RPC_TCAM_ACT1_IP_IS_V6 CSR_BIT(11)
+#define FBNIC_RPC_TCAM_ACT1_IP_VALID CSR_BIT(12)
+#define FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID CSR_BIT(13)
+#define FBNIC_RPC_TCAM_ACT1_L4_IS_UDP CSR_BIT(14)
+#define FBNIC_RPC_TCAM_ACT1_L4_VALID CSR_BIT(15)
+
+/* TCAM 0 - 3 reserved for BMC MAC addresses */
+#define FBNIC_RPC_TCAM_MACDA_BMC_ADDR_IDX 0
+/* TCAM 4 reserved for broadcast MAC address */
+#define FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX 4
+/* TCAMs 5 - 30 will be used for multicast and unicast addresses. The
+ * boundary between the two is variable; it is currently set to 24,
+ * which is where the unicast addresses start. The general idea is that
+ * we will always go top-down with unicast, and bottom-up with multicast,
+ * so that there should be free space in the middle between the two.
+ *
+ * The entry at MACDA_DEFAULT_BOUNDARY is a special case as it can be used
+ * for the ALL MULTI address if the list is full, or the BMC has requested
+ * it.
+ */
+#define FBNIC_RPC_TCAM_MACDA_MULTICAST_IDX 5
+#define FBNIC_RPC_TCAM_MACDA_DEFAULT_BOUNDARY 24
+#define FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX 30
+/* Reserved for use to record Multicast promisc, or Promiscuous */
+#define FBNIC_RPC_TCAM_MACDA_PROMISC_IDX 31
+
+enum {
+ FBNIC_UDP6_HASH_OPT,
+ FBNIC_UDP4_HASH_OPT,
+ FBNIC_TCP6_HASH_OPT,
+ FBNIC_TCP4_HASH_OPT,
+#define FBNIC_L4_HASH_OPT FBNIC_TCP4_HASH_OPT
+ FBNIC_IPV6_HASH_OPT,
+ FBNIC_IPV4_HASH_OPT,
+#define FBNIC_IP_HASH_OPT FBNIC_IPV4_HASH_OPT
+ FBNIC_ETHER_HASH_OPT,
+ FBNIC_NUM_HASH_OPT,
+};
+
+struct fbnic_dev;
+struct fbnic_net;
+
+void fbnic_bmc_rpc_init(struct fbnic_dev *fbd);
+void fbnic_bmc_rpc_all_multi_config(struct fbnic_dev *fbd, bool enable_host);
+
+void fbnic_reset_indir_tbl(struct fbnic_net *fbn);
+void fbnic_rss_key_fill(u32 *buffer);
+void fbnic_rss_init_en_mask(struct fbnic_net *fbn);
+void fbnic_rss_disable_hw(struct fbnic_dev *fbd);
+void fbnic_rss_reinit_hw(struct fbnic_dev *fbd, struct fbnic_net *fbn);
+void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn);
+
+int __fbnic_xc_unsync(struct fbnic_mac_addr *mac_addr, unsigned int tcam_idx);
+struct fbnic_mac_addr *__fbnic_uc_sync(struct fbnic_dev *fbd,
+ const unsigned char *addr);
+struct fbnic_mac_addr *__fbnic_mc_sync(struct fbnic_dev *fbd,
+ const unsigned char *addr);
+void fbnic_sift_macda(struct fbnic_dev *fbd);
+void fbnic_write_macda(struct fbnic_dev *fbd);
+
+static inline int __fbnic_uc_unsync(struct fbnic_mac_addr *mac_addr)
+{
+ return __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_UNICAST);
+}
+
+static inline int __fbnic_mc_unsync(struct fbnic_mac_addr *mac_addr)
+{
+ return __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_MULTICAST);
+}
+
+void fbnic_clear_rules(struct fbnic_dev *fbd);
+void fbnic_write_rules(struct fbnic_dev *fbd);
+#endif /* _FBNIC_RPC_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
new file mode 100644
index 000000000000..2a174ab062a3
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
@@ -0,0 +1,529 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/once.h>
+#include <linux/random.h>
+#include <linux/string.h>
+#include <uapi/linux/if_ether.h>
+
+#include "fbnic_tlv.h"
+
+/**
+ * fbnic_tlv_msg_alloc - Allocate page and initialize FW message header
+ * @msg_id: Identifier for new message we are starting
+ *
+ * Return: pointer to start of message, or NULL on failure.
+ *
+ * Allocates a page and initializes message header at start of page.
+ * Initial message size is 1 DWORD which is just the header.
+ **/
+struct fbnic_tlv_msg *fbnic_tlv_msg_alloc(u16 msg_id)
+{
+ struct fbnic_tlv_hdr hdr = { 0 };
+ struct fbnic_tlv_msg *msg;
+
+ msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_KERNEL);
+ if (!msg)
+ return NULL;
+
+ /* Start with zero filled header and then back fill with data */
+ hdr.type = msg_id;
+ hdr.is_msg = 1;
+ hdr.len = cpu_to_le16(1);
+
+ /* Copy header into start of message */
+ msg->hdr = hdr;
+
+ return msg;
+}
+
+/**
+ * fbnic_tlv_attr_put_flag - Add flag value to message
+ * @msg: Message header we are adding flag attribute to
+ * @attr_id: ID of flag attribute we are adding to message
+ *
+ * Return: -ENOSPC if there is no room for the attribute. Otherwise 0.
+ *
+ * Adds a 1 DWORD flag attribute to the message. The presence of this
+ * attribute indicates true; its absence is treated as false.
+ **/
+int fbnic_tlv_attr_put_flag(struct fbnic_tlv_msg *msg, const u16 attr_id)
+{
+ int attr_max_len = PAGE_SIZE - offset_in_page(msg) - sizeof(*msg);
+ struct fbnic_tlv_hdr hdr = { 0 };
+ struct fbnic_tlv_msg *attr;
+
+ attr_max_len -= le16_to_cpu(msg->hdr.len) * sizeof(u32);
+ if (attr_max_len < sizeof(*attr))
+ return -ENOSPC;
+
+ /* Get header pointer and bump attr to start of data */
+ attr = &msg[le16_to_cpu(msg->hdr.len)];
+
+ /* Record attribute type and size */
+ hdr.type = attr_id;
+ hdr.len = cpu_to_le16(sizeof(hdr));
+
+ attr->hdr = hdr;
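+ /* Note: msg->hdr.len is tracked in DWORDs while attribute lengths
+ * are in bytes; FBNIC_TLV_MSG_SIZE is assumed to round the byte
+ * count up to whole DWORDs.
+ */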
+ le16_add_cpu(&msg->hdr.len,
+ FBNIC_TLV_MSG_SIZE(le16_to_cpu(hdr.len)));
+
+ return 0;
+}
+
+/**
+ * fbnic_tlv_attr_put_value - Add data to message
+ * @msg: Message header we are adding flag attribute to
+ * @attr_id: ID of flag attribute we are adding to message
+ * @value: Pointer to data to be stored
+ * @len: Size of data to be stored.
+ *
+ * Return: -ENOSPC if there is no room for the attribute. Otherwise 0.
+ *
+ * Adds header and copies data pointed to by value into the message. The
+ * result is rounded up to the nearest DWORD for sizing so that the
+ * headers remain aligned.
+ *
+ * The assumption is that the value field is in a format where byte
+ * ordering can be guaranteed such as a byte array or a little endian
+ * format.
+ **/
+int fbnic_tlv_attr_put_value(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ const void *value, const int len)
+{
+ int attr_max_len = PAGE_SIZE - offset_in_page(msg) - sizeof(*msg);
+ struct fbnic_tlv_hdr hdr = { 0 };
+ struct fbnic_tlv_msg *attr;
+
+ attr_max_len -= le16_to_cpu(msg->hdr.len) * sizeof(u32);
+ if (attr_max_len < sizeof(*attr) + len)
+ return -ENOSPC;
+
+ /* Get header pointer and bump attr to start of data */
+ attr = &msg[le16_to_cpu(msg->hdr.len)];
+
+ /* Record attribute type and size */
+ hdr.type = attr_id;
+ hdr.len = cpu_to_le16(sizeof(hdr) + len);
+
+ /* Zero pad end of region to be written if we aren't aligned */
+ if (len % sizeof(hdr))
+ attr->value[len / sizeof(hdr)] = 0;
+
+ /* Copy data over */
+ memcpy(attr->value, value, len);
+
+ attr->hdr = hdr;
+ le16_add_cpu(&msg->hdr.len,
+ FBNIC_TLV_MSG_SIZE(le16_to_cpu(hdr.len)));
+
+ return 0;
+}
+
+/**
+ * __fbnic_tlv_attr_put_int - Add integer to message
+ * @msg: Message header we are adding flag attribute to
+ * @attr_id: ID of flag attribute we are adding to message
+ * @value: Data to be stored
+ * @len: Size of data to be stored, either 4 or 8 bytes.
+ *
+ * Return: -ENOSPC if there is no room for the attribute. Otherwise 0.
+ *
+ * Adds header and copies data pointed to by value into the message. Will
+ * format the data as little endian.
+ **/
+int __fbnic_tlv_attr_put_int(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ s64 value, const int len)
+{
+ __le64 le64_value = cpu_to_le64(value);
+
+ return fbnic_tlv_attr_put_value(msg, attr_id, &le64_value, len);
+}
+
+/**
+ * fbnic_tlv_attr_put_mac_addr - Add mac_addr to message
+ * @msg: Message header we are adding flag attribute to
+ * @attr_id: ID of flag attribute we are adding to message
+ * @mac_addr: Byte pointer to MAC address to be stored
+ *
+ * Return: -ENOSPC if there is no room for the attribute. Otherwise 0.
+ *
+ * Adds header and copies data pointed to by mac_addr into the message. Will
+ * copy the address raw so it will be in big endian with start of MAC
+ * address at start of attribute.
+ **/
+int fbnic_tlv_attr_put_mac_addr(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ const u8 *mac_addr)
+{
+ return fbnic_tlv_attr_put_value(msg, attr_id, mac_addr, ETH_ALEN);
+}
+
+/**
+ * fbnic_tlv_attr_put_string - Add string to message
+ * @msg: Message header we are adding flag attribute to
+ * @attr_id: ID of flag attribute we are adding to message
+ * @string: Byte pointer to null terminated string to be stored
+ *
+ * Return: -ENOSPC if there is no room for the attribute. Otherwise 0.
+ *
+ * Adds header and copies data pointed to by string into the message. Will
+ * copy the string raw so it will be stored in byte order.
+ **/
+int fbnic_tlv_attr_put_string(struct fbnic_tlv_msg *msg, u16 attr_id,
+ const char *string)
+{
+ int attr_max_len = PAGE_SIZE - sizeof(*msg);
+ int str_len = 1;
+
+ /* The max length will be the page size minus the existing message and
+ * the new attribute header. Since the message length is measured in
+ * DWORDs we have to multiply the size by 4.
+ *
+ * The string length doesn't include the \0 so we have to add one to
+ * the final value, so start with that as our initial value.
+ *
+ * We will verify if the string will fit in fbnic_tlv_attr_put_value()
+ */
+ attr_max_len -= le16_to_cpu(msg->hdr.len) * sizeof(u32);
+ str_len += strnlen(string, attr_max_len);
+
+ return fbnic_tlv_attr_put_value(msg, attr_id, string, str_len);
+}
+
+/**
+ * fbnic_tlv_attr_get_unsigned - Retrieve unsigned value from result
+ * @attr: Attribute to retrieve data from
+ *
+ * Return: unsigned 64b value containing integer value
+ **/
+u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr)
+{
+ __le64 le64_value = 0;
+
+ memcpy(&le64_value, &attr->value[0],
+ le16_to_cpu(attr->hdr.len) - sizeof(*attr));
+
+ return le64_to_cpu(le64_value);
+}
+
+/**
+ * fbnic_tlv_attr_get_signed - Retrieve signed value from result
+ * @attr: Attribute to retrieve data from
+ *
+ * Return: signed 64b value containing integer value
+ **/
+s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr)
+{
+ int shift = (8 + sizeof(*attr) - le16_to_cpu(attr->hdr.len)) * 8;
+ __le64 le64_value = 0;
+ s64 value;
+
+ /* Copy the value and adjust for byte ordering */
+ memcpy(&le64_value, &attr->value[0],
+ le16_to_cpu(attr->hdr.len) - sizeof(*attr));
+ value = le64_to_cpu(le64_value);
+
+ /* Sign extend the return value by using a pair of shifts */
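+ /* e.g. for a 4-byte payload the shift works out to 32 bits, so a
+ * payload of 0xffffffff is returned as -1 rather than 2^32 - 1.
+ */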
+ return (value << shift) >> shift;
+}
+
+/**
+ * fbnic_tlv_attr_get_string - Retrieve string value from result
+ * @attr: Attribute to retrieve data from
+ * @str: Pointer to an allocated string to store the data
+ * @max_size: The maximum size which can be in str
+ *
+ * Return: the size of the string read from firmware
+ **/
+size_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *str,
+ size_t max_size)
+{
+ max_size = min_t(size_t, max_size,
+ le16_to_cpu(attr->hdr.len) - sizeof(*attr));
+ memcpy(str, &attr->value, max_size);
+
+ return max_size;
+}
+
+/**
+ * fbnic_tlv_attr_nest_start - Add nested attribute header to message
+ * @msg: Message header we are adding flag attribute to
+ * @attr_id: ID of flag attribute we are adding to message
+ *
+ * Return: NULL if there is no room for the attribute. Otherwise a pointer
+ * to the new attribute header.
+ *
+ * New header length is stored initially in DWORDs.
+ **/
+struct fbnic_tlv_msg *fbnic_tlv_attr_nest_start(struct fbnic_tlv_msg *msg,
+ u16 attr_id)
+{
+ int attr_max_len = PAGE_SIZE - offset_in_page(msg) - sizeof(*msg);
+ struct fbnic_tlv_msg *attr = &msg[le16_to_cpu(msg->hdr.len)];
+ struct fbnic_tlv_hdr hdr = { 0 };
+
+ /* Make sure we have space for at least the nest header plus one more */
+ attr_max_len -= le16_to_cpu(msg->hdr.len) * sizeof(u32);
+ if (attr_max_len < sizeof(*attr) * 2)
+ return NULL;
+
+ /* Record attribute type and size */
+ hdr.type = attr_id;
+
+ /* Record the length as 1 DWORD for the nest header itself and keep
+ * it in DWORDs for now; we will shift to bytes when we close it out.
+ */
+ hdr.len = cpu_to_le16(1);
+
+ attr->hdr = hdr;
+
+ return attr;
+}
+
+/**
+ * fbnic_tlv_attr_nest_stop - Close out nested attribute and add it to message
+ * @msg: Message header we are adding flag attribute to
+ *
+ * Closes out nested attribute, adds length to message, and then bumps
+ * length from DWORDs to bytes to match other attributes.
+ **/
+void fbnic_tlv_attr_nest_stop(struct fbnic_tlv_msg *msg)
+{
+ struct fbnic_tlv_msg *attr = &msg[le16_to_cpu(msg->hdr.len)];
+ u16 len = le16_to_cpu(attr->hdr.len);
+
+ /* Add attribute to message if there is more than just a header */
+ if (len <= 1)
+ return;
+
+ le16_add_cpu(&msg->hdr.len, len);
+
+ /* Convert from DWORDs to bytes */
+ attr->hdr.len = cpu_to_le16(len * sizeof(u32));
+}
+
+static int
+fbnic_tlv_attr_validate(struct fbnic_tlv_msg *attr,
+ const struct fbnic_tlv_index *tlv_index)
+{
+ u16 len = le16_to_cpu(attr->hdr.len) - sizeof(*attr);
+ u16 attr_id = attr->hdr.type;
+ __le32 *value = &attr->value[0];
+
+ if (attr->hdr.is_msg)
+ return -EINVAL;
+
+ if (attr_id >= FBNIC_TLV_RESULTS_MAX)
+ return -EINVAL;
+
+ while (tlv_index->id != attr_id) {
+ if (tlv_index->id == FBNIC_TLV_ATTR_ID_UNKNOWN) {
+ if (attr->hdr.cannot_ignore)
+ return -ENOENT;
+ return le16_to_cpu(attr->hdr.len);
+ }
+
+ tlv_index++;
+ }
+
+ if (offset_in_page(attr) + len > PAGE_SIZE - sizeof(*attr))
+ return -E2BIG;
+
+ switch (tlv_index->type) {
+ case FBNIC_TLV_STRING:
+ if (!len || len > tlv_index->len)
+ return -EINVAL;
+ if (((char *)value)[len - 1])
+ return -EINVAL;
+ break;
+ case FBNIC_TLV_FLAG:
+ if (len)
+ return -EINVAL;
+ break;
+ case FBNIC_TLV_UNSIGNED:
+ case FBNIC_TLV_SIGNED:
+ if (tlv_index->len > sizeof(__le64))
+ return -EINVAL;
+ fallthrough;
+ case FBNIC_TLV_BINARY:
+ if (!len || len > tlv_index->len)
+ return -EINVAL;
+ break;
+ case FBNIC_TLV_NESTED:
+ case FBNIC_TLV_ARRAY:
+ if (len % 4)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * fbnic_tlv_attr_parse_array - Parse array of attributes into results array
+ * @attr: Start of attributes in the message
+ * @len: Length of attributes in the message
+ * @results: Array of pointers to store the results of parsing
+ * @tlv_index: List of TLV attributes to be parsed from message
+ * @tlv_attr_id: Specific ID that is repeated in array
+ * @array_len: Number of results to store in results array
+ *
+ * Return: zero on success, or negative value on error.
+ *
+ * Will take a list of attributes and a parser definition and will capture
+ * the results in the results array to have the data extracted later.
+ **/
+int fbnic_tlv_attr_parse_array(struct fbnic_tlv_msg *attr, int len,
+ struct fbnic_tlv_msg **results,
+ const struct fbnic_tlv_index *tlv_index,
+ u16 tlv_attr_id, size_t array_len)
+{
+ int i = 0;
+
+ /* Initialize results table to NULL. */
+ memset(results, 0, array_len * sizeof(results[0]));
+
+ /* Nothing to parse if header was only thing there */
+ if (!len)
+ return 0;
+
+ /* Work through list of attributes, parsing them as necessary */
+ while (len > 0) {
+ u16 attr_id = attr->hdr.type;
+ u16 attr_len;
+ int err;
+
+ if (tlv_attr_id != attr_id)
+ return -EINVAL;
+
+ /* Stop parsing on full error */
+ err = fbnic_tlv_attr_validate(attr, tlv_index);
+ if (err < 0)
+ return err;
+
+ if (i >= array_len)
+ return -ENOSPC;
+
+ results[i++] = attr;
+
+ attr_len = FBNIC_TLV_MSG_SIZE(le16_to_cpu(attr->hdr.len));
+ len -= attr_len;
+ attr += attr_len;
+ }
+
+ return len == 0 ? 0 : -EINVAL;
+}
+
+/**
+ * fbnic_tlv_attr_parse - Parse attributes into a list of attribute results
+ * @attr: Start of attributes in the message
+ * @len: Length of attributes in the message
+ * @results: Array of pointers to store the results of parsing
+ * @tlv_index: List of TLV attributes to be parsed from message
+ *
+ * Return: zero on success, or negative value on error.
+ *
+ * Will take a list of attributes and a parser definition and capture the
+ * results in the results array so the data can be extracted later.
+ **/
+int fbnic_tlv_attr_parse(struct fbnic_tlv_msg *attr, int len,
+ struct fbnic_tlv_msg **results,
+ const struct fbnic_tlv_index *tlv_index)
+{
+ /* Initialize results table to NULL. */
+ memset(results, 0, sizeof(results[0]) * FBNIC_TLV_RESULTS_MAX);
+
+ /* Nothing to parse if header was only thing there */
+ if (!len)
+ return 0;
+
+ /* Work through list of attributes, parsing them as necessary */
+ while (len > 0) {
+ int err = fbnic_tlv_attr_validate(attr, tlv_index);
+ u16 attr_id = attr->hdr.type;
+ u16 attr_len;
+
+ /* Stop parsing on full error */
+ if (err < 0)
+ return err;
+
+ /* Ignore results for unsupported values */
+ if (!err) {
+ /* Do not overwrite existing entries */
+ if (results[attr_id])
+ return -EADDRINUSE;
+
+ results[attr_id] = attr;
+ }
+
+ attr_len = FBNIC_TLV_MSG_SIZE(le16_to_cpu(attr->hdr.len));
+ len -= attr_len;
+ attr += attr_len;
+ }
+
+ return len == 0 ? 0 : -EINVAL;
+}
+
+/**
+ * fbnic_tlv_msg_parse - Parse message and process via predetermined functions
+ * @opaque: Value passed to parser function to enable driver access
+ * @msg: Message to be parsed.
+ * @parser: TLV message parser definition.
+ *
+ * Return: zero on success, or negative value on error.
+ *
+ * Will match the message against the parser array via the message ID, parse
+ * its attributes using that entry's attribute definitions, and then hand the
+ * results to the function provided for the entry.
+ **/
+int fbnic_tlv_msg_parse(void *opaque, struct fbnic_tlv_msg *msg,
+ const struct fbnic_tlv_parser *parser)
+{
+ struct fbnic_tlv_msg *results[FBNIC_TLV_RESULTS_MAX];
+ u16 msg_id = msg->hdr.type;
+ int err;
+
+ if (!msg->hdr.is_msg)
+ return -EINVAL;
+
+ if (le16_to_cpu(msg->hdr.len) > PAGE_SIZE / sizeof(u32))
+ return -E2BIG;
+
+ while (parser->id != msg_id) {
+ if (parser->id == FBNIC_TLV_MSG_ID_UNKNOWN)
+ return -ENOENT;
+ parser++;
+ }
+
+ err = fbnic_tlv_attr_parse(&msg[1], le16_to_cpu(msg->hdr.len) - 1,
+ results, parser->attr);
+ if (err)
+ return err;
+
+ return parser->func(opaque, results);
+}
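+
+/* Illustrative parser table (a sketch, not driver code; the FOO message ID,
+ * foo_index, and foo_handler are hypothetical):
+ *
+ *	static const struct fbnic_tlv_parser parsers[] = {
+ *		FBNIC_TLV_PARSER(FOO, foo_index, foo_handler),
+ *		FBNIC_TLV_MSG_ERROR,
+ *	};
+ *
+ *	err = fbnic_tlv_msg_parse(opaque, msg, parsers);
+ *
+ * The terminating entry carries FBNIC_TLV_MSG_ID_UNKNOWN, which stops the
+ * lookup above so an unrecognized message ID fails with -ENOENT.
+ */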
+
+/**
+ * fbnic_tlv_parser_error - called if message doesn't match known type
+ * @opaque: (unused)
+ * @results: (unused)
+ *
+ * Return: -EBADMSG to indicate the message is an unsupported type
+ **/
+int fbnic_tlv_parser_error(void *opaque, struct fbnic_tlv_msg **results)
+{
+ return -EBADMSG;
+}
+
+void fbnic_tlv_attr_addr_copy(u8 *dest, struct fbnic_tlv_msg *src)
+{
+ u8 *mac_addr;
+
+ mac_addr = fbnic_tlv_attr_get_value_ptr(src);
+ memcpy(dest, mac_addr, ETH_ALEN);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h
new file mode 100644
index 000000000000..67300ab44353
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_TLV_H_
+#define _FBNIC_TLV_H_
+
+#include <asm/byteorder.h>
+#include <linux/bits.h>
+#include <linux/const.h>
+#include <linux/types.h>
+
+#define FBNIC_TLV_MSG_ALIGN(len) ALIGN(len, sizeof(u32))
+#define FBNIC_TLV_MSG_SIZE(len) \
+ (FBNIC_TLV_MSG_ALIGN(len) / sizeof(u32))
+
+/* TLV Header Format
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Length |M|I|RSV| Type / ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * The TLV header format described above will be used for transferring
+ * messages between the host and the firmware. To ensure byte ordering
+ * we have defined all fields as being little endian.
+ * Type/ID: Identifier for message and/or attribute
+ * RSV: Reserved field for future use, likely as additional flags
+ * I: cannot_ignore flag, identifies if unrecognized attribute can be ignored
+ * M: is_msg, indicates that this is the start of a new message
+ * Length: Total length of message in dwords including header
+ * or
+ * Total length of attribute in bytes including header
+ */
+struct fbnic_tlv_hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u16 type : 12; /* 0 .. 11 Type / ID */
+ u16 rsvd : 2; /* 12 .. 13 Reserved for future use */
+ u16 cannot_ignore : 1; /* 14 Attribute must not be ignored */
+ u16 is_msg : 1; /* 15 Header belongs to message */
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u16 is_msg : 1; /* 15 Header belongs to message */
+ u16 cannot_ignore : 1; /* 14 Attribute must not be ignored */
+ u16 rsvd : 2; /* 13 .. 12 Reserved for future use */
+ u16 type : 12; /* 11 .. 0 Type / ID */
+#else
+#error "Missing defines from byteorder.h"
+#endif
+ __le16 len; /* 16 .. 31 length including TLV header */
+};
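+
+/* For example, a 32-bit unsigned attribute with ID 5 is encoded with
+ * type = 5, is_msg = 0 and len = 8 (4-byte header plus 4-byte value),
+ * while a message containing just that attribute carries is_msg = 1 and
+ * len = 3 (header plus attribute, counted in DWORDs).
+ */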
+
+#define FBNIC_TLV_RESULTS_MAX 32
+
+struct fbnic_tlv_msg {
+ struct fbnic_tlv_hdr hdr;
+ __le32 value[];
+};
+
+#define FBNIC_TLV_MSG_ID_UNKNOWN USHRT_MAX
+
+enum fbnic_tlv_type {
+ FBNIC_TLV_STRING,
+ FBNIC_TLV_FLAG,
+ FBNIC_TLV_UNSIGNED,
+ FBNIC_TLV_SIGNED,
+ FBNIC_TLV_BINARY,
+ FBNIC_TLV_NESTED,
+ FBNIC_TLV_ARRAY,
+ __FBNIC_TLV_MAX_TYPE
+};
+
+/* TLV Index
+ * Defines the relationship between the attribute IDs and their types.
+ * For each entry in the index there will be a size and type associated
+ * with it so that we can use this to parse the data and verify it matches
+ * the expected layout.
+ */
+struct fbnic_tlv_index {
+ u16 id;
+ u16 len;
+ enum fbnic_tlv_type type;
+};
+
+#define TLV_MAX_DATA (PAGE_SIZE - 512)
+#define FBNIC_TLV_ATTR_ID_UNKNOWN USHRT_MAX
+#define FBNIC_TLV_ATTR_STRING(id, len) { id, len, FBNIC_TLV_STRING }
+#define FBNIC_TLV_ATTR_FLAG(id) { id, 0, FBNIC_TLV_FLAG }
+#define FBNIC_TLV_ATTR_U32(id) { id, sizeof(u32), FBNIC_TLV_UNSIGNED }
+#define FBNIC_TLV_ATTR_U64(id) { id, sizeof(u64), FBNIC_TLV_UNSIGNED }
+#define FBNIC_TLV_ATTR_S32(id) { id, sizeof(s32), FBNIC_TLV_SIGNED }
+#define FBNIC_TLV_ATTR_S64(id) { id, sizeof(s64), FBNIC_TLV_SIGNED }
+#define FBNIC_TLV_ATTR_MAC_ADDR(id) { id, ETH_ALEN, FBNIC_TLV_BINARY }
+#define FBNIC_TLV_ATTR_NESTED(id) { id, 0, FBNIC_TLV_NESTED }
+#define FBNIC_TLV_ATTR_ARRAY(id) { id, 0, FBNIC_TLV_ARRAY }
+#define FBNIC_TLV_ATTR_RAW_DATA(id) { id, TLV_MAX_DATA, FBNIC_TLV_BINARY }
+#define FBNIC_TLV_ATTR_LAST { FBNIC_TLV_ATTR_ID_UNKNOWN, 0, 0 }
+
+struct fbnic_tlv_parser {
+ u16 id;
+ const struct fbnic_tlv_index *attr;
+ int (*func)(void *opaque,
+ struct fbnic_tlv_msg **results);
+};
+
+#define FBNIC_TLV_PARSER(id, attr, func) { FBNIC_TLV_MSG_ID_##id, attr, func }
+
+static inline void *
+fbnic_tlv_attr_get_value_ptr(struct fbnic_tlv_msg *attr)
+{
+ return (void *)&attr->value[0];
+}
+
+static inline bool fbnic_tlv_attr_get_bool(struct fbnic_tlv_msg *attr)
+{
+ return !!attr;
+}
+
+u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr);
+s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr);
+size_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *str,
+ size_t max_size);
+
+#define get_unsigned_result(id, location) \
+do { \
+ struct fbnic_tlv_msg *result = results[id]; \
+ if (result) \
+ location = fbnic_tlv_attr_get_unsigned(result); \
+} while (0)
+
+#define get_signed_result(id, location) \
+do { \
+ struct fbnic_tlv_msg *result = results[id]; \
+ if (result) \
+ location = fbnic_tlv_attr_get_signed(result); \
+} while (0)
+
+#define get_string_result(id, size, str, max_size) \
+do { \
+ struct fbnic_tlv_msg *result = results[id]; \
+ if (result) \
+ size = fbnic_tlv_attr_get_string(result, str, max_size); \
+} while (0)
+
+#define get_bool(id) (!!(results[id]))
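+
+/* Example use in a parser callback (an illustrative sketch; the FOO_ATTR_*
+ * IDs and the foo fields are hypothetical). The macros expect a local named
+ * "results" and silently leave the destination untouched when the attribute
+ * was absent from the message:
+ *
+ *	static int foo_handler(void *opaque, struct fbnic_tlv_msg **results)
+ *	{
+ *		struct foo *foo = opaque;
+ *
+ *		get_unsigned_result(FOO_ATTR_SPEED, foo->speed);
+ *		foo->link_up = get_bool(FOO_ATTR_LINK);
+ *
+ *		return 0;
+ *	}
+ */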
+
+struct fbnic_tlv_msg *fbnic_tlv_msg_alloc(u16 msg_id);
+int fbnic_tlv_attr_put_flag(struct fbnic_tlv_msg *msg, const u16 attr_id);
+int fbnic_tlv_attr_put_value(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ const void *value, const int len);
+int __fbnic_tlv_attr_put_int(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ s64 value, const int len);
+#define fbnic_tlv_attr_put_int(msg, attr_id, value) \
+ __fbnic_tlv_attr_put_int(msg, attr_id, value, \
+ FBNIC_TLV_MSG_ALIGN(sizeof(value)))
+int fbnic_tlv_attr_put_mac_addr(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ const u8 *mac_addr);
+int fbnic_tlv_attr_put_string(struct fbnic_tlv_msg *msg, u16 attr_id,
+ const char *string);
+struct fbnic_tlv_msg *fbnic_tlv_attr_nest_start(struct fbnic_tlv_msg *msg,
+ u16 attr_id);
+void fbnic_tlv_attr_nest_stop(struct fbnic_tlv_msg *msg);
+void fbnic_tlv_attr_addr_copy(u8 *dest, struct fbnic_tlv_msg *src);
+int fbnic_tlv_attr_parse_array(struct fbnic_tlv_msg *attr, int len,
+ struct fbnic_tlv_msg **results,
+ const struct fbnic_tlv_index *tlv_index,
+ u16 tlv_attr_id, size_t array_len);
+int fbnic_tlv_attr_parse(struct fbnic_tlv_msg *attr, int len,
+ struct fbnic_tlv_msg **results,
+ const struct fbnic_tlv_index *tlv_index);
+int fbnic_tlv_msg_parse(void *opaque, struct fbnic_tlv_msg *msg,
+ const struct fbnic_tlv_parser *parser);
+int fbnic_tlv_parser_error(void *opaque, struct fbnic_tlv_msg **results);
+
+#define FBNIC_TLV_MSG_ERROR \
+ FBNIC_TLV_PARSER(UNKNOWN, NULL, fbnic_tlv_parser_error)
+#endif /* _FBNIC_TLV_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
new file mode 100644
index 000000000000..0ed4c9fff5d8
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -0,0 +1,1913 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <net/netdev_queues.h>
+#include <net/page_pool/helpers.h>
+
+#include "fbnic.h"
+#include "fbnic_csr.h"
+#include "fbnic_netdev.h"
+#include "fbnic_txrx.h"
+
+struct fbnic_xmit_cb {
+ u32 bytecount;
+ u8 desc_count;
+ int hw_head;
+};
+
+#define FBNIC_XMIT_CB(__skb) ((struct fbnic_xmit_cb *)((__skb)->cb))
+
+static u32 __iomem *fbnic_ring_csr_base(const struct fbnic_ring *ring)
+{
+ unsigned long csr_base = (unsigned long)ring->doorbell;
+
+ csr_base &= ~(FBNIC_QUEUE_STRIDE * sizeof(u32) - 1);
+
+ return (u32 __iomem *)csr_base;
+}
+
+static u32 fbnic_ring_rd32(struct fbnic_ring *ring, unsigned int csr)
+{
+ u32 __iomem *csr_base = fbnic_ring_csr_base(ring);
+
+ return readl(csr_base + csr);
+}
+
+static void fbnic_ring_wr32(struct fbnic_ring *ring, unsigned int csr, u32 val)
+{
+ u32 __iomem *csr_base = fbnic_ring_csr_base(ring);
+
+ writel(val, csr_base + csr);
+}
+
+static unsigned int fbnic_desc_unused(struct fbnic_ring *ring)
+{
+ return (ring->head - ring->tail - 1) & ring->size_mask;
+}
+
+static unsigned int fbnic_desc_used(struct fbnic_ring *ring)
+{
+ return (ring->tail - ring->head) & ring->size_mask;
+}
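+
+/* Both helpers rely on the ring size being a power of two. For example,
+ * with a 1024-entry ring (size_mask = 1023), head = 1020 and tail = 4
+ * gives used = (4 - 1020) & 1023 = 8 and unused = (1020 - 4 - 1) & 1023 =
+ * 1015; an empty ring (head == tail) reports size_mask entries unused,
+ * keeping one slot as a gap so the tail never catches the head.
+ */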
+
+static struct netdev_queue *txring_txq(const struct net_device *dev,
+ const struct fbnic_ring *ring)
+{
+ return netdev_get_tx_queue(dev, ring->q_idx);
+}
+
+static int fbnic_maybe_stop_tx(const struct net_device *dev,
+ struct fbnic_ring *ring,
+ const unsigned int size)
+{
+ struct netdev_queue *txq = txring_txq(dev, ring);
+ int res;
+
+ res = netif_txq_maybe_stop(txq, fbnic_desc_unused(ring), size,
+ FBNIC_TX_DESC_WAKEUP);
+
+ return !res;
+}
+
+static bool fbnic_tx_sent_queue(struct sk_buff *skb, struct fbnic_ring *ring)
+{
+ struct netdev_queue *dev_queue = txring_txq(skb->dev, ring);
+ unsigned int bytecount = FBNIC_XMIT_CB(skb)->bytecount;
+ bool xmit_more = netdev_xmit_more();
+
+ /* TBD: Request completion more often if xmit_more becomes large */
+
+ return __netdev_tx_sent_queue(dev_queue, bytecount, xmit_more);
+}
+
+static void fbnic_unmap_single_twd(struct device *dev, __le64 *twd)
+{
+ u64 raw_twd = le64_to_cpu(*twd);
+ unsigned int len;
+ dma_addr_t dma;
+
+ dma = FIELD_GET(FBNIC_TWD_ADDR_MASK, raw_twd);
+ len = FIELD_GET(FBNIC_TWD_LEN_MASK, raw_twd);
+
+ dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
+}
+
+static void fbnic_unmap_page_twd(struct device *dev, __le64 *twd)
+{
+ u64 raw_twd = le64_to_cpu(*twd);
+ unsigned int len;
+ dma_addr_t dma;
+
+ dma = FIELD_GET(FBNIC_TWD_ADDR_MASK, raw_twd);
+ len = FIELD_GET(FBNIC_TWD_LEN_MASK, raw_twd);
+
+ dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
+}
+
+#define FBNIC_TWD_TYPE(_type) \
+ cpu_to_le64(FIELD_PREP(FBNIC_TWD_TYPE_MASK, FBNIC_TWD_TYPE_##_type))
+
+static bool
+fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
+{
+ unsigned int l2len, i3len;
+
+ if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
+ return false;
+
+ l2len = skb_mac_header_len(skb);
+ i3len = skb_checksum_start(skb) - skb_network_header(skb);
+
+ *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_CSUM_OFFSET_MASK,
+ skb->csum_offset / 2));
+
+ *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_CSO);
+
+ *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) |
+ FIELD_PREP(FBNIC_TWD_L3_IHLEN_MASK, i3len / 2));
+ return false;
+}
+
+static void
+fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq)
+{
+ skb_checksum_none_assert(skb);
+
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+ return;
+
+ if (FIELD_GET(FBNIC_RCD_META_L4_CSUM_UNNECESSARY, rcd)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ u16 csum = FIELD_GET(FBNIC_RCD_META_L2_CSUM_MASK, rcd);
+
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = (__force __wsum)csum;
+ }
+}
+
+static bool
+fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
+{
+ struct device *dev = skb->dev->dev.parent;
+ unsigned int tail = ring->tail, first;
+ unsigned int size, data_len;
+ skb_frag_t *frag;
+ dma_addr_t dma;
+ __le64 *twd;
+
+ ring->tx_buf[tail] = skb;
+
+ tail++;
+ tail &= ring->size_mask;
+ first = tail;
+
+ size = skb_headlen(skb);
+ data_len = skb->data_len;
+
+ if (size > FIELD_MAX(FBNIC_TWD_LEN_MASK))
+ goto dma_error;
+
+ dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ twd = &ring->desc[tail];
+
+ if (dma_mapping_error(dev, dma))
+ goto dma_error;
+
+ *twd = cpu_to_le64(FIELD_PREP(FBNIC_TWD_ADDR_MASK, dma) |
+ FIELD_PREP(FBNIC_TWD_LEN_MASK, size) |
+ FIELD_PREP(FBNIC_TWD_TYPE_MASK,
+ FBNIC_TWD_TYPE_AL));
+
+ tail++;
+ tail &= ring->size_mask;
+
+ if (!data_len)
+ break;
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+
+ if (size > FIELD_MAX(FBNIC_TWD_LEN_MASK))
+ goto dma_error;
+
+ dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+ }
+
+ *twd |= FBNIC_TWD_TYPE(LAST_AL);
+
+ FBNIC_XMIT_CB(skb)->desc_count = ((twd - meta) + 1) & ring->size_mask;
+
+ ring->tail = tail;
+
+ /* Verify there is room for another packet */
+ fbnic_maybe_stop_tx(skb->dev, ring, FBNIC_MAX_SKB_DESC);
+
+ if (fbnic_tx_sent_queue(skb, ring)) {
+ *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_COMPLETION);
+
+ /* Force DMA writes to flush before writing to tail */
+ dma_wmb();
+
+ writel(tail, ring->doorbell);
+ }
+
+ return false;
+dma_error:
+ if (net_ratelimit())
+ netdev_err(skb->dev, "TX DMA map failed\n");
+
+ while (tail != first) {
+ tail--;
+ tail &= ring->size_mask;
+ twd = &ring->desc[tail];
+ if (tail == first)
+ fbnic_unmap_single_twd(dev, twd);
+ else
+ fbnic_unmap_page_twd(dev, twd);
+ }
+
+ return true;
+}
+
+#define FBNIC_MIN_FRAME_LEN 60
+
+static netdev_tx_t
+fbnic_xmit_frame_ring(struct sk_buff *skb, struct fbnic_ring *ring)
+{
+ __le64 *meta = &ring->desc[ring->tail];
+ u16 desc_needed;
+
+ if (skb_put_padto(skb, FBNIC_MIN_FRAME_LEN))
+ goto err_count;
+
+ /* Need: 1 descriptor per page,
+ * + 1 desc for skb_head,
+ * + 2 desc for metadata and timestamp metadata
+ * + 7 desc gap to keep tail from touching head
+ * otherwise try next time
+ */
+ desc_needed = skb_shinfo(skb)->nr_frags + 10;
+ if (fbnic_maybe_stop_tx(skb->dev, ring, desc_needed))
+ return NETDEV_TX_BUSY;
+
+ *meta = cpu_to_le64(FBNIC_TWD_FLAG_DEST_MAC);
+
+ /* Write all members within DWORD to condense this into 2 4B writes */
+ FBNIC_XMIT_CB(skb)->bytecount = skb->len;
+ FBNIC_XMIT_CB(skb)->desc_count = 0;
+
+ if (fbnic_tx_offloads(ring, skb, meta))
+ goto err_free;
+
+ if (fbnic_tx_map(ring, skb, meta))
+ goto err_free;
+
+ return NETDEV_TX_OK;
+
+err_free:
+ dev_kfree_skb_any(skb);
+err_count:
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct net_device *dev)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ unsigned int q_map = skb->queue_mapping;
+
+ return fbnic_xmit_frame_ring(skb, fbn->tx[q_map]);
+}
+
+netdev_features_t
+fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ unsigned int l2len, l3len;
+
+ if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
+ return features;
+
+ l2len = skb_mac_header_len(skb);
+ l3len = skb_checksum_start(skb) - skb_network_header(skb);
+
+ /* Check header lengths are multiple of 2.
+ * In case of 6in6 we support longer headers (IHLEN + OHLEN)
+ * but keep things simple for now, 512B is plenty.
+ */
+ if ((l2len | l3len | skb->csum_offset) % 2 ||
+ !FIELD_FIT(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) ||
+ !FIELD_FIT(FBNIC_TWD_L3_IHLEN_MASK, l3len / 2) ||
+ !FIELD_FIT(FBNIC_TWD_CSUM_OFFSET_MASK, skb->csum_offset / 2))
+ return features & ~NETIF_F_CSUM_MASK;
+
+ return features;
+}
+
+static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
+ struct fbnic_ring *ring, bool discard,
+ unsigned int hw_head)
+{
+ u64 total_bytes = 0, total_packets = 0;
+ unsigned int head = ring->head;
+ struct netdev_queue *txq;
+ unsigned int clean_desc;
+
+ clean_desc = (hw_head - head) & ring->size_mask;
+
+ while (clean_desc) {
+ struct sk_buff *skb = ring->tx_buf[head];
+ unsigned int desc_cnt;
+
+ desc_cnt = FBNIC_XMIT_CB(skb)->desc_count;
+ if (desc_cnt > clean_desc)
+ break;
+
+ ring->tx_buf[head] = NULL;
+
+ clean_desc -= desc_cnt;
+
+ while (!(ring->desc[head] & FBNIC_TWD_TYPE(AL))) {
+ head++;
+ head &= ring->size_mask;
+ desc_cnt--;
+ }
+
+ fbnic_unmap_single_twd(nv->dev, &ring->desc[head]);
+ head++;
+ head &= ring->size_mask;
+ desc_cnt--;
+
+ while (desc_cnt--) {
+ fbnic_unmap_page_twd(nv->dev, &ring->desc[head]);
+ head++;
+ head &= ring->size_mask;
+ }
+
+ total_bytes += FBNIC_XMIT_CB(skb)->bytecount;
+ total_packets += 1;
+
+ napi_consume_skb(skb, napi_budget);
+ }
+
+ if (!total_bytes)
+ return;
+
+ ring->head = head;
+
+ txq = txring_txq(nv->napi.dev, ring);
+
+ if (unlikely(discard)) {
+ netdev_tx_completed_queue(txq, total_packets, total_bytes);
+ return;
+ }
+
+ netif_txq_completed_wake(txq, total_packets, total_bytes,
+ fbnic_desc_unused(ring),
+ FBNIC_TX_DESC_WAKEUP);
+}
+
+static void fbnic_page_pool_init(struct fbnic_ring *ring, unsigned int idx,
+ struct page *page)
+{
+ struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
+
+ page_pool_fragment_page(page, PAGECNT_BIAS_MAX);
+ rx_buf->pagecnt_bias = PAGECNT_BIAS_MAX;
+ rx_buf->page = page;
+}
+
+static struct page *fbnic_page_pool_get(struct fbnic_ring *ring,
+ unsigned int idx)
+{
+ struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
+
+ rx_buf->pagecnt_bias--;
+
+ return rx_buf->page;
+}
+
+static void fbnic_page_pool_drain(struct fbnic_ring *ring, unsigned int idx,
+ struct fbnic_napi_vector *nv, int budget)
+{
+ struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
+ struct page *page = rx_buf->page;
+
+ if (!page_pool_unref_page(page, rx_buf->pagecnt_bias))
+ page_pool_put_unrefed_page(nv->page_pool, page, -1, !!budget);
+
+ rx_buf->page = NULL;
+}
+
+static void fbnic_clean_twq(struct fbnic_napi_vector *nv, int napi_budget,
+ struct fbnic_q_triad *qt, s32 head0)
+{
+ if (head0 >= 0)
+ fbnic_clean_twq0(nv, napi_budget, &qt->sub0, false, head0);
+}
+
+static void
+fbnic_clean_tcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt,
+ int napi_budget)
+{
+ struct fbnic_ring *cmpl = &qt->cmpl;
+ __le64 *raw_tcd, done;
+ u32 head = cmpl->head;
+ s32 head0 = -1;
+
+ done = (head & (cmpl->size_mask + 1)) ? 0 : cpu_to_le64(FBNIC_TCD_DONE);
+ raw_tcd = &cmpl->desc[head & cmpl->size_mask];
+
+ /* Walk the completion queue collecting the heads reported by NIC */
+ while ((*raw_tcd & cpu_to_le64(FBNIC_TCD_DONE)) == done) {
+ u64 tcd;
+
+ dma_rmb();
+
+ tcd = le64_to_cpu(*raw_tcd);
+
+ switch (FIELD_GET(FBNIC_TCD_TYPE_MASK, tcd)) {
+ case FBNIC_TCD_TYPE_0:
+ if (!(tcd & FBNIC_TCD_TWQ1))
+ head0 = FIELD_GET(FBNIC_TCD_TYPE0_HEAD0_MASK,
+ tcd);
+ /* Currently all err status bits are related to
+ * timestamps and as those have yet to be added
+ * they are skipped for now.
+ */
+ break;
+ default:
+ break;
+ }
+
+ raw_tcd++;
+ head++;
+ if (!(head & cmpl->size_mask)) {
+ done ^= cpu_to_le64(FBNIC_TCD_DONE);
+ raw_tcd = &cmpl->desc[0];
+ }
+ }
+
+ /* Record the current head/tail of the queue */
+ if (cmpl->head != head) {
+ cmpl->head = head;
+ writel(head & cmpl->size_mask, cmpl->doorbell);
+ }
+
+ /* Unmap and free processed buffers */
+ fbnic_clean_twq(nv, napi_budget, qt, head0);
+}
+
+static void fbnic_clean_bdq(struct fbnic_napi_vector *nv, int napi_budget,
+ struct fbnic_ring *ring, unsigned int hw_head)
+{
+ unsigned int head = ring->head;
+
+ if (head == hw_head)
+ return;
+
+ do {
+ fbnic_page_pool_drain(ring, head, nv, napi_budget);
+
+ head++;
+ head &= ring->size_mask;
+ } while (head != hw_head);
+
+ ring->head = head;
+}
+
+static void fbnic_bd_prep(struct fbnic_ring *bdq, u16 id, struct page *page)
+{
+ __le64 *bdq_desc = &bdq->desc[id * FBNIC_BD_FRAG_COUNT];
+ dma_addr_t dma = page_pool_get_dma_addr(page);
+ u64 bd, i = FBNIC_BD_FRAG_COUNT;
+
+ bd = (FBNIC_BD_PAGE_ADDR_MASK & dma) |
+ FIELD_PREP(FBNIC_BD_PAGE_ID_MASK, id);
+
+ /* In the case that a page size is larger than 4K we will map a
+ * single page to multiple fragments. The fragments will be
+ * FBNIC_BD_FRAG_COUNT in size and the lower n bits will be used
+ * to indicate the individual fragment IDs.
+ */
+ do {
+ *bdq_desc = cpu_to_le64(bd);
+ bd += FIELD_PREP(FBNIC_BD_DESC_ADDR_MASK, 1) |
+ FIELD_PREP(FBNIC_BD_DESC_ID_MASK, 1);
+ } while (--i);
+}
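+
+/* Assuming FBNIC_BD_FRAG_COUNT scales as PAGE_SIZE / FBNIC_BD_FRAG_SIZE, a
+ * 16K-page build with 4K fragments would write four descriptors per page
+ * above, the descriptor address/ID fields advancing by one fragment each
+ * iteration, while a 4K-page build maps each page to a single descriptor.
+ */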
+
+static void fbnic_fill_bdq(struct fbnic_napi_vector *nv, struct fbnic_ring *bdq)
+{
+ unsigned int count = fbnic_desc_unused(bdq);
+ unsigned int i = bdq->tail;
+
+ if (!count)
+ return;
+
+ do {
+ struct page *page;
+
+ page = page_pool_dev_alloc_pages(nv->page_pool);
+ if (!page)
+ break;
+
+ fbnic_page_pool_init(bdq, i, page);
+ fbnic_bd_prep(bdq, i, page);
+
+ i++;
+ i &= bdq->size_mask;
+
+ count--;
+ } while (count);
+
+ if (bdq->tail != i) {
+ bdq->tail = i;
+
+ /* Force DMA writes to flush before writing to tail */
+ dma_wmb();
+
+ writel(i, bdq->doorbell);
+ }
+}
+
+static unsigned int fbnic_hdr_pg_start(unsigned int pg_off)
+{
+ /* The headroom of the first header may be larger than FBNIC_RX_HROOM
+ * due to alignment. So account for that by just making the page
+ * offset 0 if we are starting at the first header.
+ */
+ if (ALIGN(FBNIC_RX_HROOM, 128) > FBNIC_RX_HROOM &&
+ pg_off == ALIGN(FBNIC_RX_HROOM, 128))
+ return 0;
+
+ return pg_off - FBNIC_RX_HROOM;
+}
+
+static unsigned int fbnic_hdr_pg_end(unsigned int pg_off, unsigned int len)
+{
+ /* Determine the end of the buffer by finding the start of the next
+ * and then subtracting the headroom from that frame.
+ */
+ pg_off += len + FBNIC_RX_TROOM + FBNIC_RX_HROOM;
+
+ return ALIGN(pg_off, 128) - FBNIC_RX_HROOM;
+}
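+
+/* Worked example with hypothetical values: with FBNIC_RX_HROOM = 64 and
+ * FBNIC_RX_TROOM = 320, a header at pg_off = 192 with len = 200 gives
+ * hdr_pg_start = 192 - 64 = 128 and
+ * hdr_pg_end = ALIGN(192 + 200 + 320 + 64, 128) - 64 = 896 - 64 = 832,
+ * i.e. the buffer ends where the next 128-byte-aligned frame's headroom
+ * begins.
+ */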
+
+static void fbnic_pkt_prepare(struct fbnic_napi_vector *nv, u64 rcd,
+ struct fbnic_pkt_buff *pkt,
+ struct fbnic_q_triad *qt)
+{
+ unsigned int hdr_pg_idx = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
+ unsigned int hdr_pg_off = FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd);
+ struct page *page = fbnic_page_pool_get(&qt->sub0, hdr_pg_idx);
+ unsigned int len = FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd);
+ unsigned int frame_sz, hdr_pg_start, hdr_pg_end, headroom;
+ unsigned char *hdr_start;
+
+ /* data_hard_start should always be NULL when this is called */
+ WARN_ON_ONCE(pkt->buff.data_hard_start);
+
+ /* Short-cut the end calculation if we know page is fully consumed */
+ hdr_pg_end = FIELD_GET(FBNIC_RCD_AL_PAGE_FIN, rcd) ?
+ FBNIC_BD_FRAG_SIZE : fbnic_hdr_pg_end(hdr_pg_off, len);
+ hdr_pg_start = fbnic_hdr_pg_start(hdr_pg_off);
+
+ headroom = hdr_pg_off - hdr_pg_start + FBNIC_RX_PAD;
+ frame_sz = hdr_pg_end - hdr_pg_start;
+ xdp_init_buff(&pkt->buff, frame_sz, NULL);
+ hdr_pg_start += (FBNIC_RCD_AL_BUFF_FRAG_MASK & rcd) *
+ FBNIC_BD_FRAG_SIZE;
+
+ /* Sync DMA buffer */
+ dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page),
+ hdr_pg_start, frame_sz,
+ DMA_BIDIRECTIONAL);
+
+ /* Build frame around buffer */
+ hdr_start = page_address(page) + hdr_pg_start;
+
+ xdp_prepare_buff(&pkt->buff, hdr_start, headroom,
+ len - FBNIC_RX_PAD, true);
+
+ pkt->data_truesize = 0;
+ pkt->data_len = 0;
+ pkt->nr_frags = 0;
+}
+
+static void fbnic_add_rx_frag(struct fbnic_napi_vector *nv, u64 rcd,
+ struct fbnic_pkt_buff *pkt,
+ struct fbnic_q_triad *qt)
+{
+ unsigned int pg_idx = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
+ unsigned int pg_off = FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd);
+ unsigned int len = FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd);
+ struct page *page = fbnic_page_pool_get(&qt->sub1, pg_idx);
+ struct skb_shared_info *shinfo;
+ unsigned int truesize;
+
+ truesize = FIELD_GET(FBNIC_RCD_AL_PAGE_FIN, rcd) ?
+ FBNIC_BD_FRAG_SIZE - pg_off : ALIGN(len, 128);
+
+ pg_off += (FBNIC_RCD_AL_BUFF_FRAG_MASK & rcd) *
+ FBNIC_BD_FRAG_SIZE;
+
+ /* Sync DMA buffer */
+ dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page),
+ pg_off, truesize, DMA_BIDIRECTIONAL);
+
+ /* Add page to xdp shared info */
+ shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
+
+ /* Track the truesize consumed by this payload fragment */
+ pkt->data_truesize += truesize;
+
+ __skb_fill_page_desc_noacc(shinfo, pkt->nr_frags++, page, pg_off, len);
+
+ /* Track the payload length added by this fragment */
+ pkt->data_len += len;
+}
+
+static void fbnic_put_pkt_buff(struct fbnic_napi_vector *nv,
+ struct fbnic_pkt_buff *pkt, int budget)
+{
+ struct skb_shared_info *shinfo;
+ struct page *page;
+ int nr_frags;
+
+ if (!pkt->buff.data_hard_start)
+ return;
+
+ shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
+ nr_frags = pkt->nr_frags;
+
+ while (nr_frags--) {
+ page = skb_frag_page(&shinfo->frags[nr_frags]);
+ page_pool_put_full_page(nv->page_pool, page, !!budget);
+ }
+
+ page = virt_to_page(pkt->buff.data_hard_start);
+ page_pool_put_full_page(nv->page_pool, page, !!budget);
+}
+
+static struct sk_buff *fbnic_build_skb(struct fbnic_napi_vector *nv,
+ struct fbnic_pkt_buff *pkt)
+{
+ unsigned int nr_frags = pkt->nr_frags;
+ struct skb_shared_info *shinfo;
+ unsigned int truesize;
+ struct sk_buff *skb;
+
+ truesize = xdp_data_hard_end(&pkt->buff) + FBNIC_RX_TROOM -
+ pkt->buff.data_hard_start;
+
+ /* Build frame around buffer */
+ skb = napi_build_skb(pkt->buff.data_hard_start, truesize);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* Push data pointer to start of data, put tail to end of data */
+ skb_reserve(skb, pkt->buff.data - pkt->buff.data_hard_start);
+ __skb_put(skb, pkt->buff.data_end - pkt->buff.data);
+
+ /* Add tracking for metadata at the start of the frame */
+ skb_metadata_set(skb, pkt->buff.data - pkt->buff.data_meta);
+
+ /* Add Rx frags */
+ if (nr_frags) {
+ /* Verify that shared info didn't move */
+ shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
+ WARN_ON(skb_shinfo(skb) != shinfo);
+
+ skb->truesize += pkt->data_truesize;
+ skb->data_len += pkt->data_len;
+ shinfo->nr_frags = nr_frags;
+ skb->len += pkt->data_len;
+ }
+
+ skb_mark_for_recycle(skb);
+
+ /* Set MAC header specific fields */
+ skb->protocol = eth_type_trans(skb, nv->napi.dev);
+
+ return skb;
+}
+
+static enum pkt_hash_types fbnic_skb_hash_type(u64 rcd)
+{
+ return (FBNIC_RCD_META_L4_TYPE_MASK & rcd) ? PKT_HASH_TYPE_L4 :
+ (FBNIC_RCD_META_L3_TYPE_MASK & rcd) ? PKT_HASH_TYPE_L3 :
+ PKT_HASH_TYPE_L2;
+}
+
+static void fbnic_populate_skb_fields(struct fbnic_napi_vector *nv,
+ u64 rcd, struct sk_buff *skb,
+ struct fbnic_q_triad *qt)
+{
+ struct net_device *netdev = nv->napi.dev;
+ struct fbnic_ring *rcq = &qt->cmpl;
+
+ fbnic_rx_csum(rcd, skb, rcq);
+
+ if (netdev->features & NETIF_F_RXHASH)
+ skb_set_hash(skb,
+ FIELD_GET(FBNIC_RCD_META_RSS_HASH_MASK, rcd),
+ fbnic_skb_hash_type(rcd));
+
+ skb_record_rx_queue(skb, rcq->q_idx);
+}
+
+static bool fbnic_rcd_metadata_err(u64 rcd)
+{
+ return !!(FBNIC_RCD_META_UNCORRECTABLE_ERR_MASK & rcd);
+}
+
+static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
+ struct fbnic_q_triad *qt, int budget)
+{
+ struct fbnic_ring *rcq = &qt->cmpl;
+ struct fbnic_pkt_buff *pkt;
+ s32 head0 = -1, head1 = -1;
+ __le64 *raw_rcd, done;
+ u32 head = rcq->head;
+ u64 packets = 0;
+
+ done = (head & (rcq->size_mask + 1)) ? cpu_to_le64(FBNIC_RCD_DONE) : 0;
+ raw_rcd = &rcq->desc[head & rcq->size_mask];
+ pkt = rcq->pkt;
+
+ /* Walk the completion queue collecting the heads reported by NIC */
+ while (likely(packets < budget)) {
+ struct sk_buff *skb = ERR_PTR(-EINVAL);
+ u64 rcd;
+
+ if ((*raw_rcd & cpu_to_le64(FBNIC_RCD_DONE)) == done)
+ break;
+
+ dma_rmb();
+
+ rcd = le64_to_cpu(*raw_rcd);
+
+ switch (FIELD_GET(FBNIC_RCD_TYPE_MASK, rcd)) {
+ case FBNIC_RCD_TYPE_HDR_AL:
+ head0 = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
+ fbnic_pkt_prepare(nv, rcd, pkt, qt);
+
+ break;
+ case FBNIC_RCD_TYPE_PAY_AL:
+ head1 = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
+ fbnic_add_rx_frag(nv, rcd, pkt, qt);
+
+ break;
+ case FBNIC_RCD_TYPE_OPT_META:
+ /* Only type 0 is currently supported */
+ if (FIELD_GET(FBNIC_RCD_OPT_META_TYPE_MASK, rcd))
+ break;
+
+ /* We currently ignore the action table index */
+ break;
+ case FBNIC_RCD_TYPE_META:
+ if (likely(!fbnic_rcd_metadata_err(rcd)))
+ skb = fbnic_build_skb(nv, pkt);
+
+ /* Populate skb and invalidate XDP */
+ if (!IS_ERR_OR_NULL(skb)) {
+ fbnic_populate_skb_fields(nv, rcd, skb, qt);
+
+ packets++;
+
+ napi_gro_receive(&nv->napi, skb);
+ } else {
+ fbnic_put_pkt_buff(nv, pkt, 1);
+ }
+
+ pkt->buff.data_hard_start = NULL;
+
+ break;
+ }
+
+ raw_rcd++;
+ head++;
+ if (!(head & rcq->size_mask)) {
+ done ^= cpu_to_le64(FBNIC_RCD_DONE);
+ raw_rcd = &rcq->desc[0];
+ }
+ }
+
+ /* Unmap and free processed buffers */
+ if (head0 >= 0)
+ fbnic_clean_bdq(nv, budget, &qt->sub0, head0);
+ fbnic_fill_bdq(nv, &qt->sub0);
+
+ if (head1 >= 0)
+ fbnic_clean_bdq(nv, budget, &qt->sub1, head1);
+ fbnic_fill_bdq(nv, &qt->sub1);
+
+ /* Record the current head/tail of the queue */
+ if (rcq->head != head) {
+ rcq->head = head;
+ writel(head & rcq->size_mask, rcq->doorbell);
+ }
+
+ return packets;
+}
+
+static void fbnic_nv_irq_disable(struct fbnic_napi_vector *nv)
+{
+ struct fbnic_dev *fbd = nv->fbd;
+ u32 v_idx = nv->v_idx;
+
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(v_idx / 32), 1 << (v_idx % 32));
+}
+
+static void fbnic_nv_irq_rearm(struct fbnic_napi_vector *nv)
+{
+ struct fbnic_dev *fbd = nv->fbd;
+ u32 v_idx = nv->v_idx;
+
+ fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(v_idx),
+ FBNIC_INTR_CQ_REARM_INTR_UNMASK);
+}
+
+static int fbnic_poll(struct napi_struct *napi, int budget)
+{
+ struct fbnic_napi_vector *nv = container_of(napi,
+ struct fbnic_napi_vector,
+ napi);
+ int i, j, work_done = 0;
+
+ for (i = 0; i < nv->txt_count; i++)
+ fbnic_clean_tcq(nv, &nv->qt[i], budget);
+
+ for (j = 0; j < nv->rxt_count; j++, i++)
+ work_done += fbnic_clean_rcq(nv, &nv->qt[i], budget);
+
+ if (work_done >= budget)
+ return budget;
+
+ if (likely(napi_complete_done(napi, work_done)))
+ fbnic_nv_irq_rearm(nv);
+
+ return 0;
+}
+
+static irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data)
+{
+ struct fbnic_napi_vector *nv = data;
+
+ napi_schedule_irqoff(&nv->napi);
+
+ return IRQ_HANDLED;
+}
+
+static void fbnic_remove_tx_ring(struct fbnic_net *fbn,
+ struct fbnic_ring *txr)
+{
+ if (!(txr->flags & FBNIC_RING_F_STATS))
+ return;
+
+ /* Remove pointer to the Tx ring */
+ WARN_ON(fbn->tx[txr->q_idx] && fbn->tx[txr->q_idx] != txr);
+ fbn->tx[txr->q_idx] = NULL;
+}
+
+static void fbnic_remove_rx_ring(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr)
+{
+ if (!(rxr->flags & FBNIC_RING_F_STATS))
+ return;
+
+ /* Remove pointer to the Rx ring */
+ WARN_ON(fbn->rx[rxr->q_idx] && fbn->rx[rxr->q_idx] != rxr);
+ fbn->rx[rxr->q_idx] = NULL;
+}
+
+static void fbnic_free_napi_vector(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ struct fbnic_dev *fbd = nv->fbd;
+ u32 v_idx = nv->v_idx;
+ int i, j;
+
+ for (i = 0; i < nv->txt_count; i++) {
+ fbnic_remove_tx_ring(fbn, &nv->qt[i].sub0);
+ fbnic_remove_tx_ring(fbn, &nv->qt[i].cmpl);
+ }
+
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ fbnic_remove_rx_ring(fbn, &nv->qt[i].sub0);
+ fbnic_remove_rx_ring(fbn, &nv->qt[i].sub1);
+ fbnic_remove_rx_ring(fbn, &nv->qt[i].cmpl);
+ }
+
+ fbnic_free_irq(fbd, v_idx, nv);
+ page_pool_destroy(nv->page_pool);
+ netif_napi_del(&nv->napi);
+ list_del(&nv->napis);
+ kfree(nv);
+}
+
+void fbnic_free_napi_vectors(struct fbnic_net *fbn)
+{
+ struct fbnic_napi_vector *nv, *temp;
+
+ list_for_each_entry_safe(nv, temp, &fbn->napis, napis)
+ fbnic_free_napi_vector(fbn, nv);
+}
+
+static void fbnic_name_napi_vector(struct fbnic_napi_vector *nv)
+{
+ unsigned char *dev_name = nv->napi.dev->name;
+
+ if (!nv->rxt_count)
+ snprintf(nv->name, sizeof(nv->name), "%s-Tx-%u", dev_name,
+ nv->v_idx - FBNIC_NON_NAPI_VECTORS);
+ else
+ snprintf(nv->name, sizeof(nv->name), "%s-TxRx-%u", dev_name,
+ nv->v_idx - FBNIC_NON_NAPI_VECTORS);
+}
+
+#define FBNIC_PAGE_POOL_FLAGS \
+ (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)
+
+static int fbnic_alloc_nv_page_pool(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = FBNIC_PAGE_POOL_FLAGS,
+ .pool_size = (fbn->hpq_size + fbn->ppq_size) * nv->rxt_count,
+ .nid = NUMA_NO_NODE,
+ .dev = nv->dev,
+ .dma_dir = DMA_BIDIRECTIONAL,
+ .offset = 0,
+ .max_len = PAGE_SIZE
+ };
+ struct page_pool *pp;
+
+ /* Page pool cannot exceed a size of 32768. This doesn't limit the
+ * pages on the ring but the number we can have cached waiting on
+ * the next use.
+ *
+ * TBD: Can this be reduced further? Would a multiple of
+ * NAPI_POLL_WEIGHT possibly make more sense? The question is how
+ * many pages we need to hold in reserve to get the best return
+ * without hogging too much system memory.
+ */
+ if (pp_params.pool_size > 32768)
+ pp_params.pool_size = 32768;
+
+ pp = page_pool_create(&pp_params);
+ if (IS_ERR(pp))
+ return PTR_ERR(pp);
+
+ nv->page_pool = pp;
+
+ return 0;
+}
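+
+/* With the default ring sizes (FBNIC_HPQ_SIZE_DEFAULT and
+ * FBNIC_PPQ_SIZE_DEFAULT are both 256) a vector with one Rx triad requests
+ * a pool_size of 512, well under the 32768 cap above; the clamp only
+ * matters for large rings spread across many Rx triads on a single vector.
+ */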
+
+static void fbnic_ring_init(struct fbnic_ring *ring, u32 __iomem *doorbell,
+ int q_idx, u8 flags)
+{
+ ring->doorbell = doorbell;
+ ring->q_idx = q_idx;
+ ring->flags = flags;
+}
+
+static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
+ unsigned int v_count, unsigned int v_idx,
+ unsigned int txq_count, unsigned int txq_idx,
+ unsigned int rxq_count, unsigned int rxq_idx)
+{
+ int txt_count = txq_count, rxt_count = rxq_count;
+ u32 __iomem *uc_addr = fbd->uc_addr0;
+ struct fbnic_napi_vector *nv;
+ struct fbnic_q_triad *qt;
+ int qt_count, err;
+ u32 __iomem *db;
+
+ qt_count = txt_count + rxq_count;
+ if (!qt_count)
+ return -EINVAL;
+
+ /* If MMIO has already failed there are no rings to initialize */
+ if (!uc_addr)
+ return -EIO;
+
+ /* Allocate NAPI vector and queue triads */
+ nv = kzalloc(struct_size(nv, qt, qt_count), GFP_KERNEL);
+ if (!nv)
+ return -ENOMEM;
+
+ /* Record queue triad counts */
+ nv->txt_count = txt_count;
+ nv->rxt_count = rxt_count;
+
+ /* Provide pointer back to fbnic and MSI-X vectors */
+ nv->fbd = fbd;
+ nv->v_idx = v_idx;
+
+ /* Record IRQ to NAPI struct */
+ netif_napi_set_irq(&nv->napi,
+ pci_irq_vector(to_pci_dev(fbd->dev), nv->v_idx));
+
+ /* Tie napi to netdev */
+ list_add(&nv->napis, &fbn->napis);
+ netif_napi_add(fbn->netdev, &nv->napi, fbnic_poll);
+
+ /* Tie nv back to PCIe dev */
+ nv->dev = fbd->dev;
+
+ /* Allocate page pool */
+ if (rxq_count) {
+ err = fbnic_alloc_nv_page_pool(fbn, nv);
+ if (err)
+ goto napi_del;
+ }
+
+ /* Initialize vector name */
+ fbnic_name_napi_vector(nv);
+
+ /* Request the IRQ for napi vector */
+ err = fbnic_request_irq(fbd, v_idx, &fbnic_msix_clean_rings,
+ IRQF_SHARED, nv->name, nv);
+ if (err)
+ goto pp_destroy;
+
+ /* Initialize queue triads */
+ qt = nv->qt;
+
+ while (txt_count) {
+ /* Configure Tx queue */
+ db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TWQ0_TAIL];
+
+ /* Assign Tx queue to netdev if applicable */
+ if (txq_count > 0) {
+ u8 flags = FBNIC_RING_F_CTX | FBNIC_RING_F_STATS;
+
+ fbnic_ring_init(&qt->sub0, db, txq_idx, flags);
+ fbn->tx[txq_idx] = &qt->sub0;
+ txq_count--;
+ } else {
+ fbnic_ring_init(&qt->sub0, db, 0,
+ FBNIC_RING_F_DISABLED);
+ }
+
+ /* Configure Tx completion queue */
+ db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TCQ_HEAD];
+ fbnic_ring_init(&qt->cmpl, db, 0, 0);
+
+ /* Update Tx queue index */
+ txt_count--;
+ txq_idx += v_count;
+
+ /* Move to next queue triad */
+ qt++;
+ }
+
+ while (rxt_count) {
+ /* Configure header queue */
+ db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_HPQ_TAIL];
+ fbnic_ring_init(&qt->sub0, db, 0, FBNIC_RING_F_CTX);
+
+ /* Configure payload queue */
+ db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_PPQ_TAIL];
+ fbnic_ring_init(&qt->sub1, db, 0, FBNIC_RING_F_CTX);
+
+ /* Configure Rx completion queue */
+ db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_RCQ_HEAD];
+ fbnic_ring_init(&qt->cmpl, db, rxq_idx, FBNIC_RING_F_STATS);
+ fbn->rx[rxq_idx] = &qt->cmpl;
+
+ /* Update Rx queue index */
+ rxt_count--;
+ rxq_idx += v_count;
+
+ /* Move to next queue triad */
+ qt++;
+ }
+
+ return 0;
+
+pp_destroy:
+ page_pool_destroy(nv->page_pool);
+napi_del:
+ netif_napi_del(&nv->napi);
+ list_del(&nv->napis);
+ kfree(nv);
+ return err;
+}
+
+int fbnic_alloc_napi_vectors(struct fbnic_net *fbn)
+{
+ unsigned int txq_idx = 0, rxq_idx = 0, v_idx = FBNIC_NON_NAPI_VECTORS;
+ unsigned int num_tx = fbn->num_tx_queues;
+ unsigned int num_rx = fbn->num_rx_queues;
+ unsigned int num_napi = fbn->num_napi;
+ struct fbnic_dev *fbd = fbn->fbd;
+ int err;
+
+ /* Allocate 1 Tx queue per napi vector */
+ if (num_napi < FBNIC_MAX_TXQS && num_napi == num_tx + num_rx) {
+ while (num_tx) {
+ err = fbnic_alloc_napi_vector(fbd, fbn,
+ num_napi, v_idx,
+ 1, txq_idx, 0, 0);
+ if (err)
+ goto free_vectors;
+
+ /* Update counts and index */
+ num_tx--;
+ txq_idx++;
+
+ v_idx++;
+ }
+ }
+
+ /* Allocate Tx/Rx queue pairs per vector, or allocate remaining Rx */
+ while (num_rx | num_tx) {
+ int tqpv = DIV_ROUND_UP(num_tx, num_napi - txq_idx);
+ int rqpv = DIV_ROUND_UP(num_rx, num_napi - rxq_idx);
+
+ err = fbnic_alloc_napi_vector(fbd, fbn, num_napi, v_idx,
+ tqpv, txq_idx, rqpv, rxq_idx);
+ if (err)
+ goto free_vectors;
+
+ /* Update counts and index */
+ num_tx -= tqpv;
+ txq_idx++;
+
+ num_rx -= rqpv;
+ rxq_idx++;
+
+ v_idx++;
+ }
+
+ return 0;
+
+free_vectors:
+ fbnic_free_napi_vectors(fbn);
+
+ return -ENOMEM;
+}
+
+static void fbnic_free_ring_resources(struct device *dev,
+ struct fbnic_ring *ring)
+{
+ kvfree(ring->buffer);
+ ring->buffer = NULL;
+
+ /* If size is not set there are no descriptors present */
+ if (!ring->size)
+ return;
+
+ dma_free_coherent(dev, ring->size, ring->desc, ring->dma);
+ ring->size_mask = 0;
+ ring->size = 0;
+}
+
+static int fbnic_alloc_tx_ring_desc(struct fbnic_net *fbn,
+ struct fbnic_ring *txr)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+ size_t size;
+
+ /* Round size up to nearest 4K */
+ size = ALIGN(array_size(sizeof(*txr->desc), fbn->txq_size), 4096);
+
+ txr->desc = dma_alloc_coherent(dev, size, &txr->dma,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!txr->desc)
+ return -ENOMEM;
+
+ /* txq_size should be a power of 2, so the mask is just the size - 1 */
+ txr->size_mask = fbn->txq_size - 1;
+ txr->size = size;
+
+ return 0;
+}
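+
+/* For example, the default of 1024 8-byte descriptors
+ * (FBNIC_TXQ_SIZE_DEFAULT) needs 8192 bytes, which is already 4K-aligned,
+ * and yields a size_mask of 1023 for the wrap arithmetic used throughout
+ * this file.
+ */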
+
+static int fbnic_alloc_tx_ring_buffer(struct fbnic_ring *txr)
+{
+ size_t size = array_size(sizeof(*txr->tx_buf), txr->size_mask + 1);
+
+ txr->tx_buf = kvzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+
+ return txr->tx_buf ? 0 : -ENOMEM;
+}
+
+static int fbnic_alloc_tx_ring_resources(struct fbnic_net *fbn,
+ struct fbnic_ring *txr)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+ int err;
+
+ if (txr->flags & FBNIC_RING_F_DISABLED)
+ return 0;
+
+ err = fbnic_alloc_tx_ring_desc(fbn, txr);
+ if (err)
+ return err;
+
+ if (!(txr->flags & FBNIC_RING_F_CTX))
+ return 0;
+
+ err = fbnic_alloc_tx_ring_buffer(txr);
+ if (err)
+ goto free_desc;
+
+ return 0;
+
+free_desc:
+ fbnic_free_ring_resources(dev, txr);
+ return err;
+}
+
+static int fbnic_alloc_rx_ring_desc(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+ size_t desc_size = sizeof(*rxr->desc);
+ u32 rxq_size;
+ size_t size;
+
+ switch (rxr->doorbell - fbnic_ring_csr_base(rxr)) {
+ case FBNIC_QUEUE_BDQ_HPQ_TAIL:
+ rxq_size = fbn->hpq_size / FBNIC_BD_FRAG_COUNT;
+ desc_size *= FBNIC_BD_FRAG_COUNT;
+ break;
+ case FBNIC_QUEUE_BDQ_PPQ_TAIL:
+ rxq_size = fbn->ppq_size / FBNIC_BD_FRAG_COUNT;
+ desc_size *= FBNIC_BD_FRAG_COUNT;
+ break;
+ case FBNIC_QUEUE_RCQ_HEAD:
+ rxq_size = fbn->rcq_size;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Round size up to nearest 4K */
+ size = ALIGN(array_size(desc_size, rxq_size), 4096);
+
+ rxr->desc = dma_alloc_coherent(dev, size, &rxr->dma,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!rxr->desc)
+ return -ENOMEM;
+
+ /* rxq_size should be a power of 2, so the mask is just the size - 1 */
+ rxr->size_mask = rxq_size - 1;
+ rxr->size = size;
+
+ return 0;
+}
+
+static int fbnic_alloc_rx_ring_buffer(struct fbnic_ring *rxr)
+{
+ size_t size;
+
+ /* BDQ rings need one buffer entry per descriptor, while the completion
+ * ring only tracks a single in-progress packet buffer.
+ */
+ if (rxr->flags & FBNIC_RING_F_CTX)
+ size = array_size(sizeof(*rxr->rx_buf), rxr->size_mask + 1);
+ else
+ size = sizeof(*rxr->pkt);
+
+ rxr->rx_buf = kvzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+
+ return rxr->rx_buf ? 0 : -ENOMEM;
+}
+
+static int fbnic_alloc_rx_ring_resources(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+ int err;
+
+ err = fbnic_alloc_rx_ring_desc(fbn, rxr);
+ if (err)
+ return err;
+
+ err = fbnic_alloc_rx_ring_buffer(rxr);
+ if (err)
+ goto free_desc;
+
+ return 0;
+
+free_desc:
+ fbnic_free_ring_resources(dev, rxr);
+ return err;
+}
+
+static void fbnic_free_qt_resources(struct fbnic_net *fbn,
+ struct fbnic_q_triad *qt)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+
+ fbnic_free_ring_resources(dev, &qt->cmpl);
+ fbnic_free_ring_resources(dev, &qt->sub1);
+ fbnic_free_ring_resources(dev, &qt->sub0);
+}
+
+static int fbnic_alloc_tx_qt_resources(struct fbnic_net *fbn,
+ struct fbnic_q_triad *qt)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+ int err;
+
+ err = fbnic_alloc_tx_ring_resources(fbn, &qt->sub0);
+ if (err)
+ return err;
+
+ err = fbnic_alloc_tx_ring_resources(fbn, &qt->cmpl);
+ if (err)
+ goto free_sub0;
+
+ return 0;
+
+free_sub0:
+ fbnic_free_ring_resources(dev, &qt->sub0);
+ return err;
+}
+
+static int fbnic_alloc_rx_qt_resources(struct fbnic_net *fbn,
+ struct fbnic_q_triad *qt)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+ int err;
+
+ err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub0);
+ if (err)
+ return err;
+
+ err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub1);
+ if (err)
+ goto free_sub0;
+
+ err = fbnic_alloc_rx_ring_resources(fbn, &qt->cmpl);
+ if (err)
+ goto free_sub1;
+
+ return 0;
+
+free_sub1:
+ fbnic_free_ring_resources(dev, &qt->sub1);
+free_sub0:
+ fbnic_free_ring_resources(dev, &qt->sub0);
+ return err;
+}
+
+static void fbnic_free_nv_resources(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ int i, j;
+
+ /* Free Tx Resources */
+ for (i = 0; i < nv->txt_count; i++)
+ fbnic_free_qt_resources(fbn, &nv->qt[i]);
+
+ for (j = 0; j < nv->rxt_count; j++, i++)
+ fbnic_free_qt_resources(fbn, &nv->qt[i]);
+}
+
+static int fbnic_alloc_nv_resources(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ int i, j, err;
+
+ /* Allocate Tx Resources */
+ for (i = 0; i < nv->txt_count; i++) {
+ err = fbnic_alloc_tx_qt_resources(fbn, &nv->qt[i]);
+ if (err)
+ goto free_resources;
+ }
+
+ /* Allocate Rx Resources */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ err = fbnic_alloc_rx_qt_resources(fbn, &nv->qt[i]);
+ if (err)
+ goto free_resources;
+ }
+
+ return 0;
+
+free_resources:
+ while (i--)
+ fbnic_free_qt_resources(fbn, &nv->qt[i]);
+ return err;
+}
+
+void fbnic_free_resources(struct fbnic_net *fbn)
+{
+ struct fbnic_napi_vector *nv;
+
+ list_for_each_entry(nv, &fbn->napis, napis)
+ fbnic_free_nv_resources(fbn, nv);
+}
+
+int fbnic_alloc_resources(struct fbnic_net *fbn)
+{
+ struct fbnic_napi_vector *nv;
+ int err = -ENODEV;
+
+ list_for_each_entry(nv, &fbn->napis, napis) {
+ err = fbnic_alloc_nv_resources(fbn, nv);
+ if (err)
+ goto free_resources;
+ }
+
+ return 0;
+
+free_resources:
+ list_for_each_entry_continue_reverse(nv, &fbn->napis, napis)
+ fbnic_free_nv_resources(fbn, nv);
+
+ return err;
+}
+
+static void fbnic_disable_twq0(struct fbnic_ring *txr)
+{
+ u32 twq_ctl = fbnic_ring_rd32(txr, FBNIC_QUEUE_TWQ0_CTL);
+
+ twq_ctl &= ~FBNIC_QUEUE_TWQ_CTL_ENABLE;
+
+ fbnic_ring_wr32(txr, FBNIC_QUEUE_TWQ0_CTL, twq_ctl);
+}
+
+static void fbnic_disable_tcq(struct fbnic_ring *txr)
+{
+ fbnic_ring_wr32(txr, FBNIC_QUEUE_TCQ_CTL, 0);
+ fbnic_ring_wr32(txr, FBNIC_QUEUE_TIM_MASK, FBNIC_QUEUE_TIM_MASK_MASK);
+}
+
+static void fbnic_disable_bdq(struct fbnic_ring *hpq, struct fbnic_ring *ppq)
+{
+ u32 bdq_ctl = fbnic_ring_rd32(hpq, FBNIC_QUEUE_BDQ_CTL);
+
+ bdq_ctl &= ~FBNIC_QUEUE_BDQ_CTL_ENABLE;
+
+ fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_CTL, bdq_ctl);
+}
+
+static void fbnic_disable_rcq(struct fbnic_ring *rxr)
+{
+ fbnic_ring_wr32(rxr, FBNIC_QUEUE_RCQ_CTL, 0);
+ fbnic_ring_wr32(rxr, FBNIC_QUEUE_RIM_MASK, FBNIC_QUEUE_RIM_MASK_MASK);
+}
+
+void fbnic_napi_disable(struct fbnic_net *fbn)
+{
+ struct fbnic_napi_vector *nv;
+
+ list_for_each_entry(nv, &fbn->napis, napis) {
+ napi_disable(&nv->napi);
+
+ fbnic_nv_irq_disable(nv);
+ }
+}
+
+void fbnic_disable(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_napi_vector *nv;
+ int i, j;
+
+ list_for_each_entry(nv, &fbn->napis, napis) {
+ /* Disable Tx queue triads */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ fbnic_disable_twq0(&qt->sub0);
+ fbnic_disable_tcq(&qt->cmpl);
+ }
+
+ /* Disable Rx queue triads */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ fbnic_disable_bdq(&qt->sub0, &qt->sub1);
+ fbnic_disable_rcq(&qt->cmpl);
+ }
+ }
+
+ fbnic_wrfl(fbd);
+}
+
+static void fbnic_tx_flush(struct fbnic_dev *fbd)
+{
+ netdev_warn(fbd->netdev, "triggering Tx flush\n");
+
+ fbnic_rmw32(fbd, FBNIC_TMI_DROP_CTRL, FBNIC_TMI_DROP_CTRL_EN,
+ FBNIC_TMI_DROP_CTRL_EN);
+}
+
+static void fbnic_tx_flush_off(struct fbnic_dev *fbd)
+{
+ fbnic_rmw32(fbd, FBNIC_TMI_DROP_CTRL, FBNIC_TMI_DROP_CTRL_EN, 0);
+}
+
+struct fbnic_idle_regs {
+ u32 reg_base;
+ u8 reg_cnt;
+};
+
+static bool fbnic_all_idle(struct fbnic_dev *fbd,
+ const struct fbnic_idle_regs *regs,
+ unsigned int nregs)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < nregs; i++) {
+ for (j = 0; j < regs[i].reg_cnt; j++) {
+ if (fbnic_rd32(fbd, regs[i].reg_base + j) != ~0U)
+ return false;
+ }
+ }
+ return true;
+}
+
+static void fbnic_idle_dump(struct fbnic_dev *fbd,
+ const struct fbnic_idle_regs *regs,
+ unsigned int nregs, const char *dir, int err)
+{
+ unsigned int i, j;
+
+ netdev_err(fbd->netdev, "error waiting for %s idle %d\n", dir, err);
+ for (i = 0; i < nregs; i++)
+ for (j = 0; j < regs[i].reg_cnt; j++)
+ netdev_err(fbd->netdev, "0x%04x: %08x\n",
+ regs[i].reg_base + j,
+ fbnic_rd32(fbd, regs[i].reg_base + j));
+}
+
+int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail)
+{
+ static const struct fbnic_idle_regs tx[] = {
+ { FBNIC_QM_TWQ_IDLE(0), FBNIC_QM_TWQ_IDLE_CNT, },
+ { FBNIC_QM_TQS_IDLE(0), FBNIC_QM_TQS_IDLE_CNT, },
+ { FBNIC_QM_TDE_IDLE(0), FBNIC_QM_TDE_IDLE_CNT, },
+ { FBNIC_QM_TCQ_IDLE(0), FBNIC_QM_TCQ_IDLE_CNT, },
+ }, rx[] = {
+ { FBNIC_QM_HPQ_IDLE(0), FBNIC_QM_HPQ_IDLE_CNT, },
+ { FBNIC_QM_PPQ_IDLE(0), FBNIC_QM_PPQ_IDLE_CNT, },
+ { FBNIC_QM_RCQ_IDLE(0), FBNIC_QM_RCQ_IDLE_CNT, },
+ };
+ bool idle;
+ int err;
+
+ err = read_poll_timeout_atomic(fbnic_all_idle, idle, idle, 2, 500000,
+ false, fbd, tx, ARRAY_SIZE(tx));
+ if (err == -ETIMEDOUT) {
+ fbnic_tx_flush(fbd);
+ err = read_poll_timeout_atomic(fbnic_all_idle, idle, idle,
+ 2, 500000, false,
+ fbd, tx, ARRAY_SIZE(tx));
+ fbnic_tx_flush_off(fbd);
+ }
+ if (err) {
+ fbnic_idle_dump(fbd, tx, ARRAY_SIZE(tx), "Tx", err);
+ if (may_fail)
+ return err;
+ }
+
+ err = read_poll_timeout_atomic(fbnic_all_idle, idle, idle, 2, 500000,
+ false, fbd, rx, ARRAY_SIZE(rx));
+ if (err)
+ fbnic_idle_dump(fbd, rx, ARRAY_SIZE(rx), "Rx", err);
+ return err;
+}
+
+void fbnic_flush(struct fbnic_net *fbn)
+{
+ struct fbnic_napi_vector *nv;
+
+ list_for_each_entry(nv, &fbn->napis, napis) {
+ int i, j;
+
+ /* Flush any processed Tx Queue Triads and drop the rest */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+ struct netdev_queue *tx_queue;
+
+ /* Clean the work queues of unprocessed work */
+ fbnic_clean_twq0(nv, 0, &qt->sub0, true, qt->sub0.tail);
+
+ /* Reset completion queue descriptor ring */
+ memset(qt->cmpl.desc, 0, qt->cmpl.size);
+
+ /* Nothing else to do if Tx queue is disabled */
+ if (qt->sub0.flags & FBNIC_RING_F_DISABLED)
+ continue;
+
+ /* Reset BQL associated with Tx queue */
+ tx_queue = netdev_get_tx_queue(nv->napi.dev,
+ qt->sub0.q_idx);
+ netdev_tx_reset_queue(tx_queue);
+
+ /* Disassociate Tx queue from NAPI */
+ netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ }
+
+ /* Flush any processed Rx Queue Triads and drop the rest */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ /* Clean the work queues of unprocessed work */
+ fbnic_clean_bdq(nv, 0, &qt->sub0, qt->sub0.tail);
+ fbnic_clean_bdq(nv, 0, &qt->sub1, qt->sub1.tail);
+
+ /* Reset completion queue descriptor ring */
+ memset(qt->cmpl.desc, 0, qt->cmpl.size);
+
+ fbnic_put_pkt_buff(nv, qt->cmpl.pkt, 0);
+ qt->cmpl.pkt->buff.data_hard_start = NULL;
+
+ /* Disassociate Rx queue from NAPI */
+ netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ }
+ }
+}
+
+void fbnic_fill(struct fbnic_net *fbn)
+{
+ struct fbnic_napi_vector *nv;
+
+ list_for_each_entry(nv, &fbn->napis, napis) {
+ int i, j;
+
+ /* Configure NAPI mapping for Tx */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ /* Nothing to do if Tx queue is disabled */
+ if (qt->sub0.flags & FBNIC_RING_F_DISABLED)
+ continue;
+
+ /* Associate Tx queue with NAPI */
+ netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
+ NETDEV_QUEUE_TYPE_TX, &nv->napi);
+ }
+
+ /* Configure NAPI mapping and populate pages
+ * in the BDQ rings to use for Rx
+ */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ /* Associate Rx queue with NAPI */
+ netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
+ NETDEV_QUEUE_TYPE_RX, &nv->napi);
+
+ /* Populate the header and payload BDQs */
+ fbnic_fill_bdq(nv, &qt->sub0);
+ fbnic_fill_bdq(nv, &qt->sub1);
+ }
+ }
+}
+
+static void fbnic_enable_twq0(struct fbnic_ring *twq)
+{
+ u32 log_size = fls(twq->size_mask);
+
+ if (!twq->size_mask)
+ return;
+
+ /* Reset head/tail */
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_CTL, FBNIC_QUEUE_TWQ_CTL_RESET);
+ twq->tail = 0;
+ twq->head = 0;
+
+ /* Store descriptor ring address and size */
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_BAL, lower_32_bits(twq->dma));
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_BAH, upper_32_bits(twq->dma));
+
+ /* Write lower 4 bits of log size as 64K ring size is 0 */
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_SIZE, log_size & 0xf);
+
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_CTL, FBNIC_QUEUE_TWQ_CTL_ENABLE);
+}
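+
+/* The 4-bit truncation of the log size works because ring sizes are powers
+ * of two capped at 64K: a 1024-entry ring has fls(1023) = 10, so 10 is
+ * written, while a 65536-entry ring has fls(65535) = 16 and 16 & 0xf = 0,
+ * which per the comment above encodes the maximum 64K ring size.
+ */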
+
+static void fbnic_enable_tcq(struct fbnic_napi_vector *nv,
+ struct fbnic_ring *tcq)
+{
+ u32 log_size = fls(tcq->size_mask);
+
+ if (!tcq->size_mask)
+ return;
+
+ /* Reset head/tail */
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_CTL, FBNIC_QUEUE_TCQ_CTL_RESET);
+ tcq->tail = 0;
+ tcq->head = 0;
+
+ /* Store descriptor ring address and size */
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_BAL, lower_32_bits(tcq->dma));
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_BAH, upper_32_bits(tcq->dma));
+
+ /* Write lower 4 bits of log size as 64K ring size is 0 */
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_SIZE, log_size & 0xf);
+
+ /* Store interrupt information for the completion queue */
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_CTL, nv->v_idx);
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_THRESHOLD, tcq->size_mask / 2);
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_MASK, 0);
+
+ /* Enable queue */
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_CTL, FBNIC_QUEUE_TCQ_CTL_ENABLE);
+}
+
+static void fbnic_enable_bdq(struct fbnic_ring *hpq, struct fbnic_ring *ppq)
+{
+ u32 bdq_ctl = FBNIC_QUEUE_BDQ_CTL_ENABLE;
+ u32 log_size;
+
+ /* Reset head/tail */
+ fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_CTL, FBNIC_QUEUE_BDQ_CTL_RESET);
+ ppq->tail = 0;
+ ppq->head = 0;
+ hpq->tail = 0;
+ hpq->head = 0;
+
+ log_size = fls(hpq->size_mask);
+
+ /* Store descriptor ring address and size */
+ fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_BAL, lower_32_bits(hpq->dma));
+ fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_BAH, upper_32_bits(hpq->dma));
+
+ /* Write lower 4 bits of log size as 64K ring size is 0 */
+ fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_SIZE, log_size & 0xf);
+
+ if (!ppq->size_mask)
+ goto write_ctl;
+
+ log_size = fls(ppq->size_mask);
+
+ /* Add enabling of PPQ to BDQ control */
+ bdq_ctl |= FBNIC_QUEUE_BDQ_CTL_PPQ_ENABLE;
+
+ /* Store descriptor ring address and size */
+ fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_BAL, lower_32_bits(ppq->dma));
+ fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_BAH, upper_32_bits(ppq->dma));
+ fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_SIZE, log_size & 0xf);
+
+write_ctl:
+ fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_CTL, bdq_ctl);
+}
+
+static void fbnic_config_drop_mode_rcq(struct fbnic_napi_vector *nv,
+ struct fbnic_ring *rcq)
+{
+ u32 drop_mode, rcq_ctl;
+
+ drop_mode = FBNIC_QUEUE_RDE_CTL0_DROP_IMMEDIATE;
+
+ /* Specify packet layout */
+ rcq_ctl = FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_DROP_MODE_MASK, drop_mode) |
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_MIN_HROOM_MASK, FBNIC_RX_HROOM) |
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_MIN_TROOM_MASK, FBNIC_RX_TROOM);
+
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL0, rcq_ctl);
+}
+
+static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
+ struct fbnic_ring *rcq)
+{
+ u32 log_size = fls(rcq->size_mask);
+ u32 rcq_ctl;
+
+ fbnic_config_drop_mode_rcq(nv, rcq);
+
+ rcq_ctl = FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PADLEN_MASK, FBNIC_RX_PAD) |
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_MAX_HDR_MASK,
+ FBNIC_RX_MAX_HDR) |
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PAYLD_OFF_MASK,
+ FBNIC_RX_PAYLD_OFFSET) |
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PAYLD_PG_CL_MASK,
+ FBNIC_RX_PAYLD_PG_CL);
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL1, rcq_ctl);
+
+ /* Reset head/tail */
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_CTL, FBNIC_QUEUE_RCQ_CTL_RESET);
+ rcq->head = 0;
+ rcq->tail = 0;
+
+ /* Store descriptor ring address and size */
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_BAL, lower_32_bits(rcq->dma));
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_BAH, upper_32_bits(rcq->dma));
+
+ /* Write lower 4 bits of log size as 64K ring size is 0 */
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_SIZE, log_size & 0xf);
+
+ /* Store interrupt information for the completion queue */
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_CTL, nv->v_idx);
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_THRESHOLD, rcq->size_mask / 2);
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_MASK, 0);
+
+ /* Enable queue */
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_CTL, FBNIC_QUEUE_RCQ_CTL_ENABLE);
+}
+
+void fbnic_enable(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_napi_vector *nv;
+ int i, j;
+
+ list_for_each_entry(nv, &fbn->napis, napis) {
+ /* Setup Tx Queue Triads */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ fbnic_enable_twq0(&qt->sub0);
+ fbnic_enable_tcq(nv, &qt->cmpl);
+ }
+
+ /* Setup Rx Queue Triads */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ fbnic_enable_bdq(&qt->sub0, &qt->sub1);
+ fbnic_config_drop_mode_rcq(nv, &qt->cmpl);
+ fbnic_enable_rcq(nv, &qt->cmpl);
+ }
+ }
+
+ fbnic_wrfl(fbd);
+}
+
+static void fbnic_nv_irq_enable(struct fbnic_napi_vector *nv)
+{
+ struct fbnic_dev *fbd = nv->fbd;
+ u32 val;
+
+ val = FBNIC_INTR_CQ_REARM_INTR_UNMASK;
+
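+	/* Unmasking the vector's completion queue interrupt re-arms it */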
+ fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(nv->v_idx), val);
+}
+
+void fbnic_napi_enable(struct fbnic_net *fbn)
+{
+ u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {};
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_napi_vector *nv;
+ int i;
+
+ list_for_each_entry(nv, &fbn->napis, napis) {
+ napi_enable(&nv->napi);
+
+ fbnic_nv_irq_enable(nv);
+
+ /* Record bit used for NAPI IRQs so we can
+ * set the mask appropriately
+ */
+ irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32);
+ }
+
+ /* Force the first interrupt on the device to guarantee
+ * that any packets that may have been enqueued during the
+ * bringup are processed.
+ */
+ for (i = 0; i < ARRAY_SIZE(irqs); i++) {
+ if (!irqs[i])
+ continue;
+ fbnic_wr32(fbd, FBNIC_INTR_SET(i), irqs[i]);
+ }
+
+ fbnic_wrfl(fbd);
+}
+
+void fbnic_napi_depletion_check(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {};
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_napi_vector *nv;
+ int i, j;
+
+ list_for_each_entry(nv, &fbn->napis, napis) {
+ /* Find RQs which are completely out of pages */
+ for (i = nv->txt_count, j = 0; j < nv->rxt_count; j++, i++) {
+ /* Assume 4 pages is always enough to fit a packet
+ * and therefore generate a completion and an IRQ.
+ */
+ if (fbnic_desc_used(&nv->qt[i].sub0) < 4 ||
+ fbnic_desc_used(&nv->qt[i].sub1) < 4)
+ irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32);
+ }
+ }
+
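+	/* Clear the mask and then set the interrupt for each depleted
+	 * vector so the IRQ fires immediately and NAPI can refill.
+	 */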
+ for (i = 0; i < ARRAY_SIZE(irqs); i++) {
+ if (!irqs[i])
+ continue;
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(i), irqs[i]);
+ fbnic_wr32(fbd, FBNIC_INTR_SET(i), irqs[i]);
+ }
+
+ fbnic_wrfl(fbd);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
new file mode 100644
index 000000000000..4a206c0e7192
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_TXRX_H_
+#define _FBNIC_TXRX_H_
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <net/xdp.h>
+
+struct fbnic_net;
+
+/* Guarantee we have space needed for storing the buffer
+ * To store the buffer we need:
+ * 1 descriptor per page
+ * + 1 descriptor for skb head
+ * + 2 descriptors for metadata and optional metadata
+ * + 7 descriptors to keep tail out of the same cacheline as head
+ * If we cannot guarantee that then we should return TX_BUSY
+ */
+#define FBNIC_MAX_SKB_DESC (MAX_SKB_FRAGS + 10)
+#define FBNIC_TX_DESC_WAKEUP (FBNIC_MAX_SKB_DESC * 2)
+#define FBNIC_TX_DESC_MIN roundup_pow_of_two(FBNIC_TX_DESC_WAKEUP)
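+/* With the common MAX_SKB_FRAGS of 17 this works out to 27 descriptors
+ * per skb, a wakeup threshold of 54 descriptors, and a minimum ring size
+ * of 64 since ring sizes are powers of two.
+ */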
+
+#define FBNIC_MAX_TXQS 128u
+#define FBNIC_MAX_RXQS 128u
+
+#define FBNIC_TXQ_SIZE_DEFAULT 1024
+#define FBNIC_HPQ_SIZE_DEFAULT 256
+#define FBNIC_PPQ_SIZE_DEFAULT 256
+#define FBNIC_RCQ_SIZE_DEFAULT 1024
+
+#define FBNIC_RX_TROOM \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define FBNIC_RX_HROOM \
+ (ALIGN(FBNIC_RX_TROOM + NET_SKB_PAD, 128) - FBNIC_RX_TROOM)
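+/* Headroom is sized so that headroom plus tailroom is a multiple of
+ * 128 bytes while leaving at least NET_SKB_PAD ahead of the data.
+ */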
+#define FBNIC_RX_PAD 0
+#define FBNIC_RX_MAX_HDR (1536 - FBNIC_RX_PAD)
+#define FBNIC_RX_PAYLD_OFFSET 0
+#define FBNIC_RX_PAYLD_PG_CL 0
+
+#define FBNIC_RING_F_DISABLED BIT(0)
+#define FBNIC_RING_F_CTX BIT(1)
+#define FBNIC_RING_F_STATS BIT(2) /* Ring's stats may be used */
+
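+/* Receive packet state carried alongside the xdp_buff: the running
+ * truesize, data length, and fragment count for the frame.
+ */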
+struct fbnic_pkt_buff {
+ struct xdp_buff buff;
+ u32 data_truesize;
+ u16 data_len;
+ u16 nr_frags;
+};
+
+/* Pagecnt bias is LONG_MAX so that the sign bit is reserved to catch
+ * overflow: if we ever overcharge the bias it flips over to be negative.
+ */
+#define PAGECNT_BIAS_MAX LONG_MAX
+struct fbnic_rx_buf {
+ struct page *page;
+ long pagecnt_bias;
+};
+
+struct fbnic_ring {
+ /* Pointer to buffer specific info */
+ union {
+ struct fbnic_pkt_buff *pkt; /* RCQ */
+ struct fbnic_rx_buf *rx_buf; /* BDQ */
+ void **tx_buf; /* TWQ */
+ void *buffer; /* Generic pointer */
+ };
+
+ u32 __iomem *doorbell; /* Pointer to CSR space for ring */
+ __le64 *desc; /* Descriptor ring memory */
+ u16 size_mask; /* Size of ring in descriptors - 1 */
+ u8 q_idx; /* Logical netdev ring index */
+ u8 flags; /* Ring flags (FBNIC_RING_F_*) */
+
+ u32 head, tail; /* Head/Tail of ring */
+
+ /* Slow path fields follow */
+ dma_addr_t dma; /* Phys addr of descriptor memory */
+ size_t size; /* Size of descriptor ring in memory */
+};
+
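+/* A queue triad groups the rings backing one logical queue: for Tx, sub0
+ * is the work queue completing into cmpl; for Rx, sub0 and sub1 are the
+ * header and payload page queues (HPQ/PPQ) completing into the RCQ.
+ */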
+struct fbnic_q_triad {
+ struct fbnic_ring sub0, sub1, cmpl;
+};
+
+struct fbnic_napi_vector {
+ struct napi_struct napi;
+ struct device *dev; /* Device for DMA unmapping */
+ struct page_pool *page_pool;
+ struct fbnic_dev *fbd;
+ char name[IFNAMSIZ + 9];
+
+ u16 v_idx;
+ u8 txt_count;
+ u8 rxt_count;
+
+ struct list_head napis;
+
+ struct fbnic_q_triad qt[];
+};
+
+netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct net_device *dev);
+netdev_features_t
+fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features);
+
+int fbnic_alloc_napi_vectors(struct fbnic_net *fbn);
+void fbnic_free_napi_vectors(struct fbnic_net *fbn);
+int fbnic_alloc_resources(struct fbnic_net *fbn);
+void fbnic_free_resources(struct fbnic_net *fbn);
+void fbnic_napi_enable(struct fbnic_net *fbn);
+void fbnic_napi_disable(struct fbnic_net *fbn);
+void fbnic_enable(struct fbnic_net *fbn);
+void fbnic_disable(struct fbnic_net *fbn);
+void fbnic_flush(struct fbnic_net *fbn);
+void fbnic_fill(struct fbnic_net *fbn);
+
+void fbnic_napi_depletion_check(struct net_device *netdev);
+int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail);
+
+#endif /* _FBNIC_TXRX_H_ */