author     Linus Torvalds <torvalds@linux-foundation.org>  2024-07-16 19:28:34 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-07-16 19:28:34 -0700
commit     51835949dda3783d4639cfa74ce13a3c9829de00 (patch)
tree       2b593de5eba6ecc73f7c58fc65fdaffae45c7323 /drivers/net/ethernet/intel
parent     0434dbe32053d07d658165be681505120c6b1abc (diff)
parent     77ae5e5b00720372af2860efdc4bc652ac682696 (diff)
Merge tag 'net-next-6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next (HEAD, master)
Pull networking updates from Jakub Kicinski:
 "Not much excitement - a handful of large patchsets (devmem among them)
  did not make it in time.

  Core & protocols:

   - Use local_lock in addition to local_bh_disable() to protect per-CPU
     resources in networking, a step closer for local_bh_disable() not
     to act as a big lock on PREEMPT_RT

   - Use flex array for netdevice priv area, ensure its cache alignment

   - Add a sysctl knob to allow user to specify a default rto_min at
     socket init time. Bit of a big hammer but multiple companies were
     independently carrying such patch downstream so clearly it's useful

   - Support scheduling transmission of packets based on CLOCK_TAI

   - Un-pin TCP TIMEWAIT timer to avoid it firing on CPUs later cordoned
     off using cpusets

   - Support multiple L2TPv3 UDP tunnels using the same 5-tuple address

   - Allow configuration of multipath hash seed, to both allow
     synchronizing hashing of two routers, and preventing partial
     accidental sync

   - Improve TCP compliance with RFC 9293 for simultaneous connect()

   - Support sending NAT keepalives in IPsec ESP in UDP states.
     Userspace IKE daemon had to do this before, but the kernel can
     better keep track of it

   - Support sending supervision HSR frames with MAC addresses stored in
     ProxyNodeTable when RedBox (i.e. HSR-SAN) is enabled

   - Introduce IPPROTO_SMC for selecting SMC when socket is created

   - Allow UDP GSO transmit from devices with no checksum offload

   - openvswitch: add packet sampling via psample, separating the
     sampled traffic from "upcall" packets sent to user space for
     forwarding

   - nf_tables: shrink memory consumption for transaction objects

  Things we sprinkled into general kernel code:

   - Power Sequencing subsystem (used by Qualcomm Bluetooth driver for
     QCA6390) [ Already merged separately - Linus ]

   - Add IRQ information in sysfs for auxiliary bus

   - Introduce guard definition for local_lock

   - Add aligned flavor of __cacheline_group_{begin, end}() markings for
     grouping fields in structures

  BPF:

   - Notify user space (via epoll) when a struct_ops object is getting
     detached/unregistered

   - Add new kfuncs for a generic, open-coded bits iterator

   - Enable BPF programs to declare arrays of kptr, bpf_rb_root, and
     bpf_list_head

   - Support resilient split BTF which cuts down on duplication and
     makes BTF as compact as possible WRT BTF from modules

   - Add support for dumping kfunc prototypes from BTF which enables
     both detecting as well as dumping compilable prototypes for kfuncs

   - riscv64 BPF JIT improvements in particular to add 12-argument
     support for BPF trampolines and to utilize bpf_prog_pack for the
     latter

   - Add the capability to offload the netfilter flowtable in XDP layer
     through kfuncs

  Driver API:

   - Allow users to configure IRQ thresholds between which automatic IRQ
     moderation can choose

   - Expand Power Sourcing (PoE) status with power, class and failure
     reason. Support setting power limits

   - Track additional RSS contexts in the core, make sure configuration
     changes don't break them

   - Support IPsec crypto offload for IPv6 ESP and IPv4 UDP-encapsulated
     ESP data paths

   - Support updating firmware on SFP modules

  Tests and tooling:

   - mptcp: use net/lib.sh to manage netns

   - TCP-AO and TCP-MD5: replace debug prints used by tests with
     tracepoints

   - openvswitch: make test self-contained (don't depend on OvS CLI
     tools)

  Drivers:

   - Ethernet high-speed NICs:
      - Broadcom (bnxt):
         - increase the max total outstanding PTP TX packets to 4
         - add timestamping statistics support
         - implement netdev_queue_mgmt_ops
         - support new RSS context API
      - Intel (100G, ice, idpf):
         - implement FEC statistics and dumping signal quality indicators
         - support E825C products (with 56Gbps PHYs)
      - nVidia/Mellanox:
         - support HW-GRO
         - mlx4/mlx5: support per-queue statistics via netlink
         - obey the max number of EQs setting in sub-functions
      - AMD/Solarflare:
         - support new RSS context API
      - AMD/Pensando:
         - ionic: rework fix for doorbell miss to lower overhead and
           skip it on new HW
      - Wangxun:
         - txgbe: support Flow Director perfect filters

   - Ethernet NICs consumer, embedded and virtual:
      - Add driver for Tehuti Networks TN40xx chips
      - Add driver for Meta's internal NIC chips
      - Add driver for Ethernet MAC on Airoha EN7581 SoCs
      - Add driver for Renesas Ethernet-TSN devices
      - Google cloud vNIC:
         - flow steering support
      - Microsoft vNIC:
         - support page sizes other than 4KB on ARM64
      - vmware vNIC:
         - support latency measurement (update to version 9)
      - VirtIO net:
         - support for Byte Queue Limits
         - support configuring thresholds for automatic IRQ moderation
         - support for AF_XDP Rx zero-copy
      - Synopsys (stmmac):
         - support for STM32MP13 SoC
         - let platforms select the right PCS implementation
      - TI:
         - icssg-prueth: add multicast filtering support
         - icssg-prueth: enable PTP timestamping and PPS
      - Renesas:
         - ravb: improve Rx performance 30-400% by using page pool,
           threaded NAPI and timer-based IRQ coalescing
         - ravb: add MII support for R-Car V4M
      - Cadence (macb):
         - macb: add ARP support to Wake-On-LAN
      - Cortina:
         - use phylib for RX and TX pause configuration

   - Ethernet switches:
      - nVidia/Mellanox:
         - support configuration of multipath hash seed
         - report more accurate max MTU
         - use page_pool to improve Rx performance
      - MediaTek:
         - mt7530: add support for bridge port isolation
      - Qualcomm:
         - qca8k: add support for bridge port isolation
      - Microchip:
         - lan9371/2: add 100BaseTX PHY support
      - NXP:
         - vsc73xx: implement VLAN operations

   - Ethernet PHYs:
      - aquantia: enable support for aqr115c
      - aquantia: add support for PHY LEDs
      - realtek: add support for rtl8224 2.5Gbps PHY
      - xpcs: add memory-mapped device support
      - add BroadR-Reach link mode and support in Broadcom's PHY driver

   - CAN:
      - add document for ISO 15765-2 protocol support
      - mcp251xfd: workaround for erratum DS80000789E, use timestamps to
        catch when device returns incorrect FIFO status

   - WiFi:
      - mac80211/cfg80211:
         - parse Transmit Power Envelope (TPE) data in mac80211 instead
           of in drivers
         - improvements for 6 GHz regulatory flexibility
         - multi-link improvements
         - support multiple radios per wiphy
         - remove DEAUTH_NEED_MGD_TX_PREP flag
      - Intel (iwlwifi):
         - bump FW API to 91 for BZ/SC devices
         - report 64-bit radiotap timestamp
         - enable P2P low latency by default
         - handle Transmit Power Envelope (TPE) advertised by AP
         - remove support for older FW for new devices
         - fast resume (keeping the device configured)
         - mvm: re-enable Multi-Link Operation (MLO)
         - aggregation (A-MSDU) optimizations
      - MediaTek (mt76):
         - mt7925 Multi-Link Operation (MLO) support
      - Qualcomm (ath10k):
         - LED support for various chipsets
      - Qualcomm (ath12k):
         - remove unsupported Tx monitor handling
         - support channel 2 in 6 GHz band
         - support Spatial Multiplexing Power Save (SMPS) in 6 GHz band
         - support multiple BSSID (MBSSID) and Enhanced Multi-BSSID
           Advertisements (EMA)
         - support dynamic VLAN
         - add panic handler for resetting the firmware state
         - DebugFS support for datapath statistics
         - WCN7850: support for Wake on WLAN
      - Microchip (wilc1000):
         - read MAC address during probe to make it visible to user space
         - suspend/resume improvements
      - TI (wl18xx):
         - support newer firmware versions
      - RealTek (rtw89):
         - preparation for RTL8852BE-VT support
         - Wake on WLAN support for WiFi 6 chips
         - 36-bit PCI DMA support
      - RealTek (rtlwifi):
         - RTL8192DU support
      - Broadcom (brcmfmac):
         - Management Frame Protection support (to enable WPA3)

   - Bluetooth:
      - qualcomm: use the power sequencer for QCA6390
      - btusb: mediatek: add ISO data transmission functions
      - hci_bcm4377: add BCM4388 support
      - btintel: add support for BlazarU core
      - btintel: add support for Whale Peak2
      - btnxpuart: add support for AW693 A1 chipset
      - btnxpuart: add support for IW615 chipset
      - btusb: add Realtek RTL8852BE support ID 0x13d3:0x3591"

* tag 'net-next-6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1589 commits)
  eth: fbnic: Fix spelling mistake "tiggerring" -> "triggering"
  tcp: Replace strncpy() with strscpy()
  wifi: ath12k: fix build vs old compiler
  tcp: Don't access uninit tcp_rsk(req)->ao_keyid in tcp_create_openreq_child().
  eth: fbnic: Write the TCAM tables used for RSS control and Rx to host
  eth: fbnic: Add L2 address programming
  eth: fbnic: Add basic Rx handling
  eth: fbnic: Add basic Tx handling
  eth: fbnic: Add link detection
  eth: fbnic: Add initial messaging to notify FW of our presence
  eth: fbnic: Implement Rx queue alloc/start/stop/free
  eth: fbnic: Implement Tx queue alloc/start/stop/free
  eth: fbnic: Allocate a netdevice and napi vectors with queues
  eth: fbnic: Add FW communication mechanism
  eth: fbnic: Add message parsing for FW messages
  eth: fbnic: Add register init to set PCIe/Ethernet device config
  eth: fbnic: Allocate core device specific structures and devlink interface
  eth: fbnic: Add scaffolding for Meta's NIC driver
  PCI: Add Meta Platforms vendor ID
  net/sched: cls_flower: propagate tca[TCA_OPTIONS] to NL_REQ_ATTR_CHECK
  ...
Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 13
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000/Makefile | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/Makefile | 7
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 1
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/Makefile | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 1
-rw-r--r--  drivers/net/ethernet/intel/iavf/Makefile | 5
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/devlink.c | 128
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/devlink_port.c | 61
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adapter.c | 60
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 63
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_cgu_regs.h | 77
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 188
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 32
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c | 30
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.h | 15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch.c | 101
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch.h | 20
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch_br.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch_br.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 444
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.h | 29
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 26
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_protocol_type.h | 43
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c | 211
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_consts.h | 402
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 3242
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 295
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_repr.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_repr.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sbq_cmd.h | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c | 34
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 674
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h | 20
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tc_lib.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_trace.h | 18
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 69
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.c | 2
-rw-r--r--  drivers/net/ethernet/intel/idpf/Kconfig | 26
-rw-r--r--  drivers/net/ethernet/intel/idpf/Makefile | 3
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf.h | 11
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_ethtool.c | 152
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_lib.c | 88
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_main.c | 1
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c | 306
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.c | 1412
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.h | 734
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl.c | 178
-rw-r--r--  drivers/net/ethernet/intel/igb/Makefile | 6
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 5
-rw-r--r--  drivers/net/ethernet/intel/igbvf/Makefile | 6
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 1
-rw-r--r--  drivers/net/ethernet/intel/igc/Makefile | 6
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/Makefile | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/Makefile | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 1
-rw-r--r--  drivers/net/ethernet/intel/libeth/Makefile | 2
-rw-r--r--  drivers/net/ethernet/intel/libeth/rx.c | 133
-rw-r--r--  drivers/net/ethernet/intel/libie/Makefile | 2
-rw-r--r--  drivers/net/ethernet/intel/libie/rx.c | 1
78 files changed, 6772 insertions, 2716 deletions
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index e0287fbd501d..0375c7448a57 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -384,17 +384,6 @@ config IGC_LEDS
Optional support for controlling the NIC LED's with the netdev
LED trigger.
-config IDPF
- tristate "Intel(R) Infrastructure Data Path Function Support"
- depends on PCI_MSI
- select DIMLIB
- select PAGE_POOL
- select PAGE_POOL_STATS
- help
- This driver supports Intel(R) Infrastructure Data Path Function
- devices.
-
- To compile this driver as a module, choose M here. The module
- will be called idpf.
+source "drivers/net/ethernet/intel/idpf/Kconfig"
endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 9b068d40778d..aa139b67a55b 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -161,7 +161,6 @@
#define FIRMWARE_D102E "e100/d102e_ucode.bin"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(FIRMWARE_D101M);
MODULE_FIRMWARE(FIRMWARE_D101S);
diff --git a/drivers/net/ethernet/intel/e1000/Makefile b/drivers/net/ethernet/intel/e1000/Makefile
index 314c52d44b7c..79491dec47e1 100644
--- a/drivers/net/ethernet/intel/e1000/Makefile
+++ b/drivers/net/ethernet/intel/e1000/Makefile
@@ -7,4 +7,4 @@
obj-$(CONFIG_E1000) += e1000.o
-e1000-objs := e1000_main.o e1000_hw.o e1000_ethtool.o e1000_param.o
+e1000-y := e1000_main.o e1000_hw.o e1000_ethtool.o e1000_param.o
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 60fff9a6c53e..ab7ae418d294 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -187,7 +187,6 @@ static struct pci_driver e1000_driver = {
.err_handler = &e1000_err_handler
};
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile
index 0baa15503c38..18f22b6374d5 100644
--- a/drivers/net/ethernet/intel/e1000e/Makefile
+++ b/drivers/net/ethernet/intel/e1000e/Makefile
@@ -10,7 +10,6 @@ subdir-ccflags-y += -I$(src)
obj-$(CONFIG_E1000E) += e1000e.o
-e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \
- mac.o manage.o nvm.o phy.o \
- param.o ethtool.o netdev.o ptp.o
-
+e1000e-y := 82571.o ich8lan.o 80003es2lan.o \
+ mac.o manage.o nvm.o phy.o \
+ param.o ethtool.o netdev.o ptp.o
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 85da20778e0f..9364bc2b4eb1 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -2263,7 +2263,7 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
}
static int e1000e_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 3cd161c6672b..360ee26557f7 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -7969,7 +7969,6 @@ static void __exit e1000_exit_module(void)
}
module_exit(e1000_exit_module);
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index fc373472e4e1..142f07ca8bc0 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -17,7 +17,6 @@ static const char fm10k_driver_string[] = DRV_SUMMARY;
static const char fm10k_copyright[] =
"Copyright(c) 2013 - 2019 Intel Corporation.";
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index cad93f323bd5..9faa4339a76c 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -10,7 +10,7 @@ subdir-ccflags-y += -I$(src)
obj-$(CONFIG_I40E) += i40e.o
-i40e-objs := i40e_main.o \
+i40e-y := i40e_main.o \
i40e_ethtool.o \
i40e_adminq.o \
i40e_common.o \
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index bca2084cc54b..d546567e0286 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -735,7 +735,7 @@ __i40e_pf_next_veb(struct i40e_pf *pf, int *idx)
_i++, _veb = __i40e_pf_next_veb(_pf, &_i))
/**
- * i40e_mac_to_hkey - Convert a 6-byte MAC Address to a u64 hash key
+ * i40e_addr_to_hkey - Convert a 6-byte MAC Address to a u64 hash key
* @macaddr: the MAC Address as the base key
*
* Simply copies the address and returns it as a u64 for hashing
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 4e28785c9fb2..1d0d2e526adb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2546,7 +2546,7 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
}
static int i40e_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 310513d9321b..cbcfada7b357 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -98,7 +98,6 @@ static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
-MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_IMPORT_NS(LIBIE);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile
index 2d154a4e2fd7..356ac9faa5bf 100644
--- a/drivers/net/ethernet/intel/iavf/Makefile
+++ b/drivers/net/ethernet/intel/iavf/Makefile
@@ -11,6 +11,5 @@ subdir-ccflags-y += -I$(src)
obj-$(CONFIG_IAVF) += iavf.o
-iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \
- iavf_adv_rss.o \
- iavf_txrx.o iavf_common.o iavf_adminq.o
+iavf-y := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \
+ iavf_adv_rss.o iavf_txrx.o iavf_common.o iavf_adminq.o
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index c6dff0963053..ff11bafb3b4f 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -45,7 +45,6 @@ static const struct pci_device_id iavf_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
MODULE_ALIAS("i40evf");
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_IMPORT_NS(LIBETH);
MODULE_IMPORT_NS(LIBIE);
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index 704e9ad5144e..810a901d7afd 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -794,10 +794,8 @@ int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *v
tc_node = pi->root->children[0];
mutex_lock(&pi->sched_lock);
- devl_lock(devlink);
for (i = 0; i < tc_node->num_children; i++)
ice_traverse_tx_tree(devlink, tc_node->children[i], tc_node, pf);
- devl_unlock(devlink);
mutex_unlock(&pi->sched_lock);
return 0;
@@ -1383,9 +1381,129 @@ ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
return 0;
}
+#define DEVLINK_LOCAL_FWD_DISABLED_STR "disabled"
+#define DEVLINK_LOCAL_FWD_ENABLED_STR "enabled"
+#define DEVLINK_LOCAL_FWD_PRIORITIZED_STR "prioritized"
+
+/**
+ * ice_devlink_local_fwd_mode_to_str - Get string for local_fwd mode.
+ * @mode: local forwarding for mode used in port_info struct.
+ *
+ * Return: Mode respective string or "Invalid".
+ */
+static const char *
+ice_devlink_local_fwd_mode_to_str(enum ice_local_fwd_mode mode)
+{
+ switch (mode) {
+ case ICE_LOCAL_FWD_MODE_ENABLED:
+ return DEVLINK_LOCAL_FWD_ENABLED_STR;
+ case ICE_LOCAL_FWD_MODE_PRIORITIZED:
+ return DEVLINK_LOCAL_FWD_PRIORITIZED_STR;
+ case ICE_LOCAL_FWD_MODE_DISABLED:
+ return DEVLINK_LOCAL_FWD_DISABLED_STR;
+ }
+
+ return "Invalid";
+}
+
+/**
+ * ice_devlink_local_fwd_str_to_mode - Get local_fwd mode from string name.
+ * @mode_str: local forwarding mode string.
+ *
+ * Return: Mode value or negative number if invalid.
+ */
+static int ice_devlink_local_fwd_str_to_mode(const char *mode_str)
+{
+ if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_ENABLED_STR))
+ return ICE_LOCAL_FWD_MODE_ENABLED;
+ else if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_PRIORITIZED_STR))
+ return ICE_LOCAL_FWD_MODE_PRIORITIZED;
+ else if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_DISABLED_STR))
+ return ICE_LOCAL_FWD_MODE_DISABLED;
+
+ return -EINVAL;
+}
+
+/**
+ * ice_devlink_local_fwd_get - Get local_fwd parameter.
+ * @devlink: Pointer to the devlink instance.
+ * @id: The parameter ID to set.
+ * @ctx: Context to store the parameter value.
+ *
+ * Return: Zero.
+ */
+static int ice_devlink_local_fwd_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ struct ice_port_info *pi;
+ const char *mode_str;
+
+ pi = pf->hw.port_info;
+ mode_str = ice_devlink_local_fwd_mode_to_str(pi->local_fwd_mode);
+ snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s", mode_str);
+
+ return 0;
+}
+
+/**
+ * ice_devlink_local_fwd_set - Set local_fwd parameter.
+ * @devlink: Pointer to the devlink instance.
+ * @id: The parameter ID to set.
+ * @ctx: Context to get the parameter value.
+ * @extack: Netlink extended ACK structure.
+ *
+ * Return: Zero.
+ */
+static int ice_devlink_local_fwd_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ int new_local_fwd_mode = ice_devlink_local_fwd_str_to_mode(ctx->val.vstr);
+ struct ice_pf *pf = devlink_priv(devlink);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_port_info *pi;
+
+ pi = pf->hw.port_info;
+ if (pi->local_fwd_mode != new_local_fwd_mode) {
+ pi->local_fwd_mode = new_local_fwd_mode;
+ dev_info(dev, "Setting local_fwd to %s\n", ctx->val.vstr);
+ ice_schedule_reset(pf, ICE_RESET_CORER);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_local_fwd_validate - Validate passed local_fwd parameter value.
+ * @devlink: Unused pointer to devlink instance.
+ * @id: The parameter ID to validate.
+ * @val: Value to validate.
+ * @extack: Netlink extended ACK structure.
+ *
+ * Supported values are:
+ * "enabled" - local_fwd is enabled, "disabled" - local_fwd is disabled
+ * "prioritized" - local_fwd traffic is prioritized in scheduling.
+ *
+ * Return: Zero when passed parameter value is supported. Negative value on
+ * error.
+ */
+static int ice_devlink_local_fwd_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (ice_devlink_local_fwd_str_to_mode(val.vstr) < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Error: Requested value is not supported.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
enum ice_param_id {
ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
+ ICE_DEVLINK_PARAM_ID_LOCAL_FWD,
};
static const struct devlink_param ice_dvl_rdma_params[] = {
@@ -1407,6 +1525,12 @@ static const struct devlink_param ice_dvl_sched_params[] = {
ice_devlink_tx_sched_layers_get,
ice_devlink_tx_sched_layers_set,
ice_devlink_tx_sched_layers_validate),
+ DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_LOCAL_FWD,
+ "local_forwarding", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ ice_devlink_local_fwd_get,
+ ice_devlink_local_fwd_set,
+ ice_devlink_local_fwd_validate),
};
static void ice_devlink_free(void *devlink_ptr)
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
index 13e6790d3cae..00fed5a61d62 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
@@ -373,6 +373,62 @@ void ice_devlink_destroy_pf_port(struct ice_pf *pf)
}
/**
+ * ice_devlink_port_get_vf_fn_mac - .port_fn_hw_addr_get devlink handler
+ * @port: devlink port structure
+ * @hw_addr: MAC address of the port
+ * @hw_addr_len: length of MAC address
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_fn_hw_addr_get operation
+ * Return: zero on success or an error code on failure.
+ */
+static int ice_devlink_port_get_vf_fn_mac(struct devlink_port *port,
+ u8 *hw_addr, int *hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_vf *vf = container_of(port, struct ice_vf, devlink_port);
+
+ ether_addr_copy(hw_addr, vf->dev_lan_addr);
+ *hw_addr_len = ETH_ALEN;
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_set_vf_fn_mac - .port_fn_hw_addr_set devlink handler
+ * @port: devlink port structure
+ * @hw_addr: MAC address of the port
+ * @hw_addr_len: length of MAC address
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_fn_hw_addr_set operation
+ * Return: zero on success or an error code on failure.
+ */
+static int ice_devlink_port_set_vf_fn_mac(struct devlink_port *port,
+ const u8 *hw_addr,
+ int hw_addr_len,
+ struct netlink_ext_ack *extack)
+
+{
+ struct devlink_port_attrs *attrs = &port->attrs;
+ struct devlink_port_pci_vf_attrs *pci_vf;
+ struct devlink *devlink = port->devlink;
+ struct ice_pf *pf;
+ u16 vf_id;
+
+ pf = devlink_priv(devlink);
+ pci_vf = &attrs->pci_vf;
+ vf_id = pci_vf->vf;
+
+ return __ice_set_vf_mac(pf, vf_id, hw_addr);
+}
+
+static const struct devlink_port_ops ice_devlink_vf_port_ops = {
+ .port_fn_hw_addr_get = ice_devlink_port_get_vf_fn_mac,
+ .port_fn_hw_addr_set = ice_devlink_port_set_vf_fn_mac,
+};
+
+/**
* ice_devlink_create_vf_port - Create a devlink port for this VF
* @vf: the VF to create a port for
*
@@ -407,7 +463,8 @@ int ice_devlink_create_vf_port(struct ice_vf *vf)
devlink_port_attrs_set(devlink_port, &attrs);
devlink = priv_to_devlink(pf);
- err = devlink_port_register(devlink, devlink_port, vsi->idx);
+ err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx,
+ &ice_devlink_vf_port_ops);
if (err) {
dev_err(dev, "Failed to create devlink port for VF %d, error %d\n",
vf->vf_id, err);
@@ -426,5 +483,5 @@ int ice_devlink_create_vf_port(struct ice_vf *vf)
void ice_devlink_destroy_vf_port(struct ice_vf *vf)
{
devl_rate_leaf_destroy(&vf->devlink_port);
- devlink_port_unregister(&vf->devlink_port);
+ devl_port_unregister(&vf->devlink_port);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c
index 52d15ef7f4b1..ad84d8ad49a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_adapter.c
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.c
@@ -11,6 +11,7 @@
#include "ice_adapter.h"
static DEFINE_XARRAY(ice_adapters);
+static DEFINE_MUTEX(ice_adapters_mutex);
/* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. */
#define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13)
@@ -47,8 +48,6 @@ static void ice_adapter_free(struct ice_adapter *adapter)
kfree(adapter);
}
-DEFINE_FREE(ice_adapter_free, struct ice_adapter*, if (_T) ice_adapter_free(_T))
-
/**
* ice_adapter_get - Get a shared ice_adapter structure.
* @pdev: Pointer to the pci_dev whose driver is getting the ice_adapter.
@@ -64,27 +63,26 @@ DEFINE_FREE(ice_adapter_free, struct ice_adapter*, if (_T) ice_adapter_free(_T))
*/
struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev)
{
- struct ice_adapter *ret, __free(ice_adapter_free) *adapter = NULL;
unsigned long index = ice_adapter_index(pdev);
-
- adapter = ice_adapter_new();
- if (!adapter)
- return ERR_PTR(-ENOMEM);
-
- xa_lock(&ice_adapters);
- ret = __xa_cmpxchg(&ice_adapters, index, NULL, adapter, GFP_KERNEL);
- if (xa_is_err(ret)) {
- ret = ERR_PTR(xa_err(ret));
- goto unlock;
- }
- if (ret) {
- refcount_inc(&ret->refcount);
- goto unlock;
+ struct ice_adapter *adapter;
+ int err;
+
+ scoped_guard(mutex, &ice_adapters_mutex) {
+ err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL);
+ if (err == -EBUSY) {
+ adapter = xa_load(&ice_adapters, index);
+ refcount_inc(&adapter->refcount);
+ return adapter;
+ }
+ if (err)
+ return ERR_PTR(err);
+
+ adapter = ice_adapter_new();
+ if (!adapter)
+ return ERR_PTR(-ENOMEM);
+ xa_store(&ice_adapters, index, adapter, GFP_KERNEL);
}
- ret = no_free_ptr(adapter);
-unlock:
- xa_unlock(&ice_adapters);
- return ret;
+ return adapter;
}
/**
@@ -94,23 +92,21 @@ unlock:
* Releases the reference to ice_adapter previously obtained with
* ice_adapter_get.
*
- * Context: Any.
+ * Context: Process, may sleep.
*/
void ice_adapter_put(const struct pci_dev *pdev)
{
unsigned long index = ice_adapter_index(pdev);
struct ice_adapter *adapter;
- xa_lock(&ice_adapters);
- adapter = xa_load(&ice_adapters, index);
- if (WARN_ON(!adapter))
- goto unlock;
+ scoped_guard(mutex, &ice_adapters_mutex) {
+ adapter = xa_load(&ice_adapters, index);
+ if (WARN_ON(!adapter))
+ return;
+ if (!refcount_dec_and_test(&adapter->refcount))
+ return;
- if (!refcount_dec_and_test(&adapter->refcount))
- goto unlock;
-
- WARN_ON(__xa_erase(&ice_adapters, index) != adapter);
+ WARN_ON(xa_erase(&ice_adapters, index) != adapter);
+ }
ice_adapter_free(adapter);
-unlock:
- xa_unlock(&ice_adapters);
}
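
[Editor's note] The ice_adapter.c hunk above replaces the open-coded xa_lock()/__xa_cmpxchg() sequence and the DEFINE_FREE()-based cleanup with a plain mutex taken through the kernel's scoped_guard() helper from <linux/cleanup.h>. Below is only a minimal sketch of that locking pattern, written against invented names (example_adapter, example_adapters) rather than the driver's real symbols:

  /* Sketch only: mirrors the locking pattern of the hunk above,
   * not the actual ice driver code.
   */
  #include <linux/cleanup.h>
  #include <linux/err.h>
  #include <linux/mutex.h>
  #include <linux/refcount.h>
  #include <linux/slab.h>
  #include <linux/xarray.h>

  static DEFINE_XARRAY(example_adapters);
  static DEFINE_MUTEX(example_adapters_mutex);

  struct example_adapter {
  	refcount_t refcount;
  };

  static struct example_adapter *example_adapter_get(unsigned long index)
  {
  	struct example_adapter *adapter;
  	int err;

  	/* scoped_guard() holds the mutex for the braced block and drops it
  	 * automatically on every return path, including early returns.
  	 */
  	scoped_guard(mutex, &example_adapters_mutex) {
  		/* Reserve the slot; -EBUSY means another caller already did. */
  		err = xa_insert(&example_adapters, index, NULL, GFP_KERNEL);
  		if (err == -EBUSY) {
  			adapter = xa_load(&example_adapters, index);
  			refcount_inc(&adapter->refcount);
  			return adapter;
  		}
  		if (err)
  			return ERR_PTR(err);

  		adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
  		if (!adapter)
  			return ERR_PTR(-ENOMEM);
  		refcount_set(&adapter->refcount, 1);
  		xa_store(&example_adapters, index, adapter, GFP_KERNEL);
  	}
  	return adapter;
  }

The point of scoped_guard() in the real patch is the same: every return inside the guarded block releases the mutex automatically, which is what allows the unlock/goto labels to be deleted.
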
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index e76c388b9905..66f02988d549 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -122,6 +122,7 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
#define ICE_AQC_CAPS_NVM_MGMT 0x0080
#define ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085
+#define ICE_AQC_CAPS_NAC_TOPOLOGY 0x0087
#define ICE_AQC_CAPS_FW_LAG_SUPPORT 0x0092
#define ICE_AQC_BIT_ROCEV2_LAG 0x01
#define ICE_AQC_BIT_SRIOV_LAG 0x02
@@ -231,6 +232,13 @@ struct ice_aqc_get_sw_cfg_resp_elem {
#define ICE_AQC_GET_SW_CONF_RESP_IS_VF BIT(15)
};
+/* Loopback port parameter mode values. */
+enum ice_local_fwd_mode {
+ ICE_LOCAL_FWD_MODE_ENABLED = 0,
+ ICE_LOCAL_FWD_MODE_DISABLED = 1,
+ ICE_LOCAL_FWD_MODE_PRIORITIZED = 2,
+};
+
/* Set Port parameters, (direct, 0x0203) */
struct ice_aqc_set_port_params {
__le16 cmd_flags;
@@ -239,7 +247,9 @@ struct ice_aqc_set_port_params {
__le16 swid;
#define ICE_AQC_PORT_SWID_VALID BIT(15)
#define ICE_AQC_PORT_SWID_M 0xFF
- u8 reserved[10];
+ u8 local_fwd_mode;
+#define ICE_AQC_SET_P_PARAMS_LOCAL_FWD_MODE_VALID BIT(2)
+ u8 reserved[9];
};
/* These resource type defines are used for all switch resource
@@ -1460,6 +1470,55 @@ struct ice_aqc_get_sensor_reading_resp {
} data;
};
+/* DNL call command (indirect 0x0682)
+ * Struct is used for both command and response
+ */
+struct ice_aqc_dnl_call_command {
+ u8 ctx; /* Used in command, reserved in response */
+ u8 reserved;
+ __le16 activity_id;
+#define ICE_AQC_ACT_ID_DNL 0x1129
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_dnl_equa_param {
+ __le16 data_in;
+#define ICE_AQC_RX_EQU_SHIFT 8
+#define ICE_AQC_RX_EQU_PRE2 (0x10 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_PRE1 (0x11 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_POST1 (0x12 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_BFLF (0x13 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_BFHF (0x14 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DRATE (0x15 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_TX_EQU_PRE1 0x0
+#define ICE_AQC_TX_EQU_PRE3 0x3
+#define ICE_AQC_TX_EQU_ATTEN 0x4
+#define ICE_AQC_TX_EQU_POST1 0x8
+#define ICE_AQC_TX_EQU_PRE2 0xC
+ __le16 op_code_serdes_sel;
+#define ICE_AQC_OP_CODE_SHIFT 4
+#define ICE_AQC_OP_CODE_RX_EQU (0x9 << ICE_AQC_OP_CODE_SHIFT)
+#define ICE_AQC_OP_CODE_TX_EQU (0x10 << ICE_AQC_OP_CODE_SHIFT)
+ __le32 reserved[3];
+};
+
+struct ice_aqc_dnl_equa_respon {
+ /* Equalization value can be negative */
+ int val;
+ __le32 reserved[3];
+};
+
+/* DNL call command/response buffer (indirect 0x0682) */
+struct ice_aqc_dnl_call {
+ union {
+ struct ice_aqc_dnl_equa_param txrx_equa_reqs;
+ __le32 stores[4];
+ struct ice_aqc_dnl_equa_respon txrx_equa_resp;
+ } sto;
+};
+
struct ice_aqc_link_topo_params {
u8 lport_num;
u8 lport_num_valid;
@@ -2563,6 +2622,7 @@ struct ice_aq_desc {
struct ice_aqc_get_link_status get_link_status;
struct ice_aqc_event_lan_overflow lan_overflow;
struct ice_aqc_get_link_topo get_link_topo;
+ struct ice_aqc_dnl_call_command dnl_call;
struct ice_aqc_i2c read_write_i2c;
struct ice_aqc_read_i2c_resp read_i2c_resp;
struct ice_aqc_get_set_tx_topo get_set_tx_topo;
@@ -2687,6 +2747,7 @@ enum ice_adminq_opc {
ice_aqc_opc_set_phy_rec_clk_out = 0x0630,
ice_aqc_opc_get_phy_rec_clk_out = 0x0631,
ice_aqc_opc_get_sensor_reading = 0x0632,
+ ice_aqc_opc_dnl_call = 0x0682,
ice_aqc_opc_get_link_topo = 0x06E0,
ice_aqc_opc_read_i2c = 0x06E2,
ice_aqc_opc_write_i2c = 0x06E3,
diff --git a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
index 57abd52386d0..10d9d74f3545 100644
--- a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
+++ b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
@@ -23,7 +23,18 @@ union nac_cgu_dword9 {
u32 clk_synce0_amp : 2;
u32 one_pps_out_amp : 2;
u32 misc24 : 12;
- } field;
+ };
+ u32 val;
+};
+
+#define NAC_CGU_DWORD16_E825C 0x40
+union nac_cgu_dword16_e825c {
+ struct {
+ u32 synce_remndr : 6;
+ u32 synce_phlmt_en : 1;
+ u32 misc13 : 17;
+ u32 tspll_ck_refclkfreq : 8;
+ };
u32 val;
};
@@ -39,7 +50,7 @@ union nac_cgu_dword19 {
u32 japll_ndivratio : 4;
u32 japll_iref_ndivratio : 3;
u32 misc27 : 1;
- } field;
+ };
u32 val;
};
@@ -63,7 +74,23 @@ union nac_cgu_dword22 {
u32 fdpllclk_sel_div2 : 1;
u32 time1588clk_sel_div2 : 1;
u32 misc3 : 1;
- } field;
+ };
+ u32 val;
+};
+
+#define NAC_CGU_DWORD23_E825C 0x5C
+union nac_cgu_dword23_e825c {
+ struct {
+ u32 cgupll_fbdiv_intgr : 10;
+ u32 ux56pll_fbdiv_intgr : 10;
+ u32 misc20 : 4;
+ u32 ts_pll_enable : 1;
+ u32 time_sync_tspll_align_sel : 1;
+ u32 ext_synce_sel : 1;
+ u32 ref1588_ck_div : 4;
+ u32 time_ref_sel : 1;
+
+ };
u32 val;
};
@@ -77,7 +104,7 @@ union nac_cgu_dword24 {
u32 ext_synce_sel : 1;
u32 ref1588_ck_div : 4;
u32 time_ref_sel : 1;
- } field;
+ };
u32 val;
};
@@ -92,7 +119,7 @@ union tspll_cntr_bist_settings {
u32 i_plllock_cnt_6_0 : 7;
u32 i_plllock_cnt_10_7 : 4;
u32 reserved200 : 4;
- } field;
+ };
u32 val;
};
@@ -109,7 +136,45 @@ union tspll_ro_bwm_lf {
u32 afcdone_cri : 1;
u32 feedfwrdgain_cal_cri_7_0 : 8;
u32 m2fbdivmod_cri_7_0 : 8;
- } field;
+ };
+ u32 val;
+};
+
+#define TSPLL_RO_LOCK_E825C 0x3f0
+union tspll_ro_lock_e825c {
+ struct {
+ u32 bw_freqov_high_cri_7_0 : 8;
+ u32 bw_freqov_high_cri_9_8 : 2;
+ u32 reserved455 : 1;
+ u32 plllock_gain_tran_cri : 1;
+ u32 plllock_true_lock_cri : 1;
+ u32 pllunlock_flag_cri : 1;
+ u32 afcerr_cri : 1;
+ u32 afcdone_cri : 1;
+ u32 feedfwrdgain_cal_cri_7_0 : 8;
+ u32 reserved462 : 8;
+ };
+ u32 val;
+};
+
+#define TSPLL_BW_TDC_E825C 0x31c
+union tspll_bw_tdc_e825c {
+ struct {
+ u32 i_tdc_offset_lock_1_0 : 2;
+ u32 i_bbthresh1_2_0 : 3;
+ u32 i_bbthresh2_2_0 : 3;
+ u32 i_tdcsel_1_0 : 2;
+ u32 i_tdcovccorr_en_h : 1;
+ u32 i_divretimeren : 1;
+ u32 i_bw_ampmeas_window : 1;
+ u32 i_bw_lowerbound_2_0 : 3;
+ u32 i_bw_upperbound_2_0 : 3;
+ u32 i_bw_mode_1_0 : 2;
+ u32 i_ft_mode_sel_2_0 : 3;
+ u32 i_bwphase_4_0 : 5;
+ u32 i_plllock_sel_1_0 : 2;
+ u32 i_afc_divratio : 1;
+ };
u32 val;
};
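
[Editor's note] The ice_cgu_regs.h hunks above also drop the named "field" member from the existing register unions, leaving an anonymous bitfield struct so a register can be accessed as reg.time_ref_sel rather than reg.field.time_ref_sel. A small, self-contained sketch of that anonymous-struct-in-union idiom, using an invented register layout rather than one of the real CGU words (bitfield ordering is implementation-defined, which is why such layouts are hardware/compiler specific):

  #include <stdint.h>
  #include <stdio.h>

  /* Invented example register: 1 enable bit, 8-bit divider, rest reserved. */
  union example_reg {
  	struct {
  		uint32_t enable   : 1;
  		uint32_t divider  : 8;
  		uint32_t reserved : 23;
  	};			/* anonymous struct: no ".field" level */
  	uint32_t val;		/* raw 32-bit view for register I/O */
  };

  int main(void)
  {
  	union example_reg reg = { .val = 0 };

  	/* Bits are written directly on the union (reg.enable) instead of
  	 * through a named member (reg.field.enable).
  	 */
  	reg.enable = 1;
  	reg.divider = 25;
  	printf("raw register value: 0x%08x\n", (unsigned int)reg.val);
  	return 0;
  }
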
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 24716a3b494c..009716a12a26 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -240,6 +240,30 @@ bool ice_is_e810t(struct ice_hw *hw)
}
/**
+ * ice_is_e822 - Check if a device is E822 family device
+ * @hw: pointer to the hardware structure
+ *
+ * Return: true if the device is E822 based, false if not.
+ */
+bool ice_is_e822(struct ice_hw *hw)
+{
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E822C_BACKPLANE:
+ case ICE_DEV_ID_E822C_QSFP:
+ case ICE_DEV_ID_E822C_SFP:
+ case ICE_DEV_ID_E822C_10G_BASE_T:
+ case ICE_DEV_ID_E822C_SGMII:
+ case ICE_DEV_ID_E822L_BACKPLANE:
+ case ICE_DEV_ID_E822L_SFP:
+ case ICE_DEV_ID_E822L_10G_BASE_T:
+ case ICE_DEV_ID_E822L_SGMII:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
* ice_is_e823
* @hw: pointer to the hardware structure
*
@@ -910,6 +934,9 @@ static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
INIT_LIST_HEAD(&sw->vsi_list_map_head);
sw->prof_res_bm_init = 0;
+ /* Initialize recipe count with default recipes read from NVM */
+ sw->recp_cnt = ICE_SW_LKUP_LAST;
+
status = ice_init_def_sw_recp(hw);
if (status) {
devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
@@ -937,14 +964,7 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
}
recps = sw->recp_list;
for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
- struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
-
recps[i].root_rid = i;
- list_for_each_entry_safe(rg_entry, tmprg_entry,
- &recps[i].rg_list, l_entry) {
- list_del(&rg_entry->l_entry);
- devm_kfree(ice_hw_to_dev(hw), rg_entry);
- }
if (recps[i].adv_rule) {
struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
@@ -969,7 +989,6 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
devm_kfree(ice_hw_to_dev(hw), lst_itr);
}
}
- devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
}
ice_rm_all_sw_replay_rule_info(hw);
devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
@@ -1062,6 +1081,7 @@ int ice_init_hw(struct ice_hw *hw)
goto err_unroll_cqinit;
}
+ hw->port_info->local_fwd_mode = ICE_LOCAL_FWD_MODE_ENABLED;
/* set the back pointer to HW */
hw->port_info->hw = hw;
@@ -1473,8 +1493,9 @@ ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
* ice_sbq_rw_reg - Fill Sideband Queue command
* @hw: pointer to the HW struct
* @in: message info to be filled in descriptor
+ * @flags: control queue descriptor flags
*/
-int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
+int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flags)
{
struct ice_sbq_cmd_desc desc = {0};
struct ice_sbq_msg_req msg = {0};
@@ -1498,7 +1519,7 @@ int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
*/
msg_len -= sizeof(msg.data);
- desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags = cpu_to_le16(flags);
desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
desc.param0.cmd_len = cpu_to_le16(msg_len);
status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
@@ -2290,8 +2311,13 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
- info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
- info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
+ if (!ice_is_e825c(hw)) {
+ info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
+ info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
+ } else {
+ info->clk_freq = ICE_TIME_REF_FREQ_156_250;
+ info->clk_src = ICE_CLK_SRC_TCXO;
+ }
if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
@@ -2565,6 +2591,34 @@ ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
+ * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities.
+ */
+static void ice_parse_nac_topo_dev_caps(struct ice_hw *hw,
+ struct ice_hw_dev_caps *dev_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ dev_p->nac_topo.mode = le32_to_cpu(cap->number);
+ dev_p->nac_topo.id = le32_to_cpu(cap->phys_id) & ICE_NAC_TOPO_ID_M;
+
+ dev_info(ice_hw_to_dev(hw),
+ "PF is configured in %s mode with IP instance ID %d\n",
+ (dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) ?
+ "primary" : "secondary", dev_p->nac_topo.id);
+
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n",
+ !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M));
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n",
+ !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M));
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n",
+ dev_p->nac_topo.id);
+}
+
+/**
* ice_parse_dev_caps - Parse device capabilities
* @hw: pointer to the HW struct
* @dev_p: pointer to device capabilities structure
@@ -2615,6 +2669,9 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
case ICE_AQC_CAPS_SENSOR_READING:
ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]);
break;
+ case ICE_AQC_CAPS_NAC_TOPOLOGY:
+ ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
default:
/* Don't list common capabilities as unknown */
if (!found)
@@ -3010,6 +3067,9 @@ ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
cmd->cmd_flags = cpu_to_le16(cmd_flags);
+ cmd->local_fwd_mode = pi->local_fwd_mode |
+ ICE_AQC_SET_P_PARAMS_LOCAL_FWD_MODE_VALID;
+
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
@@ -3043,11 +3103,13 @@ bool ice_is_100m_speed_supported(struct ice_hw *hw)
* Note: In the structure of [phy_type_low, phy_type_high], there should
* be one bit set, as this function will convert one PHY type to its
* speed.
- * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
- * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
+ *
+ * Return:
+ * * PHY speed for recognized PHY type
+ * * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
+ * * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
*/
-static u16
-ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
+u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
{
u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
@@ -3309,6 +3371,100 @@ int ice_update_link_info(struct ice_port_info *pi)
}
/**
+ * ice_aq_get_phy_equalization - function to read serdes equaliser
+ * value from firmware using admin queue command.
+ * @hw: pointer to the HW struct
+ * @data_in: represents the serdes equalization parameter requested
+ * @op_code: represents the serdes number and flag to represent tx or rx
+ * @serdes_num: represents the serdes number
+ * @output: pointer to the caller-supplied buffer to return serdes equaliser
+ *
+ * Return: non-zero status on error and 0 on success.
+ */
+int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
+ u8 serdes_num, int *output)
+{
+ struct ice_aqc_dnl_call_command *cmd;
+ struct ice_aqc_dnl_call buf = {};
+ struct ice_aq_desc desc;
+ int err;
+
+ buf.sto.txrx_equa_reqs.data_in = cpu_to_le16(data_in);
+ buf.sto.txrx_equa_reqs.op_code_serdes_sel =
+ cpu_to_le16(op_code | (serdes_num & 0xF));
+ cmd = &desc.params.dnl_call;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dnl_call);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF |
+ ICE_AQ_FLAG_RD |
+ ICE_AQ_FLAG_SI);
+ desc.datalen = cpu_to_le16(sizeof(struct ice_aqc_dnl_call));
+ cmd->activity_id = cpu_to_le16(ICE_AQC_ACT_ID_DNL);
+
+ err = ice_aq_send_cmd(hw, &desc, &buf, sizeof(struct ice_aqc_dnl_call),
+ NULL);
+ *output = err ? 0 : buf.sto.txrx_equa_resp.val;
+
+ return err;
+}
+
+#define FEC_REG_PORT(port) { \
+ FEC_CORR_LOW_REG_PORT##port, \
+ FEC_CORR_HIGH_REG_PORT##port, \
+ FEC_UNCORR_LOW_REG_PORT##port, \
+ FEC_UNCORR_HIGH_REG_PORT##port, \
+}
+
+static const u32 fec_reg[][ICE_FEC_MAX] = {
+ FEC_REG_PORT(0),
+ FEC_REG_PORT(1),
+ FEC_REG_PORT(2),
+ FEC_REG_PORT(3)
+};
+
+/**
+ * ice_aq_get_fec_stats - reads fec stats from phy
+ * @hw: pointer to the HW struct
+ * @pcs_quad: represents pcsquad of user input serdes
+ * @pcs_port: represents the pcs port number part of above pcs quad
+ * @fec_type: represents FEC stats type
+ * @output: pointer to the caller-supplied buffer to return requested fec stats
+ *
+ * Return: non-zero status on error and 0 on success.
+ */
+int ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
+ enum ice_fec_stats_types fec_type, u32 *output)
+{
+ u16 flag = (ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF | ICE_AQ_FLAG_SI);
+ struct ice_sbq_msg_input msg = {};
+ u32 receiver_id, reg_offset;
+ int err;
+
+ if (pcs_port > 3)
+ return -EINVAL;
+
+ reg_offset = fec_reg[pcs_port][fec_type];
+
+ if (pcs_quad == 0)
+ receiver_id = FEC_RECEIVER_ID_PCS0;
+ else if (pcs_quad == 1)
+ receiver_id = FEC_RECEIVER_ID_PCS1;
+ else
+ return -EINVAL;
+
+ msg.msg_addr_low = lower_16_bits(reg_offset);
+ msg.msg_addr_high = receiver_id;
+ msg.opcode = ice_sbq_msg_rd;
+ msg.dest_dev = rmn_0;
+
+ err = ice_sbq_rw_reg(hw, &msg, flag);
+ if (err)
+ return err;
+
+ *output = msg.data;
+ return 0;
+}
+
+/**
* ice_cache_phy_user_req
* @pi: port information structure
* @cache_data: PHY logging data
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index ffb22c7ce28b..66f29bac783a 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -17,13 +17,34 @@
#define ICE_SQ_SEND_DELAY_TIME_MS 10
#define ICE_SQ_SEND_MAX_EXECUTE 3
+#define FEC_REG_SHIFT 2
+#define FEC_RECV_ID_SHIFT 4
+#define FEC_CORR_LOW_REG_PORT0 (0x02 << FEC_REG_SHIFT)
+#define FEC_CORR_HIGH_REG_PORT0 (0x03 << FEC_REG_SHIFT)
+#define FEC_UNCORR_LOW_REG_PORT0 (0x04 << FEC_REG_SHIFT)
+#define FEC_UNCORR_HIGH_REG_PORT0 (0x05 << FEC_REG_SHIFT)
+#define FEC_CORR_LOW_REG_PORT1 (0x42 << FEC_REG_SHIFT)
+#define FEC_CORR_HIGH_REG_PORT1 (0x43 << FEC_REG_SHIFT)
+#define FEC_UNCORR_LOW_REG_PORT1 (0x44 << FEC_REG_SHIFT)
+#define FEC_UNCORR_HIGH_REG_PORT1 (0x45 << FEC_REG_SHIFT)
+#define FEC_CORR_LOW_REG_PORT2 (0x4A << FEC_REG_SHIFT)
+#define FEC_CORR_HIGH_REG_PORT2 (0x4B << FEC_REG_SHIFT)
+#define FEC_UNCORR_LOW_REG_PORT2 (0x4C << FEC_REG_SHIFT)
+#define FEC_UNCORR_HIGH_REG_PORT2 (0x4D << FEC_REG_SHIFT)
+#define FEC_CORR_LOW_REG_PORT3 (0x52 << FEC_REG_SHIFT)
+#define FEC_CORR_HIGH_REG_PORT3 (0x53 << FEC_REG_SHIFT)
+#define FEC_UNCORR_LOW_REG_PORT3 (0x54 << FEC_REG_SHIFT)
+#define FEC_UNCORR_HIGH_REG_PORT3 (0x55 << FEC_REG_SHIFT)
+#define FEC_RECEIVER_ID_PCS0 (0x33 << FEC_RECV_ID_SHIFT)
+#define FEC_RECEIVER_ID_PCS1 (0x34 << FEC_RECV_ID_SHIFT)
+
int ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
int ice_check_reset(struct ice_hw *hw);
int ice_reset(struct ice_hw *hw, enum ice_reset_req req);
int ice_create_all_ctrlq(struct ice_hw *hw);
int ice_init_all_ctrlq(struct ice_hw *hw);
-void ice_shutdown_all_ctrlq(struct ice_hw *hw);
+void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading);
void ice_destroy_all_ctrlq(struct ice_hw *hw);
int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
@@ -121,6 +142,11 @@ int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi);
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps);
+int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
+ u8 serdes_num, int *output);
+int
+ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
+ enum ice_fec_stats_types fec_type, u32 *output);
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps);
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options);
@@ -201,7 +227,7 @@ int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
-int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in);
+int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flag);
int
ice_aq_get_cgu_abilities(struct ice_hw *hw,
struct ice_aqc_get_cgu_abilities *abilities);
@@ -249,6 +275,7 @@ void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
bool ice_is_e810t(struct ice_hw *hw);
+bool ice_is_e822(struct ice_hw *hw);
bool ice_is_e823(struct ice_hw *hw);
bool ice_is_e825c(struct ice_hw *hw);
int
@@ -261,6 +288,7 @@ int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
bool *value, struct ice_sq_cd *cd);
bool ice_is_100m_speed_supported(struct ice_hw *hw);
+u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high);
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index ffe660f34992..ffaa6511c455 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -510,16 +510,19 @@ shutdown_sq_out:
*/
static bool ice_aq_ver_check(struct ice_hw *hw)
{
- if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
+ u8 exp_fw_api_ver_major = EXP_FW_API_VER_MAJOR_BY_MAC(hw);
+ u8 exp_fw_api_ver_minor = EXP_FW_API_VER_MINOR_BY_MAC(hw);
+
+ if (hw->api_maj_ver > exp_fw_api_ver_major) {
/* Major API version is newer than expected, don't load */
dev_warn(ice_hw_to_dev(hw),
"The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
return false;
- } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
- if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
+ } else if (hw->api_maj_ver == exp_fw_api_ver_major) {
+ if (hw->api_min_ver > (exp_fw_api_ver_minor + 2))
dev_info(ice_hw_to_dev(hw),
"The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
- else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
+ else if ((hw->api_min_ver + 2) < exp_fw_api_ver_minor)
dev_info(ice_hw_to_dev(hw),
"The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
} else {
@@ -684,10 +687,12 @@ struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
* ice_shutdown_ctrlq - shutdown routine for any control queue
* @hw: pointer to the hardware structure
* @q_type: specific Control queue type
+ * @unloading: is the driver unloading itself
*
* NOTE: this function does not destroy the control queue locks.
*/
-static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
+static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type,
+ bool unloading)
{
struct ice_ctl_q_info *cq;
@@ -695,7 +700,7 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
case ICE_CTL_Q_ADMIN:
cq = &hw->adminq;
if (ice_check_sq_alive(hw, cq))
- ice_aq_q_shutdown(hw, true);
+ ice_aq_q_shutdown(hw, unloading);
break;
case ICE_CTL_Q_SB:
cq = &hw->sbq;
@@ -714,20 +719,21 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
/**
* ice_shutdown_all_ctrlq - shutdown routine for all control queues
* @hw: pointer to the hardware structure
+ * @unloading: is the driver unloading itself
*
* NOTE: this function does not destroy the control queue locks. The driver
* may call this at runtime to shutdown and later restart control queues, such
* as in response to a reset event.
*/
-void ice_shutdown_all_ctrlq(struct ice_hw *hw)
+void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading)
{
/* Shutdown FW admin queue */
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading);
/* Shutdown PHY Sideband */
if (ice_is_sbq_supported(hw))
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB);
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB, unloading);
/* Shutdown PF-VF Mailbox */
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading);
}
/**
@@ -759,7 +765,7 @@ int ice_init_all_ctrlq(struct ice_hw *hw)
break;
ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true);
msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
@@ -840,7 +846,7 @@ static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
/* shut down all the control queues first */
- ice_shutdown_all_ctrlq(hw);
+ ice_shutdown_all_ctrlq(hw, true);
ice_destroy_ctrlq_locks(&hw->adminq);
if (ice_is_sbq_supported(hw))
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index 8f2fd1613a95..1d54b1cdb1c5 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -21,9 +21,18 @@
/* Defines that help manage the driver vs FW API checks.
* Take a look at ice_aq_ver_check in ice_controlq.c for actual usage.
*/
-#define EXP_FW_API_VER_BRANCH 0x00
-#define EXP_FW_API_VER_MAJOR 0x01
-#define EXP_FW_API_VER_MINOR 0x05
+#define EXP_FW_API_VER_MAJOR_E810 0x01
+#define EXP_FW_API_VER_MINOR_E810 0x05
+
+#define EXP_FW_API_VER_MAJOR_E830 0x01
+#define EXP_FW_API_VER_MINOR_E830 0x07
+
+#define EXP_FW_API_VER_MAJOR_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
+ EXP_FW_API_VER_MAJOR_E830 : \
+ EXP_FW_API_VER_MAJOR_E810)
+#define EXP_FW_API_VER_MINOR_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
+ EXP_FW_API_VER_MINOR_E830 : \
+ EXP_FW_API_VER_MINOR_E810)
/* Different control queue types: These are mainly for SW consumption. */
enum ice_ctl_q {
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index b102db8b829a..3cfa071e3718 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -117,17 +117,10 @@ static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr)
struct ice_vsi *vsi = repr->src_vsi;
struct metadata_dst *dst;
- ice_remove_vsi_fltr(&pf->hw, vsi->idx);
repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
GFP_KERNEL);
if (!repr->dst)
- goto err_add_mac_fltr;
-
- if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof))
- goto err_dst_free;
-
- if (ice_vsi_add_vlan_zero(vsi))
- goto err_update_security;
+ return -ENOMEM;
netif_keep_dst(uplink_vsi->netdev);
@@ -136,16 +129,48 @@ static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr)
dst->u.port_info.lower_dev = uplink_vsi->netdev;
return 0;
+}
-err_update_security:
+/**
+ * ice_eswitch_cfg_vsi - configure VSI to work in slow-path
+ * @vsi: VSI structure of representee
+ * @mac: representee MAC
+ *
+ * Return: 0 on success, non-zero on error.
+ */
+int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac)
+{
+ int err;
+
+ ice_remove_vsi_fltr(&vsi->back->hw, vsi->idx);
+
+ err = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
+ if (err)
+ goto err_update_security;
+
+ err = ice_vsi_add_vlan_zero(vsi);
+ if (err)
+ goto err_vlan_zero;
+
+ return 0;
+
+err_vlan_zero:
ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
-err_dst_free:
- metadata_dst_free(repr->dst);
- repr->dst = NULL;
-err_add_mac_fltr:
- ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, ICE_FWD_TO_VSI);
+err_update_security:
+ ice_fltr_add_mac_and_broadcast(vsi, mac, ICE_FWD_TO_VSI);
- return -ENODEV;
+ return err;
+}
+
+/**
+ * ice_eswitch_decfg_vsi - unroll changes done to VSI for switchdev
+ * @vsi: VSI structure of representee
+ * @mac: representee MAC
+ */
+void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac)
+{
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+ ice_fltr_add_mac_and_broadcast(vsi, mac, ICE_FWD_TO_VSI);
}
/**
@@ -153,16 +178,16 @@ err_add_mac_fltr:
* @repr_id: representor ID
* @vsi: VSI for which port representor is configured
*/
-void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi)
+void ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_repr *repr;
- int ret;
+ int err;
if (!ice_is_switchdev_running(pf))
return;
- repr = xa_load(&pf->eswitch.reprs, repr_id);
+ repr = xa_load(&pf->eswitch.reprs, *repr_id);
if (!repr)
return;
@@ -172,12 +197,19 @@ void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi)
if (repr->br_port)
repr->br_port->vsi = vsi;
- ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
- if (ret) {
- ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac,
- ICE_FWD_TO_VSI);
+ err = ice_eswitch_cfg_vsi(vsi, repr->parent_mac);
+ if (err)
dev_err(ice_pf_to_dev(pf), "Failed to update VSI of port representor %d",
repr->id);
+
+ /* The VSI number changed, reload the port representor with the new id */
+ if (repr->id != vsi->vsi_num) {
+ xa_erase(&pf->eswitch.reprs, repr->id);
+ repr->id = vsi->vsi_num;
+ if (xa_insert(&pf->eswitch.reprs, repr->id, repr, GFP_KERNEL))
+ dev_err(ice_pf_to_dev(pf), "Failed to reload port representor %d",
+ repr->id);
+ *repr_id = repr->id;
}
}
@@ -423,6 +455,7 @@ static void ice_eswitch_start_reprs(struct ice_pf *pf)
int
ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
{
+ struct devlink *devlink = priv_to_devlink(pf);
struct ice_repr *repr;
int err;
@@ -437,7 +470,9 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
ice_eswitch_stop_reprs(pf);
+ devl_lock(devlink);
repr = ice_repr_add_vf(vf);
+ devl_unlock(devlink);
if (IS_ERR(repr)) {
err = PTR_ERR(repr);
goto err_create_repr;
@@ -460,7 +495,9 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
err_xa_alloc:
ice_eswitch_release_repr(pf, repr);
err_setup_repr:
+ devl_lock(devlink);
ice_repr_rem_vf(repr);
+ devl_unlock(devlink);
err_create_repr:
if (xa_empty(&pf->eswitch.reprs))
ice_eswitch_disable_switchdev(pf);
@@ -484,6 +521,7 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
ice_eswitch_disable_switchdev(pf);
ice_eswitch_release_repr(pf, repr);
+ devl_lock(devlink);
ice_repr_rem_vf(repr);
if (xa_empty(&pf->eswitch.reprs)) {
@@ -491,28 +529,11 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
* no point in keeping the nodes
*/
ice_devlink_rate_clear_tx_topology(ice_get_main_vsi(pf));
- devl_lock(devlink);
devl_rate_nodes_destroy(devlink);
- devl_unlock(devlink);
} else {
ice_eswitch_start_reprs(pf);
}
-}
-
-/**
- * ice_eswitch_rebuild - rebuild eswitch
- * @pf: pointer to PF structure
- */
-void ice_eswitch_rebuild(struct ice_pf *pf)
-{
- struct ice_repr *repr;
- unsigned long id;
-
- if (!ice_is_switchdev_running(pf))
- return;
-
- xa_for_each(&pf->eswitch.reprs, id, repr)
- ice_eswitch_detach(pf, repr->vf);
+ devl_unlock(devlink);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index e2e5c0c75e7d..78fd39a6935d 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -10,7 +10,6 @@
void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf);
int
ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf);
-void ice_eswitch_rebuild(struct ice_pf *pf);
int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int
@@ -18,7 +17,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack);
bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf);
-void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi);
+void ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi);
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf);
@@ -28,6 +27,9 @@ netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc);
+
+int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac);
+void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac);
#else /* CONFIG_ICE_SWITCHDEV */
static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { }
@@ -44,18 +46,13 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
struct ice_tx_offload_params *off) { }
static inline void
-ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) { }
+ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi) { }
static inline int ice_eswitch_configure(struct ice_pf *pf)
{
return 0;
}
-static inline int ice_eswitch_rebuild(struct ice_pf *pf)
-{
- return -EOPNOTSUPP;
-}
-
static inline int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
return DEVLINK_ESWITCH_MODE_LEGACY;
@@ -85,5 +82,12 @@ ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
{
return rx_ring->netdev;
}
+
+static inline int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac) { }
#endif /* CONFIG_ICE_SWITCHDEV */
#endif /* _ICE_ESWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
index ac5beecd028b..f5aceb32bf4d 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
@@ -896,7 +896,8 @@ ice_eswitch_br_port_deinit(struct ice_esw_br *bridge,
if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) {
vsi->back->br_port = NULL;
} else {
- struct ice_repr *repr = ice_repr_get_by_vsi(vsi);
+ struct ice_repr *repr =
+ ice_repr_get(vsi->back, br_port->repr_id);
if (repr)
repr->br_port = NULL;
@@ -937,6 +938,7 @@ ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge,
br_port->vsi = repr->src_vsi;
br_port->vsi_idx = br_port->vsi->idx;
br_port->type = ICE_ESWITCH_BR_VF_REPR_PORT;
+ br_port->repr_id = repr->id;
repr->br_port = br_port;
err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.h b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h
index 85a8fadb2928..c15c7344d7f8 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h
@@ -46,6 +46,7 @@ struct ice_esw_br_port {
enum ice_esw_br_port_type type;
u16 vsi_idx;
u16 pvid;
+ u32 repr_id;
struct xarray vlans;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 62c8205fceba..8c990c976132 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -463,7 +463,354 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
static int ice_get_regs_len(struct net_device __always_unused *netdev)
{
- return sizeof(ice_regs_dump_list);
+ return (sizeof(ice_regs_dump_list) +
+ sizeof(struct ice_regdump_to_ethtool));
+}
+
+/**
+ * ice_ethtool_get_maxspeed - Get the max speed for given lport
+ * @hw: pointer to the HW struct
+ * @lport: logical port for which max speed is requested
+ * @max_speed: return max speed for input lport
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int ice_ethtool_get_maxspeed(struct ice_hw *hw, u8 lport, u8 *max_speed)
+{
+ struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX] = {};
+ bool active_valid = false, pending_valid = true;
+ u8 option_count = ICE_AQC_PORT_OPT_MAX;
+ u8 active_idx = 0, pending_idx = 0;
+ int status;
+
+ status = ice_aq_get_port_options(hw, options, &option_count, lport,
+ true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (status)
+ return -EIO;
+ if (!active_valid)
+ return -EINVAL;
+
+ *max_speed = options[active_idx].max_lane_speed & ICE_AQC_PORT_OPT_MAX_LANE_M;
+ return 0;
+}
+
+/**
+ * ice_is_serdes_muxed - returns whether serdes is muxed in hardware
+ * @hw: pointer to the HW struct
+ *
+ * Return: true when serdes is muxed, false when serdes is not muxed.
+ */
+static bool ice_is_serdes_muxed(struct ice_hw *hw)
+{
+ u32 reg_value = rd32(hw, GLGEN_SWITCH_MODE_CONFIG);
+
+ return FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, reg_value);
+}
+
+static int ice_map_port_topology_for_sfp(struct ice_port_topology *port_topology,
+ u8 lport, bool is_muxed)
+{
+ switch (lport) {
+ case 0:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 0;
+ port_topology->primary_serdes_lane = 0;
+ break;
+ case 1:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 0;
+ if (is_muxed)
+ port_topology->primary_serdes_lane = 2;
+ else
+ port_topology->primary_serdes_lane = 4;
+ break;
+ case 2:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 1;
+ port_topology->primary_serdes_lane = 1;
+ break;
+ case 3:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 1;
+ if (is_muxed)
+ port_topology->primary_serdes_lane = 3;
+ else
+ port_topology->primary_serdes_lane = 5;
+ break;
+ case 4:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 2;
+ port_topology->primary_serdes_lane = 2;
+ break;
+ case 5:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 2;
+ port_topology->primary_serdes_lane = 6;
+ break;
+ case 6:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 3;
+ port_topology->primary_serdes_lane = 3;
+ break;
+ case 7:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 3;
+ port_topology->primary_serdes_lane = 7;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ice_map_port_topology_for_qsfp(struct ice_port_topology *port_topology,
+ u8 lport, bool is_muxed)
+{
+ switch (lport) {
+ case 0:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 0;
+ port_topology->primary_serdes_lane = 0;
+ break;
+ case 1:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 0;
+ if (is_muxed)
+ port_topology->primary_serdes_lane = 2;
+ else
+ port_topology->primary_serdes_lane = 4;
+ break;
+ case 2:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 1;
+ port_topology->primary_serdes_lane = 1;
+ break;
+ case 3:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 1;
+ if (is_muxed)
+ port_topology->primary_serdes_lane = 3;
+ else
+ port_topology->primary_serdes_lane = 5;
+ break;
+ case 4:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 2;
+ port_topology->primary_serdes_lane = 2;
+ break;
+ case 5:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 2;
+ port_topology->primary_serdes_lane = 6;
+ break;
+ case 6:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 3;
+ port_topology->primary_serdes_lane = 3;
+ break;
+ case 7:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 3;
+ port_topology->primary_serdes_lane = 7;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_get_port_topology - return the physical topology (PCS quad, PCS port,
+ * primary serdes lane) for a given lport
+ * @hw: pointer to the HW struct
+ * @lport: logical port for which physical info requested
+ * @port_topology: buffer to hold port topology
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int ice_get_port_topology(struct ice_hw *hw, u8 lport,
+ struct ice_port_topology *port_topology)
+{
+ struct ice_aqc_get_link_topo cmd = {};
+ u16 node_handle = 0;
+ u8 cage_type = 0;
+ bool is_muxed;
+ int err;
+ u8 ctx;
+
+ ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE << ICE_AQC_LINK_TOPO_NODE_TYPE_S;
+ ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S;
+ cmd.addr.topo_params.node_type_ctx = ctx;
+
+ err = ice_aq_get_netlist_node(hw, &cmd, &cage_type, &node_handle);
+ if (err)
+ return -EINVAL;
+
+ is_muxed = ice_is_serdes_muxed(hw);
+
+ if (cage_type == 0x11 || /* SFP+ */
+ cage_type == 0x12) { /* SFP28 */
+ port_topology->serdes_lane_count = 1;
+ err = ice_map_port_topology_for_sfp(port_topology, lport, is_muxed);
+ if (err)
+ return err;
+ } else if (cage_type == 0x13 || /* QSFP */
+ cage_type == 0x14) { /* QSFP28 */
+ u8 max_speed = 0;
+
+ err = ice_ethtool_get_maxspeed(hw, lport, &max_speed);
+ if (err)
+ return err;
+
+ if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_100G)
+ port_topology->serdes_lane_count = 4;
+ else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G)
+ port_topology->serdes_lane_count = 2;
+ else
+ port_topology->serdes_lane_count = 1;
+
+ err = ice_map_port_topology_for_qsfp(port_topology, lport, is_muxed);
+ if (err)
+ return err;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_get_tx_rx_equa - read serdes Tx and Rx equalization parameters
+ * @hw: pointer to the HW struct
+ * @serdes_num: represents the serdes number
+ * @ptr: structure to store all serdes parameters read for the given serdes
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int ice_get_tx_rx_equa(struct ice_hw *hw, u8 serdes_num,
+ struct ice_serdes_equalization_to_ethtool *ptr)
+{
+ int err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE1,
+ ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+ &ptr->tx_equalization_pre1);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE3,
+ ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+ &ptr->tx_equalization_pre3);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_ATTEN,
+ ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+ &ptr->tx_equalization_atten);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_POST1,
+ ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+ &ptr->tx_equalization_post1);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE2,
+ ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+ &ptr->tx_equalization_pre2);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_PRE2,
+ ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+ &ptr->rx_equalization_pre2);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_PRE1,
+ ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+ &ptr->rx_equalization_pre1);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_POST1,
+ ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+ &ptr->rx_equalization_post1);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_BFLF,
+ ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+ &ptr->rx_equalization_bflf);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_BFHF,
+ ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+ &ptr->rx_equalization_bfhf);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_DRATE,
+ ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+ &ptr->rx_equalization_drate);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_get_extended_regs - dump serdes equalization parameters for each
+ * serdes lane of the port
+ * @netdev: pointer to net device structure
+ * @p: output buffer to fill requested register dump
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int ice_get_extended_regs(struct net_device *netdev, void *p)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_regdump_to_ethtool *ice_prv_regs_buf;
+ struct ice_port_topology port_topology = {};
+ struct ice_port_info *pi;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ unsigned int i;
+ int err;
+
+ pf = np->vsi->back;
+ hw = &pf->hw;
+ pi = np->vsi->port_info;
+
+ /* Serdes parameters are not supported if not the PF VSI */
+ if (np->vsi->type != ICE_VSI_PF || !pi)
+ return -EINVAL;
+
+ err = ice_get_port_topology(hw, pi->lport, &port_topology);
+ if (err)
+ return -EINVAL;
+ if (port_topology.serdes_lane_count > 4)
+ return -EINVAL;
+
+ ice_prv_regs_buf = p;
+
+ /* Get serdes equalization parameter for available serdes */
+ for (i = 0; i < port_topology.serdes_lane_count; i++) {
+ u8 serdes_num = 0;
+
+ serdes_num = port_topology.primary_serdes_lane + i;
+ err = ice_get_tx_rx_equa(hw, serdes_num,
+ &ice_prv_regs_buf->equalization[i]);
+ if (err)
+ return -EINVAL;
+ }
+
+ return 0;
}
static void
@@ -475,10 +822,12 @@ ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
u32 *regs_buf = (u32 *)p;
unsigned int i;
- regs->version = 1;
+ regs->version = 2;
for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i)
regs_buf[i] = rd32(hw, ice_regs_dump_list[i]);
+
+ ice_get_extended_regs(netdev, (void *)&regs_buf[i]);
}
static u32 ice_get_msglevel(struct net_device *netdev)
@@ -3434,7 +3783,7 @@ ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
}
static int
-ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
{
struct ice_pf *pf = ice_netdev_to_pf(dev);
@@ -4282,6 +4631,94 @@ ice_get_module_eeprom(struct net_device *netdev,
return 0;
}
+/**
+ * ice_get_port_fec_stats - returns FEC correctable, uncorrectable stats per
+ * pcsquad, pcsport
+ * @hw: pointer to the HW struct
+ * @pcs_quad: pcsquad for input port
+ * @pcs_port: pcsport for input port
+ * @fec_stats: buffer to hold FEC statistics for given port
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int ice_get_port_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
+ struct ethtool_fec_stats *fec_stats)
+{
+ u32 fec_uncorr_low_val = 0, fec_uncorr_high_val = 0;
+ u32 fec_corr_low_val = 0, fec_corr_high_val = 0;
+ int err;
+
+ if (pcs_quad > 1 || pcs_port > 3)
+ return -EINVAL;
+
+ err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_LOW,
+ &fec_corr_low_val);
+ if (err)
+ return err;
+
+ err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_HIGH,
+ &fec_corr_high_val);
+ if (err)
+ return err;
+
+ err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port,
+ ICE_FEC_UNCORR_LOW,
+ &fec_uncorr_low_val);
+ if (err)
+ return err;
+
+ err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port,
+ ICE_FEC_UNCORR_HIGH,
+ &fec_uncorr_high_val);
+ if (err)
+ return err;
+
+ fec_stats->corrected_blocks.total = (fec_corr_high_val << 16) +
+ fec_corr_low_val;
+ fec_stats->uncorrectable_blocks.total = (fec_uncorr_high_val << 16) +
+ fec_uncorr_low_val;
+ return 0;
+}
+
+/**
+ * ice_get_fec_stats - returns FEC correctable, uncorrectable stats per netdev
+ * @netdev: network interface device structure
+ * @fec_stats: buffer to hold FEC statistics for given port
+ */
+static void ice_get_fec_stats(struct net_device *netdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_port_topology port_topology;
+ struct ice_port_info *pi;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int err;
+
+ pf = np->vsi->back;
+ hw = &pf->hw;
+ pi = np->vsi->port_info;
+
+ /* Serdes parameters are not supported if not the PF VSI */
+ if (np->vsi->type != ICE_VSI_PF || !pi)
+ return;
+
+ err = ice_get_port_topology(hw, pi->lport, &port_topology);
+ if (err) {
+ netdev_info(netdev, "Extended register dump failed Lport %d\n",
+ pi->lport);
+ return;
+ }
+
+ /* Get FEC correctable, uncorrectable counter */
+ err = ice_get_port_fec_stats(hw, port_topology.pcs_quad_select,
+ port_topology.pcs_port, fec_stats);
+ if (err)
+ netdev_info(netdev, "FEC stats get failed Lport %d Err %d\n",
+ pi->lport, err);
+}
+
static const struct ethtool_ops ice_ethtool_ops = {
.cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
@@ -4290,6 +4727,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.cap_rss_sym_xor_supported = true,
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
+ .get_fec_stats = ice_get_fec_stats,
.get_drvinfo = ice_get_drvinfo,
.get_regs_len = ice_get_regs_len,
.get_regs = ice_get_regs,
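The FEC totals returned through .get_fec_stats are assembled from two 16-bit AQ reads per counter. A minimal userspace sketch of that packing, inferred from the `<< 16` arithmetic in ice_get_port_fec_stats(); the sample register values below are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the (high << 16) + low packing in ice_get_port_fec_stats():
 * each FEC counter is returned by the AQ as separate low and high 16-bit
 * reads and combined into one 32-bit total.
 */
static uint32_t ice_fec_total(uint32_t high, uint32_t low)
{
	return (high << 16) + low;
}

int main(void)
{
	/* Hypothetical AQ reads: 0x0001 in the high half, 0x2345 in the low */
	uint32_t corr_high = 0x0001, corr_low = 0x2345;

	printf("corrected blocks: 0x%08x\n",
	       ice_fec_total(corr_high, corr_low));
	return 0;
}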
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.h b/drivers/net/ethernet/intel/ice/ice_ethtool.h
index b88e3da06f13..9acccae38625 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.h
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.h
@@ -9,6 +9,35 @@ struct ice_phy_type_to_ethtool {
u8 link_mode;
};
+struct ice_serdes_equalization_to_ethtool {
+ int rx_equalization_pre2;
+ int rx_equalization_pre1;
+ int rx_equalization_post1;
+ int rx_equalization_bflf;
+ int rx_equalization_bfhf;
+ int rx_equalization_drate;
+ int tx_equalization_pre1;
+ int tx_equalization_pre3;
+ int tx_equalization_atten;
+ int tx_equalization_post1;
+ int tx_equalization_pre2;
+};
+
+struct ice_regdump_to_ethtool {
+ /* A multilane port can have max 4 serdes */
+ struct ice_serdes_equalization_to_ethtool equalization[4];
+};
+
+/* Port topology for a given lport, i.e.
+ * serdes mapping, pcsquad, macport, cage, etc.
+ */
+struct ice_port_topology {
+ u16 pcs_port;
+ u16 primary_serdes_lane;
+ u16 serdes_lane_count;
+ u16 pcs_quad_select;
+};
+
/* Macro to make PHY type to Ethtool link mode table entry.
* The index is the PHY type.
*/
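These structures determine how much extra space ice_get_regs_len() now reports on top of the fixed register list. A small standalone sketch of that length arithmetic, using only definitions copied verbatim from this hunk; the printed size is simply what the compiler computes for 4 lanes of 11 int parameters:

#include <stdio.h>

struct ice_serdes_equalization_to_ethtool {
	int rx_equalization_pre2;
	int rx_equalization_pre1;
	int rx_equalization_post1;
	int rx_equalization_bflf;
	int rx_equalization_bfhf;
	int rx_equalization_drate;
	int tx_equalization_pre1;
	int tx_equalization_pre3;
	int tx_equalization_atten;
	int tx_equalization_post1;
	int tx_equalization_pre2;
};

struct ice_regdump_to_ethtool {
	/* A multilane port can have max 4 serdes */
	struct ice_serdes_equalization_to_ethtool equalization[4];
};

int main(void)
{
	/* ice_get_regs_len() now adds this block to the fixed register list,
	 * so ethtool allocates room for up to 4 lanes x 11 parameters.
	 */
	printf("extended dump adds %zu bytes\n",
	       sizeof(struct ice_regdump_to_ethtool));
	return 0;
}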
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index cfac1d432c15..91cbae1eec89 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -157,6 +157,8 @@
#define GLGEN_RTRIG_CORER_M BIT(0)
#define GLGEN_RTRIG_GLOBR_M BIT(1)
#define GLGEN_STAT 0x000B612C
+#define GLGEN_SWITCH_MODE_CONFIG 0x000B81E0
+#define GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M BIT(2)
#define GLGEN_VFLRSTAT(_i) (0x00093A04 + ((_i) * 4))
#define PFGEN_CTRL 0x00091000
#define PFGEN_CTRL_PFSWR_M BIT(0)
@@ -177,6 +179,8 @@
#define GLINT_CTL_ITR_GRAN_50_M ICE_M(0xF, 24)
#define GLINT_CTL_ITR_GRAN_25_S 28
#define GLINT_CTL_ITR_GRAN_25_M ICE_M(0xF, 28)
+#define GLGEN_MAC_LINK_TOPO 0x000B81DC
+#define GLGEN_MAC_LINK_TOPO_LINK_TOPO_M GENMASK(1, 0)
#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4))
#define GLINT_DYN_CTL_INTENA_M BIT(0)
#define GLINT_DYN_CTL_CLEARPBA_M BIT(1)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 7629b0190578..f559e60992fa 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2580,8 +2580,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
if (!IS_ENABLED(CONFIG_RFS_ACCEL))
irq_set_affinity_notifier(irq_num, NULL);
- /* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(irq_num, NULL);
+ /* clear the affinity_hint in the IRQ descriptor */
+ irq_update_affinity_hint(irq_num, NULL);
synchronize_irq(irq_num);
devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 55a42aad92a5..ec636be4d17d 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -35,7 +35,6 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
#define ICE_DDP_PKG_PATH "intel/ice/ddp/"
#define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg"
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_IMPORT_NS(LIBIE);
MODULE_LICENSE("GPL v2");
@@ -623,7 +622,7 @@ skip:
if (hw->port_info)
ice_sched_clear_port(hw->port_info);
- ice_shutdown_all_ctrlq(hw);
+ ice_shutdown_all_ctrlq(hw, false);
set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}
@@ -2610,7 +2609,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
}
/* assign the mask for this irq */
- irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
+ irq_update_affinity_hint(irq_num, &q_vector->affinity_mask);
}
err = ice_set_cpu_rx_rmap(vsi);
@@ -2628,7 +2627,7 @@ free_q_irqs:
irq_num = vsi->q_vectors[vector]->irq.virq;
if (!IS_ENABLED(CONFIG_RFS_ACCEL))
irq_set_affinity_notifier(irq_num, NULL);
- irq_set_affinity_hint(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
}
return err;
@@ -4158,13 +4157,17 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
/* set for the next time the netdev is started */
if (!netif_running(vsi->netdev)) {
- ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+ err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+ if (err)
+ goto rebuild_err;
dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
goto done;
}
ice_vsi_close(vsi);
- ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+ err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+ if (err)
+ goto rebuild_err;
ice_for_each_traffic_class(i) {
if (vsi->tc_cfg.ena_tc & BIT(i))
@@ -4175,6 +4178,11 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
}
ice_pf_dcb_recfg(pf, locked);
ice_vsi_open(vsi);
+ goto done;
+
+rebuild_err:
+ dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n",
+ err);
done:
clear_bit(ICE_CFG_BUSY, pf->state);
return err;
@@ -5490,7 +5498,7 @@ static void ice_prepare_for_shutdown(struct ice_pf *pf)
if (pf->vsi[v])
pf->vsi[v]->vsi_num = 0;
- ice_shutdown_all_ctrlq(hw);
+ ice_shutdown_all_ctrlq(hw, true);
}
/**
@@ -7694,8 +7702,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_vsi_rebuild;
}
- ice_eswitch_rebuild(pf);
-
if (reset_type == ICE_RESET_PFR) {
err = ice_rebuild_channels(pf);
if (err) {
@@ -7750,7 +7756,7 @@ err_vsi_rebuild:
err_sched_init_port:
ice_sched_cleanup_all(hw);
err_init_ctrlq:
- ice_shutdown_all_ctrlq(hw);
+ ice_shutdown_all_ctrlq(hw, false);
set_bit(ICE_RESET_FAILED, pf->state);
clear_recovery:
/* set this bit in PF state to control service task scheduling */
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 755a9c55267c..7c09ea0f03ba 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -7,18 +7,24 @@
/* Each recipe can match up to 5 different fields. Fields to match can be meta-
* data, values extracted from packet headers, or results from other recipes.
- * One of the 5 fields is reserved for matching the switch ID. So, up to 4
- * recipes can provide intermediate results to another one through chaining,
- * e.g. recipes 0, 1, 2, and 3 can provide intermediate results to recipe 4.
+ * Therefore, up to 5 recipes can provide intermediate results to another one
+ * through chaining, e.g. recipes 0, 1, 2, 3 and 4 can provide intermediate
+ * results to recipe 5. Note that one of the fields in one of the recipes must
+ * always be reserved for matching the switch ID.
*/
-#define ICE_NUM_WORDS_RECIPE 4
+#define ICE_NUM_WORDS_RECIPE 5
-/* Max recipes that can be chained */
+/* Max recipes that can be chained, not including the last one, which combines
+ * intermediate results.
+ */
#define ICE_MAX_CHAIN_RECIPE 5
-/* 1 word reserved for switch ID from allowed 5 words.
- * So a recipe can have max 4 words. And you can chain 5 such recipes
- * together. So maximum words that can be programmed for look up is 5 * 4.
+/* Total max recipes in chain recipe (including intermediate results) */
+#define ICE_MAX_CHAIN_RECIPE_RES (ICE_MAX_CHAIN_RECIPE + 1)
+
+/* A recipe can have max 5 words, and 5 recipes can be chained together (using
+ * the 6th one, which would contain only result indexes). So maximum words that
+ * can be programmed for lookup is 5 * 5 (not including intermediate results).
*/
#define ICE_MAX_CHAIN_WORDS (ICE_NUM_WORDS_RECIPE * ICE_MAX_CHAIN_RECIPE)
@@ -449,32 +455,11 @@ struct ice_prot_ext_tbl_entry {
/* Extractions to be looked up for a given recipe */
struct ice_prot_lkup_ext {
- u16 prot_type;
u8 n_val_words;
/* create a buffer to hold max words per recipe */
- u16 field_off[ICE_MAX_CHAIN_WORDS];
u16 field_mask[ICE_MAX_CHAIN_WORDS];
struct ice_fv_word fv_words[ICE_MAX_CHAIN_WORDS];
-
- /* Indicate field offsets that have field vector indices assigned */
- DECLARE_BITMAP(done, ICE_MAX_CHAIN_WORDS);
};
-struct ice_pref_recipe_group {
- u8 n_val_pairs; /* Number of valid pairs */
- struct ice_fv_word pairs[ICE_NUM_WORDS_RECIPE];
- u16 mask[ICE_NUM_WORDS_RECIPE];
-};
-
-struct ice_recp_grp_entry {
- struct list_head l_entry;
-
-#define ICE_INVAL_CHAIN_IND 0xFF
- u16 rid;
- u8 chain_idx;
- u16 fv_idx[ICE_NUM_WORDS_RECIPE];
- u16 fv_mask[ICE_NUM_WORDS_RECIPE];
- struct ice_pref_recipe_group r_group;
-};
#endif /* _ICE_PROTOCOL_TYPE_H_ */
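The updated recipe limits imply a larger lookup-word budget than before. A compile-time sketch of that arithmetic, using the defines from this hunk; the old values of 4 words and 20 total come from the removed lines above:

#include <assert.h>
#include <stdio.h>

#define ICE_NUM_WORDS_RECIPE	 5
#define ICE_MAX_CHAIN_RECIPE	 5
/* Total recipes in a chain, including the one holding intermediate results */
#define ICE_MAX_CHAIN_RECIPE_RES (ICE_MAX_CHAIN_RECIPE + 1)
#define ICE_MAX_CHAIN_WORDS	 (ICE_NUM_WORDS_RECIPE * ICE_MAX_CHAIN_RECIPE)

int main(void)
{
	/* 5 lookup words per recipe and 5 chainable recipes feeding a 6th:
	 * up to 25 lookup words, compared with 4 * 5 = 20 before this change.
	 */
	static_assert(ICE_MAX_CHAIN_WORDS == 25, "lookup word budget");
	printf("%d recipes in a chain, %d lookup words\n",
	       ICE_MAX_CHAIN_RECIPE_RES, ICE_MAX_CHAIN_WORDS);
	return 0;
}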
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 927b623cedd5..51fac8f18cb0 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -7,8 +7,6 @@
#define E810_OUT_PROP_DELAY_NS 1
-#define UNKNOWN_INCVAL_E82X 0x100000000ULL
-
static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
/* name idx func chan */
{ "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } },
@@ -813,7 +811,7 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
}
mutex_unlock(&pf->ptp.ports_owner.lock);
- for (i = 0; i < ICE_MAX_QUAD; i++) {
+ for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) {
u64 tstamp_ready;
int err;
@@ -1014,6 +1012,28 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
}
/**
+ * ice_ptp_init_tx_eth56g - Initialize tracking for Tx timestamps
+ * @pf: Board private structure
+ * @tx: the Tx tracking structure to initialize
+ * @port: the port this structure tracks
+ *
+ * Initialize the Tx timestamp tracker for this port. ETH56G PHYs
+ * have independent memory blocks for all ports.
+ *
+ * Return: 0 for success, -ENOMEM when failed to allocate Tx tracker
+ */
+static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx,
+ u8 port)
+{
+ tx->block = port;
+ tx->offset = 0;
+ tx->len = INDEX_PER_PORT_ETH56G;
+ tx->has_ready_bitmap = 1;
+
+ return ice_ptp_alloc_tx_tracker(tx);
+}
+
+/**
* ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
@@ -1027,7 +1047,7 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
static int
ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
- tx->block = port / ICE_PORTS_PER_QUAD;
+ tx->block = ICE_GET_QUAD_NUM(port);
tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
tx->len = INDEX_PER_PORT_E82X;
tx->has_ready_bitmap = 1;
@@ -1210,12 +1230,7 @@ static u64 ice_base_incval(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
u64 incval;
- if (ice_is_e810(hw))
- incval = ICE_PTP_NOMINAL_INCVAL_E810;
- else if (ice_e82x_time_ref(hw) < NUM_ICE_TIME_REF_FREQ)
- incval = ice_e82x_nominal_incval(ice_e82x_time_ref(hw));
- else
- incval = UNKNOWN_INCVAL_E82X;
+ incval = ice_get_base_incval(hw);
dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
incval);
@@ -1229,8 +1244,8 @@ static u64 ice_base_incval(struct ice_pf *pf)
*/
static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
{
- int quad = port->port_num / ICE_PORTS_PER_QUAD;
int offs = port->port_num % ICE_PORTS_PER_QUAD;
+ int quad = ICE_GET_QUAD_NUM(port->port_num);
struct ice_pf *pf;
struct ice_hw *hw;
u32 val, phy_sts;
@@ -1348,10 +1363,19 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
mutex_lock(&ptp_port->ps_lock);
- kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ err = ice_stop_phy_timer_eth56g(hw, port, true);
+ break;
+ case ICE_PHY_E82X:
+ kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
- err = ice_stop_phy_timer_e82x(hw, port, true);
- if (err)
+ err = ice_stop_phy_timer_e82x(hw, port, true);
+ break;
+ default:
+ err = -ENODEV;
+ }
+ if (err && err != -EBUSY)
dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
port, err);
@@ -1385,27 +1409,39 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
mutex_lock(&ptp_port->ps_lock);
- kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ err = ice_start_phy_timer_eth56g(hw, port);
+ break;
+ case ICE_PHY_E82X:
+ /* Start the PHY timer in Vernier mode */
+ kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
- /* temporarily disable Tx timestamps while calibrating PHY offset */
- spin_lock_irqsave(&ptp_port->tx.lock, flags);
- ptp_port->tx.calibrating = true;
- spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
- ptp_port->tx_fifo_busy_cnt = 0;
+ /* temporarily disable Tx timestamps while calibrating
+ * PHY offset
+ */
+ spin_lock_irqsave(&ptp_port->tx.lock, flags);
+ ptp_port->tx.calibrating = true;
+ spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
+ ptp_port->tx_fifo_busy_cnt = 0;
- /* Start the PHY timer in Vernier mode */
- err = ice_start_phy_timer_e82x(hw, port);
- if (err)
- goto out_unlock;
+ /* Start the PHY timer in Vernier mode */
+ err = ice_start_phy_timer_e82x(hw, port);
+ if (err)
+ break;
- /* Enable Tx timestamps right away */
- spin_lock_irqsave(&ptp_port->tx.lock, flags);
- ptp_port->tx.calibrating = false;
- spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
+ /* Enable Tx timestamps right away */
+ spin_lock_irqsave(&ptp_port->tx.lock, flags);
+ ptp_port->tx.calibrating = false;
+ spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
- kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0);
+ kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
+ 0);
+ break;
+ default:
+ err = -ENODEV;
+ }
-out_unlock:
if (err)
dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
port, err);
@@ -1429,20 +1465,23 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
if (pf->ptp.state != ICE_PTP_READY)
return;
- if (WARN_ON_ONCE(port >= ICE_NUM_EXTERNAL_PORTS))
+ if (WARN_ON_ONCE(port >= hw->ptp.num_lports))
return;
ptp_port = &pf->ptp.port;
+ if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
+ port *= 2;
if (WARN_ON_ONCE(ptp_port->port_num != port))
return;
/* Update cached link status for this port immediately */
ptp_port->link_up = linkup;
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
case ICE_PHY_E810:
/* Do not reconfigure E810 PHY */
return;
+ case ICE_PHY_ETH56G:
case ICE_PHY_E82X:
ice_ptp_port_phy_restart(ptp_port);
return;
@@ -1457,42 +1496,62 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
* @ena: bool value to enable or disable interrupt
* @threshold: Minimum number of packets at which intr is triggered
*
- * Utility function to enable or disable Tx timestamp interrupt and threshold
+ * Utility function to configure all the PHY interrupt settings, including
+ * whether the PHY interrupt is enabled, and what threshold to use. Also
+ * configures the E82X timestamp owner to react to interrupts from all PHYs.
+ *
+ * Return: 0 on success, -EOPNOTSUPP when the PHY model is unsupported, other
+ * error codes when the PHY interrupt configuration fails
*/
static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- int err = 0;
- int quad;
- u32 val;
ice_ptp_reset_ts_memory(hw);
- for (quad = 0; quad < ICE_MAX_QUAD; quad++) {
- err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG,
- &val);
- if (err)
- break;
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G: {
+ int port;
- if (ena) {
- val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
- val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M;
- val |= FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M,
- threshold);
- } else {
- val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
+ if (err) {
+ dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
+ port, err);
+ return err;
+ }
}
- err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG,
- val);
- if (err)
- break;
+ return 0;
}
+ case ICE_PHY_E82X: {
+ int quad;
- if (err)
- dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, err %d\n",
- err);
- return err;
+ for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
+ quad++) {
+ int err;
+
+ err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
+ if (err) {
+ dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
+ quad, err);
+ return err;
+ }
+ }
+
+ return 0;
+ }
+ case ICE_PHY_E810:
+ return 0;
+ case ICE_PHY_UNSUP:
+ default:
+ dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__,
+ hw->ptp.phy_model);
+ return -EOPNOTSUPP;
+ }
}
/**
@@ -1767,8 +1826,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
* maintaining phase
*/
if (start_time < current_time)
- start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
- NSEC_PER_SEC) * NSEC_PER_SEC + phase;
+ start_time = roundup_u64(current_time, NSEC_PER_SEC) + phase;
if (ice_is_e810(hw))
start_time -= E810_OUT_PROP_DELAY_NS;
@@ -1994,11 +2052,14 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
struct ice_hw *hw = &pf->hw;
int err;
- /* For Vernier mode, we need to recalibrate after new settime
- * Start with disabling timestamp block
+ /* For Vernier mode on E82X, we need to recalibrate after new settime.
+ * Start with marking timestamps as invalid.
*/
- if (pf->ptp.port.link_up)
- ice_ptp_port_phy_stop(&pf->ptp.port);
+ if (hw->ptp.phy_model == ICE_PHY_E82X) {
+ err = ice_ptp_clear_phy_offset_ready_e82x(hw);
+ if (err)
+ dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
+ }
if (!ice_ptp_lock(hw)) {
err = -EBUSY;
@@ -2018,7 +2079,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
ice_ptp_enable_all_clkout(pf);
/* Recalibrate and re-enable timestamp blocks for E822/E823 */
- if (hw->phy_model == ICE_PHY_E82X)
+ if (hw->ptp.phy_model == ICE_PHY_E82X)
ice_ptp_restart_all_phy(pf);
exit:
if (err) {
@@ -2644,7 +2705,7 @@ static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
if (!ice_pf_src_tmr_owned(pf))
return;
- for (i = 0; i < ICE_MAX_QUAD; i++) {
+ for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) {
u64 tstamp_ready;
int err;
@@ -3080,12 +3141,10 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
/* Release the global hardware lock */
ice_ptp_unlock(hw);
- if (!ice_is_e810(hw)) {
- /* Enable quad interrupts */
- err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
- if (err)
- goto err_exit;
- }
+ /* Configure PHY interrupt settings */
+ err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
+ if (err)
+ goto err_exit;
/* Ensure we have a clock device */
err = ice_ptp_create_clock(pf);
@@ -3146,7 +3205,10 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
mutex_init(&ptp_port->ps_lock);
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx,
+ ptp_port->port_num);
case ICE_PHY_E810:
return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
case ICE_PHY_E82X:
@@ -3241,7 +3303,7 @@ static void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
*/
static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
{
- switch (pf->hw.phy_model) {
+ switch (pf->hw.ptp.phy_model) {
case ICE_PHY_E82X:
/* E822 based PHY has the clock owner process the interrupt
* for all ports.
@@ -3277,7 +3339,7 @@ void ice_ptp_init(struct ice_pf *pf)
ptp->state = ICE_PTP_INITIALIZING;
- ice_ptp_init_phy_model(hw);
+ ice_ptp_init_hw(hw);
ice_ptp_init_tx_interrupt_mode(pf);
@@ -3291,6 +3353,9 @@ void ice_ptp_init(struct ice_pf *pf)
}
ptp->port.port_num = hw->pf_id;
+ if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
+ ptp->port.port_num = hw->pf_id * 2;
+
err = ice_ptp_init_port(pf, &ptp->port);
if (err)
goto err;
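The Tx-tracker changes above replace an open-coded port/quad division with ICE_GET_QUAD_NUM(). A small sketch of the resulting block/offset layout for E82X, assuming ICE_GET_QUAD_NUM() is the same division it replaced and that ICE_PORTS_PER_QUAD is 4 (neither definition appears in this diff); INDEX_PER_PORT_E82X is taken from the ice_ptp.h hunk that follows:

#include <stdio.h>

/* Assumed values: ICE_PORTS_PER_QUAD is not shown in this diff, and
 * ICE_GET_QUAD_NUM() is assumed to match the expression it replaced in
 * ice_ptp_init_tx_e82x().
 */
#define ICE_PORTS_PER_QUAD	4
#define ICE_GET_QUAD_NUM(port)	((port) / ICE_PORTS_PER_QUAD)
#define INDEX_PER_PORT_E82X	16

int main(void)
{
	unsigned int port;

	/* Each E82X port owns a 16-entry slice of its quad's Tx timestamp
	 * block, e.g. port 5 -> quad 1, offset 16.
	 */
	for (port = 0; port < 8; port++)
		printf("port %u -> block %u, offset %u\n", port,
		       ICE_GET_QUAD_NUM(port),
		       (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X);
	return 0;
}

The clkout change in the same file similarly swaps the open-coded round-up, div64_u64(t + NSEC_PER_SEC - 1, NSEC_PER_SEC) * NSEC_PER_SEC, for roundup_u64(t, NSEC_PER_SEC), which yields the same next-whole-second boundary.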
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index e2af9749061c..2db2257a0fb2 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -160,6 +160,7 @@ struct ice_ptp_tx {
#define INDEX_PER_QUAD 64
#define INDEX_PER_PORT_E82X 16
#define INDEX_PER_PORT_E810 64
+#define INDEX_PER_PORT_ETH56G 64
/**
* struct ice_ptp_port - data used to initialize an external port for PTP
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index 2c4dab0c48ab..e6980b94a6c1 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -9,6 +9,321 @@
*/
/* Constants defined for the PTP 1588 clock hardware. */
+const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES] = {
+ /* ETH56G_PHY_REG_PTP */
+ {
+ /* base_addr */
+ {
+ 0x092000,
+ 0x126000,
+ 0x1BA000,
+ 0x24E000,
+ 0x2E2000,
+ },
+ /* step */
+ 0x98,
+ },
+ /* ETH56G_PHY_MEM_PTP */
+ {
+ /* base_addr */
+ {
+ 0x093000,
+ 0x127000,
+ 0x1BB000,
+ 0x24F000,
+ 0x2E3000,
+ },
+ /* step */
+ 0x200,
+ },
+ /* ETH56G_PHY_REG_XPCS */
+ {
+ /* base_addr */
+ {
+ 0x000000,
+ 0x009400,
+ 0x128000,
+ 0x1BC000,
+ 0x250000,
+ },
+ /* step */
+ 0x21000,
+ },
+ /* ETH56G_PHY_REG_MAC */
+ {
+ /* base_addr */
+ {
+ 0x085000,
+ 0x119000,
+ 0x1AD000,
+ 0x241000,
+ 0x2D5000,
+ },
+ /* step */
+ 0x1000,
+ },
+ /* ETH56G_PHY_REG_GPCS */
+ {
+ /* base_addr */
+ {
+ 0x084000,
+ 0x118000,
+ 0x1AC000,
+ 0x240000,
+ 0x2D4000,
+ },
+ /* step */
+ 0x400,
+ },
+};
+
+const
+struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
+ [ICE_ETH56G_LNK_SPD_1G] = {
+ .tx_mode = { .def = 6, },
+ .rx_mode = { .def = 6, },
+ .blks_per_clk = 1,
+ .blktime = 0x4000, /* 32 */
+ .tx_offset = {
+ .serdes = 0x6666, /* 51.2 */
+ .no_fec = 0xd066, /* 104.2 */
+ .sfd = 0x3000, /* 24 */
+ .onestep = 0x30000 /* 384 */
+ },
+ .rx_offset = {
+ .serdes = 0xffffc59a, /* -29.2 */
+ .no_fec = 0xffff0a80, /* -122.75 */
+ .sfd = 0x2c00, /* 22 */
+ .bs_ds = 0x19a /* 0.8 */
+ /* Dynamic bitslip 0 equals to 10 */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_2_5G] = {
+ .tx_mode = { .def = 6, },
+ .rx_mode = { .def = 6, },
+ .blks_per_clk = 1,
+ .blktime = 0x199a, /* 12.8 */
+ .tx_offset = {
+ .serdes = 0x28f6, /* 20.48 */
+ .no_fec = 0x53b8, /* 41.86 */
+ .sfd = 0x1333, /* 9.6 */
+ .onestep = 0x13333 /* 153.6 */
+ },
+ .rx_offset = {
+ .serdes = 0xffffe8a4, /* -11.68 */
+ .no_fec = 0xffff9a76, /* -50.77 */
+ .sfd = 0xf33, /* 7.6 */
+ .bs_ds = 0xa4 /* 0.32 */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_10G] = {
+ .tx_mode = { .def = 1, },
+ .rx_mode = { .def = 1, },
+ .blks_per_clk = 1,
+ .blktime = 0x666, /* 3.2 */
+ .tx_offset = {
+ .serdes = 0x234c, /* 17.6484848 */
+ .no_fec = 0x8e80, /* 71.25 */
+ .fc = 0xb4a4, /* 90.32 */
+ .sfd = 0x4a4, /* 2.32 */
+ .onestep = 0x4ccd /* 38.4 */
+ },
+ .rx_offset = {
+ .serdes = 0xffffeb27, /* -10.42424 */
+ .no_fec = 0xffffcccd, /* -25.6 */
+ .fc = 0xfffe0014, /* -255.96 */
+ .sfd = 0x4a4, /* 2.32 */
+ .bs_ds = 0x32 /* 0.0969697 */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_25G] = {
+ .tx_mode = {
+ .def = 1,
+ .rs = 4
+ },
+ .tx_mk_dly = 4,
+ .tx_cw_dly = {
+ .def = 1,
+ .onestep = 6
+ },
+ .rx_mode = {
+ .def = 1,
+ .rs = 4
+ },
+ .rx_mk_dly = {
+ .def = 1,
+ .rs = 1
+ },
+ .rx_cw_dly = {
+ .def = 1,
+ .rs = 1
+ },
+ .blks_per_clk = 1,
+ .blktime = 0x28f, /* 1.28 */
+ .mktime = 0x147b, /* 10.24, only if RS-FEC enabled */
+ .tx_offset = {
+ .serdes = 0xe1e, /* 7.0593939 */
+ .no_fec = 0x3857, /* 28.17 */
+ .fc = 0x48c3, /* 36.38 */
+ .rs = 0x8100, /* 64.5 */
+ .sfd = 0x1dc, /* 0.93 */
+ .onestep = 0x1eb8 /* 15.36 */
+ },
+ .rx_offset = {
+ .serdes = 0xfffff7a9, /* -4.1697 */
+ .no_fec = 0xffffe71a, /* -12.45 */
+ .fc = 0xfffe894d, /* -187.35 */
+ .rs = 0xfffff8cd, /* -3.6 */
+ .sfd = 0x1dc, /* 0.93 */
+ .bs_ds = 0x14 /* 0.0387879, RS-FEC 0 */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_40G] = {
+ .tx_mode = { .def = 3 },
+ .tx_mk_dly = 4,
+ .tx_cw_dly = {
+ .def = 1,
+ .onestep = 6
+ },
+ .rx_mode = { .def = 4 },
+ .rx_mk_dly = { .def = 1 },
+ .rx_cw_dly = { .def = 1 },
+ .blktime = 0x333, /* 1.6 */
+ .mktime = 0xccd, /* 6.4 */
+ .tx_offset = {
+ .serdes = 0x234c, /* 17.6484848 */
+ .no_fec = 0x5a8a, /* 45.27 */
+ .fc = 0x81b8, /* 64.86 */
+ .sfd = 0x4a4, /* 2.32 */
+ .onestep = 0x1333 /* 9.6 */
+ },
+ .rx_offset = {
+ .serdes = 0xffffeb27, /* -10.42424 */
+ .no_fec = 0xfffff594, /* -5.21 */
+ .fc = 0xfffe3080, /* -231.75 */
+ .sfd = 0x4a4, /* 2.32 */
+ .bs_ds = 0xccd /* 6.4 */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_50G] = {
+ .tx_mode = { .def = 5 },
+ .tx_mk_dly = 4,
+ .tx_cw_dly = {
+ .def = 1,
+ .onestep = 6
+ },
+ .rx_mode = { .def = 5 },
+ .rx_mk_dly = { .def = 1 },
+ .rx_cw_dly = { .def = 1 },
+ .blktime = 0x28f, /* 1.28 */
+ .mktime = 0xa3d, /* 5.12 */
+ .tx_offset = {
+ .serdes = 0x13ba, /* 9.86353 */
+ .rs = 0x5400, /* 42 */
+ .sfd = 0xe6, /* 0.45 */
+ .onestep = 0xf5c /* 7.68 */
+ },
+ .rx_offset = {
+ .serdes = 0xfffff7e8, /* -4.04706 */
+ .rs = 0xfffff994, /* -3.21 */
+ .sfd = 0xe6 /* 0.45 */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_50G2] = {
+ .tx_mode = {
+ .def = 3,
+ .rs = 2
+ },
+ .tx_mk_dly = 4,
+ .tx_cw_dly = {
+ .def = 1,
+ .onestep = 6
+ },
+ .rx_mode = {
+ .def = 4,
+ .rs = 1
+ },
+ .rx_mk_dly = { .def = 1 },
+ .rx_cw_dly = { .def = 1 },
+ .blktime = 0x28f, /* 1.28 */
+ .mktime = 0xa3d, /* 5.12 */
+ .tx_offset = {
+ .serdes = 0xe1e, /* 7.0593939 */
+ .no_fec = 0x3d33, /* 30.6 */
+ .rs = 0x5057, /* 40.17 */
+ .sfd = 0x1dc, /* 0.93 */
+ .onestep = 0xf5c /* 7.68 */
+ },
+ .rx_offset = {
+ .serdes = 0xfffff7a9, /* -4.1697 */
+ .no_fec = 0xfffff8cd, /* -3.6 */
+ .rs = 0xfffff21a, /* -6.95 */
+ .sfd = 0x1dc, /* 0.93 */
+ .bs_ds = 0xa3d /* 5.12, RS-FEC 0x633 (3.1) */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_100G] = {
+ .tx_mode = {
+ .def = 3,
+ .rs = 2
+ },
+ .tx_mk_dly = 10,
+ .tx_cw_dly = {
+ .def = 3,
+ .onestep = 6
+ },
+ .rx_mode = {
+ .def = 4,
+ .rs = 1
+ },
+ .rx_mk_dly = { .def = 5 },
+ .rx_cw_dly = { .def = 5 },
+ .blks_per_clk = 1,
+ .blktime = 0x148, /* 0.64 */
+ .mktime = 0x199a, /* 12.8 */
+ .tx_offset = {
+ .serdes = 0xe1e, /* 7.0593939 */
+ .no_fec = 0x67ec, /* 51.96 */
+ .rs = 0x44fb, /* 34.49 */
+ .sfd = 0x1dc, /* 0.93 */
+ .onestep = 0xf5c /* 7.68 */
+ },
+ .rx_offset = {
+ .serdes = 0xfffff7a9, /* -4.1697 */
+ .no_fec = 0xfffff5a9, /* -5.17 */
+ .rs = 0xfffff6e6, /* -4.55 */
+ .sfd = 0x1dc, /* 0.93 */
+ .bs_ds = 0x199a /* 12.8, RS-FEC 0x31b (1.552) */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_100G2] = {
+ .tx_mode = { .def = 5 },
+ .tx_mk_dly = 10,
+ .tx_cw_dly = {
+ .def = 3,
+ .onestep = 6
+ },
+ .rx_mode = { .def = 5 },
+ .rx_mk_dly = { .def = 5 },
+ .rx_cw_dly = { .def = 5 },
+ .blks_per_clk = 1,
+ .blktime = 0x148, /* 0.64 */
+ .mktime = 0x199a, /* 12.8 */
+ .tx_offset = {
+ .serdes = 0x13ba, /* 9.86353 */
+ .rs = 0x460a, /* 35.02 */
+ .sfd = 0xe6, /* 0.45 */
+ .onestep = 0xf5c /* 7.68 */
+ },
+ .rx_offset = {
+ .serdes = 0xfffff7e8, /* -4.04706 */
+ .rs = 0xfffff548, /* -5.36 */
+ .sfd = 0xe6, /* 0.45 */
+ .bs_ds = 0x303 /* 1.506 */
+ }
+ }
+};
+
/* struct ice_time_ref_info_e82x
*
* E822 hardware can use different sources as the reference for the PTP
@@ -155,6 +470,93 @@ const struct ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
},
};
+const
+struct ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
+ /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
+ {
+ /* tspll_ck_refclkfreq */
+ 0x19,
+ /* tspll_ndivratio */
+ 1,
+ /* tspll_fbdiv_intgr */
+ 320,
+ /* tspll_fbdiv_frac */
+ 0,
+ /* ref1588_ck_div */
+ 0,
+ },
+
+ /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
+ {
+ /* tspll_ck_refclkfreq */
+ 0x29,
+ /* tspll_ndivratio */
+ 3,
+ /* tspll_fbdiv_intgr */
+ 195,
+ /* tspll_fbdiv_frac */
+ 1342177280UL,
+ /* ref1588_ck_div */
+ 0,
+ },
+
+ /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
+ {
+ /* tspll_ck_refclkfreq */
+ 0x3E,
+ /* tspll_ndivratio */
+ 2,
+ /* tspll_fbdiv_intgr */
+ 128,
+ /* tspll_fbdiv_frac */
+ 0,
+ /* ref1588_ck_div */
+ 0,
+ },
+
+ /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
+ {
+ /* tspll_ck_refclkfreq */
+ 0x33,
+ /* tspll_ndivratio */
+ 3,
+ /* tspll_fbdiv_intgr */
+ 156,
+ /* tspll_fbdiv_frac */
+ 1073741824UL,
+ /* ref1588_ck_div */
+ 0,
+ },
+
+ /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
+ {
+ /* tspll_ck_refclkfreq */
+ 0x1F,
+ /* tspll_ndivratio */
+ 5,
+ /* tspll_fbdiv_intgr */
+ 256,
+ /* tspll_fbdiv_frac */
+ 0,
+ /* ref1588_ck_div */
+ 0,
+ },
+
+ /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
+ {
+ /* tspll_ck_refclkfreq */
+ 0x52,
+ /* tspll_ndivratio */
+ 3,
+ /* tspll_fbdiv_intgr */
+ 97,
+ /* tspll_fbdiv_frac */
+ 2818572288UL,
+ /* ref1588_ck_div */
+ 0,
+ },
+};
+
/* struct ice_vernier_info_e82x
*
* E822 hardware calibrates the delay of the timestamp indication from the
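The hex timing constants in eth56g_mac_cfg track the fractional values quoted in the inline comments: dividing the raw values by 512 reproduces those comments (0x6666 -> 51.2, 0xffffc59a interpreted as signed -> -29.2), which suggests a signed fixed-point format with 9 fractional bits. That interpretation is an inference from the table, not something this diff states. A small sketch:

#include <stdint.h>
#include <stdio.h>

/* Assumption: the tx/rx offset constants in eth56g_mac_cfg are signed 32-bit
 * fixed-point numbers with 9 fractional bits, so raw / 512.0 reproduces the
 * values given in the inline comments.
 */
static double eth56g_fixed_point(uint32_t raw)
{
	return (double)(int32_t)raw / 512.0;
}

int main(void)
{
	/* Values taken from the ICE_ETH56G_LNK_SPD_1G entry above */
	printf("tx serdes offset: %.1f\n", eth56g_fixed_point(0x6666));     /* 51.2 */
	printf("rx serdes offset: %.1f\n", eth56g_fixed_point(0xffffc59a)); /* -29.2 */
	return 0;
}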
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 2b9423a173bb..3a33e6b9b313 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2021, Intel Corporation. */
#include <linux/delay.h>
+#include <linux/iopoll.h>
#include "ice_common.h"
#include "ice_ptp_hw.h"
#include "ice_ptp_consts.h"
@@ -227,40 +228,632 @@ static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
}
/**
- * ice_ptp_src_cmd - Prepare source timer for a timer command
- * @hw: pointer to HW structure
+ * ice_read_cgu_reg_e82x - Read a CGU register
+ * @hw: pointer to the HW struct
+ * @addr: Register address to read
+ * @val: storage for register value read
+ *
+ * Read the contents of a register of the Clock Generation Unit. Only
+ * applicable to E822 devices.
+ *
+ * Return: 0 on success, other error codes when failed to read from CGU
+ */
+static int ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val)
+{
+ struct ice_sbq_msg_input cgu_msg = {
+ .opcode = ice_sbq_msg_rd,
+ .dest_dev = cgu,
+ .msg_addr_low = addr
+ };
+ int err;
+
+ err = ice_sbq_rw_reg(hw, &cgu_msg, ICE_AQ_FLAG_RD);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
+ addr, err);
+ return err;
+ }
+
+ *val = cgu_msg.data;
+
+ return 0;
+}
+
+/**
+ * ice_write_cgu_reg_e82x - Write a CGU register
+ * @hw: pointer to the HW struct
+ * @addr: Register address to write
+ * @val: value to write into the register
+ *
+ * Write the specified value to a register of the Clock Generation Unit. Only
+ * applicable to E822 devices.
+ *
+ * Return: 0 on success, other error codes when failed to write to CGU
+ */
+static int ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val)
+{
+ struct ice_sbq_msg_input cgu_msg = {
+ .opcode = ice_sbq_msg_wr,
+ .dest_dev = cgu,
+ .msg_addr_low = addr,
+ .data = val
+ };
+ int err;
+
+ err = ice_sbq_rw_reg(hw, &cgu_msg, ICE_AQ_FLAG_RD);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
+ addr, err);
+ return err;
+ }
+
+ return err;
+}
+
+/**
+ * ice_clk_freq_str - Convert time_ref_freq to string
+ * @clk_freq: Clock frequency
+ *
+ * Return: specified TIME_REF clock frequency converted to a string
+ */
+static const char *ice_clk_freq_str(enum ice_time_ref_freq clk_freq)
+{
+ switch (clk_freq) {
+ case ICE_TIME_REF_FREQ_25_000:
+ return "25 MHz";
+ case ICE_TIME_REF_FREQ_122_880:
+ return "122.88 MHz";
+ case ICE_TIME_REF_FREQ_125_000:
+ return "125 MHz";
+ case ICE_TIME_REF_FREQ_153_600:
+ return "153.6 MHz";
+ case ICE_TIME_REF_FREQ_156_250:
+ return "156.25 MHz";
+ case ICE_TIME_REF_FREQ_245_760:
+ return "245.76 MHz";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_clk_src_str - Convert time_ref_src to string
+ * @clk_src: Clock source
+ *
+ * Return: specified clock source converted to its string name
+ */
+static const char *ice_clk_src_str(enum ice_clk_src clk_src)
+{
+ switch (clk_src) {
+ case ICE_CLK_SRC_TCXO:
+ return "TCXO";
+ case ICE_CLK_SRC_TIME_REF:
+ return "TIME_REF";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit
+ * @hw: pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCXO)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the PLL which drives the PTP hardware clock.
+ *
+ * Return:
+ * * %0 - success
+ * * %-EINVAL - input parameters are incorrect
+ * * %-EBUSY - failed to lock TS PLL
+ * * %other - CGU read/write failure
+ */
+static int ice_cfg_cgu_pll_e82x(struct ice_hw *hw,
+ enum ice_time_ref_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ union tspll_ro_bwm_lf bwm_lf;
+ union nac_cgu_dword19 dw19;
+ union nac_cgu_dword22 dw22;
+ union nac_cgu_dword24 dw24;
+ union nac_cgu_dword9 dw9;
+ int err;
+
+ if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
+ clk_freq);
+ return -EINVAL;
+ }
+
+ if (clk_src >= NUM_ICE_CLK_SRC) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
+ clk_src);
+ return -EINVAL;
+ }
+
+ if (clk_src == ICE_CLK_SRC_TCXO &&
+ clk_freq != ICE_TIME_REF_FREQ_25_000) {
+ dev_warn(ice_hw_to_dev(hw),
+ "TCXO only supports 25 MHz frequency\n");
+ return -EINVAL;
+ }
+
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ if (err)
+ return err;
+
+ /* Log the current clock configuration */
+ ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
+ dw24.ts_pll_enable ? "enabled" : "disabled",
+ ice_clk_src_str(dw24.time_ref_sel),
+ ice_clk_freq_str(dw9.time_ref_freq_sel),
+ bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked");
+
+ /* Disable the PLL before changing the clock source or frequency */
+ if (dw24.ts_pll_enable) {
+ dw24.ts_pll_enable = 0;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
+ if (err)
+ return err;
+ }
+
+ /* Set the frequency */
+ dw9.time_ref_freq_sel = clk_freq;
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL feedback divisor */
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
+ if (err)
+ return err;
+
+ dw19.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
+ dw19.tspll_ndivratio = 1;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL post divisor */
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
+ if (err)
+ return err;
+
+ dw22.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
+ dw22.time1588clk_sel_div2 = 0;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL pre divisor and clock source */
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
+ if (err)
+ return err;
+
+ dw24.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
+ dw24.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
+ dw24.time_ref_sel = clk_src;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
+ if (err)
+ return err;
+
+ /* Finally, enable the PLL */
+ dw24.ts_pll_enable = 1;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
+ if (err)
+ return err;
+
+ /* Wait to verify if the PLL locks */
+ usleep_range(1000, 5000);
+
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ if (err)
+ return err;
+
+ if (!bwm_lf.plllock_true_lock_cri) {
+ dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
+ return -EBUSY;
+ }
+
+ /* Log the current clock configuration */
+ ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
+ dw24.ts_pll_enable ? "enabled" : "disabled",
+ ice_clk_src_str(dw24.time_ref_sel),
+ ice_clk_freq_str(dw9.time_ref_freq_sel),
+ bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked");
+
+ return 0;
+}
+
+/**
+ * ice_cfg_cgu_pll_e825c - Configure the Clock Generation Unit for E825-C
+ * @hw: pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCXO)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the PLL which drives the PTP hardware clock.
+ *
+ * Return:
+ * * %0 - success
+ * * %-EINVAL - input parameters are incorrect
+ * * %-EBUSY - failed to lock TS PLL
+ * * %other - CGU read/write failure
+ */
+static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw,
+ enum ice_time_ref_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ union tspll_ro_lock_e825c ro_lock;
+ union nac_cgu_dword16_e825c dw16;
+ union nac_cgu_dword23_e825c dw23;
+ union nac_cgu_dword19 dw19;
+ union nac_cgu_dword22 dw22;
+ union nac_cgu_dword24 dw24;
+ union nac_cgu_dword9 dw9;
+ int err;
+
+ if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
+ clk_freq);
+ return -EINVAL;
+ }
+
+ if (clk_src >= NUM_ICE_CLK_SRC) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
+ clk_src);
+ return -EINVAL;
+ }
+
+ if (clk_src == ICE_CLK_SRC_TCXO &&
+ clk_freq != ICE_TIME_REF_FREQ_156_250) {
+ dev_warn(ice_hw_to_dev(hw),
+ "TCXO only supports 156.25 MHz frequency\n");
+ return -EINVAL;
+ }
+
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD16_E825C, &dw16.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, &dw23.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_LOCK_E825C, &ro_lock.val);
+ if (err)
+ return err;
+
+ /* Log the current clock configuration */
+ ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
+ dw24.ts_pll_enable ? "enabled" : "disabled",
+ ice_clk_src_str(dw23.time_ref_sel),
+ ice_clk_freq_str(dw9.time_ref_freq_sel),
+ ro_lock.plllock_true_lock_cri ? "locked" : "unlocked");
+
+ /* Disable the PLL before changing the clock source or frequency */
+ if (dw23.ts_pll_enable) {
+ dw23.ts_pll_enable = 0;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C,
+ dw23.val);
+ if (err)
+ return err;
+ }
+
+ /* Set the frequency */
+ dw9.time_ref_freq_sel = clk_freq;
+
+ /* Enable the correct receiver */
+ if (clk_src == ICE_CLK_SRC_TCXO) {
+ dw9.time_ref_en = 0;
+ dw9.clk_eref0_en = 1;
+ } else {
+ dw9.time_ref_en = 1;
+ dw9.clk_eref0_en = 0;
+ }
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
+ if (err)
+ return err;
+
+ /* Choose the reference frequency */
+ dw16.tspll_ck_refclkfreq =
+ e825c_cgu_params[clk_freq].tspll_ck_refclkfreq;
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD16_E825C, dw16.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL feedback divisor */
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
+ if (err)
+ return err;
+
+ dw19.tspll_fbdiv_intgr =
+ e825c_cgu_params[clk_freq].tspll_fbdiv_intgr;
+ dw19.tspll_ndivratio =
+ e825c_cgu_params[clk_freq].tspll_ndivratio;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL post divisor */
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
+ if (err)
+ return err;
+
+ /* These two are constant for E825C */
+ dw22.time1588clk_div = 5;
+ dw22.time1588clk_sel_div2 = 0;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL pre divisor and clock source */
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, &dw23.val);
+ if (err)
+ return err;
+
+ dw23.ref1588_ck_div =
+ e825c_cgu_params[clk_freq].ref1588_ck_div;
+ dw23.time_ref_sel = clk_src;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, dw23.val);
+ if (err)
+ return err;
+
+ dw24.tspll_fbdiv_frac =
+ e825c_cgu_params[clk_freq].tspll_fbdiv_frac;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
+ if (err)
+ return err;
+
+ /* Finally, enable the PLL */
+ dw23.ts_pll_enable = 1;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, dw23.val);
+ if (err)
+ return err;
+
+ /* Wait to verify if the PLL locks */
+ usleep_range(1000, 5000);
+
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_LOCK_E825C, &ro_lock.val);
+ if (err)
+ return err;
+
+ if (!ro_lock.plllock_true_lock_cri) {
+ dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
+ return -EBUSY;
+ }
+
+ /* Log the new clock configuration */
+ ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
+ dw24.ts_pll_enable ? "enabled" : "disabled",
+ ice_clk_src_str(dw23.time_ref_sel),
+ ice_clk_freq_str(dw9.time_ref_freq_sel),
+ ro_lock.plllock_true_lock_cri ? "locked" : "unlocked");
+
+ return 0;
+}
+
+/**
+ * ice_cfg_cgu_pll_dis_sticky_bits_e82x - disable TS PLL sticky bits
+ * @hw: pointer to the HW struct
+ *
+ * Configure the Clock Generation Unit TS PLL sticky bits so they don't latch
+ * when TS PLL lock is lost, but instead always show the current state.
+ *
+ * Return: 0 on success, other error codes when failed to read/write CGU
+ */
+static int ice_cfg_cgu_pll_dis_sticky_bits_e82x(struct ice_hw *hw)
+{
+ union tspll_cntr_bist_settings cntr_bist;
+ int err;
+
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
+ &cntr_bist.val);
+ if (err)
+ return err;
+
+ /* Disable sticky lock detection so lock err reported is accurate */
+ cntr_bist.i_plllock_sel_0 = 0;
+ cntr_bist.i_plllock_sel_1 = 0;
+
+ return ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
+ cntr_bist.val);
+}
+
+/**
+ * ice_cfg_cgu_pll_dis_sticky_bits_e825c - disable TS PLL sticky bits for E825-C
+ * @hw: pointer to the HW struct
+ *
+ * Configure the Clock Generation Unit TS PLL sticky bits so they don't latch
+ * when TS PLL lock is lost, but instead always show the current state.
+ *
+ * Return: 0 on success, other error codes when failed to read/write CGU
+ */
+static int ice_cfg_cgu_pll_dis_sticky_bits_e825c(struct ice_hw *hw)
+{
+ union tspll_bw_tdc_e825c bw_tdc;
+ int err;
+
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_BW_TDC_E825C, &bw_tdc.val);
+ if (err)
+ return err;
+
+ bw_tdc.i_plllock_sel_1_0 = 0;
+
+ return ice_write_cgu_reg_e82x(hw, TSPLL_BW_TDC_E825C, bw_tdc.val);
+}
+
+/**
+ * ice_init_cgu_e82x - Initialize CGU with settings from firmware
+ * @hw: pointer to the HW structure
+ *
+ * Initialize the Clock Generation Unit of the E822 device.
+ *
+ * Return: 0 on success, other error codes when failed to read/write/cfg CGU
+ */
+static int ice_init_cgu_e82x(struct ice_hw *hw)
+{
+ struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
+ int err;
+
+ /* Disable sticky lock detection so lock err reported is accurate */
+ if (ice_is_e825c(hw))
+ err = ice_cfg_cgu_pll_dis_sticky_bits_e825c(hw);
+ else
+ err = ice_cfg_cgu_pll_dis_sticky_bits_e82x(hw);
+ if (err)
+ return err;
+
+ /* Configure the CGU PLL using the parameters from the function
+ * capabilities.
+ */
+ if (ice_is_e825c(hw))
+ err = ice_cfg_cgu_pll_e825c(hw, ts_info->time_ref,
+ (enum ice_clk_src)ts_info->clk_src);
+ else
+ err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref,
+ (enum ice_clk_src)ts_info->clk_src);
+
+ return err;
+}
+
+/**
+ * ice_ptp_tmr_cmd_to_src_reg - Convert to source timer command value
+ * @hw: pointer to HW struct
* @cmd: Timer command
*
- * Prepare the source timer for an upcoming timer sync command.
+ * Return: the source timer command register value for the given PTP timer
+ * command.
*/
-void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+static u32 ice_ptp_tmr_cmd_to_src_reg(struct ice_hw *hw,
+ enum ice_ptp_tmr_cmd cmd)
{
- u32 cmd_val;
- u8 tmr_idx;
+ u32 cmd_val, tmr_idx;
+
+ switch (cmd) {
+ case ICE_PTP_INIT_TIME:
+ cmd_val = GLTSYN_CMD_INIT_TIME;
+ break;
+ case ICE_PTP_INIT_INCVAL:
+ cmd_val = GLTSYN_CMD_INIT_INCVAL;
+ break;
+ case ICE_PTP_ADJ_TIME:
+ cmd_val = GLTSYN_CMD_ADJ_TIME;
+ break;
+ case ICE_PTP_ADJ_TIME_AT_TIME:
+ cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
+ break;
+ case ICE_PTP_NOP:
+ case ICE_PTP_READ_TIME:
+ cmd_val = GLTSYN_CMD_READ_TIME;
+ break;
+ default:
+ dev_warn(ice_hw_to_dev(hw),
+ "Ignoring unrecognized timer command %u\n", cmd);
+ cmd_val = 0;
+ }
tmr_idx = ice_get_ptp_src_clock_index(hw);
- cmd_val = tmr_idx << SEL_CPK_SRC;
+
+ return tmr_idx << SEL_CPK_SRC | cmd_val;
+}
+
+/**
+ * ice_ptp_tmr_cmd_to_port_reg - Convert to port timer command value
+ * @hw: pointer to HW struct
+ * @cmd: Timer command
+ *
+ * Note that some hardware families use a different command register value for
+ * the PHY ports, while other hardware families use the same register values
+ * as the source timer.
+ *
+ * Return: the PHY port timer command register value for the given PTP timer
+ * command.
+ */
+static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw,
+ enum ice_ptp_tmr_cmd cmd)
+{
+ u32 cmd_val, tmr_idx;
+
+ /* Certain hardware families share the same register values for the
+ * port register and source timer register.
+ */
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_E810:
+ return ice_ptp_tmr_cmd_to_src_reg(hw, cmd) & TS_CMD_MASK_E810;
+ default:
+ break;
+ }
switch (cmd) {
case ICE_PTP_INIT_TIME:
- cmd_val |= GLTSYN_CMD_INIT_TIME;
+ cmd_val = PHY_CMD_INIT_TIME;
break;
case ICE_PTP_INIT_INCVAL:
- cmd_val |= GLTSYN_CMD_INIT_INCVAL;
+ cmd_val = PHY_CMD_INIT_INCVAL;
break;
case ICE_PTP_ADJ_TIME:
- cmd_val |= GLTSYN_CMD_ADJ_TIME;
+ cmd_val = PHY_CMD_ADJ_TIME;
break;
case ICE_PTP_ADJ_TIME_AT_TIME:
- cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
+ cmd_val = PHY_CMD_ADJ_TIME_AT_TIME;
break;
case ICE_PTP_READ_TIME:
- cmd_val |= GLTSYN_CMD_READ_TIME;
+ cmd_val = PHY_CMD_READ_TIME;
break;
case ICE_PTP_NOP:
+ cmd_val = 0;
break;
+ default:
+ dev_warn(ice_hw_to_dev(hw),
+ "Ignoring unrecognized timer command %u\n", cmd);
+ cmd_val = 0;
}
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ return tmr_idx << SEL_PHY_SRC | cmd_val;
+}
+
+/**
+ * ice_ptp_src_cmd - Prepare source timer for a timer command
+ * @hw: pointer to HW structure
+ * @cmd: Timer command
+ *
+ * Prepare the source timer for an upcoming timer sync command.
+ */
+void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 cmd_val = ice_ptp_tmr_cmd_to_src_reg(hw, cmd);
+
wr32(hw, GLTSYN_CMD, cmd_val);
}
@@ -281,6 +874,1832 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
ice_flush(hw);
}
+/* 56G PHY device functions
+ *
+ * The following functions operate on devices with the ETH 56G PHY.
+ */
+
+/**
+ * ice_write_phy_eth56g - Write a PHY port register
+ * @hw: pointer to the HW struct
+ * @phy_idx: PHY index
+ * @addr: PHY register address
+ * @val: Value to write
+ *
+ * Return: 0 on success, other error codes when failed to write to PHY
+ */
+static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
+ u32 val)
+{
+ struct ice_sbq_msg_input phy_msg;
+ int err;
+
+ phy_msg.opcode = ice_sbq_msg_wr;
+
+ phy_msg.msg_addr_low = lower_16_bits(addr);
+ phy_msg.msg_addr_high = upper_16_bits(addr);
+
+ phy_msg.data = val;
+ phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx];
+
+ err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD);
+
+ if (err)
+ ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
+ err);
+
+ return err;
+}
+
+/**
+ * ice_read_phy_eth56g - Read a PHY port register
+ * @hw: pointer to the HW struct
+ * @phy_idx: PHY index
+ * @addr: PHY register address
+ * @val: on return, the value read from the PHY register
+ *
+ * Return: 0 on success, other error codes when failed to read from PHY
+ */
+static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
+ u32 *val)
+{
+ struct ice_sbq_msg_input phy_msg;
+ int err;
+
+ phy_msg.opcode = ice_sbq_msg_rd;
+
+ phy_msg.msg_addr_low = lower_16_bits(addr);
+ phy_msg.msg_addr_high = upper_16_bits(addr);
+
+ phy_msg.data = 0;
+ phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx];
+
+ err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
+ err);
+ return err;
+ }
+
+ *val = phy_msg.data;
+
+ return 0;
+}
+
+/**
+ * ice_phy_res_address_eth56g - Calculate a PHY port register address
+ * @port: Port number to be written
+ * @res_type: resource type (register/memory)
+ * @offset: Offset from PHY port register base
+ * @addr: The result address
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ */
+static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type,
+ u32 offset, u32 *addr)
+{
+ u8 lane = port % ICE_PORTS_PER_QUAD;
+ u8 phy = ICE_GET_QUAD_NUM(port);
+
+ if (res_type >= NUM_ETH56G_PHY_RES)
+ return -EINVAL;
+
+ *addr = eth56g_phy_res[res_type].base[phy] +
+ lane * eth56g_phy_res[res_type].step + offset;
+ return 0;
+}
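For illustration, the per-port address computed above is plain base-plus-stride arithmetic: the port is split into a PHY block (quad) and a lane within that block, and the lane scales the per-resource step. A minimal standalone sketch with invented base/step values (the real ones live in the eth56g_phy_res table):

#include <stdio.h>
#include <stdint.h>

#define PORTS_PER_QUAD 4 /* mirrors ICE_PORTS_PER_QUAD */

int main(void)
{
	uint8_t port = 5;                        /* logical port number */
	uint8_t lane = port % PORTS_PER_QUAD;    /* lane 1 within its quad */
	uint8_t quad = port / PORTS_PER_QUAD;    /* quad (PHY block) 1 */
	uint32_t base[] = { 0x1000, 0x5000 };    /* assumed per-quad bases */
	uint32_t step = 0x100;                   /* assumed per-lane stride */
	uint32_t offset = 0x20;                  /* register offset */

	/* 0x5000 + 1 * 0x100 + 0x20 == 0x5120 */
	printf("addr = 0x%x\n", base[quad] + lane * step + offset);
	return 0;
}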
+
+/**
+ * ice_write_port_eth56g - Write a PHY port register
+ * @hw: pointer to the HW struct
+ * @offset: PHY register offset
+ * @port: Port number
+ * @val: Value to write
+ * @res_type: resource type (register/memory)
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
+ u32 val, enum eth56g_res_type res_type)
+{
+ u8 phy_port = port % hw->ptp.ports_per_phy;
+ u8 phy_idx = port / hw->ptp.ports_per_phy;
+ u32 addr;
+ int err;
+
+ if (port >= hw->ptp.num_lports)
+ return -EINVAL;
+
+ err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr);
+ if (err)
+ return err;
+
+ return ice_write_phy_eth56g(hw, phy_idx, addr, val);
+}
+
+/**
+ * ice_read_port_eth56g - Read a PHY port register
+ * @hw: pointer to the HW struct
+ * @offset: PHY register offset
+ * @port: Port number
+ * @val: on return, the value read from the PHY register
+ * @res_type: resource type (register/memory)
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
+ u32 *val, enum eth56g_res_type res_type)
+{
+ u8 phy_port = port % hw->ptp.ports_per_phy;
+ u8 phy_idx = port / hw->ptp.ports_per_phy;
+ u32 addr;
+ int err;
+
+ if (port >= hw->ptp.num_lports)
+ return -EINVAL;
+
+ err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr);
+ if (err)
+ return err;
+
+ return ice_read_phy_eth56g(hw, phy_idx, addr, val);
+}
+
+/**
+ * ice_write_ptp_reg_eth56g - Write a PHY port register
+ * @hw: pointer to the HW struct
+ * @port: Port number to be written
+ * @offset: Offset from PHY port register base
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_ptp_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ u32 val)
+{
+ return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_PTP);
+}
+
+/**
+ * ice_write_mac_reg_eth56g - Write a MAC PHY port register
+ * @hw: pointer to the HW struct
+ * @port: Port number to be written
+ * @offset: Offset from PHY port register base
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_mac_reg_eth56g(struct ice_hw *hw, u8 port, u32 offset,
+ u32 val)
+{
+ return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_MAC);
+}
+
+/**
+ * ice_write_xpcs_reg_eth56g - Write a PHY port register
+ * @hw: pointer to the HW struct
+ * @port: Port number to be written
+ * @offset: Offset from PHY port register base
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_xpcs_reg_eth56g(struct ice_hw *hw, u8 port, u32 offset,
+ u32 val)
+{
+ return ice_write_port_eth56g(hw, port, offset, val,
+ ETH56G_PHY_REG_XPCS);
+}
+
+/**
+ * ice_read_ptp_reg_eth56g - Read a PHY port register
+ * @hw: pointer to the HW struct
+ * @port: Port number to be read
+ * @offset: Offset from PHY port register base
+ * @val: Pointer to the value to read (out param)
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_ptp_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ u32 *val)
+{
+ return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_PTP);
+}
+
+/**
+ * ice_read_mac_reg_eth56g - Read a PHY port register
+ * @hw: pointer to the HW struct
+ * @port: Port number to be read
+ * @offset: Offset from PHY port register base
+ * @val: Pointer to the value to read (out param)
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_mac_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ u32 *val)
+{
+ return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_MAC);
+}
+
+/**
+ * ice_read_gpcs_reg_eth56g - Read a PHY port register
+ * @hw: pointer to the HW struct
+ * @port: Port number to be read
+ * @offset: Offset from PHY port register base
+ * @val: Pointer to the value to read (out param)
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_gpcs_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ u32 *val)
+{
+ return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_GPCS);
+}
+
+/**
+ * ice_read_port_mem_eth56g - Read a PHY port memory location
+ * @hw: pointer to the HW struct
+ * @port: Port number to be read
+ * @offset: Offset from PHY port register base
+ * @val: Pointer to the value to read (out param)
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ u32 *val)
+{
+ return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_MEM_PTP);
+}
+
+/**
+ * ice_write_port_mem_eth56g - Write a PHY port memory location
+ * @hw: pointer to the HW struct
+ * @port: Port number to be written
+ * @offset: Offset from PHY port register base
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ u32 val)
+{
+ return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_MEM_PTP);
+}
+
+/**
+ * ice_is_64b_phy_reg_eth56g - Check if this is a 64bit PHY register
+ * @low_addr: the low address to check
+ * @high_addr: on return, contains the high address of the 64bit register
+ *
+ * If so, the matching high register offset is written to @high_addr.
+ *
+ * Return: true if the provided low address is one of the known 64bit PHY values
+ * represented as two 32bit registers, false otherwise.
+ */
+static bool ice_is_64b_phy_reg_eth56g(u16 low_addr, u16 *high_addr)
+{
+ switch (low_addr) {
+ case PHY_REG_TX_TIMER_INC_PRE_L:
+ *high_addr = PHY_REG_TX_TIMER_INC_PRE_U;
+ return true;
+ case PHY_REG_RX_TIMER_INC_PRE_L:
+ *high_addr = PHY_REG_RX_TIMER_INC_PRE_U;
+ return true;
+ case PHY_REG_TX_CAPTURE_L:
+ *high_addr = PHY_REG_TX_CAPTURE_U;
+ return true;
+ case PHY_REG_RX_CAPTURE_L:
+ *high_addr = PHY_REG_RX_CAPTURE_U;
+ return true;
+ case PHY_REG_TOTAL_TX_OFFSET_L:
+ *high_addr = PHY_REG_TOTAL_TX_OFFSET_U;
+ return true;
+ case PHY_REG_TOTAL_RX_OFFSET_L:
+ *high_addr = PHY_REG_TOTAL_RX_OFFSET_U;
+ return true;
+ case PHY_REG_TX_MEMORY_STATUS_L:
+ *high_addr = PHY_REG_TX_MEMORY_STATUS_U;
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_is_40b_phy_reg_eth56g - Check if this is a 40bit PHY register
+ * @low_addr: the low address to check
+ * @high_addr: on return, contains the high address of the 40bit value
+ *
+ * If so, the matching high register offset is written to @high_addr.
+ *
+ * Return: true if the provided low address is one of the known 40bit PHY
+ * values split into two registers with the lower 8 bits in the low register and
+ * the upper 32 bits in the high register, false otherwise.
+ */
+static bool ice_is_40b_phy_reg_eth56g(u16 low_addr, u16 *high_addr)
+{
+ switch (low_addr) {
+ case PHY_REG_TIMETUS_L:
+ *high_addr = PHY_REG_TIMETUS_U;
+ return true;
+ case PHY_PCS_REF_TUS_L:
+ *high_addr = PHY_PCS_REF_TUS_U;
+ return true;
+ case PHY_PCS_REF_INC_L:
+ *high_addr = PHY_PCS_REF_INC_U;
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_read_64b_phy_reg_eth56g - Read a 64bit value from PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: on return, the contents of the 64bit value from the PHY registers
+ * @res_type: resource type
+ *
+ * Check if the caller has specified a known 64 bit register offset and read
+ * the two 32bit registers associated with the 64bit value, returning it in the
+ * val pointer.
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - not a 64 bit register
+ * * %other - failed to read from PHY
+ */
+static int ice_read_64b_phy_reg_eth56g(struct ice_hw *hw, u8 port, u16 low_addr,
+ u64 *val, enum eth56g_res_type res_type)
+{
+ u16 high_addr;
+ u32 lo, hi;
+ int err;
+
+ if (!ice_is_64b_phy_reg_eth56g(low_addr, &high_addr))
+ return -EINVAL;
+
+ err = ice_read_port_eth56g(hw, port, low_addr, &lo, res_type);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register %#08x\n, err %d",
+ low_addr, err);
+ return err;
+ }
+
+ err = ice_read_port_eth56g(hw, port, high_addr, &hi, res_type);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register %#08x\n, err %d",
+ high_addr, err);
+ return err;
+ }
+
+ *val = ((u64)hi << 32) | lo;
+
+ return 0;
+}
+
+/**
+ * ice_read_64b_ptp_reg_eth56g - Read a 64bit value from PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: on return, the contents of the 64bit value from the PHY registers
+ *
+ * Check if the caller has specified a known 64 bit register offset and read
+ * the two 32bit registers associated with the 64bit value, returning it in the
+ * val pointer.
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - not a 64 bit register
+ * * %other - failed to read from PHY
+ */
+static int ice_read_64b_ptp_reg_eth56g(struct ice_hw *hw, u8 port, u16 low_addr,
+ u64 *val)
+{
+ return ice_read_64b_phy_reg_eth56g(hw, port, low_addr, val,
+ ETH56G_PHY_REG_PTP);
+}
+
+/**
+ * ice_write_40b_phy_reg_eth56g - Write a 40b value to the PHY
+ * @hw: pointer to the HW struct
+ * @port: port to write to
+ * @low_addr: offset of the low register
+ * @val: 40b value to write
+ * @res_type: resource type
+ *
+ * Check if the caller has specified a known 40 bit register offset and write
+ * provided 40b value to the two associated registers by splitting it up into
+ * two chunks, the lower 8 bits and the upper 32 bits.
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - not a 40 bit register
+ * * %other - failed to write to PHY
+ */
+static int ice_write_40b_phy_reg_eth56g(struct ice_hw *hw, u8 port,
+ u16 low_addr, u64 val,
+ enum eth56g_res_type res_type)
+{
+ u16 high_addr;
+ u32 lo, hi;
+ int err;
+
+ if (!ice_is_40b_phy_reg_eth56g(low_addr, &high_addr))
+ return -EINVAL;
+
+ lo = FIELD_GET(P_REG_40B_LOW_M, val);
+ hi = (u32)(val >> P_REG_40B_HIGH_S);
+
+ err = ice_write_port_eth56g(hw, port, low_addr, lo, res_type);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
+ low_addr, err);
+ return err;
+ }
+
+ err = ice_write_port_eth56g(hw, port, high_addr, hi, res_type);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
+ high_addr, err);
+ return err;
+ }
+
+ return 0;
+}
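As a worked example of the 40-bit split performed above (lower 8 bits in the low register, upper 32 bits in the high one), assuming P_REG_40B_LOW_M covers bits 7:0 and P_REG_40B_HIGH_S is 8:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t val = 0x1122334455ULL;          /* a 40-bit value */
	uint32_t lo = (uint32_t)(val & 0xff);    /* low register: 0x55 */
	uint32_t hi = (uint32_t)(val >> 8);      /* high register: 0x11223344 */

	/* Recombining the two registers restores the original value */
	printf("0x%llx\n", (unsigned long long)(((uint64_t)hi << 8) | lo));
	return 0;
}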
+
+/**
+ * ice_write_40b_ptp_reg_eth56g - Write a 40b value to the PHY
+ * @hw: pointer to the HW struct
+ * @port: port to write to
+ * @low_addr: offset of the low register
+ * @val: 40b value to write
+ *
+ * Check if the caller has specified a known 40 bit register offset and write
+ * provided 40b value to the two associated registers by splitting it up into
+ * two chunks, the lower 8 bits and the upper 32 bits.
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - not a 40 bit register
+ * * %other - failed to write to PHY
+ */
+static int ice_write_40b_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u16 low_addr, u64 val)
+{
+ return ice_write_40b_phy_reg_eth56g(hw, port, low_addr, val,
+ ETH56G_PHY_REG_PTP);
+}
+
+/**
+ * ice_write_64b_phy_reg_eth56g - Write a 64bit value to PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: the contents of the 64bit value to write to PHY
+ * @res_type: resource type
+ *
+ * Check if the caller has specified a known 64 bit register offset and write
+ * the 64bit value to the two associated 32bit PHY registers.
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - not a 64 bit register
+ * * %other - failed to write to PHY
+ */
+static int ice_write_64b_phy_reg_eth56g(struct ice_hw *hw, u8 port,
+ u16 low_addr, u64 val,
+ enum eth56g_res_type res_type)
+{
+ u16 high_addr;
+ u32 lo, hi;
+ int err;
+
+ if (!ice_is_64b_phy_reg_eth56g(low_addr, &high_addr))
+ return -EINVAL;
+
+ lo = lower_32_bits(val);
+ hi = upper_32_bits(val);
+
+ err = ice_write_port_eth56g(hw, port, low_addr, lo, res_type);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
+ low_addr, err);
+ return err;
+ }
+
+ err = ice_write_port_eth56g(hw, port, high_addr, hi, res_type);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
+ high_addr, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_write_64b_ptp_reg_eth56g - Write a 64bit value to PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: the contents of the 64bit value to write to PHY
+ *
+ * Check if the caller has specified a known 64 bit register offset and write
+ * the 64bit value to the two associated 32bit PHY registers.
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - not a 64 bit register
+ * * %other - failed to write to PHY
+ */
+static int ice_write_64b_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u16 low_addr, u64 val)
+{
+ return ice_write_64b_phy_reg_eth56g(hw, port, low_addr, val,
+ ETH56G_PHY_REG_PTP);
+}
+
+/**
+ * ice_read_ptp_tstamp_eth56g - Read a PHY timestamp out of the port memory
+ * @hw: pointer to the HW struct
+ * @port: the port to read from
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the two associated entries in the
+ * port memory block of the internal PHYs of the 56G devices.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to read from PHY
+ */
+static int ice_read_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx,
+ u64 *tstamp)
+{
+ u16 lo_addr, hi_addr;
+ u32 lo, hi;
+ int err;
+
+ lo_addr = (u16)PHY_TSTAMP_L(idx);
+ hi_addr = (u16)PHY_TSTAMP_U(idx);
+
+ err = ice_read_port_mem_eth56g(hw, port, lo_addr, &lo);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ err = ice_read_port_mem_eth56g(hw, port, hi_addr, &hi);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ /* For 56G based internal PHYs, the timestamp is reported with the
+ * lower 8 bits in the low register, and the upper 32 bits in the high
+ * register.
+ */
+ *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
+
+ return 0;
+}
+
+/**
+ * ice_clear_ptp_tstamp_eth56g - Clear a timestamp from the PHY port block
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to read from
+ * @idx: the timestamp index to reset
+ *
+ * Read and then forcibly clear the timestamp index to ensure the valid bit is
+ * cleared and the timestamp status bit is reset in the PHY port memory of
+ * internal PHYs of the 56G devices.
+ *
+ * To directly clear the contents of the timestamp block entirely, discarding
+ * all timestamp data at once, software should instead use
+ * ice_ptp_reset_ts_memory_quad_eth56g().
+ *
+ * This function should only be called on an idx whose bit is set according to
+ * ice_get_phy_tx_tstamp_ready().
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_clear_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx)
+{
+ u64 unused_tstamp;
+ u16 lo_addr;
+ int err;
+
+ /* Read the timestamp register to ensure the timestamp status bit is
+ * cleared.
+ */
+ err = ice_read_ptp_tstamp_eth56g(hw, port, idx, &unused_tstamp);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read the PHY timestamp register for port %u, idx %u, err %d\n",
+ port, idx, err);
+ }
+
+ lo_addr = (u16)PHY_TSTAMP_L(idx);
+
+ err = ice_write_port_mem_eth56g(hw, port, lo_addr, 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register for port %u, idx %u, err %d\n",
+ port, idx, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_reset_ts_memory_eth56g - Clear all timestamps from the port block
+ * @hw: pointer to the HW struct
+ */
+static void ice_ptp_reset_ts_memory_eth56g(struct ice_hw *hw)
+{
+ unsigned int port;
+
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_MEMORY_STATUS_L,
+ 0);
+ ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_MEMORY_STATUS_U,
+ 0);
+ }
+}
+
+/**
+ * ice_ptp_prep_port_time_eth56g - Prepare one PHY port with initial time
+ * @hw: pointer to the HW struct
+ * @port: port number
+ * @time: time to initialize the PHY port clocks to
+ *
+ * Write a new initial time value into registers of a specific PHY port.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_ptp_prep_port_time_eth56g(struct ice_hw *hw, u8 port,
+ u64 time)
+{
+ int err;
+
+ /* Tx case */
+ err = ice_write_64b_ptp_reg_eth56g(hw, port, PHY_REG_TX_TIMER_INC_PRE_L,
+ time);
+ if (err)
+ return err;
+
+ /* Rx case */
+ return ice_write_64b_ptp_reg_eth56g(hw, port,
+ PHY_REG_RX_TIMER_INC_PRE_L, time);
+}
+
+/**
+ * ice_ptp_prep_phy_time_eth56g - Prepare PHY port with initial time
+ * @hw: pointer to the HW struct
+ * @time: Time to initialize the PHY port clocks to
+ *
+ * Program the PHY port registers with a new initial time value. The port
+ * clock will be initialized once the driver issues an ICE_PTP_INIT_TIME sync
+ * command. The time value is the upper 32 bits of the PHY timer, usually in
+ * units of nominal nanoseconds.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_ptp_prep_phy_time_eth56g(struct ice_hw *hw, u32 time)
+{
+ u64 phy_time;
+ u8 port;
+
+ /* The time represents the upper 32 bits of the PHY timer, so we need
+ * to shift to account for this when programming.
+ */
+ phy_time = (u64)time << 32;
+
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ err = ice_ptp_prep_port_time_eth56g(hw, port, phy_time);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_prep_port_adj_eth56g - Prepare a single port for time adjust
+ * @hw: pointer to HW struct
+ * @port: Port number to be programmed
+ * @time: time in cycles to adjust the port clocks
+ *
+ * Program the port for an atomic adjustment by writing the Tx and Rx timer
+ * registers. The atomic adjustment won't be completed until the driver issues
+ * an ICE_PTP_ADJ_TIME command.
+ *
+ * Note that time is not in units of nanoseconds. It is in clock time
+ * including the lower sub-nanosecond portion of the port timer.
+ *
+ * Negative adjustments are supported using 2s complement arithmetic.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_ptp_prep_port_adj_eth56g(struct ice_hw *hw, u8 port, s64 time)
+{
+ u32 l_time, u_time;
+ int err;
+
+ l_time = lower_32_bits(time);
+ u_time = upper_32_bits(time);
+
+ /* Tx case */
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_TIMER_INC_PRE_L,
+ l_time);
+ if (err)
+ goto exit_err;
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_TIMER_INC_PRE_U,
+ u_time);
+ if (err)
+ goto exit_err;
+
+ /* Rx case */
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_TIMER_INC_PRE_L,
+ l_time);
+ if (err)
+ goto exit_err;
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_TIMER_INC_PRE_U,
+ u_time);
+ if (err)
+ goto exit_err;
+
+ return 0;
+
+exit_err:
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n",
+ port, err);
+ return err;
+}
+
+/**
+ * ice_ptp_prep_phy_adj_eth56g - Prep PHY ports for a time adjustment
+ * @hw: pointer to HW struct
+ * @adj: adjustment in nanoseconds
+ *
+ * Prepare the PHY ports for an atomic time adjustment by programming the PHY
+ * Tx and Rx port registers. The actual adjustment is completed by issuing an
+ * ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_ptp_prep_phy_adj_eth56g(struct ice_hw *hw, s32 adj)
+{
+ s64 cycles;
+ u8 port;
+
+ /* The port clock supports adjustment of the sub-nanosecond portion of
+ * the clock (lowest 32 bits). We shift the provided adjustment in
+ * nanoseconds by 32 to calculate the appropriate adjustment to program
+ * into the PHY ports.
+ */
+ cycles = (s64)adj << 32;
+
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ err = ice_ptp_prep_port_adj_eth56g(hw, port, cycles);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
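The "<< 32" above places a whole-nanosecond adjustment in the upper half of the 64-bit port timer value, leaving the lower 32 sub-nanosecond bits at zero; negative adjustments simply wrap in 2's complement. A small sketch with an invented value (using a multiply, which is equivalent to the shift here but avoids shifting a negative number):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t adj_ns = -3;                              /* step back 3 ns */
	int64_t cycles = (int64_t)adj_ns * (1LL << 32);   /* sub-ns bits == 0 */
	uint32_t lo = (uint32_t)cycles;                   /* 0x00000000 */
	uint32_t hi = (uint32_t)((uint64_t)cycles >> 32); /* 0xfffffffd == -3 */

	printf("hi=0x%08x lo=0x%08x\n", hi, lo);
	return 0;
}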
+
+/**
+ * ice_ptp_prep_phy_incval_eth56g - Prepare PHY ports for time adjustment
+ * @hw: pointer to HW struct
+ * @incval: new increment value to prepare
+ *
+ * Prepare each of the PHY ports for a new increment value by programming the
+ * port's TIMETUS registers. The new increment value will be updated after
+ * issuing an ICE_PTP_INIT_INCVAL command.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_ptp_prep_phy_incval_eth56g(struct ice_hw *hw, u64 incval)
+{
+ u8 port;
+
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_REG_TIMETUS_L,
+ incval);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_read_port_capture_eth56g - Read a port's local time capture
+ * @hw: pointer to HW struct
+ * @port: Port number to read
+ * @tx_ts: on return, the Tx port time capture
+ * @rx_ts: on return, the Rx port time capture
+ *
+ * Read the port's Tx and Rx local time capture values.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to read from PHY
+ */
+static int ice_ptp_read_port_capture_eth56g(struct ice_hw *hw, u8 port,
+ u64 *tx_ts, u64 *rx_ts)
+{
+ int err;
+
+ /* Tx case */
+ err = ice_read_64b_ptp_reg_eth56g(hw, port, PHY_REG_TX_CAPTURE_L,
+ tx_ts);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n",
+ err);
+ return err;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "tx_init = %#016llx\n", *tx_ts);
+
+ /* Rx case */
+ err = ice_read_64b_ptp_reg_eth56g(hw, port, PHY_REG_RX_CAPTURE_L,
+ rx_ts);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n",
+ err);
+ return err;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "rx_init = %#016llx\n", *rx_ts);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_write_port_cmd_eth56g - Prepare a single PHY port for a timer command
+ * @hw: pointer to HW struct
+ * @port: Port to which cmd has to be sent
+ * @cmd: Command to be sent to the port
+ *
+ * Prepare the requested port for an upcoming timer sync command.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_ptp_write_port_cmd_eth56g(struct ice_hw *hw, u8 port,
+ enum ice_ptp_tmr_cmd cmd)
+{
+ u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd);
+ int err;
+
+ /* Tx case */
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_TMR_CMD, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
+ err);
+ return err;
+ }
+
+ /* Rx case */
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_TMR_CMD, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_phy_get_speed_eth56g - Get link speed based on PHY link type
+ * @li: pointer to link information struct
+ *
+ * Return: simplified ETH56G PHY speed
+ */
+static enum ice_eth56g_link_spd
+ice_phy_get_speed_eth56g(struct ice_link_status *li)
+{
+ u16 speed = ice_get_link_speed_based_on_phy_type(li->phy_type_low,
+ li->phy_type_high);
+
+ switch (speed) {
+ case ICE_AQ_LINK_SPEED_1000MB:
+ return ICE_ETH56G_LNK_SPD_1G;
+ case ICE_AQ_LINK_SPEED_2500MB:
+ return ICE_ETH56G_LNK_SPD_2_5G;
+ case ICE_AQ_LINK_SPEED_10GB:
+ return ICE_ETH56G_LNK_SPD_10G;
+ case ICE_AQ_LINK_SPEED_25GB:
+ return ICE_ETH56G_LNK_SPD_25G;
+ case ICE_AQ_LINK_SPEED_40GB:
+ return ICE_ETH56G_LNK_SPD_40G;
+ case ICE_AQ_LINK_SPEED_50GB:
+ switch (li->phy_type_low) {
+ case ICE_PHY_TYPE_LOW_50GBASE_SR:
+ case ICE_PHY_TYPE_LOW_50GBASE_FR:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
+ case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_AUI1:
+ return ICE_ETH56G_LNK_SPD_50G;
+ default:
+ return ICE_ETH56G_LNK_SPD_50G2;
+ }
+ case ICE_AQ_LINK_SPEED_100GB:
+ if (li->phy_type_high ||
+ li->phy_type_low == ICE_PHY_TYPE_LOW_100GBASE_SR2)
+ return ICE_ETH56G_LNK_SPD_100G2;
+ else
+ return ICE_ETH56G_LNK_SPD_100G;
+ default:
+ return ICE_ETH56G_LNK_SPD_1G;
+ }
+}
+
+/**
+ * ice_phy_cfg_parpcs_eth56g - Configure TUs per PAR/PCS clock cycle
+ * @hw: pointer to the HW struct
+ * @port: port to configure
+ *
+ * Configure the number of TUs for the PAR and PCS clocks used as part of the
+ * timestamp calibration process.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - PHY read/write failed
+ */
+static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
+{
+ u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
+ u32 val;
+ int err;
+
+ err = ice_write_xpcs_reg_eth56g(hw, port, PHY_VENDOR_TXLANE_THRESH,
+ ICE_ETH56G_NOMINAL_THRESH4);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read VENDOR_TXLANE_THRESH, status: %d",
+ err);
+ return err;
+ }
+
+ switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) {
+ case ICE_ETH56G_LNK_SPD_1G:
+ case ICE_ETH56G_LNK_SPD_2_5G:
+ err = ice_read_ptp_reg_eth56g(hw, port_blk,
+ PHY_GPCS_CONFIG_REG0, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read PHY_GPCS_CONFIG_REG0, status: %d",
+ err);
+ return err;
+ }
+
+ val &= ~PHY_GPCS_CONFIG_REG0_TX_THR_M;
+ val |= FIELD_PREP(PHY_GPCS_CONFIG_REG0_TX_THR_M,
+ ICE_ETH56G_NOMINAL_TX_THRESH);
+
+ err = ice_write_ptp_reg_eth56g(hw, port_blk,
+ PHY_GPCS_CONFIG_REG0, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_GPCS_CONFIG_REG0, status: %d",
+ err);
+ return err;
+ }
+ break;
+ default:
+ break;
+ }
+
+ err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_PCS_REF_TUS_L,
+ ICE_ETH56G_NOMINAL_PCS_REF_TUS);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_PCS_REF_TUS, status: %d",
+ err);
+ return err;
+ }
+
+ err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_PCS_REF_INC_L,
+ ICE_ETH56G_NOMINAL_PCS_REF_INC);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_PCS_REF_INC, status: %d",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_phy_cfg_ptp_1step_eth56g - Configure 1-step PTP settings
+ * @hw: Pointer to the HW struct
+ * @port: Port to configure
+ *
+ * Return:
+ * * %0 - success
+ * * %other - PHY read/write failed
+ */
+int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port)
+{
+ u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
+ u8 blk_port = port & (ICE_PORTS_PER_QUAD - 1);
+ bool enable, sfd_ena;
+ u32 val, peer_delay;
+ int err;
+
+ enable = hw->ptp.phy.eth56g.onestep_ena;
+ peer_delay = hw->ptp.phy.eth56g.peer_delay;
+ sfd_ena = hw->ptp.phy.eth56g.sfd_ena;
+
+ /* PHY_PTP_1STEP_CONFIG */
+ err = ice_read_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, &val);
+ if (err)
+ return err;
+
+ if (enable)
+ val |= blk_port;
+ else
+ val &= ~blk_port;
+
+ val &= ~(PHY_PTP_1STEP_T1S_UP64_M | PHY_PTP_1STEP_T1S_DELTA_M);
+
+ err = ice_write_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, val);
+ if (err)
+ return err;
+
+ /* PHY_PTP_1STEP_PEER_DELAY */
+ val = FIELD_PREP(PHY_PTP_1STEP_PD_DELAY_M, peer_delay);
+ if (peer_delay)
+ val |= PHY_PTP_1STEP_PD_ADD_PD_M;
+ val |= PHY_PTP_1STEP_PD_DLY_V_M;
+ err = ice_write_ptp_reg_eth56g(hw, port_blk,
+ PHY_PTP_1STEP_PEER_DELAY(blk_port), val);
+ if (err)
+ return err;
+
+ val &= ~PHY_PTP_1STEP_PD_DLY_V_M;
+ err = ice_write_ptp_reg_eth56g(hw, port_blk,
+ PHY_PTP_1STEP_PEER_DELAY(blk_port), val);
+ if (err)
+ return err;
+
+ /* PHY_MAC_XIF_MODE */
+ err = ice_read_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, &val);
+ if (err)
+ return err;
+
+ val &= ~(PHY_MAC_XIF_1STEP_ENA_M | PHY_MAC_XIF_TS_BIN_MODE_M |
+ PHY_MAC_XIF_TS_SFD_ENA_M | PHY_MAC_XIF_GMII_TS_SEL_M);
+
+ switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) {
+ case ICE_ETH56G_LNK_SPD_1G:
+ case ICE_ETH56G_LNK_SPD_2_5G:
+ val |= PHY_MAC_XIF_GMII_TS_SEL_M;
+ break;
+ default:
+ break;
+ }
+
+ val |= FIELD_PREP(PHY_MAC_XIF_1STEP_ENA_M, enable) |
+ FIELD_PREP(PHY_MAC_XIF_TS_BIN_MODE_M, enable) |
+ FIELD_PREP(PHY_MAC_XIF_TS_SFD_ENA_M, sfd_ena);
+
+ return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, val);
+}
+
+/**
+ * mul_u32_u32_fx_q9 - Multiply two u32 fixed point Q9 values
+ * @a: multiplier value
+ * @b: multiplicand value
+ *
+ * Return: result of multiplication
+ */
+static u32 mul_u32_u32_fx_q9(u32 a, u32 b)
+{
+ return (u32)(((u64)a * b) >> ICE_ETH56G_MAC_CFG_FRAC_W);
+}
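For reference, the Q9 fixed-point convention above means nine fractional bits, so 1.0 is 0x200 and a raw value like 0x633 used later for deskew is 1587/512, roughly 3.1. A minimal sketch mirroring the multiply (names and values here are illustrative):

#include <stdio.h>
#include <stdint.h>

#define FRAC_W 9 /* Q9: 1.0 == 1 << 9 == 0x200 */

static uint32_t mul_fx_q9(uint32_t a, uint32_t b)
{
	return (uint32_t)(((uint64_t)a * b) >> FRAC_W);
}

int main(void)
{
	uint32_t a = 0x500; /* 2.5 in Q9 */
	uint32_t b = 0x600; /* 3.0 in Q9 */

	printf("0x%x\n", mul_fx_q9(a, b)); /* 0xf00 == 7.5 in Q9 */
	return 0;
}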
+
+/**
+ * add_u32_u32_fx - Add two u32 fixed point values and discard overflow
+ * @a: first value
+ * @b: second value
+ *
+ * Return: result of addition
+ */
+static u32 add_u32_u32_fx(u32 a, u32 b)
+{
+ return lower_32_bits(((u64)a + b));
+}
+
+/**
+ * ice_ptp_calc_bitslip_eth56g - Calculate bitslip value
+ * @hw: pointer to the HW struct
+ * @port: port to configure
+ * @bs: bitslip multiplier
+ * @fc: FC-FEC enabled
+ * @rs: RS-FEC enabled
+ * @spd: link speed
+ *
+ * Return: calculated bitslip value
+ */
+static u32 ice_ptp_calc_bitslip_eth56g(struct ice_hw *hw, u8 port, u32 bs,
+ bool fc, bool rs,
+ enum ice_eth56g_link_spd spd)
+{
+ u8 port_offset = port & (ICE_PORTS_PER_QUAD - 1);
+ u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
+ u32 bitslip;
+ int err;
+
+ if (!bs || rs)
+ return 0;
+
+ if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G)
+ err = ice_read_gpcs_reg_eth56g(hw, port, PHY_GPCS_BITSLIP,
+ &bitslip);
+ else
+ err = ice_read_ptp_reg_eth56g(hw, port_blk,
+ PHY_REG_SD_BIT_SLIP(port_offset),
+ &bitslip);
+ if (err)
+ return 0;
+
+ if (spd == ICE_ETH56G_LNK_SPD_1G && !bitslip) {
+ /* Bitslip register value of 0 corresponds to 10 so substitute
+ * it for calculations
+ */
+ bitslip = 10;
+ } else if (spd == ICE_ETH56G_LNK_SPD_10G ||
+ spd == ICE_ETH56G_LNK_SPD_25G) {
+ if (fc)
+ bitslip = bitslip * 2 + 32;
+ else
+ bitslip = (u32)((s32)bitslip * -1 + 20);
+ }
+
+ bitslip <<= ICE_ETH56G_MAC_CFG_FRAC_W;
+ return mul_u32_u32_fx_q9(bitslip, bs);
+}
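With made-up register readings, the two 10G/25G branches above behave as follows before the final Q9 scaling by bs:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t bitslip = 7; /* example raw bitslip register value */

	/* FC-FEC enabled: doubled plus a fixed offset of 32 */
	uint32_t fc = bitslip * 2 + 32;                           /* 46 */

	/* No FEC: negated and offset by 20 (wraps as unsigned if > 20) */
	uint32_t no_fec = (uint32_t)((int32_t)bitslip * -1 + 20); /* 13 */

	printf("fc=%u no_fec=%u\n", fc, no_fec);
	return 0;
}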
+
+/**
+ * ice_ptp_calc_deskew_eth56g - Calculate deskew value
+ * @hw: pointer to the HW struct
+ * @port: port to configure
+ * @ds: deskew multiplier
+ * @rs: RS-FEC enabled
+ * @spd: link speed
+ *
+ * Return: calculated deskew value
+ */
+static u32 ice_ptp_calc_deskew_eth56g(struct ice_hw *hw, u8 port, u32 ds,
+ bool rs, enum ice_eth56g_link_spd spd)
+{
+ u32 deskew_i, deskew_f;
+ int err;
+
+ if (!ds)
+ return 0;
+
+ read_poll_timeout(ice_read_ptp_reg_eth56g, err,
+ FIELD_GET(PHY_REG_DESKEW_0_VALID, deskew_i), 500,
+ 50 * USEC_PER_MSEC, false, hw, port, PHY_REG_DESKEW_0,
+ &deskew_i);
+ if (err)
+ return err;
+
+ deskew_f = FIELD_GET(PHY_REG_DESKEW_0_RLEVEL_FRAC, deskew_i);
+ deskew_i = FIELD_GET(PHY_REG_DESKEW_0_RLEVEL, deskew_i);
+
+ if (rs && spd == ICE_ETH56G_LNK_SPD_50G2)
+ ds = 0x633; /* 3.1 */
+ else if (rs && spd == ICE_ETH56G_LNK_SPD_100G)
+ ds = 0x31b; /* 1.552 */
+
+ deskew_i = FIELD_PREP(ICE_ETH56G_MAC_CFG_RX_OFFSET_INT, deskew_i);
+ /* Shift 3 fractional bits to the end of the integer part */
+ deskew_f <<= ICE_ETH56G_MAC_CFG_FRAC_W - PHY_REG_DESKEW_0_RLEVEL_FRAC_W;
+ return mul_u32_u32_fx_q9(deskew_i | deskew_f, ds);
+}
+
+/**
+ * ice_phy_set_offsets_eth56g - Set Tx/Rx offset values
+ * @hw: pointer to the HW struct
+ * @port: port to configure
+ * @spd: link speed
+ * @cfg: structure to store output values
+ * @fc: FC-FEC enabled
+ * @rs: RS-FEC enabled
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_phy_set_offsets_eth56g(struct ice_hw *hw, u8 port,
+ enum ice_eth56g_link_spd spd,
+ const struct ice_eth56g_mac_reg_cfg *cfg,
+ bool fc, bool rs)
+{
+ u32 rx_offset, tx_offset, bs_ds;
+ bool onestep, sfd;
+
+ onestep = hw->ptp.phy.eth56g.onestep_ena;
+ sfd = hw->ptp.phy.eth56g.sfd_ena;
+ bs_ds = cfg->rx_offset.bs_ds;
+
+ if (fc)
+ rx_offset = cfg->rx_offset.fc;
+ else if (rs)
+ rx_offset = cfg->rx_offset.rs;
+ else
+ rx_offset = cfg->rx_offset.no_fec;
+
+ rx_offset = add_u32_u32_fx(rx_offset, cfg->rx_offset.serdes);
+ if (sfd)
+ rx_offset = add_u32_u32_fx(rx_offset, cfg->rx_offset.sfd);
+
+ if (spd < ICE_ETH56G_LNK_SPD_40G)
+ bs_ds = ice_ptp_calc_bitslip_eth56g(hw, port, bs_ds, fc, rs,
+ spd);
+ else
+ bs_ds = ice_ptp_calc_deskew_eth56g(hw, port, bs_ds, rs, spd);
+ rx_offset = add_u32_u32_fx(rx_offset, bs_ds);
+ rx_offset &= ICE_ETH56G_MAC_CFG_RX_OFFSET_INT |
+ ICE_ETH56G_MAC_CFG_RX_OFFSET_FRAC;
+
+ if (fc)
+ tx_offset = cfg->tx_offset.fc;
+ else if (rs)
+ tx_offset = cfg->tx_offset.rs;
+ else
+ tx_offset = cfg->tx_offset.no_fec;
+ tx_offset += cfg->tx_offset.serdes + cfg->tx_offset.sfd * sfd +
+ cfg->tx_offset.onestep * onestep;
+
+ ice_write_mac_reg_eth56g(hw, port, PHY_MAC_RX_OFFSET, rx_offset);
+ return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_TX_OFFSET, tx_offset);
+}
+
+/**
+ * ice_phy_cfg_mac_eth56g - Configure MAC for PTP
+ * @hw: Pointer to the HW struct
+ * @port: Port to configure
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_phy_cfg_mac_eth56g(struct ice_hw *hw, u8 port)
+{
+ const struct ice_eth56g_mac_reg_cfg *cfg;
+ enum ice_eth56g_link_spd spd;
+ struct ice_link_status *li;
+ bool fc = false;
+ bool rs = false;
+ bool onestep;
+ u32 val;
+ int err;
+
+ onestep = hw->ptp.phy.eth56g.onestep_ena;
+ li = &hw->port_info->phy.link_info;
+ spd = ice_phy_get_speed_eth56g(li);
+ if (!!(li->an_info & ICE_AQ_FEC_EN)) {
+ if (spd == ICE_ETH56G_LNK_SPD_10G) {
+ fc = true;
+ } else {
+ fc = !!(li->fec_info & ICE_AQ_LINK_25G_KR_FEC_EN);
+ rs = !!(li->fec_info & ~ICE_AQ_LINK_25G_KR_FEC_EN);
+ }
+ }
+ cfg = &eth56g_mac_cfg[spd];
+
+ err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_RX_MODULO, 0);
+ if (err)
+ return err;
+
+ err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_TX_MODULO, 0);
+ if (err)
+ return err;
+
+ val = FIELD_PREP(PHY_MAC_TSU_CFG_TX_MODE_M,
+ cfg->tx_mode.def + rs * cfg->tx_mode.rs) |
+ FIELD_PREP(PHY_MAC_TSU_CFG_TX_MII_MK_DLY_M, cfg->tx_mk_dly) |
+ FIELD_PREP(PHY_MAC_TSU_CFG_TX_MII_CW_DLY_M,
+ cfg->tx_cw_dly.def +
+ onestep * cfg->tx_cw_dly.onestep) |
+ FIELD_PREP(PHY_MAC_TSU_CFG_RX_MODE_M,
+ cfg->rx_mode.def + rs * cfg->rx_mode.rs) |
+ FIELD_PREP(PHY_MAC_TSU_CFG_RX_MII_MK_DLY_M,
+ cfg->rx_mk_dly.def + rs * cfg->rx_mk_dly.rs) |
+ FIELD_PREP(PHY_MAC_TSU_CFG_RX_MII_CW_DLY_M,
+ cfg->rx_cw_dly.def + rs * cfg->rx_cw_dly.rs) |
+ FIELD_PREP(PHY_MAC_TSU_CFG_BLKS_PER_CLK_M, cfg->blks_per_clk);
+ err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_TSU_CONFIG, val);
+ if (err)
+ return err;
+
+ err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_BLOCKTIME,
+ cfg->blktime);
+ if (err)
+ return err;
+
+ err = ice_phy_set_offsets_eth56g(hw, port, spd, cfg, fc, rs);
+ if (err)
+ return err;
+
+ if (spd == ICE_ETH56G_LNK_SPD_25G && !rs)
+ val = 0;
+ else
+ val = cfg->mktime;
+
+ return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_MARKERTIME, val);
+}
+
+/**
+ * ice_phy_cfg_intr_eth56g - Configure TX timestamp interrupt
+ * @hw: pointer to the HW struct
+ * @port: the timestamp port
+ * @ena: enable or disable interrupt
+ * @threshold: interrupt threshold
+ *
+ * Configure TX timestamp interrupt for the specified port
+ *
+ * Return:
+ * * %0 - success
+ * * %other - PHY read/write failed
+ */
+int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold)
+{
+ int err;
+ u32 val;
+
+ err = ice_read_ptp_reg_eth56g(hw, port, PHY_REG_TS_INT_CONFIG, &val);
+ if (err)
+ return err;
+
+ if (ena) {
+ val |= PHY_TS_INT_CONFIG_ENA_M;
+ val &= ~PHY_TS_INT_CONFIG_THRESHOLD_M;
+ val |= FIELD_PREP(PHY_TS_INT_CONFIG_THRESHOLD_M, threshold);
+ } else {
+ val &= ~PHY_TS_INT_CONFIG_ENA_M;
+ }
+
+ return ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TS_INT_CONFIG, val);
+}
+
+/**
+ * ice_read_phy_and_phc_time_eth56g - Simultaneously capture PHC and PHY time
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to read
+ * @phy_time: on return, the 64bit PHY timer value
+ * @phc_time: on return, the lower 64bits of PHC time
+ *
+ * Issue an ICE_PTP_READ_TIME timer command to simultaneously capture the PHY
+ * and PHC timer values.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - PHY read/write failed
+ */
+static int ice_read_phy_and_phc_time_eth56g(struct ice_hw *hw, u8 port,
+ u64 *phy_time, u64 *phc_time)
+{
+ u64 tx_time, rx_time;
+ u32 zo, lo;
+ u8 tmr_idx;
+ int err;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ /* Prepare the PHC timer for an ICE_PTP_READ_TIME capture command */
+ ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
+
+ /* Prepare the PHY timer for an ICE_PTP_READ_TIME capture command */
+ err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_READ_TIME);
+ if (err)
+ return err;
+
+ /* Issue the sync to start the ICE_PTP_READ_TIME capture */
+ ice_ptp_exec_tmr_cmd(hw);
+
+ /* Read the captured PHC time from the shadow time registers */
+ zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
+ lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
+ *phc_time = (u64)lo << 32 | zo;
+
+ /* Read the captured PHY time from the PHY shadow registers */
+ err = ice_ptp_read_port_capture_eth56g(hw, port, &tx_time, &rx_time);
+ if (err)
+ return err;
+
+ /* If the PHY Tx and Rx timers don't match, log a warning message.
+ * Note that this should not happen in normal circumstances since the
+ * driver always programs them together.
+ */
+ if (tx_time != rx_time)
+ dev_warn(ice_hw_to_dev(hw), "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n",
+ port, tx_time, rx_time);
+
+ *phy_time = tx_time;
+
+ return 0;
+}
+
+/**
+ * ice_sync_phy_timer_eth56g - Synchronize the PHY timer with PHC timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to synchronize
+ *
+ * Perform an adjustment to ensure that the PHY and PHC timers are in sync.
+ * This is done by issuing an ICE_PTP_READ_TIME command, which triggers a
+ * simultaneous read of the PHY timer and PHC timer. Then we use the
+ * difference to calculate an appropriate 2s complement addition to add
+ * to the PHY timer in order to ensure it reads the same value as the
+ * primary PHC timer.
+ *
+ * Return:
+ * * %0 - success
+ * * %-EBUSY - failed to acquire PTP semaphore
+ * * %other - PHY read/write failed
+ */
+static int ice_sync_phy_timer_eth56g(struct ice_hw *hw, u8 port)
+{
+ u64 phc_time, phy_time, difference;
+ int err;
+
+ if (!ice_ptp_lock(hw)) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n");
+ return -EBUSY;
+ }
+
+ err = ice_read_phy_and_phc_time_eth56g(hw, port, &phy_time, &phc_time);
+ if (err)
+ goto err_unlock;
+
+ /* Calculate the amount required to add to the port time in order for
+ * it to match the PHC time.
+ *
+ * Note that the port adjustment is done using 2s complement
+ * arithmetic. This is convenient since it means that we can simply
+ * calculate the difference between the PHC time and the port time,
+ * and it will be interpreted correctly.
+ */
+
+ ice_ptp_src_cmd(hw, ICE_PTP_NOP);
+ difference = phc_time - phy_time;
+
+ err = ice_ptp_prep_port_adj_eth56g(hw, port, (s64)difference);
+ if (err)
+ goto err_unlock;
+
+ err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_ADJ_TIME);
+ if (err)
+ goto err_unlock;
+
+ /* Issue the sync to activate the time adjustment */
+ ice_ptp_exec_tmr_cmd(hw);
+
+ /* Re-capture the timer values to flush the command registers and
+ * verify that the time was properly adjusted.
+ */
+ err = ice_read_phy_and_phc_time_eth56g(hw, port, &phy_time, &phc_time);
+ if (err)
+ goto err_unlock;
+
+ dev_info(ice_hw_to_dev(hw),
+ "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n",
+ port, phy_time, phc_time);
+
+err_unlock:
+ ice_ptp_unlock(hw);
+ return err;
+}
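The 2's complement property relied on above can be checked with small numbers: the unsigned difference phc_time - phy_time wraps so that adding it back to the port timer always lands on the PHC value, whether the port is behind or ahead.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t phc_time = 1000;
	uint64_t phy_time = 1003;                  /* port timer 3 ticks ahead */
	uint64_t difference = phc_time - phy_time; /* wraps to 2^64 - 3 */

	/* Adding the wrapped difference is the same as subtracting 3 */
	printf("%llu\n", (unsigned long long)(phy_time + difference)); /* 1000 */
	return 0;
}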
+
+/**
+ * ice_stop_phy_timer_eth56g - Stop the PHY clock timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to stop
+ * @soft_reset: if true, hold the SOFT_RESET bit of PHY_REG_PS
+ *
+ * Stop the clock of a PHY port. This must be done as part of the flow to
+ * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
+ * initialized or when link speed changes.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset)
+{
+ int err;
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_OFFSET_READY, 0);
+ if (err)
+ return err;
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_OFFSET_READY, 0);
+ if (err)
+ return err;
+
+ ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port);
+
+ return 0;
+}
+
+/**
+ * ice_start_phy_timer_eth56g - Start the PHY clock timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to start
+ *
+ * Start the clock of a PHY port. This must be done as part of the flow to
+ * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
+ * initialized or when link speed changes.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - PHY read/write failed
+ */
+int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port)
+{
+ u32 lo, hi;
+ u64 incval;
+ u8 tmr_idx;
+ int err;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ err = ice_stop_phy_timer_eth56g(hw, port, false);
+ if (err)
+ return err;
+
+ ice_ptp_src_cmd(hw, ICE_PTP_NOP);
+
+ err = ice_phy_cfg_parpcs_eth56g(hw, port);
+ if (err)
+ return err;
+
+ err = ice_phy_cfg_ptp_1step_eth56g(hw, port);
+ if (err)
+ return err;
+
+ err = ice_phy_cfg_mac_eth56g(hw, port);
+ if (err)
+ return err;
+
+ lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
+ hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
+ incval = (u64)hi << 32 | lo;
+
+ err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_REG_TIMETUS_L, incval);
+ if (err)
+ return err;
+
+ err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL);
+ if (err)
+ return err;
+
+ ice_ptp_exec_tmr_cmd(hw);
+
+ err = ice_sync_phy_timer_eth56g(hw, port);
+ if (err)
+ return err;
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_OFFSET_READY, 1);
+ if (err)
+ return err;
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_OFFSET_READY, 1);
+ if (err)
+ return err;
+
+ ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port);
+
+ return 0;
+}
+
+/**
+ * ice_sb_access_ena_eth56g - Enable SB devices (PHY and others) access
+ * @hw: pointer to HW struct
+ * @enable: Enable or disable access
+ *
+ * Enable sideband devices (PHY and others) access.
+ */
+static void ice_sb_access_ena_eth56g(struct ice_hw *hw, bool enable)
+{
+ u32 val = rd32(hw, PF_SB_REM_DEV_CTL);
+
+ if (enable)
+ val |= BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1);
+ else
+ val &= ~(BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1));
+
+ wr32(hw, PF_SB_REM_DEV_CTL, val);
+}
+
+/**
+ * ice_ptp_init_phc_eth56g - Perform E82X specific PHC initialization
+ * @hw: pointer to HW struct
+ *
+ * Perform PHC initialization steps specific to E82X devices.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to initialize CGU
+ */
+static int ice_ptp_init_phc_eth56g(struct ice_hw *hw)
+{
+ ice_sb_access_ena_eth56g(hw, true);
+ /* Initialize the Clock Generation Unit */
+ return ice_init_cgu_e82x(hw);
+}
+
+/**
+ * ice_ptp_read_tx_hwtstamp_status_eth56g - Get TX timestamp status
+ * @hw: pointer to the HW struct
+ * @ts_status: the timestamp mask pointer
+ *
+ * Read the PHY Tx timestamp status mask indicating which ports have Tx
+ * timestamps available.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to read from PHY
+ */
+int ice_ptp_read_tx_hwtstamp_status_eth56g(struct ice_hw *hw, u32 *ts_status)
+{
+ const struct ice_eth56g_params *params = &hw->ptp.phy.eth56g;
+ u8 phy, mask;
+ u32 status;
+
+ mask = (1 << hw->ptp.ports_per_phy) - 1;
+ *ts_status = 0;
+
+ for (phy = 0; phy < params->num_phys; phy++) {
+ int err;
+
+ err = ice_read_phy_eth56g(hw, phy, PHY_PTP_INT_STATUS, &status);
+ if (err)
+ return err;
+
+ *ts_status |= (status & mask) << (phy * hw->ptp.ports_per_phy);
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "PHY interrupt err: %x\n", *ts_status);
+
+ return 0;
+}
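The status word filled in above packs one ready bit per logical port, with each PHY contributing ports_per_phy contiguous bits (bits 0-3 for PHY 0 and bits 4-7 for PHY 1 on E825C). A hedged sketch of a caller walking that mask; the helper name is illustrative only and not part of the patch:

/* Illustration only: iterate the combined Tx timestamp status mask. */
static void ice_example_walk_tx_status(struct ice_hw *hw)
{
	unsigned long ts_status;
	unsigned int port;
	u32 status;

	if (ice_ptp_read_tx_hwtstamp_status_eth56g(hw, &status))
		return;

	ts_status = status;
	for_each_set_bit(port, &ts_status, hw->ptp.num_lports)
		ice_debug(hw, ICE_DBG_PTP, "Tx timestamp ready on port %u\n",
			  port);
}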
+
+/**
+ * ice_get_phy_tx_tstamp_ready_eth56g - Read the Tx memory status register
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to read from
+ * @tstamp_ready: contents of the Tx memory status register
+ *
+ * Read the PHY_REG_TX_MEMORY_STATUS register indicating which timestamps in
+ * the PHY are ready. A set bit means the corresponding timestamp is valid and
+ * ready to be captured from the PHY timestamp block.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to read from PHY
+ */
+static int ice_get_phy_tx_tstamp_ready_eth56g(struct ice_hw *hw, u8 port,
+ u64 *tstamp_ready)
+{
+ int err;
+
+ err = ice_read_64b_ptp_reg_eth56g(hw, port, PHY_REG_TX_MEMORY_STATUS_L,
+ tstamp_ready);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_is_muxed_topo - detect breakout 2x50G topology for E825C
+ * @hw: pointer to the HW struct
+ *
+ * Return: true if it's 2x50 breakout topology, false otherwise
+ */
+static bool ice_is_muxed_topo(struct ice_hw *hw)
+{
+ u8 link_topo;
+ bool mux;
+ u32 val;
+
+ val = rd32(hw, GLGEN_SWITCH_MODE_CONFIG);
+ mux = FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, val);
+ val = rd32(hw, GLGEN_MAC_LINK_TOPO);
+ link_topo = FIELD_GET(GLGEN_MAC_LINK_TOPO_LINK_TOPO_M, val);
+
+ return (mux && link_topo == ICE_LINK_TOPO_UP_TO_2_LINKS);
+}
+
+/**
+ * ice_ptp_init_phy_e825c - initialize PHY parameters
+ * @hw: pointer to the HW struct
+ */
+static void ice_ptp_init_phy_e825c(struct ice_hw *hw)
+{
+ struct ice_ptp_hw *ptp = &hw->ptp;
+ struct ice_eth56g_params *params;
+ u8 phy;
+
+ ptp->phy_model = ICE_PHY_ETH56G;
+ params = &ptp->phy.eth56g;
+ params->onestep_ena = false;
+ params->peer_delay = 0;
+ params->sfd_ena = false;
+ params->phy_addr[0] = eth56g_phy_0;
+ params->phy_addr[1] = eth56g_phy_1;
+ params->num_phys = 2;
+ ptp->ports_per_phy = 4;
+ ptp->num_lports = params->num_phys * ptp->ports_per_phy;
+
+ ice_sb_access_ena_eth56g(hw, true);
+ for (phy = 0; phy < params->num_phys; phy++) {
+ u32 phy_rev;
+ int err;
+
+ err = ice_read_phy_eth56g(hw, phy, PHY_REG_REVISION, &phy_rev);
+ if (err || phy_rev != PHY_REVISION_ETH56G) {
+ ptp->phy_model = ICE_PHY_UNSUP;
+ return;
+ }
+ }
+
+ ptp->is_2x50g_muxed_topo = ice_is_muxed_topo(hw);
+}
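Given the values assigned above (two PHYs with four ports each), a global logical port number decomposes into a PHY index and a lane the same way ice_fill_phy_msg_e82x() does below; a worked example, illustrative only:

/* Worked example: logical port 6 on E825C maps to PHY 1, lane 2, because
 * ports_per_phy is 4.
 */
static inline void ice_example_port_split(struct ice_hw *hw, u8 port)
{
	u8 phy = port / hw->ptp.ports_per_phy;	/* 6 / 4 = 1 */
	u8 lane = port % hw->ptp.ports_per_phy;	/* 6 % 4 = 2 */

	ice_debug(hw, ICE_DBG_PTP, "port %u -> phy %u lane %u\n",
		  port, phy, lane);
}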
+
/* E822 family functions
*
* The following functions operate on the E822 family of devices.
@@ -288,18 +2707,21 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
/**
* ice_fill_phy_msg_e82x - Fill message data for a PHY register access
+ * @hw: pointer to the HW struct
* @msg: the PHY message buffer to fill in
* @port: the port to access
* @offset: the register offset
*/
-static void
-ice_fill_phy_msg_e82x(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
+static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
+ struct ice_sbq_msg_input *msg, u8 port,
+ u16 offset)
{
int phy_port, phy, quadtype;
- phy_port = port % ICE_PORTS_PER_PHY_E82X;
- phy = port / ICE_PORTS_PER_PHY_E82X;
- quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E82X;
+ phy_port = port % hw->ptp.ports_per_phy;
+ phy = port / hw->ptp.ports_per_phy;
+ quadtype = ICE_GET_QUAD_NUM(port) %
+ ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy);
if (quadtype == 0) {
msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
@@ -430,10 +2852,10 @@ ice_read_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
struct ice_sbq_msg_input msg = {0};
int err;
- ice_fill_phy_msg_e82x(&msg, port, offset);
+ ice_fill_phy_msg_e82x(hw, &msg, port, offset);
msg.opcode = ice_sbq_msg_rd;
- err = ice_sbq_rw_reg(hw, &msg);
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -507,11 +2929,11 @@ ice_write_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 val)
struct ice_sbq_msg_input msg = {0};
int err;
- ice_fill_phy_msg_e82x(&msg, port, offset);
+ ice_fill_phy_msg_e82x(hw, &msg, port, offset);
msg.opcode = ice_sbq_msg_wr;
msg.data = val;
- err = ice_sbq_rw_reg(hw, &msg);
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -546,8 +2968,7 @@ ice_write_40b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
low_addr);
return -EINVAL;
}
-
- low = (u32)(val & P_REG_40B_LOW_M);
+ low = FIELD_GET(P_REG_40B_LOW_M, val);
high = (u32)(val >> P_REG_40B_HIGH_S);
err = ice_write_phy_reg_e82x(hw, port, low_addr, low);
@@ -617,24 +3038,30 @@ ice_write_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
/**
* ice_fill_quad_msg_e82x - Fill message data for quad register access
+ * @hw: pointer to the HW struct
* @msg: the PHY message buffer to fill in
* @quad: the quad to access
* @offset: the register offset
*
* Fill a message buffer for accessing a register in a quad shared between
* multiple PHYs.
+ *
+ * Return:
+ * * %0 - OK
+ * * %-EINVAL - invalid quad number
*/
-static int
-ice_fill_quad_msg_e82x(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
+static int ice_fill_quad_msg_e82x(struct ice_hw *hw,
+ struct ice_sbq_msg_input *msg, u8 quad,
+ u16 offset)
{
u32 addr;
- if (quad >= ICE_MAX_QUAD)
+ if (quad >= ICE_GET_QUAD_NUM(hw->ptp.num_lports))
return -EINVAL;
msg->dest_dev = rmn_0;
- if ((quad % ICE_QUADS_PER_PHY_E82X) == 0)
+ if (!(quad % ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy)))
addr = Q_0_BASE + offset;
else
addr = Q_1_BASE + offset;
@@ -661,13 +3088,13 @@ ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
struct ice_sbq_msg_input msg = {0};
int err;
- err = ice_fill_quad_msg_e82x(&msg, quad, offset);
+ err = ice_fill_quad_msg_e82x(hw, &msg, quad, offset);
if (err)
return err;
msg.opcode = ice_sbq_msg_rd;
- err = ice_sbq_rw_reg(hw, &msg);
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -695,14 +3122,14 @@ ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
struct ice_sbq_msg_input msg = {0};
int err;
- err = ice_fill_quad_msg_e82x(&msg, quad, offset);
+ err = ice_fill_quad_msg_e82x(hw, &msg, quad, offset);
if (err)
return err;
msg.opcode = ice_sbq_msg_wr;
msg.data = val;
- err = ice_sbq_rw_reg(hw, &msg);
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -751,7 +3178,7 @@ ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
* lower 8 bits in the low register, and the upper 32 bits in the high
* register.
*/
- *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
+ *tstamp = FIELD_PREP(TS_PHY_HIGH_M, hi) | FIELD_PREP(TS_PHY_LOW_M, lo);
return 0;
}
@@ -816,294 +3243,11 @@ static void ice_ptp_reset_ts_memory_e82x(struct ice_hw *hw)
{
unsigned int quad;
- for (quad = 0; quad < ICE_MAX_QUAD; quad++)
+ for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports); quad++)
ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
}
/**
- * ice_read_cgu_reg_e82x - Read a CGU register
- * @hw: pointer to the HW struct
- * @addr: Register address to read
- * @val: storage for register value read
- *
- * Read the contents of a register of the Clock Generation Unit. Only
- * applicable to E822 devices.
- */
-static int
-ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val)
-{
- struct ice_sbq_msg_input cgu_msg;
- int err;
-
- cgu_msg.opcode = ice_sbq_msg_rd;
- cgu_msg.dest_dev = cgu;
- cgu_msg.msg_addr_low = addr;
- cgu_msg.msg_addr_high = 0x0;
-
- err = ice_sbq_rw_reg(hw, &cgu_msg);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
- addr, err);
- return err;
- }
-
- *val = cgu_msg.data;
-
- return err;
-}
-
-/**
- * ice_write_cgu_reg_e82x - Write a CGU register
- * @hw: pointer to the HW struct
- * @addr: Register address to write
- * @val: value to write into the register
- *
- * Write the specified value to a register of the Clock Generation Unit. Only
- * applicable to E822 devices.
- */
-static int
-ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val)
-{
- struct ice_sbq_msg_input cgu_msg;
- int err;
-
- cgu_msg.opcode = ice_sbq_msg_wr;
- cgu_msg.dest_dev = cgu;
- cgu_msg.msg_addr_low = addr;
- cgu_msg.msg_addr_high = 0x0;
- cgu_msg.data = val;
-
- err = ice_sbq_rw_reg(hw, &cgu_msg);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
- addr, err);
- return err;
- }
-
- return err;
-}
-
-/**
- * ice_clk_freq_str - Convert time_ref_freq to string
- * @clk_freq: Clock frequency
- *
- * Convert the specified TIME_REF clock frequency to a string.
- */
-static const char *ice_clk_freq_str(u8 clk_freq)
-{
- switch ((enum ice_time_ref_freq)clk_freq) {
- case ICE_TIME_REF_FREQ_25_000:
- return "25 MHz";
- case ICE_TIME_REF_FREQ_122_880:
- return "122.88 MHz";
- case ICE_TIME_REF_FREQ_125_000:
- return "125 MHz";
- case ICE_TIME_REF_FREQ_153_600:
- return "153.6 MHz";
- case ICE_TIME_REF_FREQ_156_250:
- return "156.25 MHz";
- case ICE_TIME_REF_FREQ_245_760:
- return "245.76 MHz";
- default:
- return "Unknown";
- }
-}
-
-/**
- * ice_clk_src_str - Convert time_ref_src to string
- * @clk_src: Clock source
- *
- * Convert the specified clock source to its string name.
- */
-static const char *ice_clk_src_str(u8 clk_src)
-{
- switch ((enum ice_clk_src)clk_src) {
- case ICE_CLK_SRC_TCX0:
- return "TCX0";
- case ICE_CLK_SRC_TIME_REF:
- return "TIME_REF";
- default:
- return "Unknown";
- }
-}
-
-/**
- * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit
- * @hw: pointer to the HW struct
- * @clk_freq: Clock frequency to program
- * @clk_src: Clock source to select (TIME_REF, or TCX0)
- *
- * Configure the Clock Generation Unit with the desired clock frequency and
- * time reference, enabling the PLL which drives the PTP hardware clock.
- */
-static int
-ice_cfg_cgu_pll_e82x(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
- enum ice_clk_src clk_src)
-{
- union tspll_ro_bwm_lf bwm_lf;
- union nac_cgu_dword19 dw19;
- union nac_cgu_dword22 dw22;
- union nac_cgu_dword24 dw24;
- union nac_cgu_dword9 dw9;
- int err;
-
- if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
- dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
- clk_freq);
- return -EINVAL;
- }
-
- if (clk_src >= NUM_ICE_CLK_SRC) {
- dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
- clk_src);
- return -EINVAL;
- }
-
- if (clk_src == ICE_CLK_SRC_TCX0 &&
- clk_freq != ICE_TIME_REF_FREQ_25_000) {
- dev_warn(ice_hw_to_dev(hw),
- "TCX0 only supports 25 MHz frequency\n");
- return -EINVAL;
- }
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
- if (err)
- return err;
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.field.ts_pll_enable ? "enabled" : "disabled",
- ice_clk_src_str(dw24.field.time_ref_sel),
- ice_clk_freq_str(dw9.field.time_ref_freq_sel),
- bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
-
- /* Disable the PLL before changing the clock source or frequency */
- if (dw24.field.ts_pll_enable) {
- dw24.field.ts_pll_enable = 0;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
- }
-
- /* Set the frequency */
- dw9.field.time_ref_freq_sel = clk_freq;
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
- if (err)
- return err;
-
- /* Configure the TS PLL feedback divisor */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
- if (err)
- return err;
-
- dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
- dw19.field.tspll_ndivratio = 1;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
- if (err)
- return err;
-
- /* Configure the TS PLL post divisor */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
- if (err)
- return err;
-
- dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
- dw22.field.time1588clk_sel_div2 = 0;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
- if (err)
- return err;
-
- /* Configure the TS PLL pre divisor and clock source */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
- if (err)
- return err;
-
- dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
- dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
- dw24.field.time_ref_sel = clk_src;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
-
- /* Finally, enable the PLL */
- dw24.field.ts_pll_enable = 1;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
-
- /* Wait to verify if the PLL locks */
- usleep_range(1000, 5000);
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
- if (err)
- return err;
-
- if (!bwm_lf.field.plllock_true_lock_cri) {
- dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
- return -EBUSY;
- }
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.field.ts_pll_enable ? "enabled" : "disabled",
- ice_clk_src_str(dw24.field.time_ref_sel),
- ice_clk_freq_str(dw9.field.time_ref_freq_sel),
- bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
-
- return 0;
-}
-
-/**
- * ice_init_cgu_e82x - Initialize CGU with settings from firmware
- * @hw: pointer to the HW structure
- *
- * Initialize the Clock Generation Unit of the E822 device.
- */
-static int ice_init_cgu_e82x(struct ice_hw *hw)
-{
- struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
- union tspll_cntr_bist_settings cntr_bist;
- int err;
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
- &cntr_bist.val);
- if (err)
- return err;
-
- /* Disable sticky lock detection so lock err reported is accurate */
- cntr_bist.field.i_plllock_sel_0 = 0;
- cntr_bist.field.i_plllock_sel_1 = 0;
-
- err = ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
- cntr_bist.val);
- if (err)
- return err;
-
- /* Configure the CGU PLL using the parameters from the function
- * capabilities.
- */
- err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref,
- (enum ice_clk_src)ts_info->clk_src);
- if (err)
- return err;
-
- return 0;
-}
-
-/**
* ice_ptp_set_vernier_wl - Set the window length for vernier calibration
* @hw: pointer to the HW struct
*
@@ -1113,7 +3257,7 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
{
u8 port;
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ for (port = 0; port < hw->ptp.num_lports; port++) {
int err;
err = ice_write_phy_reg_e82x(hw, port, P_REG_WL,
@@ -1137,15 +3281,14 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
static int ice_ptp_init_phc_e82x(struct ice_hw *hw)
{
int err;
- u32 regval;
+ u32 val;
/* Enable reading switch and PHY registers over the sideband queue */
#define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1)
#define PF_SB_REM_DEV_CTL_PHY0 BIT(2)
- regval = rd32(hw, PF_SB_REM_DEV_CTL);
- regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ |
- PF_SB_REM_DEV_CTL_PHY0);
- wr32(hw, PF_SB_REM_DEV_CTL, regval);
+ val = rd32(hw, PF_SB_REM_DEV_CTL);
+ val |= (PF_SB_REM_DEV_CTL_SWITCH_READ | PF_SB_REM_DEV_CTL_PHY0);
+ wr32(hw, PF_SB_REM_DEV_CTL, val);
/* Initialize the Clock Generation Unit */
err = ice_init_cgu_e82x(hw);
@@ -1178,7 +3321,7 @@ ice_ptp_prep_phy_time_e82x(struct ice_hw *hw, u32 time)
*/
phy_time = (u64)time << 32;
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ for (port = 0; port < hw->ptp.num_lports; port++) {
/* Tx case */
err = ice_write_64b_phy_reg_e82x(hw, port,
P_REG_TX_TIMER_INC_PRE_L,
@@ -1281,7 +3424,7 @@ ice_ptp_prep_phy_adj_e82x(struct ice_hw *hw, s32 adj)
else
cycles = -(((s64)-adj) << 32);
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ for (port = 0; port < hw->ptp.num_lports; port++) {
int err;
err = ice_ptp_prep_port_adj_e82x(hw, port, cycles);
@@ -1307,7 +3450,7 @@ ice_ptp_prep_phy_incval_e82x(struct ice_hw *hw, u64 incval)
int err;
u8 port;
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ for (port = 0; port < hw->ptp.num_lports; port++) {
err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L,
incval);
if (err)
@@ -1372,51 +3515,20 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
*
* Prepare the requested port for an upcoming timer sync command.
*
- * Do not use this function directly. If you want to configure exactly one
- * port, use ice_ptp_one_port_cmd() instead.
+ * Note there is no equivalent of this operation on E810, as that device
+ * always handles all external PHYs internally.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
*/
static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port,
enum ice_ptp_tmr_cmd cmd)
{
- u32 cmd_val, val;
- u8 tmr_idx;
+ u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd);
int err;
- tmr_idx = ice_get_ptp_src_clock_index(hw);
- cmd_val = tmr_idx << SEL_PHY_SRC;
- switch (cmd) {
- case ICE_PTP_INIT_TIME:
- cmd_val |= PHY_CMD_INIT_TIME;
- break;
- case ICE_PTP_INIT_INCVAL:
- cmd_val |= PHY_CMD_INIT_INCVAL;
- break;
- case ICE_PTP_ADJ_TIME:
- cmd_val |= PHY_CMD_ADJ_TIME;
- break;
- case ICE_PTP_READ_TIME:
- cmd_val |= PHY_CMD_READ_TIME;
- break;
- case ICE_PTP_ADJ_TIME_AT_TIME:
- cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
- break;
- case ICE_PTP_NOP:
- break;
- }
-
/* Tx case */
- /* Read, modify, write */
- err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, &val);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n",
- err);
- return err;
- }
-
- /* Modify necessary bits only and perform write */
- val &= ~TS_CMD_MASK;
- val |= cmd_val;
-
err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
@@ -1425,19 +3537,8 @@ static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port,
}
/* Rx case */
- /* Read, modify, write */
- err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, &val);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n",
- err);
- return err;
- }
-
- /* Modify necessary bits only and perform write */
- val &= ~TS_CMD_MASK;
- val |= cmd_val;
-
- err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD,
+ val | TS_CMD_RX_TYPE);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
err);
@@ -1447,63 +3548,6 @@ static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port,
return 0;
}
-/**
- * ice_ptp_one_port_cmd - Prepare one port for a timer command
- * @hw: pointer to the HW struct
- * @configured_port: the port to configure with configured_cmd
- * @configured_cmd: timer command to prepare on the configured_port
- *
- * Prepare the configured_port for the configured_cmd, and prepare all other
- * ports for ICE_PTP_NOP. This causes the configured_port to execute the
- * desired command while all other ports perform no operation.
- */
-static int
-ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
- enum ice_ptp_tmr_cmd configured_cmd)
-{
- u8 port;
-
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
- enum ice_ptp_tmr_cmd cmd;
- int err;
-
- if (port == configured_port)
- cmd = configured_cmd;
- else
- cmd = ICE_PTP_NOP;
-
- err = ice_ptp_write_port_cmd_e82x(hw, port, cmd);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-/**
- * ice_ptp_port_cmd_e82x - Prepare all ports for a timer command
- * @hw: pointer to the HW struct
- * @cmd: timer command to prepare
- *
- * Prepare all ports connected to this device for an upcoming timer sync
- * command.
- */
-static int
-ice_ptp_port_cmd_e82x(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
-{
- u8 port;
-
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
- int err;
-
- err = ice_ptp_write_port_cmd_e82x(hw, port, cmd);
- if (err)
- return err;
- }
-
- return 0;
-}
-
/* E822 Vernier calibration functions
*
* The following functions are used as part of the vernier calibration of
@@ -1606,7 +3650,7 @@ static void ice_phy_cfg_lane_e82x(struct ice_hw *hw, u8 port)
return;
}
- quad = port / ICE_PORTS_PER_QUAD;
+ quad = ICE_GET_QUAD_NUM(port);
err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
if (err) {
@@ -2327,6 +4371,40 @@ int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port)
}
/**
+ * ice_ptp_clear_phy_offset_ready_e82x - Clear PHY TX_/RX_OFFSET_READY registers
+ * @hw: pointer to the HW struct
+ *
+ * Clear PHY TX_/RX_OFFSET_READY registers, effectively marking all transmitted
+ * and received timestamps as invalid.
+ *
+ * Return: 0 on success, other error codes when failed to write to PHY
+ */
+int ice_ptp_clear_phy_offset_ready_e82x(struct ice_hw *hw)
+{
+ u8 port;
+
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 0);
+ if (err) {
+ dev_warn(ice_hw_to_dev(hw),
+ "Failed to clear PHY TX_OFFSET_READY register\n");
+ return err;
+ }
+
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 0);
+ if (err) {
+ dev_warn(ice_hw_to_dev(hw),
+ "Failed to clear PHY RX_OFFSET_READY register\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
* ice_read_phy_and_phc_time_e82x - Simultaneously capture PHC and PHY time
* @hw: pointer to the HW struct
* @port: the PHY port to read
@@ -2636,6 +4714,48 @@ ice_get_phy_tx_tstamp_ready_e82x(struct ice_hw *hw, u8 quad, u64 *tstamp_ready)
return 0;
}
+/**
+ * ice_phy_cfg_intr_e82x - Configure TX timestamp interrupt
+ * @hw: pointer to the HW struct
+ * @quad: the timestamp quad
+ * @ena: enable or disable interrupt
+ * @threshold: interrupt threshold
+ *
+ * Configure TX timestamp interrupt for the specified quad
+ *
+ * Return: 0 on success, other error codes when failed to read/write quad
+ */
+int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold)
+{
+ int err;
+ u32 val;
+
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
+ if (err)
+ return err;
+
+ val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
+ if (ena) {
+ val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
+ val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M;
+ val |= FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M, threshold);
+ }
+
+ return ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
+}
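For reference, the enable bit written above is BIT(15) and the threshold occupies bits 14:9 of Q_REG_TX_MEM_GBL_CFG, so enabling the interrupt with a threshold of 1 ORs 0x8200 into the register. A compile-time sketch of that packing, illustrative only and not part of the patch:

/* Illustration only: bits set by ice_phy_cfg_intr_e82x() for ena = true
 * and threshold = 1.
 *   Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M                 -> 0x8000
 *   FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M, 1)  -> 0x0200
 */
#define ICE_EXAMPLE_TX_INTR_BITS \
	(Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M | \
	 FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M, 1))	/* 0x8200 */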
+
+/**
+ * ice_ptp_init_phy_e82x - initialize PHY parameters
+ * @ptp: pointer to the PTP HW struct
+ */
+static void ice_ptp_init_phy_e82x(struct ice_ptp_hw *ptp)
+{
+ ptp->phy_model = ICE_PHY_E82X;
+ ptp->num_lports = 8;
+ ptp->ports_per_phy = 8;
+}
+
/* E810 functions
*
* The following functions operate on the E810 series devices which use
@@ -2660,7 +4780,7 @@ static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
msg.opcode = ice_sbq_msg_rd;
msg.dest_dev = rmn_0;
- err = ice_sbq_rw_reg(hw, &msg);
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -2691,7 +4811,7 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
msg.dest_dev = rmn_0;
msg.data = val;
- err = ice_sbq_rw_reg(hw, &msg);
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -2863,17 +4983,21 @@ static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
}
/**
- * ice_ptp_init_phy_e810 - Enable PTP function on the external PHY
+ * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization
* @hw: pointer to HW struct
*
- * Enable the timesync PTP functionality for the external PHY connected to
- * this function.
+ * Perform E810-specific PTP hardware clock initialization steps.
+ *
+ * Return: 0 on success, other error codes when failed to initialize TimeSync
*/
-int ice_ptp_init_phy_e810(struct ice_hw *hw)
+static int ice_ptp_init_phc_e810(struct ice_hw *hw)
{
u8 tmr_idx;
int err;
+ /* Ensure synchronization delay is zero */
+ wr32(hw, GLTSYN_SYNC_DLAY, 0);
+
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
GLTSYN_ENA_TSYN_ENA_M);
@@ -2885,21 +5009,6 @@ int ice_ptp_init_phy_e810(struct ice_hw *hw)
}
/**
- * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization
- * @hw: pointer to HW struct
- *
- * Perform E810-specific PTP hardware clock initialization steps.
- */
-static int ice_ptp_init_phc_e810(struct ice_hw *hw)
-{
- /* Ensure synchronization delay is zero */
- wr32(hw, GLTSYN_SYNC_DLAY, 0);
-
- /* Initialize the PHY */
- return ice_ptp_init_phy_e810(hw);
-}
-
-/**
* ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time
* @hw: Board private structure
* @time: Time to initialize the PHY port clock to
@@ -3020,47 +5129,9 @@ static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
*/
static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
- u32 cmd_val, val;
- int err;
-
- switch (cmd) {
- case ICE_PTP_INIT_TIME:
- cmd_val = GLTSYN_CMD_INIT_TIME;
- break;
- case ICE_PTP_INIT_INCVAL:
- cmd_val = GLTSYN_CMD_INIT_INCVAL;
- break;
- case ICE_PTP_ADJ_TIME:
- cmd_val = GLTSYN_CMD_ADJ_TIME;
- break;
- case ICE_PTP_READ_TIME:
- cmd_val = GLTSYN_CMD_READ_TIME;
- break;
- case ICE_PTP_ADJ_TIME_AT_TIME:
- cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
- break;
- case ICE_PTP_NOP:
- return 0;
- }
+ u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd);
- /* Read, modify, write */
- err = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, err %d\n", err);
- return err;
- }
-
- /* Modify necessary bits only and perform write */
- val &= ~TS_CMD_MASK_E810;
- val |= cmd_val;
-
- err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, err %d\n", err);
- return err;
- }
-
- return 0;
+ return ice_write_phy_reg_e810(hw, E810_ETH_GLTSYN_CMD, val);
}
/**
@@ -3242,6 +5313,17 @@ int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
}
+/**
+ * ice_ptp_init_phy_e810 - initialize PHY parameters
+ * @ptp: pointer to the PTP HW struct
+ */
+static void ice_ptp_init_phy_e810(struct ice_ptp_hw *ptp)
+{
+ ptp->phy_model = ICE_PHY_E810;
+ ptp->num_lports = 8;
+ ptp->ports_per_phy = 4;
+}
+
/* Device agnostic functions
*
* The following functions implement shared behavior common to both E822 and
@@ -3299,18 +5381,126 @@ void ice_ptp_unlock(struct ice_hw *hw)
}
/**
- * ice_ptp_init_phy_model - Initialize hw->phy_model based on device type
+ * ice_ptp_init_hw - Initialize hw based on device type
* @hw: pointer to the HW structure
*
- * Determine the PHY model for the device, and initialize hw->phy_model
+ * Determine the PHY model for the device, and initialize hw
* for use by other functions.
*/
-void ice_ptp_init_phy_model(struct ice_hw *hw)
+void ice_ptp_init_hw(struct ice_hw *hw)
{
- if (ice_is_e810(hw))
- hw->phy_model = ICE_PHY_E810;
+ struct ice_ptp_hw *ptp = &hw->ptp;
+
+ if (ice_is_e822(hw) || ice_is_e823(hw))
+ ice_ptp_init_phy_e82x(ptp);
+ else if (ice_is_e810(hw))
+ ice_ptp_init_phy_e810(ptp);
+ else if (ice_is_e825c(hw))
+ ice_ptp_init_phy_e825c(hw);
else
- hw->phy_model = ICE_PHY_E82X;
+ ptp->phy_model = ICE_PHY_UNSUP;
+}
+
+/**
+ * ice_ptp_write_port_cmd - Prepare a single PHY port for a timer command
+ * @hw: pointer to HW struct
+ * @port: Port to which cmd has to be sent
+ * @cmd: Command to be sent to the port
+ *
+ * Prepare one port for the upcoming timer sync command. Do not use this
+ * function directly when exactly one port needs to be programmed; use
+ * ice_ptp_one_port_cmd() instead so that all other ports are properly
+ * initialized to ICE_PTP_NOP.
+ *
+ * Return:
+ * * %0 - success
+ * * %-EOPNOTSUPP - PHY model not supported
+ * * %other - failed to write port command
+ */
+static int ice_ptp_write_port_cmd(struct ice_hw *hw, u8 port,
+ enum ice_ptp_tmr_cmd cmd)
+{
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ return ice_ptp_write_port_cmd_eth56g(hw, port, cmd);
+ case ICE_PHY_E82X:
+ return ice_ptp_write_port_cmd_e82x(hw, port, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * ice_ptp_one_port_cmd - Program one PHY port for a timer command
+ * @hw: pointer to HW struct
+ * @configured_port: the port that should execute the command
+ * @configured_cmd: the command to be executed on the configured port
+ *
+ * Prepare one port for executing a timer command, while preparing all other
+ * ports to ICE_PTP_NOP. This allows executing a command on a single port
+ * while ensuring all other ports do not execute stale commands.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write port command
+ */
+int ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
+ enum ice_ptp_tmr_cmd configured_cmd)
+{
+ u32 port;
+
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ /* Program the configured port with the configured command,
+ * program all other ports with ICE_PTP_NOP.
+ */
+ if (port == configured_port)
+ err = ice_ptp_write_port_cmd(hw, port, configured_cmd);
+ else
+ err = ice_ptp_write_port_cmd(hw, port, ICE_PTP_NOP);
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_port_cmd - Prepare PHY ports for a timer sync command
+ * @hw: pointer to HW struct
+ * @cmd: the timer command to setup
+ *
+ * Prepare all PHY ports on this device for the requested timer command. For
+ * some families this can be done in one shot, but for other families each
+ * port must be configured individually.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write port command
+ */
+static int ice_ptp_port_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 port;
+
+ /* PHY models which can program all ports simultaneously */
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_E810:
+ return ice_ptp_port_cmd_e810(hw, cmd);
+ default:
+ break;
+ }
+
+ /* PHY models which require programming each port separately */
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ err = ice_ptp_write_port_cmd(hw, port, cmd);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
/**
@@ -3331,17 +5521,7 @@ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
ice_ptp_src_cmd(hw, cmd);
/* Next, prepare the ports */
- switch (hw->phy_model) {
- case ICE_PHY_E810:
- err = ice_ptp_port_cmd_e810(hw, cmd);
- break;
- case ICE_PHY_E82X:
- err = ice_ptp_port_cmd_e82x(hw, cmd);
- break;
- default:
- err = -EOPNOTSUPP;
- }
-
+ err = ice_ptp_port_cmd(hw, cmd);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n",
cmd, err);
@@ -3383,7 +5563,11 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
/* PHY timers */
/* Fill Rx and Tx ports and send msg to PHY */
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ err = ice_ptp_prep_phy_time_eth56g(hw,
+ (u32)(time & 0xFFFFFFFF));
+ break;
case ICE_PHY_E810:
err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
break;
@@ -3425,7 +5609,10 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ err = ice_ptp_prep_phy_incval_eth56g(hw, incval);
+ break;
case ICE_PHY_E810:
err = ice_ptp_prep_phy_incval_e810(hw, incval);
break;
@@ -3491,7 +5678,10 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ err = ice_ptp_prep_phy_adj_eth56g(hw, adj);
+ break;
case ICE_PHY_E810:
err = ice_ptp_prep_phy_adj_e810(hw, adj);
break;
@@ -3521,7 +5711,9 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
*/
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
{
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp);
case ICE_PHY_E810:
return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
case ICE_PHY_E82X:
@@ -3549,7 +5741,9 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
*/
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ return ice_clear_ptp_tstamp_eth56g(hw, block, idx);
case ICE_PHY_E810:
return ice_clear_phy_tstamp_e810(hw, block, idx);
case ICE_PHY_E82X:
@@ -3610,7 +5804,10 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx)
*/
void ice_ptp_reset_ts_memory(struct ice_hw *hw)
{
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ ice_ptp_reset_ts_memory_eth56g(hw);
+ break;
case ICE_PHY_E82X:
ice_ptp_reset_ts_memory_e82x(hw);
break;
@@ -3636,7 +5833,9 @@ int ice_ptp_init_phc(struct ice_hw *hw)
/* Clear event err indications for auxiliary pins */
(void)rd32(hw, GLTSYN_STAT(src_idx));
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ return ice_ptp_init_phc_eth56g(hw);
case ICE_PHY_E810:
return ice_ptp_init_phc_e810(hw);
case ICE_PHY_E82X:
@@ -3659,7 +5858,10 @@ int ice_ptp_init_phc(struct ice_hw *hw)
*/
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
{
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ return ice_get_phy_tx_tstamp_ready_eth56g(hw, block,
+ tstamp_ready);
case ICE_PHY_E810:
return ice_get_phy_tx_tstamp_ready_e810(hw, block,
tstamp_ready);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 1f3e03124430..0852a34ade91 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -41,6 +41,41 @@ enum ice_ptp_fec_mode {
ICE_PTP_FEC_MODE_RS_FEC
};
+enum eth56g_res_type {
+ ETH56G_PHY_REG_PTP,
+ ETH56G_PHY_MEM_PTP,
+ ETH56G_PHY_REG_XPCS,
+ ETH56G_PHY_REG_MAC,
+ ETH56G_PHY_REG_GPCS,
+ NUM_ETH56G_PHY_RES
+};
+
+enum ice_eth56g_link_spd {
+ ICE_ETH56G_LNK_SPD_1G,
+ ICE_ETH56G_LNK_SPD_2_5G,
+ ICE_ETH56G_LNK_SPD_10G,
+ ICE_ETH56G_LNK_SPD_25G,
+ ICE_ETH56G_LNK_SPD_40G,
+ ICE_ETH56G_LNK_SPD_50G,
+ ICE_ETH56G_LNK_SPD_50G2,
+ ICE_ETH56G_LNK_SPD_100G,
+ ICE_ETH56G_LNK_SPD_100G2,
+ NUM_ICE_ETH56G_LNK_SPD /* Must be last */
+};
+
+/**
+ * struct ice_phy_reg_info_eth56g - ETH56G PHY register parameters
+ * @base: base address for each PHY block
+ * @step: step between PHY lanes
+ *
+ * Characteristic information for the various PHY register parameters in the
+ * ETH56G devices
+ */
+struct ice_phy_reg_info_eth56g {
+ u32 base[NUM_ETH56G_PHY_RES];
+ u32 step;
+};
+
/**
* struct ice_time_ref_info_e82x
* @pll_freq: Frequency of PLL that drives timer ticks in Hz
@@ -94,8 +129,75 @@ struct ice_vernier_info_e82x {
u32 rx_fixed_delay;
};
+#define ICE_ETH56G_MAC_CFG_RX_OFFSET_INT GENMASK(19, 9)
+#define ICE_ETH56G_MAC_CFG_RX_OFFSET_FRAC GENMASK(8, 0)
+#define ICE_ETH56G_MAC_CFG_FRAC_W 9
/**
- * struct ice_cgu_pll_params_e82x
+ * struct ice_eth56g_mac_reg_cfg - MAC config values for specific PTP registers
+ * @tx_mode: Tx timestamp compensation mode
+ * @tx_mk_dly: Tx timestamp marker start strobe delay
+ * @tx_cw_dly: Tx timestamp codeword start strobe delay
+ * @rx_mode: Rx timestamp compensation mode
+ * @rx_mk_dly: Rx timestamp marker start strobe delay
+ * @rx_cw_dly: Rx timestamp codeword start strobe delay
+ * @blks_per_clk: number of blocks transferred per clock cycle
+ * @blktime: block time, fixed point
+ * @mktime: marker time, fixed point
+ * @tx_offset: total Tx offset, fixed point
+ * @rx_offset: total Rx offset, contains value for bitslip/deskew, fixed point
+ *
+ * All fixed-point registers except the Rx offset are 23-bit unsigned values
+ * with a 9-bit fractional part. The Rx offset is an 11-bit unsigned value
+ * with a 9-bit fractional part.
+ */
+struct ice_eth56g_mac_reg_cfg {
+ struct {
+ u8 def;
+ u8 rs;
+ } tx_mode;
+ u8 tx_mk_dly;
+ struct {
+ u8 def;
+ u8 onestep;
+ } tx_cw_dly;
+ struct {
+ u8 def;
+ u8 rs;
+ } rx_mode;
+ struct {
+ u8 def;
+ u8 rs;
+ } rx_mk_dly;
+ struct {
+ u8 def;
+ u8 rs;
+ } rx_cw_dly;
+ u8 blks_per_clk;
+ u16 blktime;
+ u16 mktime;
+ struct {
+ u32 serdes;
+ u32 no_fec;
+ u32 fc;
+ u32 rs;
+ u32 sfd;
+ u32 onestep;
+ } tx_offset;
+ struct {
+ u32 serdes;
+ u32 no_fec;
+ u32 fc;
+ u32 rs;
+ u32 sfd;
+ u32 bs_ds;
+ } rx_offset;
+};
+
+extern
+const struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD];
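The fixed-point convention described in the struct comment scales nanoseconds by 2^ICE_ETH56G_MAC_CFG_FRAC_W (512) before the value is written, so 12.5 ns encodes as 0x1900. A hypothetical conversion helper, shown only as a sketch and not part of the patch:

/* Sketch only: encode an offset with a 9-bit fractional part, e.g.
 * 12.5 ns -> (12 << 9) | 256 = 0x1900.
 */
static inline u32 ice_example_eth56g_fixed(u32 ns, u32 frac_512ths)
{
	return (ns << ICE_ETH56G_MAC_CFG_FRAC_W) | frac_512ths;
}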
+
+/**
+ * struct ice_cgu_pll_params_e82x - E82X CGU parameters
* @refclk_pre_div: Reference clock pre-divisor
* @feedback_div: Feedback divisor
* @frac_n_div: Fractional divisor
@@ -185,9 +287,34 @@ struct ice_cgu_pin_desc {
extern const struct
ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
+/**
+ * struct ice_cgu_pll_params_e825c - E825C CGU parameters
+ * @tspll_ck_refclkfreq: tspll_ck_refclkfreq selection
+ * @tspll_ndivratio: ndiv ratio that goes directly to the pll
+ * @tspll_fbdiv_intgr: TS PLL integer feedback divide
+ * @tspll_fbdiv_frac: TS PLL fractional feedback divide
+ * @ref1588_ck_div: clock divider for tspll ref
+ *
+ * Clock Generation Unit parameters used to program the PLL based on the
+ * selected TIME_REF/TCXO frequency.
+ */
+struct ice_cgu_pll_params_e825c {
+ u32 tspll_ck_refclkfreq;
+ u32 tspll_ndivratio;
+ u32 tspll_fbdiv_intgr;
+ u32 tspll_fbdiv_frac;
+ u32 ref1588_ck_div;
+};
+
+extern const struct
+ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ];
+
#define E810C_QSFP_C827_0_HANDLE 2
#define E810C_QSFP_C827_1_HANDLE 3
+/* Table of constants related to possible ETH56G PHY resources */
+extern const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES];
+
/* Table of constants related to possible TIME_REF sources */
extern const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ];
@@ -197,7 +324,9 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
/* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for
* the E810 devices. Based off of a PLL with an 812.5 MHz frequency.
*/
-#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL
+#define ICE_E810_PLL_FREQ 812500000
+#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL
+#define E810_OUT_PROP_DELAY_NS 1
/* Device agnostic functions */
u8 ice_get_ptp_src_clock_index(struct ice_hw *hw);
@@ -208,11 +337,15 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time);
int ice_ptp_write_incval(struct ice_hw *hw, u64 incval);
int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval);
int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj);
+int ice_ptp_clear_phy_offset_ready_e82x(struct ice_hw *hw);
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp);
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx);
void ice_ptp_reset_ts_memory(struct ice_hw *hw);
int ice_ptp_init_phc(struct ice_hw *hw);
+void ice_ptp_init_hw(struct ice_hw *hw);
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready);
+int ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
+ enum ice_ptp_tmr_cmd configured_cmd);
/* E822 family functions */
int ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val);
@@ -264,9 +397,9 @@ int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset);
int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port);
int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port);
int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port);
+int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold);
/* E810 family functions */
-int ice_ptp_init_phy_e810(struct ice_hw *hw);
int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data);
int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data);
int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data);
@@ -280,11 +413,44 @@ int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx,
u8 *ref_state, u8 *eec_mode, s64 *phase_offset,
enum dpll_lock_status *dpll_state);
int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num);
-
-void ice_ptp_init_phy_model(struct ice_hw *hw);
int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
unsigned long *caps);
+/* ETH56G family functions */
+int ice_ptp_read_tx_hwtstamp_status_eth56g(struct ice_hw *hw, u32 *ts_status);
+int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset);
+int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port);
+int ice_phy_cfg_tx_offset_eth56g(struct ice_hw *hw, u8 port);
+int ice_phy_cfg_rx_offset_eth56g(struct ice_hw *hw, u8 port);
+int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold);
+int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port);
+
+#define ICE_ETH56G_NOMINAL_INCVAL 0x140000000ULL
+#define ICE_ETH56G_NOMINAL_PCS_REF_TUS 0x100000000ULL
+#define ICE_ETH56G_NOMINAL_PCS_REF_INC 0x300000000ULL
+#define ICE_ETH56G_NOMINAL_THRESH4 0x7777
+#define ICE_ETH56G_NOMINAL_TX_THRESH 0x6
+
+/**
+ * ice_get_base_incval - Get base clock increment value
+ * @hw: pointer to the HW struct
+ *
+ * Return: base clock increment value for supported PHYs, 0 otherwise
+ */
+static inline u64 ice_get_base_incval(struct ice_hw *hw)
+{
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ return ICE_ETH56G_NOMINAL_INCVAL;
+ case ICE_PHY_E810:
+ return ICE_PTP_NOMINAL_INCVAL_E810;
+ case ICE_PHY_E82X:
+ return ice_e82x_nominal_incval(ice_e82x_time_ref(hw));
+ default:
+ return 0;
+ }
+}
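The nominal increment constants returned above are nanoseconds-per-tick in 32.32 fixed point, which lets them be cross-checked against the source clock: 812.5 MHz gives 16/13 ns per tick for E810, and, assuming the same convention, 0x140000000 corresponds to 1.25 ns per tick (an 800 MHz clock) for ETH56G. A sketch of that check, illustrative only:

/* Illustration only: 32.32 fixed-point check of the nominal increments.
 * 1e9 / ICE_E810_PLL_FREQ = 16/13 ns per tick, and
 * (16ULL << 32) / 13 == 0x13b13b13bULL == ICE_PTP_NOMINAL_INCVAL_E810.
 */
static_assert((16ULL << 32) / 13 == ICE_PTP_NOMINAL_INCVAL_E810);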
+
#define PFTSYN_SEM_BYTES 4
#define ICE_PTP_CLOCK_INDEX_0 0x00
@@ -312,6 +478,7 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
#define TS_CMD_MASK_E810 0xFF
#define TS_CMD_MASK 0xF
#define SYNC_EXEC_CMD 0x3
+#define TS_CMD_RX_TYPE ICE_M(0x18, 0x4)
/* Macros to derive port low and high addresses on both quads */
#define P_Q0_L(a, p) ((((a) + (0x2000 * (p)))) & 0xFFFF)
@@ -344,11 +511,8 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
#define Q_REG_TX_MEM_GBL_CFG 0xC08
#define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_S 0
#define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M BIT(0)
-#define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_S 1
#define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_M ICE_M(0xFF, 1)
-#define Q_REG_TX_MEM_GBL_CFG_INTR_THR_S 9
#define Q_REG_TX_MEM_GBL_CFG_INTR_THR_M ICE_M(0x3F, 9)
-#define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_S 15
#define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M BIT(15)
/* Tx Timestamp data registers */
@@ -380,7 +544,7 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
#define P_REG_TIMETUS_L 0x410
#define P_REG_TIMETUS_U 0x414
-#define P_REG_40B_LOW_M 0xFF
+#define P_REG_40B_LOW_M GENMASK(7, 0)
#define P_REG_40B_HIGH_S 8
/* PHY window length registers */
@@ -487,7 +651,7 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
#define ETH_GLTSYN_SHADJ_H(_i) (0x0300037C + ((_i) * 32))
/* E810 timer command register */
-#define ETH_GLTSYN_CMD 0x03000344
+#define E810_ETH_GLTSYN_CMD 0x03000344
/* Source timer incval macros */
#define INCVAL_HIGH_M 0xFF
@@ -549,4 +713,115 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
/* E810T PCA9575 IO controller pin control */
#define ICE_E810T_P0_GNSS_PRSNT_N BIT(4)
+/* ETH56G PHY register addresses */
+/* Timestamp PHY incval registers */
+#define PHY_REG_TIMETUS_L 0x8
+#define PHY_REG_TIMETUS_U 0xC
+
+/* Timestamp PCS registers */
+#define PHY_PCS_REF_TUS_L 0x18
+#define PHY_PCS_REF_TUS_U 0x1C
+
+/* Timestamp PCS ref incval registers */
+#define PHY_PCS_REF_INC_L 0x20
+#define PHY_PCS_REF_INC_U 0x24
+
+/* Timestamp init registers */
+#define PHY_REG_RX_TIMER_INC_PRE_L 0x64
+#define PHY_REG_RX_TIMER_INC_PRE_U 0x68
+#define PHY_REG_TX_TIMER_INC_PRE_L 0x44
+#define PHY_REG_TX_TIMER_INC_PRE_U 0x48
+
+/* Timestamp match and adjust target registers */
+#define PHY_REG_RX_TIMER_CNT_ADJ_L 0x6C
+#define PHY_REG_RX_TIMER_CNT_ADJ_U 0x70
+#define PHY_REG_TX_TIMER_CNT_ADJ_L 0x4C
+#define PHY_REG_TX_TIMER_CNT_ADJ_U 0x50
+
+/* Timestamp command registers */
+#define PHY_REG_TX_TMR_CMD 0x40
+#define PHY_REG_RX_TMR_CMD 0x60
+
+/* Phy offset ready registers */
+#define PHY_REG_TX_OFFSET_READY 0x54
+#define PHY_REG_RX_OFFSET_READY 0x74
+
+/* Phy total offset registers */
+#define PHY_REG_TOTAL_TX_OFFSET_L 0x38
+#define PHY_REG_TOTAL_TX_OFFSET_U 0x3C
+#define PHY_REG_TOTAL_RX_OFFSET_L 0x58
+#define PHY_REG_TOTAL_RX_OFFSET_U 0x5C
+
+/* Timestamp capture registers */
+#define PHY_REG_TX_CAPTURE_L 0x78
+#define PHY_REG_TX_CAPTURE_U 0x7C
+#define PHY_REG_RX_CAPTURE_L 0x8C
+#define PHY_REG_RX_CAPTURE_U 0x90
+
+/* Memory status registers */
+#define PHY_REG_TX_MEMORY_STATUS_L 0x80
+#define PHY_REG_TX_MEMORY_STATUS_U 0x84
+
+/* Interrupt config register */
+#define PHY_REG_TS_INT_CONFIG 0x88
+
+/* XIF mode config register */
+#define PHY_MAC_XIF_MODE 0x24
+#define PHY_MAC_XIF_1STEP_ENA_M ICE_M(0x1, 5)
+#define PHY_MAC_XIF_TS_BIN_MODE_M ICE_M(0x1, 11)
+#define PHY_MAC_XIF_TS_SFD_ENA_M ICE_M(0x1, 20)
+#define PHY_MAC_XIF_GMII_TS_SEL_M ICE_M(0x1, 21)
+
+/* GPCS config register */
+#define PHY_GPCS_CONFIG_REG0 0x268
+#define PHY_GPCS_CONFIG_REG0_TX_THR_M ICE_M(0xF, 24)
+#define PHY_GPCS_BITSLIP 0x5C
+
+#define PHY_TS_INT_CONFIG_THRESHOLD_M ICE_M(0x3F, 0)
+#define PHY_TS_INT_CONFIG_ENA_M BIT(6)
+
+/* 1-step PTP config */
+#define PHY_PTP_1STEP_CONFIG 0x270
+#define PHY_PTP_1STEP_T1S_UP64_M ICE_M(0xF, 4)
+#define PHY_PTP_1STEP_T1S_DELTA_M ICE_M(0xF, 8)
+#define PHY_PTP_1STEP_PEER_DELAY(_port) (0x274 + 4 * (_port))
+#define PHY_PTP_1STEP_PD_ADD_PD_M ICE_M(0x1, 0)
+#define PHY_PTP_1STEP_PD_DELAY_M ICE_M(0x3fffffff, 1)
+#define PHY_PTP_1STEP_PD_DLY_V_M ICE_M(0x1, 31)
+
+/* Macros to derive offsets for TimeStampLow and TimeStampHigh */
+#define PHY_TSTAMP_L(x) (((x) * 8) + 0)
+#define PHY_TSTAMP_U(x) (((x) * 8) + 4)
+
+#define PHY_REG_REVISION 0x85000
+
+#define PHY_REG_DESKEW_0 0x94
+#define PHY_REG_DESKEW_0_RLEVEL GENMASK(6, 0)
+#define PHY_REG_DESKEW_0_RLEVEL_FRAC GENMASK(9, 7)
+#define PHY_REG_DESKEW_0_RLEVEL_FRAC_W 3
+#define PHY_REG_DESKEW_0_VALID GENMASK(10, 10)
+
+#define PHY_REG_GPCS_BITSLIP 0x5C
+#define PHY_REG_SD_BIT_SLIP(_port_offset) (0x29C + 4 * (_port_offset))
+#define PHY_REVISION_ETH56G 0x10200
+#define PHY_VENDOR_TXLANE_THRESH 0x2000C
+
+#define PHY_MAC_TSU_CONFIG 0x40
+#define PHY_MAC_TSU_CFG_RX_MODE_M ICE_M(0x7, 0)
+#define PHY_MAC_TSU_CFG_RX_MII_CW_DLY_M ICE_M(0x7, 4)
+#define PHY_MAC_TSU_CFG_RX_MII_MK_DLY_M ICE_M(0x7, 8)
+#define PHY_MAC_TSU_CFG_TX_MODE_M ICE_M(0x7, 12)
+#define PHY_MAC_TSU_CFG_TX_MII_CW_DLY_M ICE_M(0x1F, 16)
+#define PHY_MAC_TSU_CFG_TX_MII_MK_DLY_M ICE_M(0x1F, 21)
+#define PHY_MAC_TSU_CFG_BLKS_PER_CLK_M ICE_M(0x1, 28)
+#define PHY_MAC_RX_MODULO 0x44
+#define PHY_MAC_RX_OFFSET 0x48
+#define PHY_MAC_RX_OFFSET_M ICE_M(0xFFFFFF, 0)
+#define PHY_MAC_TX_MODULO 0x4C
+#define PHY_MAC_BLOCKTIME 0x50
+#define PHY_MAC_MARKERTIME 0x54
+#define PHY_MAC_TX_OFFSET 0x58
+
+#define PHY_PTP_INT_STATUS 0x7FD140
+
#endif /* _ICE_PTP_HW_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index d367f4c66dcd..bdda3401e343 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -285,9 +285,7 @@ ice_repr_reg_netdev(struct net_device *netdev)
static void ice_repr_remove_node(struct devlink_port *devlink_port)
{
- devl_lock(devlink_port->devlink);
devl_rate_leaf_destroy(devlink_port);
- devl_unlock(devlink_port->devlink);
}
/**
@@ -308,6 +306,7 @@ static void ice_repr_rem(struct ice_repr *repr)
void ice_repr_rem_vf(struct ice_repr *repr)
{
ice_repr_remove_node(&repr->vf->devlink_port);
+ ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac);
unregister_netdev(repr->netdev);
ice_devlink_destroy_vf_port(repr->vf);
ice_virtchnl_set_dflt_ops(repr->vf);
@@ -403,11 +402,17 @@ struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
if (err)
goto err_netdev;
+ err = ice_eswitch_cfg_vsi(repr->src_vsi, repr->parent_mac);
+ if (err)
+ goto err_cfg_vsi;
+
ice_virtchnl_set_repr_ops(vf);
ice_repr_set_tx_topology(vf->pf);
return repr;
+err_cfg_vsi:
+ unregister_netdev(repr->netdev);
err_netdev:
ice_repr_rem(repr);
err_repr_add:
@@ -415,12 +420,9 @@ err_repr_add:
return ERR_PTR(err);
}
-struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi)
+struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id)
{
- if (!vsi->vf)
- return NULL;
-
- return xa_load(&vsi->back->eswitch.reprs, vsi->vf->repr_id);
+ return xa_load(&pf->eswitch.reprs, id);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
index cff730b15ca0..488661b2900b 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.h
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -35,9 +35,8 @@ void ice_repr_stop_tx_queues(struct ice_repr *repr);
struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev);
bool ice_is_port_repr_netdev(const struct net_device *netdev);
-struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi);
-
void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len,
int xmit_status);
void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len);
+struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
index ead75fe2bcda..3b0054faf70c 100644
--- a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
@@ -47,10 +47,12 @@ struct ice_sbq_evt_desc {
};
enum ice_sbq_msg_dev {
- rmn_0 = 0x02,
- rmn_1 = 0x03,
- rmn_2 = 0x04,
- cgu = 0x06
+ eth56g_phy_0 = 0x02,
+ rmn_0 = 0x02,
+ rmn_1 = 0x03,
+ rmn_2 = 0x04,
+ cgu = 0x06,
+ eth56g_phy_1 = 0x0D,
};
enum ice_sbq_msg_opcode {
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 067712f4923f..55ef33208456 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -1416,21 +1416,23 @@ out_put_vf:
}
/**
- * ice_set_vf_mac
- * @netdev: network interface device structure
+ * __ice_set_vf_mac - program VF MAC address
+ * @pf: PF to be configured
* @vf_id: VF identifier
* @mac: MAC address
*
* program VF MAC address
+ * Return: zero on success or an error code on failure
*/
-int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+int __ice_set_vf_mac(struct ice_pf *pf, u16 vf_id, const u8 *mac)
{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct device *dev;
struct ice_vf *vf;
int ret;
+ dev = ice_pf_to_dev(pf);
if (is_multicast_ether_addr(mac)) {
- netdev_err(netdev, "%pM not a valid unicast address\n", mac);
+ dev_err(dev, "%pM not a valid unicast address\n", mac);
return -EINVAL;
}
@@ -1459,13 +1461,13 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
if (is_zero_ether_addr(mac)) {
/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
vf->pf_set_mac = false;
- netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
- vf->vf_id);
+ dev_info(dev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
+ vf->vf_id);
} else {
/* PF will add MAC rule for the VF */
vf->pf_set_mac = true;
- netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
- mac, vf_id);
+ dev_info(dev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
+ mac, vf_id);
}
ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
@@ -1477,6 +1479,20 @@ out_put_vf:
}
/**
+ * ice_set_vf_mac - .ndo_set_vf_mac handler
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @mac: MAC address
+ *
+ * program VF MAC address
+ * Return: zero on success or an error code on failure
+ */
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+ return __ice_set_vf_mac(ice_netdev_to_pf(netdev), vf_id, mac);
+}
+
+/**
* ice_set_vf_trust
* @netdev: network interface device structure
* @vf_id: VF identifier
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
index 8f22313474d6..96549ca5c52c 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.h
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -28,6 +28,7 @@
#ifdef CONFIG_PCI_IOV
void ice_process_vflr_event(struct ice_pf *pf);
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
+int __ice_set_vf_mac(struct ice_pf *pf, u16 vf_id, const u8 *mac);
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
@@ -81,6 +82,13 @@ ice_sriov_configure(struct pci_dev __always_unused *pdev,
}
static inline int
+__ice_set_vf_mac(struct ice_pf __always_unused *pf,
+ u16 __always_unused vf_id, const u8 __always_unused *mac)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
ice_set_vf_mac(struct net_device __always_unused *netdev,
int __always_unused vf_id, u8 __always_unused *mac)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 1191031b2a43..3caafcdc301f 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -3,6 +3,7 @@
#include "ice_lib.h"
#include "ice_switch.h"
+#include "ice_trace.h"
#define ICE_ETH_DA_OFFSET 0
#define ICE_ETH_ETHTYPE_OFFSET 12
@@ -1471,7 +1472,6 @@ int ice_init_def_sw_recp(struct ice_hw *hw)
recps[i].root_rid = i;
INIT_LIST_HEAD(&recps[i].filt_rules);
INIT_LIST_HEAD(&recps[i].filt_replay_rules);
- INIT_LIST_HEAD(&recps[i].rg_list);
mutex_init(&recps[i].filt_rule_lock);
}
@@ -1962,6 +1962,15 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
status = -ENOENT;
+ if (!status) {
+ if (opc == ice_aqc_opc_add_sw_rules)
+ hw->switch_info->rule_cnt += num_rules;
+ else if (opc == ice_aqc_opc_remove_sw_rules)
+ hw->switch_info->rule_cnt -= num_rules;
+ }
+
+ trace_ice_aq_sw_rules(hw->switch_info);
+
return status;
}
@@ -2182,8 +2191,10 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
sw_buf->res_type = cpu_to_le16(res_type);
status = ice_aq_alloc_free_res(hw, sw_buf, buf_len,
ice_aqc_opc_alloc_res);
- if (!status)
+ if (!status) {
*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
+ hw->switch_info->recp_cnt++;
+ }
return status;
}
@@ -2197,7 +2208,13 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
*/
static int ice_free_recipe_res(struct ice_hw *hw, u16 rid)
{
- return ice_free_hw_res(hw, ICE_AQC_RES_TYPE_RECIPE, 1, &rid);
+ int status;
+
+ status = ice_free_hw_res(hw, ICE_AQC_RES_TYPE_RECIPE, 1, &rid);
+ if (!status)
+ hw->switch_info->recp_cnt--;
+
+ return status;
}
/**
@@ -2282,20 +2299,6 @@ static void ice_get_recp_to_prof_map(struct ice_hw *hw)
}
/**
- * ice_collect_result_idx - copy result index values
- * @buf: buffer that contains the result index
- * @recp: the recipe struct to copy data into
- */
-static void
-ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
- struct ice_sw_recipe *recp)
-{
- if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
- set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
- recp->res_idxs);
-}
-
-/**
* ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
* @hw: pointer to hardware structure
* @recps: struct that we need to populate
@@ -2353,18 +2356,10 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
- struct ice_recp_grp_entry *rg_entry;
u8 i, prof, idx, prot = 0;
bool is_root;
u16 off = 0;
- rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
- GFP_KERNEL);
- if (!rg_entry) {
- status = -ENOMEM;
- goto err_unroll;
- }
-
idx = root_bufs.recipe_indx;
is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
@@ -2377,11 +2372,8 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
prof = find_first_bit(recipe_to_profile[idx],
ICE_MAX_NUM_PROFILES);
for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
- u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
-
- rg_entry->fv_idx[i] = lkup_indx;
- rg_entry->fv_mask[i] =
- le16_to_cpu(root_bufs.content.mask[i + 1]);
+ u8 lkup_indx = root_bufs.content.lkup_indx[i];
+ u16 lkup_mask = le16_to_cpu(root_bufs.content.mask[i]);
/* If the recipe is a chained recipe then all its
* child recipe's result will have a result index.
@@ -2392,26 +2384,21 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
* has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
* valid offset value.
*/
- if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
- rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
- rg_entry->fv_idx[i] == 0)
+ if (!lkup_indx ||
+ (lkup_indx & ICE_AQ_RECIPE_LKUP_IGNORE) ||
+ test_bit(lkup_indx,
+ hw->switch_info->prof_res_bm[prof]))
continue;
- ice_find_prot_off(hw, ICE_BLK_SW, prof,
- rg_entry->fv_idx[i], &prot, &off);
+ ice_find_prot_off(hw, ICE_BLK_SW, prof, lkup_indx,
+ &prot, &off);
lkup_exts->fv_words[fv_word_idx].prot_id = prot;
lkup_exts->fv_words[fv_word_idx].off = off;
- lkup_exts->field_mask[fv_word_idx] =
- rg_entry->fv_mask[i];
+ lkup_exts->field_mask[fv_word_idx] = lkup_mask;
fv_word_idx++;
}
- /* populate rg_list with the data from the child entry of this
- * recipe
- */
- list_add(&rg_entry->l_entry, &recps[rid].rg_list);
/* Propagate some data to the recipe database */
- recps[idx].is_root = !!is_root;
recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
recps[idx].need_pass_l2 = root_bufs.content.act_ctrl &
ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
@@ -2419,11 +2406,8 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
- recps[idx].chain_idx = root_bufs.content.result_indx &
- ~ICE_AQ_RECIPE_RESULT_EN;
- set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
- } else {
- recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
+ set_bit(root_bufs.content.result_indx &
+ ~ICE_AQ_RECIPE_RESULT_EN, recps[idx].res_idxs);
}
if (!is_root) {
@@ -2443,15 +2427,6 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
/* Complete initialization of the root recipe entry */
lkup_exts->n_val_words = fv_word_idx;
- recps[rid].big_recp = (num_recps > 1);
- recps[rid].n_grp_count = (u8)num_recps;
- recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
- recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
- GFP_KERNEL);
- if (!recps[rid].root_buf) {
- status = -ENOMEM;
- goto err_unroll;
- }
/* Copy result indexes */
bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
@@ -4768,11 +4743,6 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
continue;
}
- /* Skip inverse action recipes */
- if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
- ICE_AQ_RECIPE_ACT_INV_ACT)
- continue;
-
/* if number of words we are looking for match */
if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
@@ -4897,110 +4867,55 @@ ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
}
/**
- * ice_create_first_fit_recp_def - Create a recipe grouping
- * @hw: pointer to the hardware structure
- * @lkup_exts: an array of protocol header extractions
- * @rg_list: pointer to a list that stores new recipe groups
- * @recp_cnt: pointer to a variable that stores returned number of recipe groups
- *
- * Using first fit algorithm, take all the words that are still not done
- * and start grouping them in 4-word groups. Each group makes up one
- * recipe.
- */
-static int
-ice_create_first_fit_recp_def(struct ice_hw *hw,
- struct ice_prot_lkup_ext *lkup_exts,
- struct list_head *rg_list,
- u8 *recp_cnt)
-{
- struct ice_pref_recipe_group *grp = NULL;
- u8 j;
-
- *recp_cnt = 0;
-
- /* Walk through every word in the rule to check if it is not done. If so
- * then this word needs to be part of a new recipe.
- */
- for (j = 0; j < lkup_exts->n_val_words; j++)
- if (!test_bit(j, lkup_exts->done)) {
- if (!grp ||
- grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
- struct ice_recp_grp_entry *entry;
-
- entry = devm_kzalloc(ice_hw_to_dev(hw),
- sizeof(*entry),
- GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
- list_add(&entry->l_entry, rg_list);
- grp = &entry->r_group;
- (*recp_cnt)++;
- }
-
- grp->pairs[grp->n_val_pairs].prot_id =
- lkup_exts->fv_words[j].prot_id;
- grp->pairs[grp->n_val_pairs].off =
- lkup_exts->fv_words[j].off;
- grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
- grp->n_val_pairs++;
- }
-
- return 0;
-}
-
-/**
* ice_fill_fv_word_index - fill in the field vector indices for a recipe group
* @hw: pointer to the hardware structure
- * @fv_list: field vector with the extraction sequence information
- * @rg_list: recipe groupings with protocol-offset pairs
+ * @rm: recipe management list entry
*
* Helper function to fill in the field vector indices for protocol-offset
* pairs. These indexes are then ultimately programmed into a recipe.
*/
static int
-ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
- struct list_head *rg_list)
+ice_fill_fv_word_index(struct ice_hw *hw, struct ice_sw_recipe *rm)
{
struct ice_sw_fv_list_entry *fv;
- struct ice_recp_grp_entry *rg;
struct ice_fv_word *fv_ext;
+ u8 i;
- if (list_empty(fv_list))
- return 0;
+ if (list_empty(&rm->fv_list))
+ return -EINVAL;
- fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
+ fv = list_first_entry(&rm->fv_list, struct ice_sw_fv_list_entry,
list_entry);
fv_ext = fv->fv_ptr->ew;
- list_for_each_entry(rg, rg_list, l_entry) {
- u8 i;
-
- for (i = 0; i < rg->r_group.n_val_pairs; i++) {
- struct ice_fv_word *pr;
- bool found = false;
- u16 mask;
- u8 j;
+ /* Add switch id as the first word. */
+ rm->fv_idx[0] = ICE_AQ_SW_ID_LKUP_IDX;
+ rm->fv_mask[0] = ICE_AQ_SW_ID_LKUP_MASK;
+ rm->n_ext_words++;
- pr = &rg->r_group.pairs[i];
- mask = rg->r_group.mask[i];
-
- for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
- if (fv_ext[j].prot_id == pr->prot_id &&
- fv_ext[j].off == pr->off) {
- found = true;
+ for (i = 1; i < rm->n_ext_words; i++) {
+ struct ice_fv_word *fv_word = &rm->ext_words[i - 1];
+ u16 fv_mask = rm->word_masks[i - 1];
+ bool found = false;
+ u8 j;
- /* Store index of field vector */
- rg->fv_idx[i] = j;
- rg->fv_mask[i] = mask;
- break;
- }
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) {
+ if (fv_ext[j].prot_id == fv_word->prot_id &&
+ fv_ext[j].off == fv_word->off) {
+ found = true;
- /* Protocol/offset could not be found, caller gave an
- * invalid pair
- */
- if (!found)
- return -EINVAL;
+ /* Store index of field vector */
+ rm->fv_idx[i] = j;
+ rm->fv_mask[i] = fv_mask;
+ break;
+ }
}
+
+ /* Protocol/offset could not be found, caller gave an invalid
+ * pair.
+ */
+ if (!found)
+ return -EINVAL;
}
return 0;
@@ -5074,335 +4989,223 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
}
/**
- * ice_add_sw_recipe - function to call AQ calls to create switch recipe
- * @hw: pointer to hardware structure
- * @rm: recipe management list entry
- * @profiles: bitmap of profiles that will be associated.
+ * ice_calc_recp_cnt - calculate number of recipes based on word count
+ * @word_cnt: number of lookup words
+ *
+ * Word count should include the switch ID word and the regular lookup words.
+ * Returns: number of recipes required to fit @word_cnt, including any extra
+ * recipes needed for recipe chaining.
*/
-static int
-ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
- unsigned long *profiles)
+static int ice_calc_recp_cnt(u8 word_cnt)
{
- DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
- struct ice_aqc_recipe_content *content;
- struct ice_aqc_recipe_data_elem *tmp;
- struct ice_aqc_recipe_data_elem *buf;
- struct ice_recp_grp_entry *entry;
- u16 free_res_idx;
- u16 recipe_count;
- u8 chain_idx;
- u8 recps = 0;
- int status;
+ /* All words fit in a single recipe, no need for chaining. */
+ if (word_cnt <= ICE_NUM_WORDS_RECIPE)
+ return 1;
- /* When more than one recipe are required, another recipe is needed to
- * chain them together. Matching a tunnel metadata ID takes up one of
- * the match fields in the chaining recipe reducing the number of
- * chained recipes by one.
+ /* Recipe chaining required. Result indexes are fitted right after
+ * regular lookup words. In some cases a new recipe must be added in
+ * order to fit result indexes.
+ *
+ * As the word count increases, every 5 words an extra recipe needs to be
+ * added. However, adding a recipe also consumes one word in the root
+ * recipe for its result index, so the recipe count effectively grows by
+ * one for every 4 words. This calculation does not apply when all the
+ * words fit in a single recipe, which the early return above handles.
*/
- /* check number of free result indices */
- bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
- free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
+ return (word_cnt + 2) / (ICE_NUM_WORDS_RECIPE - 1);
+}
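
To make the chaining arithmetic concrete, here is a minimal user-space sketch of the same calculation, assuming ICE_NUM_WORDS_RECIPE is 5 (the switch ID word plus the regular lookup words per recipe), which is what the +2 in the numerator implies:

#include <assert.h>

#define NUM_WORDS_RECIPE 5	/* assumed value of ICE_NUM_WORDS_RECIPE */

/* Mirrors ice_calc_recp_cnt(): one recipe if everything fits, otherwise
 * ceil((word_cnt - 1) / (NUM_WORDS_RECIPE - 1)), because every extra recipe
 * adds NUM_WORDS_RECIPE lookup slots but costs one root-recipe slot for its
 * result index.
 */
static int calc_recp_cnt(int word_cnt)
{
	if (word_cnt <= NUM_WORDS_RECIPE)
		return 1;

	return (word_cnt + 2) / (NUM_WORDS_RECIPE - 1);
}

int main(void)
{
	assert(calc_recp_cnt(5) == 1);	/* fits in a single recipe */
	assert(calc_recp_cnt(6) == 2);	/* one subrecipe + root (1 word, 1 result index) */
	assert(calc_recp_cnt(9) == 2);	/* root holds 4 words + 1 result index */
	assert(calc_recp_cnt(10) == 3);	/* two subrecipes + root (2 result indexes) */
	return 0;
}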
- ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
- free_res_idx, rm->n_grp_count);
+static void fill_recipe_template(struct ice_aqc_recipe_data_elem *recp, u16 rid,
+ const struct ice_sw_recipe *rm)
+{
+ int i;
- if (rm->n_grp_count > 1) {
- if (rm->n_grp_count > free_res_idx)
- return -ENOSPC;
+ recp->recipe_indx = rid;
+ recp->content.act_ctrl |= ICE_AQ_RECIPE_ACT_PRUNE_INDX_M;
- rm->n_grp_count++;
+ for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
+ recp->content.lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE;
+ recp->content.mask[i] = cpu_to_le16(0);
}
- if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
- return -ENOSPC;
+ set_bit(rid, (unsigned long *)recp->recipe_bitmap);
+ recp->content.act_ctrl_fwd_priority = rm->priority;
- tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
- GFP_KERNEL);
- if (!buf) {
- status = -ENOMEM;
- goto err_mem;
- }
+ if (rm->need_pass_l2)
+ recp->content.act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
- bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
- recipe_count = ICE_MAX_NUM_RECIPES;
- status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
- NULL);
- if (status || recipe_count == 0)
- goto err_unroll;
+ if (rm->allow_pass_l2)
+ recp->content.act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
+}
- /* Allocate the recipe resources, and configure them according to the
- * match fields from protocol headers and extracted field vectors.
- */
- chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
- list_for_each_entry(entry, &rm->rg_list, l_entry) {
- u8 i;
+static void bookkeep_recipe(struct ice_sw_recipe *recipe,
+ struct ice_aqc_recipe_data_elem *r,
+ const struct ice_sw_recipe *rm)
+{
+ memcpy(recipe->r_bitmap, r->recipe_bitmap, sizeof(recipe->r_bitmap));
- status = ice_alloc_recipe(hw, &entry->rid);
- if (status)
- goto err_unroll;
+ recipe->priority = r->content.act_ctrl_fwd_priority;
+ recipe->tun_type = rm->tun_type;
+ recipe->need_pass_l2 = rm->need_pass_l2;
+ recipe->allow_pass_l2 = rm->allow_pass_l2;
+ recipe->recp_created = true;
+}
- content = &buf[recps].content;
+/* For memcpy in ice_add_sw_recipe. */
+static_assert(sizeof_field(struct ice_aqc_recipe_data_elem, recipe_bitmap) ==
+ sizeof_field(struct ice_sw_recipe, r_bitmap));
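
A minimal stand-alone sketch of the compile-time check used above: sizeof_field() plus static_assert() guarantees at build time that the two bitmaps copied with memcpy() stay the same size, replacing the runtime sizeof comparison the old code carried. The struct names below are made up for illustration:

#include <assert.h>
#include <string.h>

/* Same shape as the kernel helper: size of a single struct member. */
#define sizeof_field(type, member) sizeof(((type *)0)->member)

struct src { unsigned long bitmap[4]; };
struct dst { unsigned long bitmap[4]; };

/* Breaks the build, not the running system, if the sizes ever diverge. */
static_assert(sizeof_field(struct src, bitmap) == sizeof_field(struct dst, bitmap),
	      "bitmap sizes must match for memcpy()");

static void copy_bitmap(struct dst *d, const struct src *s)
{
	memcpy(d->bitmap, s->bitmap, sizeof(d->bitmap));
}

int main(void)
{
	struct src s = { .bitmap = { 1, 2, 3, 4 } };
	struct dst d;

	copy_bitmap(&d, &s);
	return d.bitmap[3] == 4 ? 0 : 1;
}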
- /* Clear the result index of the located recipe, as this will be
- * updated, if needed, later in the recipe creation process.
- */
- tmp[0].content.result_indx = 0;
+/**
+ * ice_add_sw_recipe - function to call AQ calls to create switch recipe
+ * @hw: pointer to hardware structure
+ * @rm: recipe management list entry
+ * @profiles: bitmap of profiles that will be associated.
+ */
+static int
+ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
+ unsigned long *profiles)
+{
+ struct ice_aqc_recipe_data_elem *buf __free(kfree) = NULL;
+ DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
+ struct ice_aqc_recipe_data_elem *root;
+ struct ice_sw_recipe *recipe;
+ u16 free_res_idx, rid;
+ int lookup = 0;
+ int recp_cnt;
+ int status;
+ int word;
+ int i;
- buf[recps] = tmp[0];
- buf[recps].recipe_indx = (u8)entry->rid;
- /* if the recipe is a non-root recipe RID should be programmed
- * as 0 for the rules to be applied correctly.
- */
- content->rid = 0;
- memset(&content->lkup_indx, 0,
- sizeof(content->lkup_indx));
-
- /* All recipes use look-up index 0 to match switch ID. */
- content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
- content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
- /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
- * to be 0
- */
- for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
- content->lkup_indx[i] = 0x80;
- content->mask[i] = 0;
- }
+ recp_cnt = ice_calc_recp_cnt(rm->n_ext_words);
- for (i = 0; i < entry->r_group.n_val_pairs; i++) {
- content->lkup_indx[i + 1] = entry->fv_idx[i];
- content->mask[i + 1] = cpu_to_le16(entry->fv_mask[i]);
- }
+ bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
+ bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
- if (rm->n_grp_count > 1) {
- /* Checks to see if there really is a valid result index
- * that can be used.
- */
- if (chain_idx >= ICE_MAX_FV_WORDS) {
- ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
- status = -ENOSPC;
- goto err_unroll;
- }
+ /* Check number of free result indices */
+ free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
- entry->chain_idx = chain_idx;
- content->result_indx =
- ICE_AQ_RECIPE_RESULT_EN |
- FIELD_PREP(ICE_AQ_RECIPE_RESULT_DATA_M,
- chain_idx);
- clear_bit(chain_idx, result_idx_bm);
- chain_idx = find_first_bit(result_idx_bm,
- ICE_MAX_FV_WORDS);
- }
+ ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
+ free_res_idx, recp_cnt);
- /* fill recipe dependencies */
- bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
- ICE_MAX_NUM_RECIPES);
- set_bit(buf[recps].recipe_indx,
- (unsigned long *)buf[recps].recipe_bitmap);
- content->act_ctrl_fwd_priority = rm->priority;
+ /* Last recipe doesn't need result index */
+ if (recp_cnt - 1 > free_res_idx)
+ return -ENOSPC;
- if (rm->need_pass_l2)
- content->act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
+ if (recp_cnt > ICE_MAX_CHAIN_RECIPE_RES)
+ return -E2BIG;
- if (rm->allow_pass_l2)
- content->act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
- recps++;
- }
+ buf = kcalloc(recp_cnt, sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- if (rm->n_grp_count == 1) {
- rm->root_rid = buf[0].recipe_indx;
- set_bit(buf[0].recipe_indx, rm->r_bitmap);
- buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
- if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
- memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
- sizeof(buf[0].recipe_bitmap));
- } else {
- status = -EINVAL;
- goto err_unroll;
- }
- /* Applicable only for ROOT_RECIPE, set the fwd_priority for
- * the recipe which is getting created if specified
- * by user. Usually any advanced switch filter, which results
- * into new extraction sequence, ended up creating a new recipe
- * of type ROOT and usually recipes are associated with profiles
- * Switch rule referreing newly created recipe, needs to have
- * either/or 'fwd' or 'join' priority, otherwise switch rule
- * evaluation will not happen correctly. In other words, if
- * switch rule to be evaluated on priority basis, then recipe
- * needs to have priority, otherwise it will be evaluated last.
- */
- buf[0].content.act_ctrl_fwd_priority = rm->priority;
- } else {
- struct ice_recp_grp_entry *last_chain_entry;
- u16 rid, i;
+ /* Set up the non-root subrecipes. These do not contain lookups for other
+ * subrecipes' results. Each is associated only with its own recipe index,
+ * and each non-root subrecipe needs a free result index from the FV.
+ *
+ * Note: only done if there is more than one recipe.
+ */
+ for (i = 0; i < recp_cnt - 1; i++) {
+ struct ice_aqc_recipe_content *content;
+ u8 result_idx;
- /* Allocate the last recipe that will chain the outcomes of the
- * other recipes together
- */
status = ice_alloc_recipe(hw, &rid);
if (status)
- goto err_unroll;
+ return status;
- content = &buf[recps].content;
+ fill_recipe_template(&buf[i], rid, rm);
- buf[recps].recipe_indx = (u8)rid;
- content->rid = (u8)rid;
- content->rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
- /* the new entry created should also be part of rg_list to
- * make sure we have complete recipe
+ result_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
+ /* Check if there really is a valid result index that can be
+ * used.
*/
- last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
- sizeof(*last_chain_entry),
- GFP_KERNEL);
- if (!last_chain_entry) {
- status = -ENOMEM;
- goto err_unroll;
- }
- last_chain_entry->rid = rid;
- memset(&content->lkup_indx, 0, sizeof(content->lkup_indx));
- /* All recipes use look-up index 0 to match switch ID. */
- content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
- content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
- for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
- content->lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE;
- content->mask[i] = 0;
+ if (result_idx >= ICE_MAX_FV_WORDS) {
+ ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
+ return -ENOSPC;
}
+ clear_bit(result_idx, result_idx_bm);
- i = 1;
- /* update r_bitmap with the recp that is used for chaining */
+ content = &buf[i].content;
+ content->result_indx = ICE_AQ_RECIPE_RESULT_EN |
+ FIELD_PREP(ICE_AQ_RECIPE_RESULT_DATA_M,
+ result_idx);
+
+ /* Set recipe association to be used for root recipe */
set_bit(rid, rm->r_bitmap);
- /* this is the recipe that chains all the other recipes so it
- * should not have a chaining ID to indicate the same
- */
- last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
- list_for_each_entry(entry, &rm->rg_list, l_entry) {
- last_chain_entry->fv_idx[i] = entry->chain_idx;
- content->lkup_indx[i] = entry->chain_idx;
- content->mask[i++] = cpu_to_le16(0xFFFF);
- set_bit(entry->rid, rm->r_bitmap);
- }
- list_add(&last_chain_entry->l_entry, &rm->rg_list);
- if (sizeof(buf[recps].recipe_bitmap) >=
- sizeof(rm->r_bitmap)) {
- memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
- sizeof(buf[recps].recipe_bitmap));
- } else {
- status = -EINVAL;
- goto err_unroll;
+
+ word = 0;
+ while (lookup < rm->n_ext_words &&
+ word < ICE_NUM_WORDS_RECIPE) {
+ content->lkup_indx[word] = rm->fv_idx[lookup];
+ content->mask[word] = cpu_to_le16(rm->fv_mask[lookup]);
+
+ lookup++;
+ word++;
}
- content->act_ctrl_fwd_priority = rm->priority;
- recps++;
- rm->root_rid = (u8)rid;
+ recipe = &hw->switch_info->recp_list[rid];
+ set_bit(result_idx, recipe->res_idxs);
+ bookkeep_recipe(recipe, &buf[i], rm);
}
- status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
- if (status)
- goto err_unroll;
- status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
- ice_release_change_lock(hw);
+ /* Setup the root recipe */
+ status = ice_alloc_recipe(hw, &rid);
if (status)
- goto err_unroll;
-
- /* Every recipe that just got created add it to the recipe
- * book keeping list
- */
- list_for_each_entry(entry, &rm->rg_list, l_entry) {
- struct ice_switch_info *sw = hw->switch_info;
- bool is_root, idx_found = false;
- struct ice_sw_recipe *recp;
- u16 idx, buf_idx = 0;
-
- /* find buffer index for copying some data */
- for (idx = 0; idx < rm->n_grp_count; idx++)
- if (buf[idx].recipe_indx == entry->rid) {
- buf_idx = idx;
- idx_found = true;
- }
+ return status;
- if (!idx_found) {
- status = -EIO;
- goto err_unroll;
- }
+ recipe = &hw->switch_info->recp_list[rid];
+ root = &buf[recp_cnt - 1];
+ fill_recipe_template(root, rid, rm);
- recp = &sw->recp_list[entry->rid];
- is_root = (rm->root_rid == entry->rid);
- recp->is_root = is_root;
+ /* Set recipe association, use previously set bitmap and own rid */
+ set_bit(rid, rm->r_bitmap);
+ memcpy(root->recipe_bitmap, rm->r_bitmap, sizeof(root->recipe_bitmap));
- recp->root_rid = entry->rid;
- recp->big_recp = (is_root && rm->n_grp_count > 1);
+ /* For non-root recipes the RID should be 0; for the root recipe it is the
+ * actual RID value ORed with 0x80 (the is-root bit).
+ */
+ root->content.rid = rid | ICE_AQ_RECIPE_ID_IS_ROOT;
- memcpy(&recp->ext_words, entry->r_group.pairs,
- entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));
+ /* Fill remaining lookups in root recipe */
+ word = 0;
+ while (lookup < rm->n_ext_words &&
+ word < ICE_NUM_WORDS_RECIPE /* should always be true */) {
+ root->content.lkup_indx[word] = rm->fv_idx[lookup];
+ root->content.mask[word] = cpu_to_le16(rm->fv_mask[lookup]);
- memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
- sizeof(recp->r_bitmap));
+ lookup++;
+ word++;
+ }
- /* Copy non-result fv index values and masks to recipe. This
- * call will also update the result recipe bitmask.
+ /* Fill result indexes as lookups */
+ i = 0;
+ while (i < recp_cnt - 1 &&
+ word < ICE_NUM_WORDS_RECIPE /* should always be true */) {
+ root->content.lkup_indx[word] = buf[i].content.result_indx &
+ ~ICE_AQ_RECIPE_RESULT_EN;
+ root->content.mask[word] = cpu_to_le16(0xffff);
+ /* For bookkeeping, mark the FV index as used for the intermediate
+ * result.
*/
- ice_collect_result_idx(&buf[buf_idx], recp);
+ set_bit(root->content.lkup_indx[word], recipe->res_idxs);
- /* for non-root recipes, also copy to the root, this allows
- * easier matching of a complete chained recipe
- */
- if (!is_root)
- ice_collect_result_idx(&buf[buf_idx],
- &sw->recp_list[rm->root_rid]);
-
- recp->n_ext_words = entry->r_group.n_val_pairs;
- recp->chain_idx = entry->chain_idx;
- recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
- recp->n_grp_count = rm->n_grp_count;
- recp->tun_type = rm->tun_type;
- recp->need_pass_l2 = rm->need_pass_l2;
- recp->allow_pass_l2 = rm->allow_pass_l2;
- recp->recp_created = true;
+ i++;
+ word++;
}
- rm->root_buf = buf;
- kfree(tmp);
- return status;
-err_unroll:
-err_mem:
- kfree(tmp);
- devm_kfree(ice_hw_to_dev(hw), buf);
- return status;
-}
+ rm->root_rid = rid;
+ bookkeep_recipe(&hw->switch_info->recp_list[rid], root, rm);
-/**
- * ice_create_recipe_group - creates recipe group
- * @hw: pointer to hardware structure
- * @rm: recipe management list entry
- * @lkup_exts: lookup elements
- */
-static int
-ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
- struct ice_prot_lkup_ext *lkup_exts)
-{
- u8 recp_count = 0;
- int status;
-
- rm->n_grp_count = 0;
+ /* Program the recipe */
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ return status;
- /* Create recipes for words that are marked not done by packing them
- * as best fit.
- */
- status = ice_create_first_fit_recp_def(hw, lkup_exts,
- &rm->rg_list, &recp_count);
- if (!status) {
- rm->n_grp_count += recp_count;
- rm->n_ext_words = lkup_exts->n_val_words;
- memcpy(&rm->ext_words, lkup_exts->fv_words,
- sizeof(rm->ext_words));
- memcpy(rm->word_masks, lkup_exts->field_mask,
- sizeof(rm->word_masks));
- }
+ status = ice_aq_add_recipe(hw, buf, recp_cnt, NULL);
+ ice_release_change_lock(hw);
+ if (status)
+ return status;
- return status;
+ return 0;
}
/* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
@@ -5509,9 +5312,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
struct ice_prot_lkup_ext *lkup_exts;
- struct ice_recp_grp_entry *r_entry;
struct ice_sw_fv_list_entry *fvit;
- struct ice_recp_grp_entry *r_tmp;
struct ice_sw_fv_list_entry *tmp;
struct ice_sw_recipe *rm;
int status = 0;
@@ -5553,7 +5354,6 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
* headers being programmed.
*/
INIT_LIST_HEAD(&rm->fv_list);
- INIT_LIST_HEAD(&rm->rg_list);
/* Get bitmap of field vectors (profiles) that are compatible with the
* rule request; only these will be searched in the subsequent call to
@@ -5565,12 +5365,10 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
if (status)
goto err_unroll;
- /* Group match words into recipes using preferred recipe grouping
- * criteria.
- */
- status = ice_create_recipe_group(hw, rm, lkup_exts);
- if (status)
- goto err_unroll;
+ /* Copy FV words and masks from lkup_exts to recipe struct. */
+ rm->n_ext_words = lkup_exts->n_val_words;
+ memcpy(rm->ext_words, lkup_exts->fv_words, sizeof(rm->ext_words));
+ memcpy(rm->word_masks, lkup_exts->field_mask, sizeof(rm->word_masks));
/* set the recipe priority if specified */
rm->priority = (u8)rinfo->priority;
@@ -5581,7 +5379,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* Find offsets from the field vector. Pick the first one for all the
* recipes.
*/
- status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
+ status = ice_fill_fv_word_index(hw, rm);
if (status)
goto err_unroll;
@@ -5659,17 +5457,11 @@ err_free_recipe:
}
err_unroll:
- list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
- list_del(&r_entry->l_entry);
- devm_kfree(ice_hw_to_dev(hw), r_entry);
- }
-
list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
list_del(&fvit->list_entry);
devm_kfree(ice_hw_to_dev(hw), fvit);
}
- devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
kfree(rm);
err_free_lkup_exts:
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index ad98e98c812d..671d7a5f359f 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -216,7 +216,6 @@ struct ice_sw_recipe {
/* For a chained recipe the root recipe is what should be used for
* programming rules
*/
- u8 is_root;
u8 root_rid;
u8 recp_created;
@@ -227,19 +226,8 @@ struct ice_sw_recipe {
*/
struct ice_fv_word ext_words[ICE_MAX_CHAIN_WORDS];
u16 word_masks[ICE_MAX_CHAIN_WORDS];
-
- /* if this recipe is a collection of other recipe */
- u8 big_recp;
-
- /* if this recipe is part of another bigger recipe then chain index
- * corresponding to this recipe
- */
- u8 chain_idx;
-
- /* if this recipe is a collection of other recipe then count of other
- * recipes and recipe IDs of those recipes
- */
- u8 n_grp_count;
+ u8 fv_idx[ICE_MAX_CHAIN_WORDS];
+ u16 fv_mask[ICE_MAX_CHAIN_WORDS];
/* Bit map specifying the IDs associated with this group of recipe */
DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
@@ -272,10 +260,6 @@ struct ice_sw_recipe {
u8 need_pass_l2:1;
u8 allow_pass_l2:1;
- struct list_head rg_list;
-
- /* AQ buffer associated with this recipe */
- struct ice_aqc_recipe_data_elem *root_buf;
/* This struct saves the fv_words for a given lookup */
struct ice_prot_lkup_ext lkup_exts;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index 8bd24b33f3a6..e6923f8121a9 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -1353,6 +1353,7 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
struct ice_tc_flower_fltr *fltr)
{
struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
+ struct netlink_ext_ack *extack = fltr->extack;
struct flow_match_control enc_control;
fltr->tunnel_type = ice_tc_tun_get_type(dev);
@@ -1373,6 +1374,9 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
flow_rule_match_enc_control(rule, &enc_control);
+ if (flow_rule_has_enc_control_flags(enc_control.mask->flags, extack))
+ return -EOPNOTSUPP;
+
if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_match_ipv4_addrs match;
diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h
index 244cddd2a9ea..07aab6e130cd 100644
--- a/drivers/net/ethernet/intel/ice/ice_trace.h
+++ b/drivers/net/ethernet/intel/ice/ice_trace.h
@@ -330,6 +330,24 @@ DEFINE_EVENT(ice_esw_br_port_template,
TP_ARGS(port)
);
+DECLARE_EVENT_CLASS(ice_switch_stats_template,
+ TP_PROTO(struct ice_switch_info *sw_info),
+ TP_ARGS(sw_info),
+ TP_STRUCT__entry(__field(u16, rule_cnt)
+ __field(u8, recp_cnt)),
+ TP_fast_assign(__entry->rule_cnt = sw_info->rule_cnt;
+ __entry->recp_cnt = sw_info->recp_cnt;),
+ TP_printk("rules=%u recipes=%u",
+ __entry->rule_cnt,
+ __entry->recp_cnt)
+);
+
+DEFINE_EVENT(ice_switch_stats_template,
+ ice_aq_sw_rules,
+ TP_PROTO(struct ice_switch_info *sw_info),
+ TP_ARGS(sw_info)
+);
+
/* End tracepoints */
#endif /* _ICE_TRACE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index eef397e5baa0..96037bef3e78 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -71,6 +71,14 @@ enum ice_aq_res_ids {
ICE_GLOBAL_CFG_LOCK_RES_ID
};
+enum ice_fec_stats_types {
+ ICE_FEC_CORR_LOW,
+ ICE_FEC_CORR_HIGH,
+ ICE_FEC_UNCORR_LOW,
+ ICE_FEC_UNCORR_HIGH,
+ ICE_FEC_MAX
+};
+
/* FW update timeout definitions are in milliseconds */
#define ICE_NVM_TIMEOUT 180000
#define ICE_CHANGE_LOCK_TIMEOUT 1000
@@ -322,12 +330,14 @@ enum ice_time_ref_freq {
ICE_TIME_REF_FREQ_156_250 = 4,
ICE_TIME_REF_FREQ_245_760 = 5,
- NUM_ICE_TIME_REF_FREQ
+ NUM_ICE_TIME_REF_FREQ,
+
+ ICE_TIME_REF_FREQ_INVALID = -1,
};
/* Clock source specification */
enum ice_clk_src {
- ICE_CLK_SRC_TCX0 = 0, /* Temperature compensated oscillator */
+ ICE_CLK_SRC_TCXO = 0, /* Temperature compensated oscillator */
ICE_CLK_SRC_TIME_REF = 1, /* Use TIME_REF reference clock */
NUM_ICE_CLK_SRC
@@ -372,6 +382,15 @@ struct ice_ts_dev_info {
u8 ts_ll_int_read;
};
+#define ICE_NAC_TOPO_PRIMARY_M BIT(0)
+#define ICE_NAC_TOPO_DUAL_M BIT(1)
+#define ICE_NAC_TOPO_ID_M GENMASK(0xF, 0)
+
+struct ice_nac_topology {
+ u32 mode;
+ u8 id;
+};
+
/* Function specific capabilities */
struct ice_hw_func_caps {
struct ice_hw_common_caps common_cap;
@@ -393,6 +412,7 @@ struct ice_hw_dev_caps {
u32 num_flow_director_fltr; /* Number of FD filters available */
struct ice_ts_dev_info ts_dev_info;
u32 num_funcs;
+ struct ice_nac_topology nac_topo;
/* bitmap of supported sensors
* bit 0 - internal temperature sensor
* bit 31:1 - Reserved
@@ -718,6 +738,7 @@ struct ice_port_info {
u16 sw_id; /* Initial switch ID belongs to port */
u16 pf_vf_num;
u8 port_state;
+ u8 local_fwd_mode;
#define ICE_SCHED_PORT_STATE_INIT 0x0
#define ICE_SCHED_PORT_STATE_READY 0x1
u8 lport;
@@ -741,6 +762,8 @@ struct ice_switch_info {
struct ice_sw_recipe *recp_list;
u16 prof_res_bm_init;
u16 max_used_prof_index;
+ u16 rule_cnt;
+ u8 recp_cnt;
DECLARE_BITMAP(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS);
};
@@ -820,11 +843,43 @@ struct ice_mbx_data {
u16 async_watermark_val;
};
+#define ICE_PORTS_PER_QUAD 4
+#define ICE_GET_QUAD_NUM(port) ((port) / ICE_PORTS_PER_QUAD)
+
+struct ice_eth56g_params {
+ u8 num_phys;
+ u8 phy_addr[2];
+ bool onestep_ena;
+ bool sfd_ena;
+ u32 peer_delay;
+};
+
+union ice_phy_params {
+ struct ice_eth56g_params eth56g;
+};
+
/* PHY model */
enum ice_phy_model {
ICE_PHY_UNSUP = -1,
- ICE_PHY_E810 = 1,
+ ICE_PHY_E810 = 1,
ICE_PHY_E82X,
+ ICE_PHY_ETH56G,
+};
+
+/* Global Link Topology */
+enum ice_global_link_topo {
+ ICE_LINK_TOPO_UP_TO_2_LINKS,
+ ICE_LINK_TOPO_UP_TO_4_LINKS,
+ ICE_LINK_TOPO_UP_TO_8_LINKS,
+ ICE_LINK_TOPO_RESERVED,
+};
+
+struct ice_ptp_hw {
+ enum ice_phy_model phy_model;
+ union ice_phy_params phy;
+ u8 num_lports;
+ u8 ports_per_phy;
+ bool is_2x50g_muxed_topo;
};
/* Port hardware description */
@@ -848,7 +903,6 @@ struct ice_hw {
u8 revision_id;
u8 pf_id; /* device profile info */
- enum ice_phy_model phy_model;
u16 max_burst_size; /* driver sets this value */
@@ -911,12 +965,7 @@ struct ice_hw {
/* INTRL granularity in 1 us */
u8 intrl_gran;
-#define ICE_MAX_QUAD 2
-#define ICE_QUADS_PER_PHY_E82X 2
-#define ICE_PORTS_PER_PHY_E82X 8
-#define ICE_PORTS_PER_QUAD 4
-#define ICE_PORTS_PER_PHY_E810 4
-#define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD)
+ struct ice_ptp_hw ptp;
/* Active package version (currently active) */
struct ice_pkg_ver active_pkg_ver;
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 48a8d462d76a..5635e9da2212 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -948,7 +948,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
goto out_unlock;
}
- ice_eswitch_update_repr(vf->repr_id, vsi);
+ ice_eswitch_update_repr(&vf->repr_id, vsi);
/* if the VF has been reset allow it to come up again */
ice_mbx_clear_malvf(&vf->mbx_info);
diff --git a/drivers/net/ethernet/intel/idpf/Kconfig b/drivers/net/ethernet/intel/idpf/Kconfig
new file mode 100644
index 000000000000..1addd663acad
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/Kconfig
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2024 Intel Corporation
+
+config IDPF
+ tristate "Intel(R) Infrastructure Data Path Function Support"
+ depends on PCI_MSI
+ select DIMLIB
+ select LIBETH
+ help
+ This driver supports Intel(R) Infrastructure Data Path Function
+ devices.
+
+ To compile this driver as a module, choose M here. The module
+ will be called idpf.
+
+if IDPF
+
+config IDPF_SINGLEQ
+ bool "idpf singleq support"
+ help
+ This option enables support for legacy single Rx/Tx queues with no
+ completion and fill queues. Only enable this if you have hardware
+ that needs to work in this mode, as it increases the driver size and
+ adds runtime checks on the hotpath.
+
+endif # IDPF
diff --git a/drivers/net/ethernet/intel/idpf/Makefile b/drivers/net/ethernet/intel/idpf/Makefile
index 6844ead2f3ac..2ce01a0b5898 100644
--- a/drivers/net/ethernet/intel/idpf/Makefile
+++ b/drivers/net/ethernet/intel/idpf/Makefile
@@ -12,7 +12,8 @@ idpf-y := \
idpf_ethtool.o \
idpf_lib.o \
idpf_main.o \
- idpf_singleq_txrx.o \
idpf_txrx.o \
idpf_virtchnl.o \
idpf_vf_dev.o
+
+idpf-$(CONFIG_IDPF_SINGLEQ) += idpf_singleq_txrx.o
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index e7a036538246..2c31ad87587a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -17,10 +17,8 @@ struct idpf_vport_max_q;
#include <linux/sctp.h>
#include <linux/ethtool_netlink.h>
#include <net/gro.h>
-#include <linux/dim.h>
#include "virtchnl2.h"
-#include "idpf_lan_txrx.h"
#include "idpf_txrx.h"
#include "idpf_controlq.h"
@@ -266,7 +264,6 @@ struct idpf_port_stats {
* the worst case.
* @num_bufqs_per_qgrp: Buffer queues per RX queue in a given grouping
* @bufq_desc_count: Buffer queue descriptor count
- * @bufq_size: Size of buffers in ring (e.g. 2K, 4K, etc)
* @num_rxq_grp: Number of RX queues in a group
* @rxq_grps: Total number of RX groups. Number of groups * number of RX per
* group will yield total number of RX queues.
@@ -302,7 +299,7 @@ struct idpf_vport {
u16 num_txq_grp;
struct idpf_txq_group *txq_grps;
u32 txq_model;
- struct idpf_queue **txqs;
+ struct idpf_tx_queue **txqs;
bool crc_enable;
u16 num_rxq;
@@ -310,11 +307,10 @@ struct idpf_vport {
u32 rxq_desc_count;
u8 num_bufqs_per_qgrp;
u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP];
- u32 bufq_size[IDPF_MAX_BUFQS_PER_RXQ_GRP];
u16 num_rxq_grp;
struct idpf_rxq_group *rxq_grps;
u32 rxq_model;
- struct idpf_rx_ptype_decoded rx_ptype_lkup[IDPF_RX_MAX_PTYPE];
+ struct libeth_rx_pt *rx_ptype_lkup;
struct idpf_adapter *adapter;
struct net_device *netdev;
@@ -601,7 +597,8 @@ struct idpf_adapter {
*/
static inline int idpf_is_queue_model_split(u16 q_model)
{
- return q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ return !IS_ENABLED(CONFIG_IDPF_SINGLEQ) ||
+ q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT;
}
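
The IS_ENABLED() expression above turns split-queue into a compile-time constant whenever CONFIG_IDPF_SINGLEQ is not selected, so branches guarded by the negated helper become dead code and the singleq Tx/Rx paths drop out of the object file. A rough user-space sketch of the same pattern, with stand-in macros instead of the kernel's config machinery:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins only: assume the singleq option is compiled out. */
#define SINGLEQ_ENABLED 0
#define QUEUE_MODEL_SPLIT 1

static inline bool is_queue_model_split(unsigned int q_model)
{
	/* With SINGLEQ_ENABLED fixed at 0 this is a constant 'true', so the
	 * compiler can eliminate any branch taken only when it is false.
	 */
	return !SINGLEQ_ENABLED || q_model == QUEUE_MODEL_SPLIT;
}

int main(void)
{
	printf("model 0 treated as split: %d\n", is_queue_model_split(0));
	return 0;
}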
#define idpf_is_cap_ena(adapter, field, flag) \
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 1885ba618981..3806ddd3ce4a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -437,22 +437,24 @@ struct idpf_stats {
.stat_offset = offsetof(_type, _stat) \
}
-/* Helper macro for defining some statistics related to queues */
-#define IDPF_QUEUE_STAT(_name, _stat) \
- IDPF_STAT(struct idpf_queue, _name, _stat)
+/* Helper macros for defining some statistics related to queues */
+#define IDPF_RX_QUEUE_STAT(_name, _stat) \
+ IDPF_STAT(struct idpf_rx_queue, _name, _stat)
+#define IDPF_TX_QUEUE_STAT(_name, _stat) \
+ IDPF_STAT(struct idpf_tx_queue, _name, _stat)
/* Stats associated with a Tx queue */
static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = {
- IDPF_QUEUE_STAT("pkts", q_stats.tx.packets),
- IDPF_QUEUE_STAT("bytes", q_stats.tx.bytes),
- IDPF_QUEUE_STAT("lso_pkts", q_stats.tx.lso_pkts),
+ IDPF_TX_QUEUE_STAT("pkts", q_stats.packets),
+ IDPF_TX_QUEUE_STAT("bytes", q_stats.bytes),
+ IDPF_TX_QUEUE_STAT("lso_pkts", q_stats.lso_pkts),
};
/* Stats associated with an Rx queue */
static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = {
- IDPF_QUEUE_STAT("pkts", q_stats.rx.packets),
- IDPF_QUEUE_STAT("bytes", q_stats.rx.bytes),
- IDPF_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rx.rsc_pkts),
+ IDPF_RX_QUEUE_STAT("pkts", q_stats.packets),
+ IDPF_RX_QUEUE_STAT("bytes", q_stats.bytes),
+ IDPF_RX_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rsc_pkts),
};
#define IDPF_TX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_tx_queue_stats)
@@ -563,8 +565,6 @@ static void idpf_get_stat_strings(struct net_device *netdev, u8 *data)
for (i = 0; i < vport_config->max_q.max_rxq; i++)
idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats,
"rx", i);
-
- page_pool_ethtool_stats_get_strings(data);
}
/**
@@ -598,7 +598,6 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset)
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_config *vport_config;
u16 max_txq, max_rxq;
- unsigned int size;
if (sset != ETH_SS_STATS)
return -EINVAL;
@@ -617,11 +616,8 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset)
max_txq = vport_config->max_q.max_txq;
max_rxq = vport_config->max_q.max_rxq;
- size = IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
+ return IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
(IDPF_RX_QUEUE_STATS_LEN * max_rxq);
- size += page_pool_ethtool_stats_get_count();
-
- return size;
}
/**
@@ -633,7 +629,7 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset)
* Copies the stat data defined by the pointer and stat structure pair into
* the memory supplied as data. If the pointer is null, data will be zero'd.
*/
-static void idpf_add_one_ethtool_stat(u64 *data, void *pstat,
+static void idpf_add_one_ethtool_stat(u64 *data, const void *pstat,
const struct idpf_stats *stat)
{
char *p;
@@ -671,6 +667,7 @@ static void idpf_add_one_ethtool_stat(u64 *data, void *pstat,
* idpf_add_queue_stats - copy queue statistics into supplied buffer
* @data: ethtool stats buffer
* @q: the queue to copy
+ * @type: type of the queue
*
* Queue statistics must be copied while protected by u64_stats_fetch_begin,
* so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats
@@ -681,19 +678,23 @@ static void idpf_add_one_ethtool_stat(u64 *data, void *pstat,
*
* This function expects to be called while under rcu_read_lock().
*/
-static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q)
+static void idpf_add_queue_stats(u64 **data, const void *q,
+ enum virtchnl2_queue_type type)
{
+ const struct u64_stats_sync *stats_sync;
const struct idpf_stats *stats;
unsigned int start;
unsigned int size;
unsigned int i;
- if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) {
+ if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
size = IDPF_RX_QUEUE_STATS_LEN;
stats = idpf_gstrings_rx_queue_stats;
+ stats_sync = &((const struct idpf_rx_queue *)q)->stats_sync;
} else {
size = IDPF_TX_QUEUE_STATS_LEN;
stats = idpf_gstrings_tx_queue_stats;
+ stats_sync = &((const struct idpf_tx_queue *)q)->stats_sync;
}
/* To avoid invalid statistics values, ensure that we keep retrying
@@ -701,10 +702,10 @@ static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q)
* u64_stats_fetch_retry.
*/
do {
- start = u64_stats_fetch_begin(&q->stats_sync);
+ start = u64_stats_fetch_begin(stats_sync);
for (i = 0; i < size; i++)
idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]);
- } while (u64_stats_fetch_retry(&q->stats_sync, start));
+ } while (u64_stats_fetch_retry(stats_sync, start));
/* Once we successfully copy the stats in, update the data pointer */
*data += size;
@@ -793,7 +794,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
for (j = 0; j < num_rxq; j++) {
u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs;
struct idpf_rx_queue_stats *stats;
- struct idpf_queue *rxq;
+ struct idpf_rx_queue *rxq;
unsigned int start;
if (idpf_is_queue_model_split(vport->rxq_model))
@@ -807,7 +808,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
do {
start = u64_stats_fetch_begin(&rxq->stats_sync);
- stats = &rxq->q_stats.rx;
+ stats = &rxq->q_stats;
hw_csum_err = u64_stats_read(&stats->hw_csum_err);
hsplit = u64_stats_read(&stats->hsplit_pkts);
hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf);
@@ -828,7 +829,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
for (j = 0; j < txq_grp->num_txq; j++) {
u64 linearize, qbusy, skb_drops, dma_map_errs;
- struct idpf_queue *txq = txq_grp->txqs[j];
+ struct idpf_tx_queue *txq = txq_grp->txqs[j];
struct idpf_tx_queue_stats *stats;
unsigned int start;
@@ -838,7 +839,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
do {
start = u64_stats_fetch_begin(&txq->stats_sync);
- stats = &txq->q_stats.tx;
+ stats = &txq->q_stats;
linearize = u64_stats_read(&stats->linearize);
qbusy = u64_stats_read(&stats->q_busy);
skb_drops = u64_stats_read(&stats->skb_drops);
@@ -869,7 +870,6 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_config *vport_config;
- struct page_pool_stats pp_stats = { };
struct idpf_vport *vport;
unsigned int total = 0;
unsigned int i, j;
@@ -896,12 +896,12 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
qtype = VIRTCHNL2_QUEUE_TYPE_TX;
for (j = 0; j < txq_grp->num_txq; j++, total++) {
- struct idpf_queue *txq = txq_grp->txqs[j];
+ struct idpf_tx_queue *txq = txq_grp->txqs[j];
if (!txq)
idpf_add_empty_queue_stats(&data, qtype);
else
- idpf_add_queue_stats(&data, txq);
+ idpf_add_queue_stats(&data, txq, qtype);
}
}
@@ -929,7 +929,7 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
num_rxq = rxq_grp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++, total++) {
- struct idpf_queue *rxq;
+ struct idpf_rx_queue *rxq;
if (is_splitq)
rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
@@ -938,93 +938,77 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
if (!rxq)
idpf_add_empty_queue_stats(&data, qtype);
else
- idpf_add_queue_stats(&data, rxq);
-
- /* In splitq mode, don't get page pool stats here since
- * the pools are attached to the buffer queues
- */
- if (is_splitq)
- continue;
-
- if (rxq)
- page_pool_get_stats(rxq->pp, &pp_stats);
- }
- }
-
- for (i = 0; i < vport->num_rxq_grp; i++) {
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
- struct idpf_queue *rxbufq =
- &vport->rxq_grps[i].splitq.bufq_sets[j].bufq;
-
- page_pool_get_stats(rxbufq->pp, &pp_stats);
+ idpf_add_queue_stats(&data, rxq, qtype);
}
}
for (; total < vport_config->max_q.max_rxq; total++)
idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX);
- page_pool_ethtool_stats_get(data, &pp_stats);
-
rcu_read_unlock();
idpf_vport_ctrl_unlock(netdev);
}
/**
- * idpf_find_rxq - find rxq from q index
+ * idpf_find_rxq_vec - find rxq vector from q index
* @vport: virtual port associated to queue
* @q_num: q index used to find queue
*
- * returns pointer to rx queue
+ * returns pointer to rx vector
*/
-static struct idpf_queue *idpf_find_rxq(struct idpf_vport *vport, int q_num)
+static struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
+ int q_num)
{
int q_grp, q_idx;
if (!idpf_is_queue_model_split(vport->rxq_model))
- return vport->rxq_grps->singleq.rxqs[q_num];
+ return vport->rxq_grps->singleq.rxqs[q_num]->q_vector;
q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
- return &vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq;
+ return vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector;
}
/**
- * idpf_find_txq - find txq from q index
+ * idpf_find_txq_vec - find txq vector from q index
* @vport: virtual port associated to queue
* @q_num: q index used to find queue
*
- * returns pointer to tx queue
+ * returns pointer to tx vector
*/
-static struct idpf_queue *idpf_find_txq(struct idpf_vport *vport, int q_num)
+static struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
+ int q_num)
{
int q_grp;
if (!idpf_is_queue_model_split(vport->txq_model))
- return vport->txqs[q_num];
+ return vport->txqs[q_num]->q_vector;
q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
- return vport->txq_grps[q_grp].complq;
+ return vport->txq_grps[q_grp].complq->q_vector;
}
/**
* __idpf_get_q_coalesce - get ITR values for specific queue
* @ec: ethtool structure to fill with driver's coalesce settings
- * @q: quuee of Rx or Tx
+ * @q_vector: queue vector corresponding to this queue
+ * @type: queue type
*/
static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec,
- struct idpf_queue *q)
+ const struct idpf_q_vector *q_vector,
+ enum virtchnl2_queue_type type)
{
- if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) {
+ if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
ec->use_adaptive_rx_coalesce =
- IDPF_ITR_IS_DYNAMIC(q->q_vector->rx_intr_mode);
- ec->rx_coalesce_usecs = q->q_vector->rx_itr_value;
+ IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode);
+ ec->rx_coalesce_usecs = q_vector->rx_itr_value;
} else {
ec->use_adaptive_tx_coalesce =
- IDPF_ITR_IS_DYNAMIC(q->q_vector->tx_intr_mode);
- ec->tx_coalesce_usecs = q->q_vector->tx_itr_value;
+ IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode);
+ ec->tx_coalesce_usecs = q_vector->tx_itr_value;
}
}
@@ -1040,8 +1024,8 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
u32 q_num)
{
- struct idpf_netdev_priv *np = netdev_priv(netdev);
- struct idpf_vport *vport;
+ const struct idpf_netdev_priv *np = netdev_priv(netdev);
+ const struct idpf_vport *vport;
int err = 0;
idpf_vport_ctrl_lock(netdev);
@@ -1056,10 +1040,12 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
}
if (q_num < vport->num_rxq)
- __idpf_get_q_coalesce(ec, idpf_find_rxq(vport, q_num));
+ __idpf_get_q_coalesce(ec, idpf_find_rxq_vec(vport, q_num),
+ VIRTCHNL2_QUEUE_TYPE_RX);
if (q_num < vport->num_txq)
- __idpf_get_q_coalesce(ec, idpf_find_txq(vport, q_num));
+ __idpf_get_q_coalesce(ec, idpf_find_txq_vec(vport, q_num),
+ VIRTCHNL2_QUEUE_TYPE_TX);
unlock_mutex:
idpf_vport_ctrl_unlock(netdev);
@@ -1103,16 +1089,15 @@ static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
/**
* __idpf_set_q_coalesce - set ITR values for specific queue
* @ec: ethtool structure from user to update ITR settings
- * @q: queue for which itr values has to be set
+ * @qv: queue vector for which itr values has to be set
* @is_rxq: is queue type rx
*
* Returns 0 on success, negative otherwise.
*/
-static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
- struct idpf_queue *q, bool is_rxq)
+static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
+ struct idpf_q_vector *qv, bool is_rxq)
{
u32 use_adaptive_coalesce, coalesce_usecs;
- struct idpf_q_vector *qv = q->q_vector;
bool is_dim_ena = false;
u16 itr_val;
@@ -1128,7 +1113,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
itr_val = qv->tx_itr_value;
}
if (coalesce_usecs != itr_val && use_adaptive_coalesce) {
- netdev_err(q->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n");
+ netdev_err(qv->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n");
return -EINVAL;
}
@@ -1137,7 +1122,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
return 0;
if (coalesce_usecs > IDPF_ITR_MAX) {
- netdev_err(q->vport->netdev,
+ netdev_err(qv->vport->netdev,
"Invalid value, %d-usecs range is 0-%d\n",
coalesce_usecs, IDPF_ITR_MAX);
@@ -1146,7 +1131,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
if (coalesce_usecs % 2) {
coalesce_usecs--;
- netdev_info(q->vport->netdev,
+ netdev_info(qv->vport->netdev,
"HW only supports even ITR values, ITR rounded to %d\n",
coalesce_usecs);
}
@@ -1185,15 +1170,16 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
*
* Return 0 on success, and negative on failure
*/
-static int idpf_set_q_coalesce(struct idpf_vport *vport,
- struct ethtool_coalesce *ec,
+static int idpf_set_q_coalesce(const struct idpf_vport *vport,
+ const struct ethtool_coalesce *ec,
int q_num, bool is_rxq)
{
- struct idpf_queue *q;
+ struct idpf_q_vector *qv;
- q = is_rxq ? idpf_find_rxq(vport, q_num) : idpf_find_txq(vport, q_num);
+ qv = is_rxq ? idpf_find_rxq_vec(vport, q_num) :
+ idpf_find_txq_vec(vport, q_num);
- if (q && __idpf_set_q_coalesce(ec, q, is_rxq))
+ if (qv && __idpf_set_q_coalesce(ec, qv, is_rxq))
return -EINVAL;
return 0;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
index a5752dcab888..8c7f8ef8f1a1 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
@@ -4,6 +4,8 @@
#ifndef _IDPF_LAN_TXRX_H_
#define _IDPF_LAN_TXRX_H_
+#include <linux/bits.h>
+
enum idpf_rss_hash {
IDPF_HASH_INVALID = 0,
/* Values 1 - 28 are reserved for future use */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index f1ee5584e8fa..5dbf2b4ba1b0 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -4,8 +4,7 @@
#include "idpf.h"
#include "idpf_virtchnl.h"
-static const struct net_device_ops idpf_netdev_ops_splitq;
-static const struct net_device_ops idpf_netdev_ops_singleq;
+static const struct net_device_ops idpf_netdev_ops;
/**
* idpf_init_vector_stack - Fill the MSIX vector stack with vector index
@@ -69,7 +68,7 @@ static void idpf_deinit_vector_stack(struct idpf_adapter *adapter)
static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter)
{
clear_bit(IDPF_MB_INTR_MODE, adapter->flags);
- free_irq(adapter->msix_entries[0].vector, adapter);
+ kfree(free_irq(adapter->msix_entries[0].vector, adapter));
queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
}
@@ -124,15 +123,14 @@ static void idpf_mb_irq_enable(struct idpf_adapter *adapter)
*/
static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter)
{
- struct idpf_q_vector *mb_vector = &adapter->mb_vector;
int irq_num, mb_vidx = 0, err;
+ char *name;
irq_num = adapter->msix_entries[mb_vidx].vector;
- mb_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d",
- dev_driver_string(&adapter->pdev->dev),
- "Mailbox", mb_vidx);
- err = request_irq(irq_num, adapter->irq_mb_handler, 0,
- mb_vector->name, adapter);
+ name = kasprintf(GFP_KERNEL, "%s-%s-%d",
+ dev_driver_string(&adapter->pdev->dev),
+ "Mailbox", mb_vidx);
+ err = request_irq(irq_num, adapter->irq_mb_handler, 0, name, adapter);
if (err) {
dev_err(&adapter->pdev->dev,
"IRQ request for mailbox failed, error: %d\n", err);
@@ -765,10 +763,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
}
/* assign netdev_ops */
- if (idpf_is_queue_model_split(vport->txq_model))
- netdev->netdev_ops = &idpf_netdev_ops_splitq;
- else
- netdev->netdev_ops = &idpf_netdev_ops_singleq;
+ netdev->netdev_ops = &idpf_netdev_ops;
/* setup watchdog timeout value to be 5 second */
netdev->watchdog_timeo = 5 * HZ;
@@ -946,6 +941,9 @@ static void idpf_decfg_netdev(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
+ kfree(vport->rx_ptype_lkup);
+ vport->rx_ptype_lkup = NULL;
+
unregister_netdev(vport->netdev);
free_netdev(vport->netdev);
vport->netdev = NULL;
@@ -1318,14 +1316,14 @@ static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
if (idpf_is_queue_model_split(vport->rxq_model)) {
for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
- struct idpf_queue *q =
+ const struct idpf_buf_queue *q =
&grp->splitq.bufq_sets[j].bufq;
writel(q->next_to_alloc, q->tail);
}
} else {
for (j = 0; j < grp->singleq.num_rxq; j++) {
- struct idpf_queue *q =
+ const struct idpf_rx_queue *q =
grp->singleq.rxqs[j];
writel(q->next_to_alloc, q->tail);
@@ -1855,7 +1853,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
enum idpf_vport_state current_state = np->state;
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport *new_vport;
- int err, i;
+ int err;
/* If the system is low on memory, we can end up in bad state if we
* free all the memory for queue resources and try to allocate them
@@ -1929,46 +1927,6 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
*/
memcpy(vport, new_vport, offsetof(struct idpf_vport, link_speed_mbps));
- /* Since idpf_vport_queues_alloc was called with new_port, the queue
- * back pointers are currently pointing to the local new_vport. Reset
- * the backpointers to the original vport here
- */
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- int j;
-
- tx_qgrp->vport = vport;
- for (j = 0; j < tx_qgrp->num_txq; j++)
- tx_qgrp->txqs[j]->vport = vport;
-
- if (idpf_is_queue_model_split(vport->txq_model))
- tx_qgrp->complq->vport = vport;
- }
-
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
- struct idpf_queue *q;
- u16 num_rxq;
- int j;
-
- rx_qgrp->vport = vport;
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++)
- rx_qgrp->splitq.bufq_sets[j].bufq.vport = vport;
-
- if (idpf_is_queue_model_split(vport->rxq_model))
- num_rxq = rx_qgrp->splitq.num_rxq_sets;
- else
- num_rxq = rx_qgrp->singleq.num_rxq;
-
- for (j = 0; j < num_rxq; j++) {
- if (idpf_is_queue_model_split(vport->rxq_model))
- q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
- else
- q = rx_qgrp->singleq.rxqs[j];
- q->vport = vport;
- }
- }
-
if (reset_cause == IDPF_SR_Q_CHANGE)
idpf_vport_alloc_vec_indexes(vport);
@@ -2393,24 +2351,10 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
mem->pa = 0;
}
-static const struct net_device_ops idpf_netdev_ops_splitq = {
- .ndo_open = idpf_open,
- .ndo_stop = idpf_stop,
- .ndo_start_xmit = idpf_tx_splitq_start,
- .ndo_features_check = idpf_features_check,
- .ndo_set_rx_mode = idpf_set_rx_mode,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = idpf_set_mac,
- .ndo_change_mtu = idpf_change_mtu,
- .ndo_get_stats64 = idpf_get_stats64,
- .ndo_set_features = idpf_set_features,
- .ndo_tx_timeout = idpf_tx_timeout,
-};
-
-static const struct net_device_ops idpf_netdev_ops_singleq = {
+static const struct net_device_ops idpf_netdev_ops = {
.ndo_open = idpf_open,
.ndo_stop = idpf_stop,
- .ndo_start_xmit = idpf_tx_singleq_start,
+ .ndo_start_xmit = idpf_tx_start,
.ndo_features_check = idpf_features_check,
.ndo_set_rx_mode = idpf_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
index f784eea044bd..db476b3314c8 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -8,6 +8,7 @@
#define DRV_SUMMARY "Intel(R) Infrastructure Data Path Function Linux Driver"
MODULE_DESCRIPTION(DRV_SUMMARY);
+MODULE_IMPORT_NS(LIBETH);
MODULE_LICENSE("GPL");
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index 27b93592c4ba..fe64febf7436 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */
+#include <net/libeth/rx.h>
+
#include "idpf.h"
/**
@@ -186,7 +188,7 @@ static int idpf_tx_singleq_csum(struct sk_buff *skb,
* and gets a physical address for each memory location and programs
* it and the length into the transmit base mode descriptor.
*/
-static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
+static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
struct idpf_tx_buf *first,
struct idpf_tx_offload_params *offloads)
{
@@ -205,12 +207,12 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
data_len = skb->data_len;
size = skb_headlen(skb);
- tx_desc = IDPF_BASE_TX_DESC(tx_q, i);
+ tx_desc = &tx_q->base_tx[i];
dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
/* write each descriptor with CRC bit */
- if (tx_q->vport->crc_enable)
+ if (idpf_queue_has(CRC_EN, tx_q))
td_cmd |= IDPF_TX_DESC_CMD_ICRC;
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
@@ -239,7 +241,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
i++;
if (i == tx_q->desc_count) {
- tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+ tx_desc = &tx_q->base_tx[0];
i = 0;
}
@@ -259,7 +261,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
i++;
if (i == tx_q->desc_count) {
- tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+ tx_desc = &tx_q->base_tx[0];
i = 0;
}
@@ -285,7 +287,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
/* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc;
- nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
netdev_tx_sent_queue(nq, first->bytecount);
idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
@@ -299,7 +301,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
* ring entry to reflect that this index is a context descriptor
*/
static struct idpf_base_tx_ctx_desc *
-idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq)
+idpf_tx_singleq_get_ctx_desc(struct idpf_tx_queue *txq)
{
struct idpf_base_tx_ctx_desc *ctx_desc;
int ntu = txq->next_to_use;
@@ -307,7 +309,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq)
memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf));
txq->tx_buf[ntu].ctx_entry = true;
- ctx_desc = IDPF_BASE_TX_CTX_DESC(txq, ntu);
+ ctx_desc = &txq->base_ctx[ntu];
IDPF_SINGLEQ_BUMP_RING_IDX(txq, ntu);
txq->next_to_use = ntu;
@@ -320,7 +322,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq)
* @txq: queue to send buffer on
* @offload: offload parameter structure
**/
-static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq,
+static void idpf_tx_singleq_build_ctx_desc(struct idpf_tx_queue *txq,
struct idpf_tx_offload_params *offload)
{
struct idpf_base_tx_ctx_desc *desc = idpf_tx_singleq_get_ctx_desc(txq);
@@ -333,7 +335,7 @@ static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq,
qw1 |= FIELD_PREP(IDPF_TXD_CTX_QW1_MSS_M, offload->mss);
u64_stats_update_begin(&txq->stats_sync);
- u64_stats_inc(&txq->q_stats.tx.lso_pkts);
+ u64_stats_inc(&txq->q_stats.lso_pkts);
u64_stats_update_end(&txq->stats_sync);
}
@@ -351,8 +353,8 @@ static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq,
*
* Returns NETDEV_TX_OK if sent, else an error code
*/
-static netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
- struct idpf_queue *tx_q)
+netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
+ struct idpf_tx_queue *tx_q)
{
struct idpf_tx_offload_params offload = { };
struct idpf_tx_buf *first;
@@ -409,53 +411,25 @@ out_drop:
}
/**
- * idpf_tx_singleq_start - Selects the right Tx queue to send buffer
- * @skb: send buffer
- * @netdev: network interface device structure
- *
- * Returns NETDEV_TX_OK if sent, else an error code
- */
-netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb,
- struct net_device *netdev)
-{
- struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
- struct idpf_queue *tx_q;
-
- tx_q = vport->txqs[skb_get_queue_mapping(skb)];
-
- /* hardware can't handle really short frames, hardware padding works
- * beyond this point
- */
- if (skb_put_padto(skb, IDPF_TX_MIN_PKT_LEN)) {
- idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
-
- return NETDEV_TX_OK;
- }
-
- return idpf_tx_singleq_frame(skb, tx_q);
-}
-
-/**
* idpf_tx_singleq_clean - Reclaim resources from queue
* @tx_q: Tx queue to clean
* @napi_budget: Used to determine if we are in netpoll
* @cleaned: returns number of packets cleaned
*
*/
-static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget,
+static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
int *cleaned)
{
- unsigned int budget = tx_q->vport->compln_clean_budget;
unsigned int total_bytes = 0, total_pkts = 0;
struct idpf_base_tx_desc *tx_desc;
+ u32 budget = tx_q->clean_budget;
s16 ntc = tx_q->next_to_clean;
struct idpf_netdev_priv *np;
struct idpf_tx_buf *tx_buf;
- struct idpf_vport *vport;
struct netdev_queue *nq;
bool dont_wake;
- tx_desc = IDPF_BASE_TX_DESC(tx_q, ntc);
+ tx_desc = &tx_q->base_tx[ntc];
tx_buf = &tx_q->tx_buf[ntc];
ntc -= tx_q->desc_count;
@@ -517,7 +491,7 @@ static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget,
if (unlikely(!ntc)) {
ntc -= tx_q->desc_count;
tx_buf = tx_q->tx_buf;
- tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+ tx_desc = &tx_q->base_tx[0];
}
/* unmap any remaining paged data */
@@ -540,7 +514,7 @@ fetch_next_txq_desc:
if (unlikely(!ntc)) {
ntc -= tx_q->desc_count;
tx_buf = tx_q->tx_buf;
- tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+ tx_desc = &tx_q->base_tx[0];
}
} while (likely(budget));
@@ -550,16 +524,15 @@ fetch_next_txq_desc:
*cleaned += total_pkts;
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_add(&tx_q->q_stats.tx.packets, total_pkts);
- u64_stats_add(&tx_q->q_stats.tx.bytes, total_bytes);
+ u64_stats_add(&tx_q->q_stats.packets, total_pkts);
+ u64_stats_add(&tx_q->q_stats.bytes, total_bytes);
u64_stats_update_end(&tx_q->stats_sync);
- vport = tx_q->vport;
- np = netdev_priv(vport->netdev);
- nq = netdev_get_tx_queue(vport->netdev, tx_q->idx);
+ np = netdev_priv(tx_q->netdev);
+ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
dont_wake = np->state != __IDPF_VPORT_UP ||
- !netif_carrier_ok(vport->netdev);
+ !netif_carrier_ok(tx_q->netdev);
__netif_txq_completed_wake(nq, total_pkts, total_bytes,
IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
dont_wake);
@@ -584,7 +557,7 @@ static bool idpf_tx_singleq_clean_all(struct idpf_q_vector *q_vec, int budget,
budget_per_q = num_txq ? max(budget / num_txq, 1) : 0;
for (i = 0; i < num_txq; i++) {
- struct idpf_queue *q;
+ struct idpf_tx_queue *q;
q = q_vec->tx[i];
clean_complete &= idpf_tx_singleq_clean(q, budget_per_q,
@@ -614,14 +587,9 @@ static bool idpf_rx_singleq_test_staterr(const union virtchnl2_rx_desc *rx_desc,
/**
* idpf_rx_singleq_is_non_eop - process handling of non-EOP buffers
- * @rxq: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
- * @skb: Current socket buffer containing buffer in progress
- * @ntc: next to clean
*/
-static bool idpf_rx_singleq_is_non_eop(struct idpf_queue *rxq,
- union virtchnl2_rx_desc *rx_desc,
- struct sk_buff *skb, u16 ntc)
+static bool idpf_rx_singleq_is_non_eop(const union virtchnl2_rx_desc *rx_desc)
{
/* if we are the last buffer then there is nothing else to do */
if (likely(idpf_rx_singleq_test_staterr(rx_desc, IDPF_RXD_EOF_SINGLEQ)))
@@ -635,98 +603,82 @@ static bool idpf_rx_singleq_is_non_eop(struct idpf_queue *rxq,
* @rxq: Rx ring being processed
* @skb: skb currently being received and modified
* @csum_bits: checksum bits from descriptor
- * @ptype: the packet type decoded by hardware
+ * @decoded: the packet type decoded by hardware
*
* skb->protocol must be set before this function is called
*/
-static void idpf_rx_singleq_csum(struct idpf_queue *rxq, struct sk_buff *skb,
- struct idpf_rx_csum_decoded *csum_bits,
- u16 ptype)
+static void idpf_rx_singleq_csum(struct idpf_rx_queue *rxq,
+ struct sk_buff *skb,
+ struct idpf_rx_csum_decoded csum_bits,
+ struct libeth_rx_pt decoded)
{
- struct idpf_rx_ptype_decoded decoded;
bool ipv4, ipv6;
/* check if Rx checksum is enabled */
- if (unlikely(!(rxq->vport->netdev->features & NETIF_F_RXCSUM)))
+ if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded))
return;
/* check if HW has decoded the packet and checksum */
- if (unlikely(!(csum_bits->l3l4p)))
- return;
-
- decoded = rxq->vport->rx_ptype_lkup[ptype];
- if (unlikely(!(decoded.known && decoded.outer_ip)))
+ if (unlikely(!csum_bits.l3l4p))
return;
- ipv4 = IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV4);
- ipv6 = IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV6);
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
/* Check if there were any checksum errors */
- if (unlikely(ipv4 && (csum_bits->ipe || csum_bits->eipe)))
+ if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
goto checksum_fail;
/* Device could not do any checksum offload for certain extension
* headers as indicated by setting IPV6EXADD bit
*/
- if (unlikely(ipv6 && csum_bits->ipv6exadd))
+ if (unlikely(ipv6 && csum_bits.ipv6exadd))
return;
/* check for L4 errors and handle packets that were not able to be
* checksummed due to arrival speed
*/
- if (unlikely(csum_bits->l4e))
+ if (unlikely(csum_bits.l4e))
goto checksum_fail;
- if (unlikely(csum_bits->nat && csum_bits->eudpe))
+ if (unlikely(csum_bits.nat && csum_bits.eudpe))
goto checksum_fail;
/* Handle packets that were not able to be checksummed due to arrival
* speed, in this case the stack can compute the csum.
*/
- if (unlikely(csum_bits->pprs))
+ if (unlikely(csum_bits.pprs))
return;
/* If there is an outer header present that might contain a checksum
* we need to bump the checksum level by 1 to reflect the fact that
* we are indicating we validated the inner checksum.
*/
- if (decoded.tunnel_type >= IDPF_RX_PTYPE_TUNNEL_IP_GRENAT)
+ if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT)
skb->csum_level = 1;
- /* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */
- switch (decoded.inner_prot) {
- case IDPF_RX_PTYPE_INNER_PROT_ICMP:
- case IDPF_RX_PTYPE_INNER_PROT_TCP:
- case IDPF_RX_PTYPE_INNER_PROT_UDP:
- case IDPF_RX_PTYPE_INNER_PROT_SCTP:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- return;
- default:
- return;
- }
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return;
checksum_fail:
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.rx.hw_csum_err);
+ u64_stats_inc(&rxq->q_stats.hw_csum_err);
u64_stats_update_end(&rxq->stats_sync);
}
/**
* idpf_rx_singleq_base_csum - Indicate in skb if hw indicated a good cksum
- * @rx_q: Rx completion queue
- * @skb: skb currently being received and modified
* @rx_desc: the receive descriptor
- * @ptype: Rx packet type
*
* This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte
* descriptor writeback format.
+ *
+ * Return: parsed checksum status.
**/
-static void idpf_rx_singleq_base_csum(struct idpf_queue *rx_q,
- struct sk_buff *skb,
- union virtchnl2_rx_desc *rx_desc,
- u16 ptype)
+static struct idpf_rx_csum_decoded
+idpf_rx_singleq_base_csum(const union virtchnl2_rx_desc *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits;
+ struct idpf_rx_csum_decoded csum_bits = { };
u32 rx_error, rx_status;
u64 qword;
@@ -745,28 +697,23 @@ static void idpf_rx_singleq_base_csum(struct idpf_queue *rx_q,
rx_status);
csum_bits.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_STATUS_IPV6EXADD_M,
rx_status);
- csum_bits.nat = 0;
- csum_bits.eudpe = 0;
- idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype);
+ return csum_bits;
}
/**
* idpf_rx_singleq_flex_csum - Indicate in skb if hw indicated a good cksum
- * @rx_q: Rx completion queue
- * @skb: skb currently being received and modified
* @rx_desc: the receive descriptor
- * @ptype: Rx packet type
*
* This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible
* descriptor writeback format.
+ *
+ * Return: parsed checksum status.
**/
-static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q,
- struct sk_buff *skb,
- union virtchnl2_rx_desc *rx_desc,
- u16 ptype)
+static struct idpf_rx_csum_decoded
+idpf_rx_singleq_flex_csum(const union virtchnl2_rx_desc *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits;
+ struct idpf_rx_csum_decoded csum_bits = { };
u16 rx_status0, rx_status1;
rx_status0 = le16_to_cpu(rx_desc->flex_nic_wb.status_error0);
@@ -786,9 +733,8 @@ static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q,
rx_status0);
csum_bits.nat = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS1_NAT_M,
rx_status1);
- csum_bits.pprs = 0;
- idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype);
+ return csum_bits;
}
/**
@@ -801,14 +747,14 @@ static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q,
* This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte
* descriptor writeback format.
**/
-static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q,
+static void idpf_rx_singleq_base_hash(struct idpf_rx_queue *rx_q,
struct sk_buff *skb,
- union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_ptype_decoded *decoded)
+ const union virtchnl2_rx_desc *rx_desc,
+ struct libeth_rx_pt decoded)
{
u64 mask, qw1;
- if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH)))
+ if (!libeth_rx_pt_has_hash(rx_q->netdev, decoded))
return;
mask = VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH_M;
@@ -817,7 +763,7 @@ static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q,
if (FIELD_GET(mask, qw1) == mask) {
u32 hash = le32_to_cpu(rx_desc->base_wb.qword0.hi_dword.rss);
- skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded));
+ libeth_rx_pt_set_hash(skb, hash, decoded);
}
}
@@ -831,18 +777,20 @@ static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q,
* This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible
* descriptor writeback format.
**/
-static void idpf_rx_singleq_flex_hash(struct idpf_queue *rx_q,
+static void idpf_rx_singleq_flex_hash(struct idpf_rx_queue *rx_q,
struct sk_buff *skb,
- union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_ptype_decoded *decoded)
+ const union virtchnl2_rx_desc *rx_desc,
+ struct libeth_rx_pt decoded)
{
- if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH)))
+ if (!libeth_rx_pt_has_hash(rx_q->netdev, decoded))
return;
if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_M,
- le16_to_cpu(rx_desc->flex_nic_wb.status_error0)))
- skb_set_hash(skb, le32_to_cpu(rx_desc->flex_nic_wb.rss_hash),
- idpf_ptype_to_htype(decoded));
+ le16_to_cpu(rx_desc->flex_nic_wb.status_error0))) {
+ u32 hash = le32_to_cpu(rx_desc->flex_nic_wb.rss_hash);
+
+ libeth_rx_pt_set_hash(skb, hash, decoded);
+ }
}
/**
@@ -857,25 +805,45 @@ static void idpf_rx_singleq_flex_hash(struct idpf_queue *rx_q,
* order to populate the hash, checksum, VLAN, protocol, and
* other fields within the skb.
*/
-static void idpf_rx_singleq_process_skb_fields(struct idpf_queue *rx_q,
- struct sk_buff *skb,
- union virtchnl2_rx_desc *rx_desc,
- u16 ptype)
+static void
+idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q,
+ struct sk_buff *skb,
+ const union virtchnl2_rx_desc *rx_desc,
+ u16 ptype)
{
- struct idpf_rx_ptype_decoded decoded =
- rx_q->vport->rx_ptype_lkup[ptype];
+ struct libeth_rx_pt decoded = rx_q->rx_ptype_lkup[ptype];
+ struct idpf_rx_csum_decoded csum_bits;
/* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, rx_q->vport->netdev);
+ skb->protocol = eth_type_trans(skb, rx_q->netdev);
/* Check if we're using base mode descriptor IDs */
if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) {
- idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, &decoded);
- idpf_rx_singleq_base_csum(rx_q, skb, rx_desc, ptype);
+ idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, decoded);
+ csum_bits = idpf_rx_singleq_base_csum(rx_desc);
} else {
- idpf_rx_singleq_flex_hash(rx_q, skb, rx_desc, &decoded);
- idpf_rx_singleq_flex_csum(rx_q, skb, rx_desc, ptype);
+ idpf_rx_singleq_flex_hash(rx_q, skb, rx_desc, decoded);
+ csum_bits = idpf_rx_singleq_flex_csum(rx_desc);
}
+
+ idpf_rx_singleq_csum(rx_q, skb, csum_bits, decoded);
+ skb_record_rx_queue(skb, rx_q->idx);
+}
+
+/**
+ * idpf_rx_buf_hw_update - Store the new tail and head values
+ * @rxq: queue to bump
+ * @val: new head index
+ */
+static void idpf_rx_buf_hw_update(struct idpf_rx_queue *rxq, u32 val)
+{
+ rxq->next_to_use = val;
+
+ if (unlikely(!rxq->tail))
+ return;
+
+ /* writel has an implicit memory barrier */
+ writel(val, rxq->tail);
}
/**
@@ -885,24 +853,28 @@ static void idpf_rx_singleq_process_skb_fields(struct idpf_queue *rx_q,
*
* Returns false if all allocations were successful, true if any fail
*/
-bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
+bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q,
u16 cleaned_count)
{
struct virtchnl2_singleq_rx_buf_desc *desc;
+ const struct libeth_fq_fp fq = {
+ .pp = rx_q->pp,
+ .fqes = rx_q->rx_buf,
+ .truesize = rx_q->truesize,
+ .count = rx_q->desc_count,
+ };
u16 nta = rx_q->next_to_alloc;
- struct idpf_rx_buf *buf;
if (!cleaned_count)
return false;
- desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, nta);
- buf = &rx_q->rx_buf.buf[nta];
+ desc = &rx_q->single_buf[nta];
do {
dma_addr_t addr;
- addr = idpf_alloc_page(rx_q->pp, buf, rx_q->rx_buf_size);
- if (unlikely(addr == DMA_MAPPING_ERROR))
+ addr = libeth_rx_alloc(&fq, nta);
+ if (addr == DMA_MAPPING_ERROR)
break;
/* Refresh the desc even if buffer_addrs didn't change
@@ -912,11 +884,9 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
desc->hdr_addr = 0;
desc++;
- buf++;
nta++;
if (unlikely(nta == rx_q->desc_count)) {
- desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, 0);
- buf = rx_q->rx_buf.buf;
+ desc = &rx_q->single_buf[0];
nta = 0;
}
@@ -933,7 +903,6 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
/**
* idpf_rx_singleq_extract_base_fields - Extract fields from the Rx descriptor
- * @rx_q: Rx descriptor queue
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
*
@@ -943,9 +912,9 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
* This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte
* descriptor writeback format.
*/
-static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q,
- union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+static void
+idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc,
+ struct idpf_rx_extracted *fields)
{
u64 qword;
@@ -957,7 +926,6 @@ static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q,
/**
* idpf_rx_singleq_extract_flex_fields - Extract fields from the Rx descriptor
- * @rx_q: Rx descriptor queue
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
*
@@ -967,9 +935,9 @@ static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q,
* This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible
* descriptor writeback format.
*/
-static void idpf_rx_singleq_extract_flex_fields(struct idpf_queue *rx_q,
- union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+static void
+idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc,
+ struct idpf_rx_extracted *fields)
{
fields->size = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M,
le16_to_cpu(rx_desc->flex_nic_wb.pkt_len));
@@ -984,14 +952,15 @@ static void idpf_rx_singleq_extract_flex_fields(struct idpf_queue *rx_q,
* @fields: storage for extracted values
*
*/
-static void idpf_rx_singleq_extract_fields(struct idpf_queue *rx_q,
- union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+static void
+idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q,
+ const union virtchnl2_rx_desc *rx_desc,
+ struct idpf_rx_extracted *fields)
{
if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M)
- idpf_rx_singleq_extract_base_fields(rx_q, rx_desc, fields);
+ idpf_rx_singleq_extract_base_fields(rx_desc, fields);
else
- idpf_rx_singleq_extract_flex_fields(rx_q, rx_desc, fields);
+ idpf_rx_singleq_extract_flex_fields(rx_desc, fields);
}
/**
@@ -1001,7 +970,7 @@ static void idpf_rx_singleq_extract_fields(struct idpf_queue *rx_q,
*
* Returns true if there's any budget left (e.g. the clean is finished)
*/
-static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget)
+static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
struct sk_buff *skb = rx_q->skb;
@@ -1016,7 +985,7 @@ static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget)
struct idpf_rx_buf *rx_buf;
/* get the Rx desc from Rx queue based on 'next_to_clean' */
- rx_desc = IDPF_RX_DESC(rx_q, ntc);
+ rx_desc = &rx_q->rx[ntc];
/* status_error_ptype_len will always be zero for unused
* descriptors because it's cleared in cleanup, and overlaps
@@ -1036,29 +1005,27 @@ static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget)
idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields);
- rx_buf = &rx_q->rx_buf.buf[ntc];
- if (!fields.size) {
- idpf_rx_put_page(rx_buf);
+ rx_buf = &rx_q->rx_buf[ntc];
+ if (!libeth_rx_sync_for_cpu(rx_buf, fields.size))
goto skip_data;
- }
- idpf_rx_sync_for_cpu(rx_buf, fields.size);
if (skb)
idpf_rx_add_frag(rx_buf, skb, fields.size);
else
- skb = idpf_rx_construct_skb(rx_q, rx_buf, fields.size);
+ skb = idpf_rx_build_skb(rx_buf, fields.size);
/* exit if we failed to retrieve a buffer */
if (!skb)
break;
skip_data:
- IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc);
+ rx_buf->page = NULL;
+ IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc);
cleaned_count++;
/* skip if it is non EOP desc */
- if (idpf_rx_singleq_is_non_eop(rx_q, rx_desc, skb, ntc))
+ if (idpf_rx_singleq_is_non_eop(rx_desc) || unlikely(!skb))
continue;
#define IDPF_RXD_ERR_S FIELD_PREP(VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M, \
@@ -1084,7 +1051,7 @@ skip_data:
rx_desc, fields.rx_ptype);
/* send completed skb up the stack */
- napi_gro_receive(&rx_q->q_vector->napi, skb);
+ napi_gro_receive(rx_q->pp->p.napi, skb);
skb = NULL;
/* update budget accounting */
@@ -1095,12 +1062,13 @@ skip_data:
rx_q->next_to_clean = ntc;
+ page_pool_nid_changed(rx_q->pp, numa_mem_id());
if (cleaned_count)
failure = idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count);
u64_stats_update_begin(&rx_q->stats_sync);
- u64_stats_add(&rx_q->q_stats.rx.packets, total_rx_pkts);
- u64_stats_add(&rx_q->q_stats.rx.bytes, total_rx_bytes);
+ u64_stats_add(&rx_q->q_stats.packets, total_rx_pkts);
+ u64_stats_add(&rx_q->q_stats.bytes, total_rx_bytes);
u64_stats_update_end(&rx_q->stats_sync);
/* guarantee a trip back through this routine if there was a failure */
@@ -1127,7 +1095,7 @@ static bool idpf_rx_singleq_clean_all(struct idpf_q_vector *q_vec, int budget,
*/
budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
for (i = 0; i < num_rxq; i++) {
- struct idpf_queue *rxq = q_vec->rx[i];
+ struct idpf_rx_queue *rxq = q_vec->rx[i];
int pkts_cleaned_per_q;
pkts_cleaned_per_q = idpf_rx_singleq_clean(rxq, budget_per_q);
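A pattern repeated across both this file and idpf_txrx.c below: open-coded {set,clear,test,change}_bit() calls on q->flags are replaced by idpf_queue_{set,clear,has,change,assign}() helpers that take the bare flag name (GEN_CHK, RFL_GEN_CHK, FLOW_SCH_EN, HSPLIT_EN, CRC_EN, SW_MARKER, ...). Their definitions are not part of this section; a plausible sketch, assuming they simply paste the __IDPF_Q_ prefix onto the flag name and forward to the regular bitops:

/* Illustrative sketch -- the real macros live in the idpf headers, outside this diff. */
#define idpf_queue_set(f, q)		__set_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_clear(f, q)		__clear_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_change(f, q)		__change_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_has(f, q)		test_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_assign(f, q, v)	__assign_bit(__IDPF_Q_##f, (q)->flags, v)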
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index b023704bbbda..af2879f03b8d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -1,9 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */
+#include <net/libeth/rx.h>
+
#include "idpf.h"
#include "idpf_virtchnl.h"
+static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
+ unsigned int count);
+
/**
* idpf_buf_lifo_push - push a buffer pointer onto stack
* @stack: pointer to stack struct
@@ -60,7 +65,8 @@ void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
* @tx_q: the queue that owns the buffer
* @tx_buf: the buffer to free
*/
-static void idpf_tx_buf_rel(struct idpf_queue *tx_q, struct idpf_tx_buf *tx_buf)
+static void idpf_tx_buf_rel(struct idpf_tx_queue *tx_q,
+ struct idpf_tx_buf *tx_buf)
{
if (tx_buf->skb) {
if (dma_unmap_len(tx_buf, len))
@@ -86,8 +92,9 @@ static void idpf_tx_buf_rel(struct idpf_queue *tx_q, struct idpf_tx_buf *tx_buf)
* idpf_tx_buf_rel_all - Free any empty Tx buffers
* @txq: queue to be cleaned
*/
-static void idpf_tx_buf_rel_all(struct idpf_queue *txq)
+static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
{
+ struct idpf_buf_lifo *buf_stack;
u16 i;
/* Buffers already cleared, nothing to do */
@@ -101,39 +108,58 @@ static void idpf_tx_buf_rel_all(struct idpf_queue *txq)
kfree(txq->tx_buf);
txq->tx_buf = NULL;
- if (!txq->buf_stack.bufs)
+ if (!idpf_queue_has(FLOW_SCH_EN, txq))
return;
- for (i = 0; i < txq->buf_stack.size; i++)
- kfree(txq->buf_stack.bufs[i]);
+ buf_stack = &txq->stash->buf_stack;
+ if (!buf_stack->bufs)
+ return;
- kfree(txq->buf_stack.bufs);
- txq->buf_stack.bufs = NULL;
+ for (i = 0; i < buf_stack->size; i++)
+ kfree(buf_stack->bufs[i]);
+
+ kfree(buf_stack->bufs);
+ buf_stack->bufs = NULL;
}
/**
* idpf_tx_desc_rel - Free Tx resources per queue
* @txq: Tx descriptor ring for a specific queue
- * @bufq: buffer q or completion q
*
* Free all transmit software resources
*/
-static void idpf_tx_desc_rel(struct idpf_queue *txq, bool bufq)
+static void idpf_tx_desc_rel(struct idpf_tx_queue *txq)
{
- if (bufq)
- idpf_tx_buf_rel_all(txq);
+ idpf_tx_buf_rel_all(txq);
if (!txq->desc_ring)
return;
dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma);
txq->desc_ring = NULL;
- txq->next_to_alloc = 0;
txq->next_to_use = 0;
txq->next_to_clean = 0;
}
/**
+ * idpf_compl_desc_rel - Free completion resources per queue
+ * @complq: completion queue
+ *
+ * Free all completion software resources.
+ */
+static void idpf_compl_desc_rel(struct idpf_compl_queue *complq)
+{
+ if (!complq->comp)
+ return;
+
+ dma_free_coherent(complq->netdev->dev.parent, complq->size,
+ complq->comp, complq->dma);
+ complq->comp = NULL;
+ complq->next_to_use = 0;
+ complq->next_to_clean = 0;
+}
+
+/**
* idpf_tx_desc_rel_all - Free Tx Resources for All Queues
* @vport: virtual port structure
*
@@ -150,10 +176,10 @@ static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
for (j = 0; j < txq_grp->num_txq; j++)
- idpf_tx_desc_rel(txq_grp->txqs[j], true);
+ idpf_tx_desc_rel(txq_grp->txqs[j]);
if (idpf_is_queue_model_split(vport->txq_model))
- idpf_tx_desc_rel(txq_grp->complq, false);
+ idpf_compl_desc_rel(txq_grp->complq);
}
}
@@ -163,8 +189,9 @@ static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
*
* Returns 0 on success, negative on failure
*/
-static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q)
+static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
{
+ struct idpf_buf_lifo *buf_stack;
int buf_size;
int i;
@@ -180,22 +207,26 @@ static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q)
for (i = 0; i < tx_q->desc_count; i++)
tx_q->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+ if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
+ return 0;
+
+ buf_stack = &tx_q->stash->buf_stack;
+
/* Initialize tx buf stack for out-of-order completions if
* flow scheduling offload is enabled
*/
- tx_q->buf_stack.bufs =
- kcalloc(tx_q->desc_count, sizeof(struct idpf_tx_stash *),
- GFP_KERNEL);
- if (!tx_q->buf_stack.bufs)
+ buf_stack->bufs = kcalloc(tx_q->desc_count, sizeof(*buf_stack->bufs),
+ GFP_KERNEL);
+ if (!buf_stack->bufs)
return -ENOMEM;
- tx_q->buf_stack.size = tx_q->desc_count;
- tx_q->buf_stack.top = tx_q->desc_count;
+ buf_stack->size = tx_q->desc_count;
+ buf_stack->top = tx_q->desc_count;
for (i = 0; i < tx_q->desc_count; i++) {
- tx_q->buf_stack.bufs[i] = kzalloc(sizeof(*tx_q->buf_stack.bufs[i]),
- GFP_KERNEL);
- if (!tx_q->buf_stack.bufs[i])
+ buf_stack->bufs[i] = kzalloc(sizeof(*buf_stack->bufs[i]),
+ GFP_KERNEL);
+ if (!buf_stack->bufs[i])
return -ENOMEM;
}
@@ -204,28 +235,22 @@ static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q)
/**
* idpf_tx_desc_alloc - Allocate the Tx descriptors
+ * @vport: vport to allocate resources for
* @tx_q: the tx ring to set up
- * @bufq: buffer or completion queue
*
* Returns 0 on success, negative on failure
*/
-static int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq)
+static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
+ struct idpf_tx_queue *tx_q)
{
struct device *dev = tx_q->dev;
- u32 desc_sz;
int err;
- if (bufq) {
- err = idpf_tx_buf_alloc_all(tx_q);
- if (err)
- goto err_alloc;
-
- desc_sz = sizeof(struct idpf_base_tx_desc);
- } else {
- desc_sz = sizeof(struct idpf_splitq_tx_compl_desc);
- }
+ err = idpf_tx_buf_alloc_all(tx_q);
+ if (err)
+ goto err_alloc;
- tx_q->size = tx_q->desc_count * desc_sz;
+ tx_q->size = tx_q->desc_count * sizeof(*tx_q->base_tx);
/* Allocate descriptors also round up to nearest 4K */
tx_q->size = ALIGN(tx_q->size, 4096);
@@ -238,20 +263,44 @@ static int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq)
goto err_alloc;
}
- tx_q->next_to_alloc = 0;
tx_q->next_to_use = 0;
tx_q->next_to_clean = 0;
- set_bit(__IDPF_Q_GEN_CHK, tx_q->flags);
+ idpf_queue_set(GEN_CHK, tx_q);
return 0;
err_alloc:
- idpf_tx_desc_rel(tx_q, bufq);
+ idpf_tx_desc_rel(tx_q);
return err;
}
/**
+ * idpf_compl_desc_alloc - allocate completion descriptors
+ * @vport: vport to allocate resources for
+ * @complq: completion queue to set up
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_compl_desc_alloc(const struct idpf_vport *vport,
+ struct idpf_compl_queue *complq)
+{
+ complq->size = array_size(complq->desc_count, sizeof(*complq->comp));
+
+ complq->comp = dma_alloc_coherent(complq->netdev->dev.parent,
+ complq->size, &complq->dma,
+ GFP_KERNEL);
+ if (!complq->comp)
+ return -ENOMEM;
+
+ complq->next_to_use = 0;
+ complq->next_to_clean = 0;
+ idpf_queue_set(GEN_CHK, complq);
+
+ return 0;
+}
+
+/**
* idpf_tx_desc_alloc_all - allocate all queues Tx resources
* @vport: virtual port private structure
*
@@ -259,7 +308,6 @@ err_alloc:
*/
static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
{
- struct device *dev = &vport->adapter->pdev->dev;
int err = 0;
int i, j;
@@ -268,13 +316,14 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
*/
for (i = 0; i < vport->num_txq_grp; i++) {
for (j = 0; j < vport->txq_grps[i].num_txq; j++) {
- struct idpf_queue *txq = vport->txq_grps[i].txqs[j];
+ struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j];
u8 gen_bits = 0;
u16 bufidx_mask;
- err = idpf_tx_desc_alloc(txq, true);
+ err = idpf_tx_desc_alloc(vport, txq);
if (err) {
- dev_err(dev, "Allocation for Tx Queue %u failed\n",
+ pci_err(vport->adapter->pdev,
+ "Allocation for Tx Queue %u failed\n",
i);
goto err_out;
}
@@ -312,9 +361,10 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
continue;
/* Setup completion queues */
- err = idpf_tx_desc_alloc(vport->txq_grps[i].complq, false);
+ err = idpf_compl_desc_alloc(vport, vport->txq_grps[i].complq);
if (err) {
- dev_err(dev, "Allocation for Tx Completion Queue %u failed\n",
+ pci_err(vport->adapter->pdev,
+ "Allocation for Tx Completion Queue %u failed\n",
i);
goto err_out;
}
@@ -329,70 +379,97 @@ err_out:
/**
* idpf_rx_page_rel - Release an rx buffer page
- * @rxq: the queue that owns the buffer
* @rx_buf: the buffer to free
*/
-static void idpf_rx_page_rel(struct idpf_queue *rxq, struct idpf_rx_buf *rx_buf)
+static void idpf_rx_page_rel(struct libeth_fqe *rx_buf)
{
if (unlikely(!rx_buf->page))
return;
- page_pool_put_full_page(rxq->pp, rx_buf->page, false);
+ page_pool_put_full_page(rx_buf->page->pp, rx_buf->page, false);
rx_buf->page = NULL;
- rx_buf->page_offset = 0;
+ rx_buf->offset = 0;
}
/**
* idpf_rx_hdr_buf_rel_all - Release header buffer memory
- * @rxq: queue to use
+ * @bufq: queue to use
*/
-static void idpf_rx_hdr_buf_rel_all(struct idpf_queue *rxq)
+static void idpf_rx_hdr_buf_rel_all(struct idpf_buf_queue *bufq)
{
- struct idpf_adapter *adapter = rxq->vport->adapter;
+ struct libeth_fq fq = {
+ .fqes = bufq->hdr_buf,
+ .pp = bufq->hdr_pp,
+ };
- dma_free_coherent(&adapter->pdev->dev,
- rxq->desc_count * IDPF_HDR_BUF_SIZE,
- rxq->rx_buf.hdr_buf_va,
- rxq->rx_buf.hdr_buf_pa);
- rxq->rx_buf.hdr_buf_va = NULL;
+ for (u32 i = 0; i < bufq->desc_count; i++)
+ idpf_rx_page_rel(&bufq->hdr_buf[i]);
+
+ libeth_rx_fq_destroy(&fq);
+ bufq->hdr_buf = NULL;
+ bufq->hdr_pp = NULL;
}
/**
- * idpf_rx_buf_rel_all - Free all Rx buffer resources for a queue
- * @rxq: queue to be cleaned
+ * idpf_rx_buf_rel_bufq - Free all Rx buffer resources for a buffer queue
+ * @bufq: queue to be cleaned
*/
-static void idpf_rx_buf_rel_all(struct idpf_queue *rxq)
+static void idpf_rx_buf_rel_bufq(struct idpf_buf_queue *bufq)
{
- u16 i;
+ struct libeth_fq fq = {
+ .fqes = bufq->buf,
+ .pp = bufq->pp,
+ };
/* queue already cleared, nothing to do */
- if (!rxq->rx_buf.buf)
+ if (!bufq->buf)
return;
/* Free all the bufs allocated and given to hw on Rx queue */
- for (i = 0; i < rxq->desc_count; i++)
- idpf_rx_page_rel(rxq, &rxq->rx_buf.buf[i]);
+ for (u32 i = 0; i < bufq->desc_count; i++)
+ idpf_rx_page_rel(&bufq->buf[i]);
- if (rxq->rx_hsplit_en)
- idpf_rx_hdr_buf_rel_all(rxq);
+ if (idpf_queue_has(HSPLIT_EN, bufq))
+ idpf_rx_hdr_buf_rel_all(bufq);
- page_pool_destroy(rxq->pp);
- rxq->pp = NULL;
+ libeth_rx_fq_destroy(&fq);
+ bufq->buf = NULL;
+ bufq->pp = NULL;
+}
+
+/**
+ * idpf_rx_buf_rel_all - Free all Rx buffer resources for a receive queue
+ * @rxq: queue to be cleaned
+ */
+static void idpf_rx_buf_rel_all(struct idpf_rx_queue *rxq)
+{
+ struct libeth_fq fq = {
+ .fqes = rxq->rx_buf,
+ .pp = rxq->pp,
+ };
+
+ if (!rxq->rx_buf)
+ return;
- kfree(rxq->rx_buf.buf);
- rxq->rx_buf.buf = NULL;
+ for (u32 i = 0; i < rxq->desc_count; i++)
+ idpf_rx_page_rel(&rxq->rx_buf[i]);
+
+ libeth_rx_fq_destroy(&fq);
+ rxq->rx_buf = NULL;
+ rxq->pp = NULL;
}
/**
* idpf_rx_desc_rel - Free a specific Rx q resources
* @rxq: queue to clean the resources from
- * @bufq: buffer q or completion q
- * @q_model: single or split q model
+ * @dev: device to free DMA memory
+ * @model: single or split queue model
*
* Free a specific rx queue resources
*/
-static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model)
+static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev,
+ u32 model)
{
if (!rxq)
return;
@@ -402,7 +479,7 @@ static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model)
rxq->skb = NULL;
}
- if (bufq || !idpf_is_queue_model_split(q_model))
+ if (!idpf_is_queue_model_split(model))
idpf_rx_buf_rel_all(rxq);
rxq->next_to_alloc = 0;
@@ -411,11 +488,35 @@ static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model)
if (!rxq->desc_ring)
return;
- dmam_free_coherent(rxq->dev, rxq->size, rxq->desc_ring, rxq->dma);
+ dmam_free_coherent(dev, rxq->size, rxq->desc_ring, rxq->dma);
rxq->desc_ring = NULL;
}
/**
+ * idpf_rx_desc_rel_bufq - free buffer queue resources
+ * @bufq: buffer queue to clean the resources from
+ * @dev: device to free DMA memory
+ */
+static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq,
+ struct device *dev)
+{
+ if (!bufq)
+ return;
+
+ idpf_rx_buf_rel_bufq(bufq);
+
+ bufq->next_to_alloc = 0;
+ bufq->next_to_clean = 0;
+ bufq->next_to_use = 0;
+
+ if (!bufq->split_buf)
+ return;
+
+ dma_free_coherent(dev, bufq->size, bufq->split_buf, bufq->dma);
+ bufq->split_buf = NULL;
+}
+
+/**
* idpf_rx_desc_rel_all - Free Rx Resources for All Queues
* @vport: virtual port structure
*
@@ -423,6 +524,7 @@ static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model)
*/
static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
{
+ struct device *dev = &vport->adapter->pdev->dev;
struct idpf_rxq_group *rx_qgrp;
u16 num_rxq;
int i, j;
@@ -435,15 +537,15 @@ static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
if (!idpf_is_queue_model_split(vport->rxq_model)) {
for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
- idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j],
- false, vport->rxq_model);
+ idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], dev,
+ VIRTCHNL2_QUEUE_MODEL_SINGLE);
continue;
}
num_rxq = rx_qgrp->splitq.num_rxq_sets;
for (j = 0; j < num_rxq; j++)
idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq,
- false, vport->rxq_model);
+ dev, VIRTCHNL2_QUEUE_MODEL_SPLIT);
if (!rx_qgrp->splitq.bufq_sets)
continue;
@@ -452,45 +554,50 @@ static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
struct idpf_bufq_set *bufq_set =
&rx_qgrp->splitq.bufq_sets[j];
- idpf_rx_desc_rel(&bufq_set->bufq, true,
- vport->rxq_model);
+ idpf_rx_desc_rel_bufq(&bufq_set->bufq, dev);
}
}
}
/**
* idpf_rx_buf_hw_update - Store the new tail and head values
- * @rxq: queue to bump
+ * @bufq: queue to bump
* @val: new head index
*/
-void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val)
+static void idpf_rx_buf_hw_update(struct idpf_buf_queue *bufq, u32 val)
{
- rxq->next_to_use = val;
+ bufq->next_to_use = val;
- if (unlikely(!rxq->tail))
+ if (unlikely(!bufq->tail))
return;
/* writel has an implicit memory barrier */
- writel(val, rxq->tail);
+ writel(val, bufq->tail);
}
/**
* idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers
- * @rxq: ring to use
+ * @bufq: ring to use
*
* Returns 0 on success, negative on failure.
*/
-static int idpf_rx_hdr_buf_alloc_all(struct idpf_queue *rxq)
+static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
{
- struct idpf_adapter *adapter = rxq->vport->adapter;
-
- rxq->rx_buf.hdr_buf_va =
- dma_alloc_coherent(&adapter->pdev->dev,
- IDPF_HDR_BUF_SIZE * rxq->desc_count,
- &rxq->rx_buf.hdr_buf_pa,
- GFP_KERNEL);
- if (!rxq->rx_buf.hdr_buf_va)
- return -ENOMEM;
+ struct libeth_fq fq = {
+ .count = bufq->desc_count,
+ .type = LIBETH_FQE_HDR,
+ .nid = idpf_q_vector_to_mem(bufq->q_vector),
+ };
+ int ret;
+
+ ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
+ if (ret)
+ return ret;
+
+ bufq->hdr_pp = fq.pp;
+ bufq->hdr_buf = fq.fqes;
+ bufq->hdr_truesize = fq.truesize;
+ bufq->rx_hbuf_size = fq.buf_len;
return 0;
}
@@ -502,19 +609,20 @@ static int idpf_rx_hdr_buf_alloc_all(struct idpf_queue *rxq)
*/
static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
{
- u16 nta = refillq->next_to_alloc;
+ u32 nta = refillq->next_to_use;
/* store the buffer ID and the SW maintained GEN bit to the refillq */
refillq->ring[nta] =
FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) |
FIELD_PREP(IDPF_RX_BI_GEN_M,
- test_bit(__IDPF_Q_GEN_CHK, refillq->flags));
+ idpf_queue_has(GEN_CHK, refillq));
if (unlikely(++nta == refillq->desc_count)) {
nta = 0;
- change_bit(__IDPF_Q_GEN_CHK, refillq->flags);
+ idpf_queue_change(GEN_CHK, refillq);
}
- refillq->next_to_alloc = nta;
+
+ refillq->next_to_use = nta;
}
/**
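The refill ring is how an Rx queue hands spent buffer IDs back to its buffer queue. Each entry packs the buffer ID together with a software-maintained generation bit that flips every time the producer wraps, so the consumer can tell freshly written entries from stale ones without any extra head/tail exchange. The consumer is not shown in this section; a minimal sketch of that check, assuming the consumer tracks its own generation in the RFL_GEN_CHK flag set during queue-group init below, that the SW queue keeps a next_to_clean index, and using a made-up helper name:

/* Illustrative consumer of the refill ring -- not part of this commit's hunks. */
static bool idpf_rx_refillq_pop(struct idpf_sw_queue *refillq, u16 *buf_id)
{
	u32 ntc = refillq->next_to_clean;
	u32 entry = refillq->ring[ntc];

	/* An entry is valid only if its GEN bit matches the SW-tracked one. */
	if (FIELD_GET(IDPF_RX_BI_GEN_M, entry) !=
	    idpf_queue_has(RFL_GEN_CHK, refillq))
		return false;

	*buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, entry);

	if (unlikely(++ntc == refillq->desc_count)) {
		ntc = 0;
		idpf_queue_change(RFL_GEN_CHK, refillq);
	}
	refillq->next_to_clean = ntc;

	return true;
}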
@@ -524,24 +632,35 @@ static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
*
* Returns false if buffer could not be allocated, true otherwise.
*/
-static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id)
+static bool idpf_rx_post_buf_desc(struct idpf_buf_queue *bufq, u16 buf_id)
{
struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc = NULL;
+ struct libeth_fq_fp fq = {
+ .count = bufq->desc_count,
+ };
u16 nta = bufq->next_to_alloc;
- struct idpf_rx_buf *buf;
dma_addr_t addr;
- splitq_rx_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, nta);
- buf = &bufq->rx_buf.buf[buf_id];
+ splitq_rx_desc = &bufq->split_buf[nta];
- if (bufq->rx_hsplit_en) {
- splitq_rx_desc->hdr_addr =
- cpu_to_le64(bufq->rx_buf.hdr_buf_pa +
- (u32)buf_id * IDPF_HDR_BUF_SIZE);
+ if (idpf_queue_has(HSPLIT_EN, bufq)) {
+ fq.pp = bufq->hdr_pp;
+ fq.fqes = bufq->hdr_buf;
+ fq.truesize = bufq->hdr_truesize;
+
+ addr = libeth_rx_alloc(&fq, buf_id);
+ if (addr == DMA_MAPPING_ERROR)
+ return false;
+
+ splitq_rx_desc->hdr_addr = cpu_to_le64(addr);
}
- addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size);
- if (unlikely(addr == DMA_MAPPING_ERROR))
+ fq.pp = bufq->pp;
+ fq.fqes = bufq->buf;
+ fq.truesize = bufq->truesize;
+
+ addr = libeth_rx_alloc(&fq, buf_id);
+ if (addr == DMA_MAPPING_ERROR)
return false;
splitq_rx_desc->pkt_addr = cpu_to_le64(addr);
@@ -562,7 +681,8 @@ static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id)
*
* Returns true if @working_set bufs were posted successfully, false otherwise.
*/
-static bool idpf_rx_post_init_bufs(struct idpf_queue *bufq, u16 working_set)
+static bool idpf_rx_post_init_bufs(struct idpf_buf_queue *bufq,
+ u16 working_set)
{
int i;
@@ -571,95 +691,114 @@ static bool idpf_rx_post_init_bufs(struct idpf_queue *bufq, u16 working_set)
return false;
}
- idpf_rx_buf_hw_update(bufq,
- bufq->next_to_alloc & ~(bufq->rx_buf_stride - 1));
+ idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq->next_to_alloc,
+ IDPF_RX_BUF_STRIDE));
return true;
}
/**
- * idpf_rx_create_page_pool - Create a page pool
- * @rxbufq: RX queue to create page pool for
+ * idpf_rx_buf_alloc_singleq - Allocate memory for all buffer resources
+ * @rxq: queue for which the buffers are allocated
+ *
+ * Return: 0 on success, -ENOMEM on failure.
+ */
+static int idpf_rx_buf_alloc_singleq(struct idpf_rx_queue *rxq)
+{
+ if (idpf_rx_singleq_buf_hw_alloc_all(rxq, rxq->desc_count - 1))
+ goto err;
+
+ return 0;
+
+err:
+ idpf_rx_buf_rel_all(rxq);
+
+ return -ENOMEM;
+}
+
+/**
+ * idpf_rx_bufs_init_singleq - Initialize page pool and allocate Rx bufs
+ * @rxq: buffer queue to create page pool for
*
- * Returns &page_pool on success, casted -errno on failure
+ * Return: 0 on success, -errno on failure.
*/
-static struct page_pool *idpf_rx_create_page_pool(struct idpf_queue *rxbufq)
+static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
{
- struct page_pool_params pp = {
- .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
- .order = 0,
- .pool_size = rxbufq->desc_count,
- .nid = NUMA_NO_NODE,
- .dev = rxbufq->vport->netdev->dev.parent,
- .max_len = PAGE_SIZE,
- .dma_dir = DMA_FROM_DEVICE,
- .offset = 0,
+ struct libeth_fq fq = {
+ .count = rxq->desc_count,
+ .type = LIBETH_FQE_MTU,
+ .nid = idpf_q_vector_to_mem(rxq->q_vector),
};
+ int ret;
+
+ ret = libeth_rx_fq_create(&fq, &rxq->q_vector->napi);
+ if (ret)
+ return ret;
+
+ rxq->pp = fq.pp;
+ rxq->rx_buf = fq.fqes;
+ rxq->truesize = fq.truesize;
+ rxq->rx_buf_size = fq.buf_len;
- return page_pool_create(&pp);
+ return idpf_rx_buf_alloc_singleq(rxq);
}
/**
* idpf_rx_buf_alloc_all - Allocate memory for all buffer resources
- * @rxbufq: queue for which the buffers are allocated; equivalent to
- * rxq when operating in singleq mode
+ * @rxbufq: queue for which the buffers are allocated
*
* Returns 0 on success, negative on failure
*/
-static int idpf_rx_buf_alloc_all(struct idpf_queue *rxbufq)
+static int idpf_rx_buf_alloc_all(struct idpf_buf_queue *rxbufq)
{
int err = 0;
- /* Allocate book keeping buffers */
- rxbufq->rx_buf.buf = kcalloc(rxbufq->desc_count,
- sizeof(struct idpf_rx_buf), GFP_KERNEL);
- if (!rxbufq->rx_buf.buf) {
- err = -ENOMEM;
- goto rx_buf_alloc_all_out;
- }
-
- if (rxbufq->rx_hsplit_en) {
+ if (idpf_queue_has(HSPLIT_EN, rxbufq)) {
err = idpf_rx_hdr_buf_alloc_all(rxbufq);
if (err)
goto rx_buf_alloc_all_out;
}
/* Allocate buffers to be given to HW. */
- if (idpf_is_queue_model_split(rxbufq->vport->rxq_model)) {
- int working_set = IDPF_RX_BUFQ_WORKING_SET(rxbufq);
-
- if (!idpf_rx_post_init_bufs(rxbufq, working_set))
- err = -ENOMEM;
- } else {
- if (idpf_rx_singleq_buf_hw_alloc_all(rxbufq,
- rxbufq->desc_count - 1))
- err = -ENOMEM;
- }
+ if (!idpf_rx_post_init_bufs(rxbufq, IDPF_RX_BUFQ_WORKING_SET(rxbufq)))
+ err = -ENOMEM;
rx_buf_alloc_all_out:
if (err)
- idpf_rx_buf_rel_all(rxbufq);
+ idpf_rx_buf_rel_bufq(rxbufq);
return err;
}
/**
* idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW
- * @rxbufq: RX queue to create page pool for
+ * @bufq: buffer queue to create page pool for
+ * @type: type of Rx buffers to allocate
*
* Returns 0 on success, negative on failure
*/
-static int idpf_rx_bufs_init(struct idpf_queue *rxbufq)
+static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
+ enum libeth_fqe_type type)
{
- struct page_pool *pool;
+ struct libeth_fq fq = {
+ .truesize = bufq->truesize,
+ .count = bufq->desc_count,
+ .type = type,
+ .hsplit = idpf_queue_has(HSPLIT_EN, bufq),
+ .nid = idpf_q_vector_to_mem(bufq->q_vector),
+ };
+ int ret;
- pool = idpf_rx_create_page_pool(rxbufq);
- if (IS_ERR(pool))
- return PTR_ERR(pool);
+ ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
+ if (ret)
+ return ret;
- rxbufq->pp = pool;
+ bufq->pp = fq.pp;
+ bufq->buf = fq.fqes;
+ bufq->truesize = fq.truesize;
+ bufq->rx_buf_size = fq.buf_len;
- return idpf_rx_buf_alloc_all(rxbufq);
+ return idpf_rx_buf_alloc_all(bufq);
}
/**
@@ -670,20 +809,22 @@ static int idpf_rx_bufs_init(struct idpf_queue *rxbufq)
*/
int idpf_rx_bufs_init_all(struct idpf_vport *vport)
{
- struct idpf_rxq_group *rx_qgrp;
- struct idpf_queue *q;
+ bool split = idpf_is_queue_model_split(vport->rxq_model);
int i, j, err;
for (i = 0; i < vport->num_rxq_grp; i++) {
- rx_qgrp = &vport->rxq_grps[i];
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u32 truesize = 0;
/* Allocate bufs for the rxq itself in singleq */
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ if (!split) {
int num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++) {
+ struct idpf_rx_queue *q;
+
q = rx_qgrp->singleq.rxqs[j];
- err = idpf_rx_bufs_init(q);
+ err = idpf_rx_bufs_init_singleq(q);
if (err)
return err;
}
@@ -693,10 +834,19 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport)
/* Otherwise, allocate bufs for the buffer queues */
for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ enum libeth_fqe_type type;
+ struct idpf_buf_queue *q;
+
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
- err = idpf_rx_bufs_init(q);
+ q->truesize = truesize;
+
+ type = truesize ? LIBETH_FQE_SHORT : LIBETH_FQE_MTU;
+
+ err = idpf_rx_bufs_init(q, type);
if (err)
return err;
+
+ truesize = q->truesize >> 1;
}
}
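With the fixed bufq_size[] defaults gone (see the idpf_vport_init_num_qs() hunk later in this file), buffer sizing now comes from libeth: the first buffer queue in each group is created as an MTU-sized fill queue, and each subsequent one is seeded with half the truesize the previous queue ended up with and created as a LIBETH_FQE_SHORT queue. As a worked example, under the assumption that the MTU pool comes back with a 4 KiB truesize, a two-bufq group reproduces the old hard-coded 4K/2K split:

	j == 0: type = LIBETH_FQE_MTU,   q->truesize ends up 4096, truesize becomes 2048
	j == 1: type = LIBETH_FQE_SHORT, created with truesize 2048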
@@ -705,22 +855,17 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport)
/**
* idpf_rx_desc_alloc - Allocate queue Rx resources
+ * @vport: vport to allocate resources for
* @rxq: Rx queue for which the resources are setup
- * @bufq: buffer or completion queue
- * @q_model: single or split queue model
*
* Returns 0 on success, negative on failure
*/
-static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model)
+static int idpf_rx_desc_alloc(const struct idpf_vport *vport,
+ struct idpf_rx_queue *rxq)
{
- struct device *dev = rxq->dev;
+ struct device *dev = &vport->adapter->pdev->dev;
- if (bufq)
- rxq->size = rxq->desc_count *
- sizeof(struct virtchnl2_splitq_rx_buf_desc);
- else
- rxq->size = rxq->desc_count *
- sizeof(union virtchnl2_rx_desc);
+ rxq->size = rxq->desc_count * sizeof(union virtchnl2_rx_desc);
/* Allocate descriptors and also round up to nearest 4K */
rxq->size = ALIGN(rxq->size, 4096);
@@ -735,7 +880,35 @@ static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model)
rxq->next_to_alloc = 0;
rxq->next_to_clean = 0;
rxq->next_to_use = 0;
- set_bit(__IDPF_Q_GEN_CHK, rxq->flags);
+ idpf_queue_set(GEN_CHK, rxq);
+
+ return 0;
+}
+
+/**
+ * idpf_bufq_desc_alloc - Allocate buffer queue descriptor ring
+ * @vport: vport to allocate resources for
+ * @bufq: buffer queue for which the resources are set up
+ *
+ * Return: 0 on success, -ENOMEM on failure.
+ */
+static int idpf_bufq_desc_alloc(const struct idpf_vport *vport,
+ struct idpf_buf_queue *bufq)
+{
+ struct device *dev = &vport->adapter->pdev->dev;
+
+ bufq->size = array_size(bufq->desc_count, sizeof(*bufq->split_buf));
+
+ bufq->split_buf = dma_alloc_coherent(dev, bufq->size, &bufq->dma,
+ GFP_KERNEL);
+ if (!bufq->split_buf)
+ return -ENOMEM;
+
+ bufq->next_to_alloc = 0;
+ bufq->next_to_clean = 0;
+ bufq->next_to_use = 0;
+
+ idpf_queue_set(GEN_CHK, bufq);
return 0;
}
@@ -748,9 +921,7 @@ static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model)
*/
static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
{
- struct device *dev = &vport->adapter->pdev->dev;
struct idpf_rxq_group *rx_qgrp;
- struct idpf_queue *q;
int i, j, err;
u16 num_rxq;
@@ -762,13 +933,17 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++) {
+ struct idpf_rx_queue *q;
+
if (idpf_is_queue_model_split(vport->rxq_model))
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
- err = idpf_rx_desc_alloc(q, false, vport->rxq_model);
+
+ err = idpf_rx_desc_alloc(vport, q);
if (err) {
- dev_err(dev, "Memory allocation for Rx Queue %u failed\n",
+ pci_err(vport->adapter->pdev,
+ "Memory allocation for Rx Queue %u failed\n",
i);
goto err_out;
}
@@ -778,10 +953,14 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
continue;
for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ struct idpf_buf_queue *q;
+
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
- err = idpf_rx_desc_alloc(q, true, vport->rxq_model);
+
+ err = idpf_bufq_desc_alloc(vport, q);
if (err) {
- dev_err(dev, "Memory allocation for Rx Buffer Queue %u failed\n",
+ pci_err(vport->adapter->pdev,
+ "Memory allocation for Rx Buffer Queue %u failed\n",
i);
goto err_out;
}
@@ -802,11 +981,16 @@ err_out:
*/
static void idpf_txq_group_rel(struct idpf_vport *vport)
{
+ bool split, flow_sch_en;
int i, j;
if (!vport->txq_grps)
return;
+ split = idpf_is_queue_model_split(vport->txq_model);
+ flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
+ VIRTCHNL2_CAP_SPLITQ_QSCHED);
+
for (i = 0; i < vport->num_txq_grp; i++) {
struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
@@ -814,8 +998,15 @@ static void idpf_txq_group_rel(struct idpf_vport *vport)
kfree(txq_grp->txqs[j]);
txq_grp->txqs[j] = NULL;
}
+
+ if (!split)
+ continue;
+
kfree(txq_grp->complq);
txq_grp->complq = NULL;
+
+ if (flow_sch_en)
+ kfree(txq_grp->stashes);
}
kfree(vport->txq_grps);
vport->txq_grps = NULL;
@@ -919,7 +1110,7 @@ static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
{
int i, j, k = 0;
- vport->txqs = kcalloc(vport->num_txq, sizeof(struct idpf_queue *),
+ vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs),
GFP_KERNEL);
if (!vport->txqs)
@@ -967,17 +1158,11 @@ void idpf_vport_init_num_qs(struct idpf_vport *vport,
/* Adjust number of buffer queues per Rx queue group. */
if (!idpf_is_queue_model_split(vport->rxq_model)) {
vport->num_bufqs_per_qgrp = 0;
- vport->bufq_size[0] = IDPF_RX_BUF_2048;
return;
}
vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
- /* Bufq[0] default buffer size is 4K
- * Bufq[1] default buffer size is 2K
- */
- vport->bufq_size[0] = IDPF_RX_BUF_4096;
- vport->bufq_size[1] = IDPF_RX_BUF_2048;
}
/**
@@ -1137,9 +1322,10 @@ static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport,
* @q: rx queue for which descids are set
*
*/
-static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q)
+static void idpf_rxq_set_descids(const struct idpf_vport *vport,
+ struct idpf_rx_queue *q)
{
- if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+ if (idpf_is_queue_model_split(vport->rxq_model)) {
q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
} else {
if (vport->base_rxd)
@@ -1158,20 +1344,22 @@ static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q)
*/
static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
{
- bool flow_sch_en;
- int err, i;
+ bool split, flow_sch_en;
+ int i;
vport->txq_grps = kcalloc(vport->num_txq_grp,
sizeof(*vport->txq_grps), GFP_KERNEL);
if (!vport->txq_grps)
return -ENOMEM;
+ split = idpf_is_queue_model_split(vport->txq_model);
flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
VIRTCHNL2_CAP_SPLITQ_QSCHED);
for (i = 0; i < vport->num_txq_grp; i++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_txq_stash *stashes;
int j;
tx_qgrp->vport = vport;
@@ -1180,45 +1368,62 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
for (j = 0; j < tx_qgrp->num_txq; j++) {
tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]),
GFP_KERNEL);
- if (!tx_qgrp->txqs[j]) {
- err = -ENOMEM;
+ if (!tx_qgrp->txqs[j])
goto err_alloc;
- }
+ }
+
+ if (split && flow_sch_en) {
+ stashes = kcalloc(num_txq, sizeof(*stashes),
+ GFP_KERNEL);
+ if (!stashes)
+ goto err_alloc;
+
+ tx_qgrp->stashes = stashes;
}
for (j = 0; j < tx_qgrp->num_txq; j++) {
- struct idpf_queue *q = tx_qgrp->txqs[j];
+ struct idpf_tx_queue *q = tx_qgrp->txqs[j];
q->dev = &adapter->pdev->dev;
q->desc_count = vport->txq_desc_count;
q->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter);
- q->vport = vport;
+ q->netdev = vport->netdev;
q->txq_grp = tx_qgrp;
- hash_init(q->sched_buf_hash);
- if (flow_sch_en)
- set_bit(__IDPF_Q_FLOW_SCH_EN, q->flags);
+ if (!split) {
+ q->clean_budget = vport->compln_clean_budget;
+ idpf_queue_assign(CRC_EN, q,
+ vport->crc_enable);
+ }
+
+ if (!flow_sch_en)
+ continue;
+
+ if (split) {
+ q->stash = &stashes[j];
+ hash_init(q->stash->sched_buf_hash);
+ }
+
+ idpf_queue_set(FLOW_SCH_EN, q);
}
- if (!idpf_is_queue_model_split(vport->txq_model))
+ if (!split)
continue;
tx_qgrp->complq = kcalloc(IDPF_COMPLQ_PER_GROUP,
sizeof(*tx_qgrp->complq),
GFP_KERNEL);
- if (!tx_qgrp->complq) {
- err = -ENOMEM;
+ if (!tx_qgrp->complq)
goto err_alloc;
- }
- tx_qgrp->complq->dev = &adapter->pdev->dev;
tx_qgrp->complq->desc_count = vport->complq_desc_count;
- tx_qgrp->complq->vport = vport;
tx_qgrp->complq->txq_grp = tx_qgrp;
+ tx_qgrp->complq->netdev = vport->netdev;
+ tx_qgrp->complq->clean_budget = vport->compln_clean_budget;
if (flow_sch_en)
- __set_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags);
+ idpf_queue_set(FLOW_SCH_EN, tx_qgrp->complq);
}
return 0;
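Flow-scheduled Tx queues no longer embed their out-of-order completion bookkeeping: when running split-queue with flow scheduling, each queue group allocates an array of stashes, one per Tx queue, and every queue points at its own element (q->stash = &stashes[j]). The container itself is defined in the headers, outside this diff; a plausible sketch based only on the fields the code above touches (the hash-table order is a guess):

/* Illustrative sketch -- the real struct lives in the idpf headers. */
#include <linux/hashtable.h>

struct idpf_txq_stash {
	struct idpf_buf_lifo buf_stack;		/* free stash entries */
	DECLARE_HASHTABLE(sched_buf_hash, 12);	/* in flight, keyed by compl_tag */
};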
@@ -1226,7 +1431,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
err_alloc:
idpf_txq_group_rel(vport);
- return err;
+ return -ENOMEM;
}
/**
@@ -1238,8 +1443,6 @@ err_alloc:
*/
static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct idpf_queue *q;
int i, k, err = 0;
bool hs;
@@ -1292,21 +1495,13 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
struct idpf_bufq_set *bufq_set =
&rx_qgrp->splitq.bufq_sets[j];
int swq_size = sizeof(struct idpf_sw_queue);
+ struct idpf_buf_queue *q;
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
- q->dev = &adapter->pdev->dev;
q->desc_count = vport->bufq_desc_count[j];
- q->vport = vport;
- q->rxq_grp = rx_qgrp;
- q->idx = j;
- q->rx_buf_size = vport->bufq_size[j];
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
- q->rx_buf_stride = IDPF_RX_BUF_STRIDE;
- if (hs) {
- q->rx_hsplit_en = true;
- q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
- }
+ idpf_queue_assign(HSPLIT_EN, q, hs);
bufq_set->num_refillqs = num_rxq;
bufq_set->refillqs = kcalloc(num_rxq, swq_size,
@@ -1319,13 +1514,12 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
struct idpf_sw_queue *refillq =
&bufq_set->refillqs[k];
- refillq->dev = &vport->adapter->pdev->dev;
refillq->desc_count =
vport->bufq_desc_count[j];
- set_bit(__IDPF_Q_GEN_CHK, refillq->flags);
- set_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags);
+ idpf_queue_set(GEN_CHK, refillq);
+ idpf_queue_set(RFL_GEN_CHK, refillq);
refillq->ring = kcalloc(refillq->desc_count,
- sizeof(u16),
+ sizeof(*refillq->ring),
GFP_KERNEL);
if (!refillq->ring) {
err = -ENOMEM;
@@ -1336,36 +1530,30 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
skip_splitq_rx_init:
for (j = 0; j < num_rxq; j++) {
+ struct idpf_rx_queue *q;
+
if (!idpf_is_queue_model_split(vport->rxq_model)) {
q = rx_qgrp->singleq.rxqs[j];
goto setup_rxq;
}
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
- rx_qgrp->splitq.rxq_sets[j]->refillq0 =
+ rx_qgrp->splitq.rxq_sets[j]->refillq[0] =
&rx_qgrp->splitq.bufq_sets[0].refillqs[j];
if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP)
- rx_qgrp->splitq.rxq_sets[j]->refillq1 =
+ rx_qgrp->splitq.rxq_sets[j]->refillq[1] =
&rx_qgrp->splitq.bufq_sets[1].refillqs[j];
- if (hs) {
- q->rx_hsplit_en = true;
- q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
- }
+ idpf_queue_assign(HSPLIT_EN, q, hs);
setup_rxq:
- q->dev = &adapter->pdev->dev;
q->desc_count = vport->rxq_desc_count;
- q->vport = vport;
- q->rxq_grp = rx_qgrp;
+ q->rx_ptype_lkup = vport->rx_ptype_lkup;
+ q->netdev = vport->netdev;
+ q->bufq_sets = rx_qgrp->splitq.bufq_sets;
q->idx = (i * num_rxq) + j;
- /* In splitq mode, RXQ buffer size should be
- * set to that of the first buffer queue
- * associated with this RXQ
- */
- q->rx_buf_size = vport->bufq_size[0];
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
q->rx_max_pkt_size = vport->netdev->mtu +
- IDPF_PACKET_HDR_PAD;
+ LIBETH_RX_LL_LEN;
idpf_rxq_set_descids(vport, q);
}
}
@@ -1445,12 +1633,13 @@ err_out:
* idpf_tx_handle_sw_marker - Handle queue marker packet
* @tx_q: tx queue to handle software marker
*/
-static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q)
+static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q)
{
- struct idpf_vport *vport = tx_q->vport;
+ struct idpf_netdev_priv *priv = netdev_priv(tx_q->netdev);
+ struct idpf_vport *vport = priv->vport;
int i;
- clear_bit(__IDPF_Q_SW_MARKER, tx_q->flags);
+ idpf_queue_clear(SW_MARKER, tx_q);
/* Hardware must write marker packets to all queues associated with
* completion queues. So check if all queues received marker packets
*/
@@ -1458,7 +1647,7 @@ static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q)
/* If we're still waiting on any other TXQ marker completions,
* just return now since we cannot wake up the marker_wq yet.
*/
- if (test_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags))
+ if (idpf_queue_has(SW_MARKER, vport->txqs[i]))
return;
/* Drain complete */
@@ -1474,7 +1663,7 @@ static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q)
* @cleaned: pointer to stats struct to track cleaned packets/bytes
* @napi_budget: Used to determine if we are in netpoll
*/
-static void idpf_tx_splitq_clean_hdr(struct idpf_queue *tx_q,
+static void idpf_tx_splitq_clean_hdr(struct idpf_tx_queue *tx_q,
struct idpf_tx_buf *tx_buf,
struct idpf_cleaned_stats *cleaned,
int napi_budget)
@@ -1505,7 +1694,8 @@ static void idpf_tx_splitq_clean_hdr(struct idpf_queue *tx_q,
* @cleaned: pointer to stats struct to track cleaned packets/bytes
* @budget: Used to determine if we are in netpoll
*/
-static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag,
+static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq,
+ u16 compl_tag,
struct idpf_cleaned_stats *cleaned,
int budget)
{
@@ -1513,7 +1703,7 @@ static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag,
struct hlist_node *tmp_buf;
/* Buffer completion */
- hash_for_each_possible_safe(txq->sched_buf_hash, stash, tmp_buf,
+ hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf,
hlist, compl_tag) {
if (unlikely(stash->buf.compl_tag != (int)compl_tag))
continue;
@@ -1530,7 +1720,7 @@ static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag,
}
/* Push shadow buf back onto stack */
- idpf_buf_lifo_push(&txq->buf_stack, stash);
+ idpf_buf_lifo_push(&txq->stash->buf_stack, stash);
hash_del(&stash->hlist);
}
@@ -1542,7 +1732,7 @@ static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag,
* @txq: Tx queue to clean
* @tx_buf: buffer to store
*/
-static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq,
+static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq,
struct idpf_tx_buf *tx_buf)
{
struct idpf_tx_stash *stash;
@@ -1551,10 +1741,10 @@ static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq,
!dma_unmap_len(tx_buf, len)))
return 0;
- stash = idpf_buf_lifo_pop(&txq->buf_stack);
+ stash = idpf_buf_lifo_pop(&txq->stash->buf_stack);
if (unlikely(!stash)) {
net_err_ratelimited("%s: No out-of-order TX buffers left!\n",
- txq->vport->netdev->name);
+ netdev_name(txq->netdev));
return -ENOMEM;
}
@@ -1568,7 +1758,8 @@ static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq,
stash->buf.compl_tag = tx_buf->compl_tag;
/* Add buffer to buf_hash table to be freed later */
- hash_add(txq->sched_buf_hash, &stash->hlist, stash->buf.compl_tag);
+ hash_add(txq->stash->sched_buf_hash, &stash->hlist,
+ stash->buf.compl_tag);
memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
@@ -1584,7 +1775,7 @@ do { \
if (unlikely(!(ntc))) { \
ntc -= (txq)->desc_count; \
buf = (txq)->tx_buf; \
- desc = IDPF_FLEX_TX_DESC(txq, 0); \
+ desc = &(txq)->flex_tx[0]; \
} else { \
(buf)++; \
(desc)++; \
@@ -1607,7 +1798,7 @@ do { \
* and the buffers will be cleaned separately. The stats are not updated from
* this function when using flow-based scheduling.
*/
-static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end,
+static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
int napi_budget,
struct idpf_cleaned_stats *cleaned,
bool descs_only)
@@ -1617,8 +1808,8 @@ static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end,
s16 ntc = tx_q->next_to_clean;
struct idpf_tx_buf *tx_buf;
- tx_desc = IDPF_FLEX_TX_DESC(tx_q, ntc);
- next_pending_desc = IDPF_FLEX_TX_DESC(tx_q, end);
+ tx_desc = &tx_q->flex_tx[ntc];
+ next_pending_desc = &tx_q->flex_tx[end];
tx_buf = &tx_q->tx_buf[ntc];
ntc -= tx_q->desc_count;
@@ -1703,7 +1894,7 @@ do { \
* stashed. Returns the byte/segment count for the cleaned packet associated
* this completion tag.
*/
-static bool idpf_tx_clean_buf_ring(struct idpf_queue *txq, u16 compl_tag,
+static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
struct idpf_cleaned_stats *cleaned,
int budget)
{
@@ -1772,14 +1963,14 @@ static bool idpf_tx_clean_buf_ring(struct idpf_queue *txq, u16 compl_tag,
*
* Returns bytes/packets cleaned
*/
-static void idpf_tx_handle_rs_completion(struct idpf_queue *txq,
+static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
struct idpf_splitq_tx_compl_desc *desc,
struct idpf_cleaned_stats *cleaned,
int budget)
{
u16 compl_tag;
- if (!test_bit(__IDPF_Q_FLOW_SCH_EN, txq->flags)) {
+ if (!idpf_queue_has(FLOW_SCH_EN, txq)) {
u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head);
return idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
@@ -1802,24 +1993,23 @@ static void idpf_tx_handle_rs_completion(struct idpf_queue *txq,
*
* Returns true if there's any budget left (e.g. the clean is finished)
*/
-static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
+static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget,
int *cleaned)
{
struct idpf_splitq_tx_compl_desc *tx_desc;
- struct idpf_vport *vport = complq->vport;
s16 ntc = complq->next_to_clean;
struct idpf_netdev_priv *np;
unsigned int complq_budget;
bool complq_ok = true;
int i;
- complq_budget = vport->compln_clean_budget;
- tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, ntc);
+ complq_budget = complq->clean_budget;
+ tx_desc = &complq->comp[ntc];
ntc -= complq->desc_count;
do {
struct idpf_cleaned_stats cleaned_stats = { };
- struct idpf_queue *tx_q;
+ struct idpf_tx_queue *tx_q;
int rel_tx_qid;
u16 hw_head;
u8 ctype; /* completion type */
@@ -1828,7 +2018,7 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
/* if the descriptor isn't done, no work yet to do */
gen = le16_get_bits(tx_desc->qid_comptype_gen,
IDPF_TXD_COMPLQ_GEN_M);
- if (test_bit(__IDPF_Q_GEN_CHK, complq->flags) != gen)
+ if (idpf_queue_has(GEN_CHK, complq) != gen)
break;
/* Find necessary info of TX queue to clean buffers */
@@ -1836,8 +2026,7 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
IDPF_TXD_COMPLQ_QID_M);
if (rel_tx_qid >= complq->txq_grp->num_txq ||
!complq->txq_grp->txqs[rel_tx_qid]) {
- dev_err(&complq->vport->adapter->pdev->dev,
- "TxQ not found\n");
+ netdev_err(complq->netdev, "TxQ not found\n");
goto fetch_next_desc;
}
tx_q = complq->txq_grp->txqs[rel_tx_qid];
@@ -1860,15 +2049,14 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
idpf_tx_handle_sw_marker(tx_q);
break;
default:
- dev_err(&tx_q->vport->adapter->pdev->dev,
- "Unknown TX completion type: %d\n",
- ctype);
+ netdev_err(tx_q->netdev,
+ "Unknown TX completion type: %d\n", ctype);
goto fetch_next_desc;
}
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_add(&tx_q->q_stats.tx.packets, cleaned_stats.packets);
- u64_stats_add(&tx_q->q_stats.tx.bytes, cleaned_stats.bytes);
+ u64_stats_add(&tx_q->q_stats.packets, cleaned_stats.packets);
+ u64_stats_add(&tx_q->q_stats.bytes, cleaned_stats.bytes);
tx_q->cleaned_pkts += cleaned_stats.packets;
tx_q->cleaned_bytes += cleaned_stats.bytes;
complq->num_completions++;
@@ -1879,8 +2067,8 @@ fetch_next_desc:
ntc++;
if (unlikely(!ntc)) {
ntc -= complq->desc_count;
- tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, 0);
- change_bit(__IDPF_Q_GEN_CHK, complq->flags);
+ tx_desc = &complq->comp[0];
+ idpf_queue_change(GEN_CHK, complq);
}
prefetch(tx_desc);
@@ -1896,9 +2084,9 @@ fetch_next_desc:
IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq)))
complq_ok = false;
- np = netdev_priv(complq->vport->netdev);
+ np = netdev_priv(complq->netdev);
for (i = 0; i < complq->txq_grp->num_txq; ++i) {
- struct idpf_queue *tx_q = complq->txq_grp->txqs[i];
+ struct idpf_tx_queue *tx_q = complq->txq_grp->txqs[i];
struct netdev_queue *nq;
bool dont_wake;
@@ -1909,11 +2097,11 @@ fetch_next_desc:
*cleaned += tx_q->cleaned_pkts;
/* Update BQL */
- nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) ||
np->state != __IDPF_VPORT_UP ||
- !netif_carrier_ok(tx_q->vport->netdev);
+ !netif_carrier_ok(tx_q->netdev);
/* Check if the TXQ needs to and can be restarted */
__netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
@@ -1976,7 +2164,7 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
*
* Returns 0 if stop is not needed
*/
-int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size)
+int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size)
{
struct netdev_queue *nq;
@@ -1984,10 +2172,10 @@ int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size)
return 0;
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_inc(&tx_q->q_stats.tx.q_busy);
+ u64_stats_inc(&tx_q->q_stats.q_busy);
u64_stats_update_end(&tx_q->stats_sync);
- nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size);
}
@@ -1999,7 +2187,7 @@ int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size)
*
* Returns 0 if stop is not needed
*/
-static int idpf_tx_maybe_stop_splitq(struct idpf_queue *tx_q,
+static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
unsigned int descs_needed)
{
if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
@@ -2023,9 +2211,9 @@ static int idpf_tx_maybe_stop_splitq(struct idpf_queue *tx_q,
splitq_stop:
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_inc(&tx_q->q_stats.tx.q_busy);
+ u64_stats_inc(&tx_q->q_stats.q_busy);
u64_stats_update_end(&tx_q->stats_sync);
- netif_stop_subqueue(tx_q->vport->netdev, tx_q->idx);
+ netif_stop_subqueue(tx_q->netdev, tx_q->idx);
return -EBUSY;
}
@@ -2040,12 +2228,12 @@ splitq_stop:
* to do a register write to update our queue status. We know this can only
* mean tail here as HW should be owning head for TX.
*/
-void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val,
+void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
bool xmit_more)
{
struct netdev_queue *nq;
- nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
tx_q->next_to_use = val;
idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED);
@@ -2069,7 +2257,7 @@ void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val,
*
* Returns number of data descriptors needed for this skb.
*/
-unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
+unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
struct sk_buff *skb)
{
const struct skb_shared_info *shinfo;
@@ -2102,7 +2290,7 @@ unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
count = idpf_size_to_txd_count(skb->len);
u64_stats_update_begin(&txq->stats_sync);
- u64_stats_inc(&txq->q_stats.tx.linearize);
+ u64_stats_inc(&txq->q_stats.linearize);
u64_stats_update_end(&txq->stats_sync);
}
@@ -2116,11 +2304,11 @@ unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
* @first: original first buffer info buffer for packet
* @idx: starting point on ring to unwind
*/
-void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
+void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
struct idpf_tx_buf *first, u16 idx)
{
u64_stats_update_begin(&txq->stats_sync);
- u64_stats_inc(&txq->q_stats.tx.dma_map_errs);
+ u64_stats_inc(&txq->q_stats.dma_map_errs);
u64_stats_update_end(&txq->stats_sync);
/* clear dma mappings for failed tx_buf map */
@@ -2143,7 +2331,7 @@ void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
* used one additional descriptor for a context
* descriptor. Reset that here.
*/
- tx_desc = IDPF_FLEX_TX_DESC(txq, idx);
+ tx_desc = &txq->flex_tx[idx];
memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc));
if (idx == 0)
idx = txq->desc_count;
@@ -2159,7 +2347,7 @@ void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
* @txq: the tx ring to wrap
* @ntu: ring index to bump
*/
-static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_queue *txq, u16 ntu)
+static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu)
{
ntu++;
@@ -2181,7 +2369,7 @@ static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_queue *txq, u16 ntu)
* and gets a physical address for each memory location and programs
* it and the length into the transmit flex descriptor.
*/
-static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
+static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
struct idpf_tx_splitq_params *params,
struct idpf_tx_buf *first)
{
@@ -2202,7 +2390,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
data_len = skb->data_len;
size = skb_headlen(skb);
- tx_desc = IDPF_FLEX_TX_DESC(tx_q, i);
+ tx_desc = &tx_q->flex_tx[i];
dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
@@ -2275,7 +2463,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
i++;
if (i == tx_q->desc_count) {
- tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
+ tx_desc = &tx_q->flex_tx[0];
i = 0;
tx_q->compl_tag_cur_gen =
IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
@@ -2320,7 +2508,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
i++;
if (i == tx_q->desc_count) {
- tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
+ tx_desc = &tx_q->flex_tx[0];
i = 0;
tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
}
@@ -2348,7 +2536,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
tx_q->txq_grp->num_completions_pending++;
/* record bytecount for BQL */
- nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
netdev_tx_sent_queue(nq, first->bytecount);
idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
@@ -2525,8 +2713,8 @@ static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs)
* E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO
* header, 1 for segment payload, and then 7 for the fragments.
*/
-bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
- unsigned int count)
+static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
+ unsigned int count)
{
if (likely(count < max_bufs))
return false;
@@ -2544,7 +2732,7 @@ bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
* ring entry to reflect that this index is a context descriptor
*/
static struct idpf_flex_tx_ctx_desc *
-idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq)
+idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
{
struct idpf_flex_tx_ctx_desc *desc;
int i = txq->next_to_use;
@@ -2553,7 +2741,7 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq)
txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
/* grab the next descriptor */
- desc = IDPF_FLEX_TX_CTX_DESC(txq, i);
+ desc = &txq->flex_ctx[i];
txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
return desc;
@@ -2564,10 +2752,10 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq)
* @tx_q: queue to send buffer on
* @skb: pointer to skb
*/
-netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb)
+netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb)
{
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_inc(&tx_q->q_stats.tx.skb_drops);
+ u64_stats_inc(&tx_q->q_stats.skb_drops);
u64_stats_update_end(&tx_q->stats_sync);
idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
@@ -2585,7 +2773,7 @@ netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb)
* Returns NETDEV_TX_OK if sent, else an error code
*/
static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
- struct idpf_queue *tx_q)
+ struct idpf_tx_queue *tx_q)
{
struct idpf_tx_splitq_params tx_params = { };
struct idpf_tx_buf *first;
@@ -2625,7 +2813,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len;
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_inc(&tx_q->q_stats.tx.lso_pkts);
+ u64_stats_inc(&tx_q->q_stats.lso_pkts);
u64_stats_update_end(&tx_q->stats_sync);
}
@@ -2642,7 +2830,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
}
- if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_q->flags)) {
+ if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
/* Set the RE bit to catch any packets that may have not been
@@ -2672,17 +2860,16 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
}
/**
- * idpf_tx_splitq_start - Selects the right Tx queue to send buffer
+ * idpf_tx_start - Selects the right Tx queue to send buffer
* @skb: send buffer
* @netdev: network interface device structure
*
* Returns NETDEV_TX_OK if sent, else an error code
*/
-netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
- struct net_device *netdev)
+netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev)
{
struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
- struct idpf_queue *tx_q;
+ struct idpf_tx_queue *tx_q;
if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) {
dev_kfree_skb_any(skb);
@@ -2701,31 +2888,10 @@ netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- return idpf_tx_splitq_frame(skb, tx_q);
-}
-
-/**
- * idpf_ptype_to_htype - get a hash type
- * @decoded: Decoded Rx packet type related fields
- *
- * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by
- * skb_set_hash based on PTYPE as parsed by HW Rx pipeline and is part of
- * Rx desc.
- */
-enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded)
-{
- if (!decoded->known)
- return PKT_HASH_TYPE_NONE;
- if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 &&
- decoded->inner_prot)
- return PKT_HASH_TYPE_L4;
- if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 &&
- decoded->outer_ip)
- return PKT_HASH_TYPE_L3;
- if (decoded->outer_ip == IDPF_RX_PTYPE_OUTER_L2)
- return PKT_HASH_TYPE_L2;
-
- return PKT_HASH_TYPE_NONE;
+ if (idpf_is_queue_model_split(vport->txq_model))
+ return idpf_tx_splitq_frame(skb, tx_q);
+ else
+ return idpf_tx_singleq_frame(skb, tx_q);
}
/**
@@ -2735,20 +2901,21 @@ enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *deco
* @rx_desc: Receive descriptor
* @decoded: Decoded Rx packet type related fields
*/
-static void idpf_rx_hash(struct idpf_queue *rxq, struct sk_buff *skb,
- struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
- struct idpf_rx_ptype_decoded *decoded)
+static void
+idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
+ struct libeth_rx_pt decoded)
{
u32 hash;
- if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXHASH)))
+ if (!libeth_rx_pt_has_hash(rxq->netdev, decoded))
return;
hash = le16_to_cpu(rx_desc->hash1) |
(rx_desc->ff2_mirrid_hash2.hash2 << 16) |
(rx_desc->hash3 << 24);
- skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded));
+ libeth_rx_pt_set_hash(skb, hash, decoded);
}
/**
@@ -2760,92 +2927,83 @@ static void idpf_rx_hash(struct idpf_queue *rxq, struct sk_buff *skb,
*
* skb->protocol must be set before this function is called
*/
-static void idpf_rx_csum(struct idpf_queue *rxq, struct sk_buff *skb,
- struct idpf_rx_csum_decoded *csum_bits,
- struct idpf_rx_ptype_decoded *decoded)
+static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ struct idpf_rx_csum_decoded csum_bits,
+ struct libeth_rx_pt decoded)
{
bool ipv4, ipv6;
/* check if Rx checksum is enabled */
- if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXCSUM)))
+ if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded))
return;
/* check if HW has decoded the packet and checksum */
- if (!(csum_bits->l3l4p))
+ if (unlikely(!csum_bits.l3l4p))
return;
- ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4);
- ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6);
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
- if (ipv4 && (csum_bits->ipe || csum_bits->eipe))
+ if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
goto checksum_fail;
- if (ipv6 && csum_bits->ipv6exadd)
+ if (unlikely(ipv6 && csum_bits.ipv6exadd))
return;
/* check for L4 errors and handle packets that were not able to be
* checksummed
*/
- if (csum_bits->l4e)
+ if (unlikely(csum_bits.l4e))
goto checksum_fail;
- /* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */
- switch (decoded->inner_prot) {
- case IDPF_RX_PTYPE_INNER_PROT_ICMP:
- case IDPF_RX_PTYPE_INNER_PROT_TCP:
- case IDPF_RX_PTYPE_INNER_PROT_UDP:
- if (!csum_bits->raw_csum_inv) {
- u16 csum = csum_bits->raw_csum;
-
- skb->csum = csum_unfold((__force __sum16)~swab16(csum));
- skb->ip_summed = CHECKSUM_COMPLETE;
- } else {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
- break;
- case IDPF_RX_PTYPE_INNER_PROT_SCTP:
+ if (csum_bits.raw_csum_inv ||
+ decoded.inner_prot == LIBETH_RX_PT_INNER_SCTP) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- break;
- default:
- break;
+ return;
}
+ skb->csum = csum_unfold((__force __sum16)~swab16(csum_bits.raw_csum));
+ skb->ip_summed = CHECKSUM_COMPLETE;
+
return;
checksum_fail:
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.rx.hw_csum_err);
+ u64_stats_inc(&rxq->q_stats.hw_csum_err);
u64_stats_update_end(&rxq->stats_sync);
}
/**
* idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor
* @rx_desc: receive descriptor
- * @csum: structure to extract checksum fields
*
+ * Return: parsed checksum status.
**/
-static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
- struct idpf_rx_csum_decoded *csum)
+static struct idpf_rx_csum_decoded
+idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
+ struct idpf_rx_csum_decoded csum = { };
u8 qword0, qword1;
qword0 = rx_desc->status_err0_qw0;
qword1 = rx_desc->status_err0_qw1;
- csum->ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M,
+ csum.ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M,
+ qword1);
+ csum.eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M,
qword1);
- csum->eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M,
+ csum.l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M,
+ qword1);
+ csum.l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M,
qword1);
- csum->l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M,
- qword1);
- csum->l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M,
- qword1);
- csum->ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
- qword0);
- csum->raw_csum_inv =
+ csum.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
+ qword0);
+ csum.raw_csum_inv =
le16_get_bits(rx_desc->ptype_err_fflags0,
VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
- csum->raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
+ csum.raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
+
+ return csum;
}
/**
@@ -2860,23 +3018,24 @@ static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_n
* Populate the skb fields with the total number of RSC segments, RSC payload
* length and packet type.
*/
-static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb,
- struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
- struct idpf_rx_ptype_decoded *decoded)
+static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
+ struct libeth_rx_pt decoded)
{
u16 rsc_segments, rsc_seg_len;
bool ipv4, ipv6;
int len;
- if (unlikely(!decoded->outer_ip))
+ if (unlikely(libeth_rx_pt_get_ip_ver(decoded) ==
+ LIBETH_RX_PT_OUTER_L2))
return -EINVAL;
rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen);
if (unlikely(!rsc_seg_len))
return -EINVAL;
- ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4);
- ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6);
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
if (unlikely(!(ipv4 ^ ipv6)))
return -EINVAL;
@@ -2914,7 +3073,7 @@ static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb,
tcp_gro_complete(skb);
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.rx.rsc_pkts);
+ u64_stats_inc(&rxq->q_stats.rsc_pkts);
u64_stats_update_end(&rxq->stats_sync);
return 0;
@@ -2930,35 +3089,31 @@ static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb,
* order to populate the hash, checksum, protocol, and
* other fields within the skb.
*/
-static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
- struct sk_buff *skb,
- struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
+static int
+idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits = { };
- struct idpf_rx_ptype_decoded decoded;
+ struct idpf_rx_csum_decoded csum_bits;
+ struct libeth_rx_pt decoded;
u16 rx_ptype;
rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0,
VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M);
-
- skb->protocol = eth_type_trans(skb, rxq->vport->netdev);
-
- decoded = rxq->vport->rx_ptype_lkup[rx_ptype];
- /* If we don't know the ptype we can't do anything else with it. Just
- * pass it up the stack as-is.
- */
- if (!decoded.known)
- return 0;
+ decoded = rxq->rx_ptype_lkup[rx_ptype];
/* process RSS/hash */
- idpf_rx_hash(rxq, skb, rx_desc, &decoded);
+ idpf_rx_hash(rxq, skb, rx_desc, decoded);
+
+ skb->protocol = eth_type_trans(skb, rxq->netdev);
if (le16_get_bits(rx_desc->hdrlen_flags,
VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
- return idpf_rx_rsc(rxq, skb, rx_desc, &decoded);
+ return idpf_rx_rsc(rxq, skb, rx_desc, decoded);
+
+ csum_bits = idpf_rx_splitq_extract_csum_bits(rx_desc);
+ idpf_rx_csum(rxq, skb, csum_bits, decoded);
- idpf_rx_splitq_extract_csum_bits(rx_desc, &csum_bits);
- idpf_rx_csum(rxq, skb, &csum_bits, &decoded);
+ skb_record_rx_queue(skb, rxq->idx);
return 0;
}
@@ -2976,103 +3131,73 @@ static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
unsigned int size)
{
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
- rx_buf->page_offset, size, rx_buf->truesize);
+ u32 hr = rx_buf->page->pp->p.offset;
- rx_buf->page = NULL;
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
+ rx_buf->offset + hr, size, rx_buf->truesize);
}
/**
- * idpf_rx_construct_skb - Allocate skb and populate it
- * @rxq: Rx descriptor queue
- * @rx_buf: Rx buffer to pull data from
- * @size: the length of the packet
+ * idpf_rx_hsplit_wa - handle header buffer overflows and split errors
+ * @hdr: Rx buffer for the headers
+ * @buf: Rx buffer for the payload
+ * @data_len: number of bytes received to the payload buffer
*
- * This function allocates an skb. It then populates it with the page
- * data from the current receive descriptor, taking care to set up the
- * skb correctly.
+ * When a header buffer overflow occurs or the HW was unable to parse the
+ * packet type to perform header split, the whole frame gets placed to the
+ * payload buffer. We can't build a valid skb around a payload buffer when
+ * the header split is active since it doesn't reserve any head- or tailroom.
+ * In that case, copy either the whole frame when it's short or just the
+ * Ethernet header to the header buffer to be able to build an skb and adjust
+ * the data offset in the payload buffer, IOW emulate the header split.
+ *
+ * Return: number of bytes copied to the header buffer.
*/
-struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq,
- struct idpf_rx_buf *rx_buf,
- unsigned int size)
+static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
+ struct libeth_fqe *buf, u32 data_len)
{
- unsigned int headlen;
- struct sk_buff *skb;
- void *va;
-
- va = page_address(rx_buf->page) + rx_buf->page_offset;
-
- /* prefetch first cache line of first page */
- net_prefetch(va);
- /* allocate a skb to store the frags */
- skb = napi_alloc_skb(&rxq->q_vector->napi, IDPF_RX_HDR_SIZE);
- if (unlikely(!skb)) {
- idpf_rx_put_page(rx_buf);
-
- return NULL;
- }
-
- skb_record_rx_queue(skb, rxq->idx);
- skb_mark_for_recycle(skb);
+ u32 copy = data_len <= L1_CACHE_BYTES ? data_len : ETH_HLEN;
+ const void *src;
+ void *dst;
- /* Determine available headroom for copy */
- headlen = size;
- if (headlen > IDPF_RX_HDR_SIZE)
- headlen = eth_get_headlen(skb->dev, va, IDPF_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
-
- /* if we exhaust the linear part then add what is left as a frag */
- size -= headlen;
- if (!size) {
- idpf_rx_put_page(rx_buf);
-
- return skb;
- }
+ if (!libeth_rx_sync_for_cpu(buf, copy))
+ return 0;
- skb_add_rx_frag(skb, 0, rx_buf->page, rx_buf->page_offset + headlen,
- size, rx_buf->truesize);
+ dst = page_address(hdr->page) + hdr->offset + hdr->page->pp->p.offset;
+ src = page_address(buf->page) + buf->offset + buf->page->pp->p.offset;
+ memcpy(dst, src, LARGEST_ALIGN(copy));
- /* Since we're giving the page to the stack, clear our reference to it.
- * We'll get a new one during buffer posting.
- */
- rx_buf->page = NULL;
+ buf->offset += copy;
- return skb;
+ return copy;
}
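/* A minimal sketch of how the return value is consumed: the bytes copied by
 * idpf_rx_hsplit_wa() now live in the header buffer, so the splitq Rx clean
 * path (see idpf_rx_splitq_clean() further down in this patch) charges them
 * to hdr_len and shrinks the payload length accordingly:
 *
 *	hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len);
 *	pkt_len -= hdr_len;
 */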
/**
- * idpf_rx_hdr_construct_skb - Allocate skb and populate it from header buffer
- * @rxq: Rx descriptor queue
- * @va: Rx buffer to pull data from
+ * idpf_rx_build_skb - Allocate skb and populate it from header buffer
+ * @buf: Rx buffer to pull data from
* @size: the length of the packet
*
* This function allocates an skb. It then populates it with the page data from
* the current receive descriptor, taking care to set up the skb correctly.
- * This specifically uses a header buffer to start building the skb.
*/
-static struct sk_buff *idpf_rx_hdr_construct_skb(struct idpf_queue *rxq,
- const void *va,
- unsigned int size)
+struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size)
{
+ u32 hr = buf->page->pp->p.offset;
struct sk_buff *skb;
+ void *va;
- /* allocate a skb to store the frags */
- skb = napi_alloc_skb(&rxq->q_vector->napi, size);
+ va = page_address(buf->page) + buf->offset;
+ prefetch(va + hr);
+
+ skb = napi_build_skb(va, buf->truesize);
if (unlikely(!skb))
return NULL;
- skb_record_rx_queue(skb, rxq->idx);
-
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
- /* More than likely, a payload fragment, which will use a page from
- * page_pool will be added to the SKB so mark it for recycle
- * preemptively. And if not, it's inconsequential.
- */
skb_mark_for_recycle(skb);
+ skb_reserve(skb, hr);
+ __skb_put(skb, size);
+
return skb;
}
@@ -3115,31 +3240,27 @@ static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_de
*
* Returns amount of work completed
*/
-static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
+static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
{
int total_rx_bytes = 0, total_rx_pkts = 0;
- struct idpf_queue *rx_bufq = NULL;
+ struct idpf_buf_queue *rx_bufq = NULL;
struct sk_buff *skb = rxq->skb;
u16 ntc = rxq->next_to_clean;
/* Process Rx packets bounded by budget */
while (likely(total_rx_pkts < budget)) {
struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
+ struct libeth_fqe *hdr, *rx_buf = NULL;
struct idpf_sw_queue *refillq = NULL;
struct idpf_rxq_set *rxq_set = NULL;
- struct idpf_rx_buf *rx_buf = NULL;
- union virtchnl2_rx_desc *desc;
unsigned int pkt_len = 0;
unsigned int hdr_len = 0;
u16 gen_id, buf_id = 0;
- /* Header buffer overflow only valid for header split */
- bool hbo = false;
int bufq_id;
u8 rxdid;
/* get the Rx desc from Rx queue based on 'next_to_clean' */
- desc = IDPF_RX_DESC(rxq, ntc);
- rx_desc = (struct virtchnl2_rx_flex_desc_adv_nic_3 *)desc;
+ rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc
@@ -3150,7 +3271,7 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);
- if (test_bit(__IDPF_Q_GEN_CHK, rxq->flags) != gen_id)
+ if (idpf_queue_has(GEN_CHK, rxq) != gen_id)
break;
rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M,
@@ -3158,7 +3279,7 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) {
IDPF_RX_BUMP_NTC(rxq, ntc);
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.rx.bad_descs);
+ u64_stats_inc(&rxq->q_stats.bad_descs);
u64_stats_update_end(&rxq->stats_sync);
continue;
}
@@ -3166,71 +3287,79 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
pkt_len = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M);
- hbo = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M,
- rx_desc->status_err0_qw1);
-
- if (unlikely(hbo)) {
- /* If a header buffer overflow, occurs, i.e. header is
- * too large to fit in the header split buffer, HW will
- * put the entire packet, including headers, in the
- * data/payload buffer.
- */
- u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.rx.hsplit_buf_ovf);
- u64_stats_update_end(&rxq->stats_sync);
- goto bypass_hsplit;
- }
-
- hdr_len = le16_get_bits(rx_desc->hdrlen_flags,
- VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M);
-
-bypass_hsplit:
bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M);
rxq_set = container_of(rxq, struct idpf_rxq_set, rxq);
- if (!bufq_id)
- refillq = rxq_set->refillq0;
- else
- refillq = rxq_set->refillq1;
+ refillq = rxq_set->refillq[bufq_id];
/* retrieve buffer from the rxq */
- rx_bufq = &rxq->rxq_grp->splitq.bufq_sets[bufq_id].bufq;
+ rx_bufq = &rxq->bufq_sets[bufq_id].bufq;
buf_id = le16_to_cpu(rx_desc->buf_id);
- rx_buf = &rx_bufq->rx_buf.buf[buf_id];
+ rx_buf = &rx_bufq->buf[buf_id];
+
+ if (!rx_bufq->hdr_pp)
+ goto payload;
+
+#define __HBO_BIT VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M
+#define __HDR_LEN_MASK VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M
+ if (likely(!(rx_desc->status_err0_qw1 & __HBO_BIT)))
+ /* If a header buffer overflow occurs, i.e. the header is
+ * too large to fit in the header split buffer, HW will
+ * put the entire packet, including headers, in the
+ * data/payload buffer.
+ */
+ hdr_len = le16_get_bits(rx_desc->hdrlen_flags,
+ __HDR_LEN_MASK);
+#undef __HDR_LEN_MASK
+#undef __HBO_BIT
+
+ hdr = &rx_bufq->hdr_buf[buf_id];
- if (hdr_len) {
- const void *va = (u8 *)rx_bufq->rx_buf.hdr_buf_va +
- (u32)buf_id * IDPF_HDR_BUF_SIZE;
+ if (unlikely(!hdr_len && !skb)) {
+ hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len);
+ pkt_len -= hdr_len;
- skb = idpf_rx_hdr_construct_skb(rxq, va, hdr_len);
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.rx.hsplit_pkts);
+ u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf);
u64_stats_update_end(&rxq->stats_sync);
}
- if (pkt_len) {
- idpf_rx_sync_for_cpu(rx_buf, pkt_len);
- if (skb)
- idpf_rx_add_frag(rx_buf, skb, pkt_len);
- else
- skb = idpf_rx_construct_skb(rxq, rx_buf,
- pkt_len);
- } else {
- idpf_rx_put_page(rx_buf);
+ if (libeth_rx_sync_for_cpu(hdr, hdr_len)) {
+ skb = idpf_rx_build_skb(hdr, hdr_len);
+ if (!skb)
+ break;
+
+ u64_stats_update_begin(&rxq->stats_sync);
+ u64_stats_inc(&rxq->q_stats.hsplit_pkts);
+ u64_stats_update_end(&rxq->stats_sync);
}
+ hdr->page = NULL;
+
+payload:
+ if (!libeth_rx_sync_for_cpu(rx_buf, pkt_len))
+ goto skip_data;
+
+ if (skb)
+ idpf_rx_add_frag(rx_buf, skb, pkt_len);
+ else
+ skb = idpf_rx_build_skb(rx_buf, pkt_len);
+
/* exit if we failed to retrieve a buffer */
if (!skb)
break;
- idpf_rx_post_buf_refill(refillq, buf_id);
+skip_data:
+ rx_buf->page = NULL;
+ idpf_rx_post_buf_refill(refillq, buf_id);
IDPF_RX_BUMP_NTC(rxq, ntc);
+
/* skip if it is non EOP desc */
- if (!idpf_rx_splitq_is_eop(rx_desc))
+ if (!idpf_rx_splitq_is_eop(rx_desc) || unlikely(!skb))
continue;
/* pad skb if needed (to make valid ethernet frame) */
@@ -3250,7 +3379,7 @@ bypass_hsplit:
}
/* send completed skb up the stack */
- napi_gro_receive(&rxq->q_vector->napi, skb);
+ napi_gro_receive(rxq->napi, skb);
skb = NULL;
/* update budget accounting */
@@ -3261,8 +3390,8 @@ bypass_hsplit:
rxq->skb = skb;
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_add(&rxq->q_stats.rx.packets, total_rx_pkts);
- u64_stats_add(&rxq->q_stats.rx.bytes, total_rx_bytes);
+ u64_stats_add(&rxq->q_stats.packets, total_rx_pkts);
+ u64_stats_add(&rxq->q_stats.bytes, total_rx_bytes);
u64_stats_update_end(&rxq->stats_sync);
/* guarantee a trip back through this routine if there was a failure */
@@ -3272,34 +3401,41 @@ bypass_hsplit:
/**
* idpf_rx_update_bufq_desc - Update buffer queue descriptor
* @bufq: Pointer to the buffer queue
- * @refill_desc: SW Refill queue descriptor containing buffer ID
+ * @buf_id: buffer ID
* @buf_desc: Buffer queue descriptor
*
* Return 0 on success and negative on failure.
*/
-static int idpf_rx_update_bufq_desc(struct idpf_queue *bufq, u16 refill_desc,
+static int idpf_rx_update_bufq_desc(struct idpf_buf_queue *bufq, u32 buf_id,
struct virtchnl2_splitq_rx_buf_desc *buf_desc)
{
- struct idpf_rx_buf *buf;
+ struct libeth_fq_fp fq = {
+ .pp = bufq->pp,
+ .fqes = bufq->buf,
+ .truesize = bufq->truesize,
+ .count = bufq->desc_count,
+ };
dma_addr_t addr;
- u16 buf_id;
-
- buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
- buf = &bufq->rx_buf.buf[buf_id];
-
- addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size);
- if (unlikely(addr == DMA_MAPPING_ERROR))
+ addr = libeth_rx_alloc(&fq, buf_id);
+ if (addr == DMA_MAPPING_ERROR)
return -ENOMEM;
buf_desc->pkt_addr = cpu_to_le64(addr);
buf_desc->qword0.buf_id = cpu_to_le16(buf_id);
- if (!bufq->rx_hsplit_en)
+ if (!idpf_queue_has(HSPLIT_EN, bufq))
return 0;
- buf_desc->hdr_addr = cpu_to_le64(bufq->rx_buf.hdr_buf_pa +
- (u32)buf_id * IDPF_HDR_BUF_SIZE);
+ fq.pp = bufq->hdr_pp;
+ fq.fqes = bufq->hdr_buf;
+ fq.truesize = bufq->hdr_truesize;
+
+ addr = libeth_rx_alloc(&fq, buf_id);
+ if (addr == DMA_MAPPING_ERROR)
+ return -ENOMEM;
+
+ buf_desc->hdr_addr = cpu_to_le64(addr);
return 0;
}
@@ -3311,38 +3447,37 @@ static int idpf_rx_update_bufq_desc(struct idpf_queue *bufq, u16 refill_desc,
*
* This function takes care of the buffer refill management
*/
-static void idpf_rx_clean_refillq(struct idpf_queue *bufq,
+static void idpf_rx_clean_refillq(struct idpf_buf_queue *bufq,
struct idpf_sw_queue *refillq)
{
struct virtchnl2_splitq_rx_buf_desc *buf_desc;
u16 bufq_nta = bufq->next_to_alloc;
u16 ntc = refillq->next_to_clean;
int cleaned = 0;
- u16 gen;
- buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, bufq_nta);
+ buf_desc = &bufq->split_buf[bufq_nta];
/* make sure we stop at ring wrap in the unlikely case ring is full */
while (likely(cleaned < refillq->desc_count)) {
- u16 refill_desc = IDPF_SPLITQ_RX_BI_DESC(refillq, ntc);
+ u32 buf_id, refill_desc = refillq->ring[ntc];
bool failure;
- gen = FIELD_GET(IDPF_RX_BI_GEN_M, refill_desc);
- if (test_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags) != gen)
+ if (idpf_queue_has(RFL_GEN_CHK, refillq) !=
+ !!(refill_desc & IDPF_RX_BI_GEN_M))
break;
- failure = idpf_rx_update_bufq_desc(bufq, refill_desc,
- buf_desc);
+ buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
+ failure = idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc);
if (failure)
break;
if (unlikely(++ntc == refillq->desc_count)) {
- change_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags);
+ idpf_queue_change(RFL_GEN_CHK, refillq);
ntc = 0;
}
if (unlikely(++bufq_nta == bufq->desc_count)) {
- buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, 0);
+ buf_desc = &bufq->split_buf[0];
bufq_nta = 0;
} else {
buf_desc++;
@@ -3371,16 +3506,21 @@ static void idpf_rx_clean_refillq(struct idpf_queue *bufq,
/**
* idpf_rx_clean_refillq_all - Clean all refill queues
* @bufq: buffer queue with refill queues
+ * @nid: ID of the closest NUMA node with memory
*
* Iterates through all refill queues assigned to the buffer queue assigned to
* this vector. Returns true if clean is complete within budget, false
* otherwise.
*/
-static void idpf_rx_clean_refillq_all(struct idpf_queue *bufq)
+static void idpf_rx_clean_refillq_all(struct idpf_buf_queue *bufq, int nid)
{
struct idpf_bufq_set *bufq_set;
int i;
+ page_pool_nid_changed(bufq->pp, nid);
+ if (bufq->hdr_pp)
+ page_pool_nid_changed(bufq->hdr_pp, nid);
+
bufq_set = container_of(bufq, struct idpf_bufq_set, bufq);
for (i = 0; i < bufq_set->num_refillqs; i++)
idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]);
@@ -3441,12 +3581,16 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
+ kfree(q_vector->complq);
+ q_vector->complq = NULL;
kfree(q_vector->bufq);
q_vector->bufq = NULL;
kfree(q_vector->tx);
q_vector->tx = NULL;
kfree(q_vector->rx);
q_vector->rx = NULL;
+
+ free_cpumask_var(q_vector->affinity_mask);
}
/* Clean up the mapping of queues to vectors */
@@ -3495,7 +3639,7 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
/* clear the affinity_mask in the IRQ descriptor */
irq_set_affinity_hint(irq_num, NULL);
- free_irq(irq_num, q_vector);
+ kfree(free_irq(irq_num, q_vector));
}
}
@@ -3579,13 +3723,13 @@ static void idpf_net_dim(struct idpf_q_vector *q_vector)
goto check_rx_itr;
for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) {
- struct idpf_queue *txq = q_vector->tx[i];
+ struct idpf_tx_queue *txq = q_vector->tx[i];
unsigned int start;
do {
start = u64_stats_fetch_begin(&txq->stats_sync);
- packets += u64_stats_read(&txq->q_stats.tx.packets);
- bytes += u64_stats_read(&txq->q_stats.tx.bytes);
+ packets += u64_stats_read(&txq->q_stats.packets);
+ bytes += u64_stats_read(&txq->q_stats.bytes);
} while (u64_stats_fetch_retry(&txq->stats_sync, start));
}
@@ -3598,13 +3742,13 @@ check_rx_itr:
return;
for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) {
- struct idpf_queue *rxq = q_vector->rx[i];
+ struct idpf_rx_queue *rxq = q_vector->rx[i];
unsigned int start;
do {
start = u64_stats_fetch_begin(&rxq->stats_sync);
- packets += u64_stats_read(&rxq->q_stats.rx.packets);
- bytes += u64_stats_read(&rxq->q_stats.rx.bytes);
+ packets += u64_stats_read(&rxq->q_stats.packets);
+ bytes += u64_stats_read(&rxq->q_stats.bytes);
} while (u64_stats_fetch_retry(&rxq->stats_sync, start));
}
@@ -3646,6 +3790,7 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename)
for (vector = 0; vector < vport->num_q_vectors; vector++) {
struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
+ char *name;
vidx = vport->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
@@ -3659,18 +3804,18 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename)
else
continue;
- q_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d",
- basename, vec_name, vidx);
+ name = kasprintf(GFP_KERNEL, "%s-%s-%d", basename, vec_name,
+ vidx);
err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0,
- q_vector->name, q_vector);
+ name, q_vector);
if (err) {
netdev_err(vport->netdev,
"Request_irq failed, error: %d\n", err);
goto free_q_irqs;
}
/* assign the mask for this irq */
- irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
+ irq_set_affinity_hint(irq_num, q_vector->affinity_mask);
}
return 0;
@@ -3679,7 +3824,7 @@ free_q_irqs:
while (--vector >= 0) {
vidx = vport->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
- free_irq(irq_num, &vport->q_vectors[vector]);
+ kfree(free_irq(irq_num, &vport->q_vectors[vector]));
}
return err;
@@ -3846,16 +3991,17 @@ static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport)
static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec,
int budget, int *cleaned)
{
- u16 num_txq = q_vec->num_txq;
+ u16 num_complq = q_vec->num_complq;
bool clean_complete = true;
int i, budget_per_q;
- if (unlikely(!num_txq))
+ if (unlikely(!num_complq))
return true;
- budget_per_q = DIV_ROUND_UP(budget, num_txq);
- for (i = 0; i < num_txq; i++)
- clean_complete &= idpf_tx_clean_complq(q_vec->tx[i],
+ budget_per_q = DIV_ROUND_UP(budget, num_complq);
+
+ for (i = 0; i < num_complq; i++)
+ clean_complete &= idpf_tx_clean_complq(q_vec->complq[i],
budget_per_q, cleaned);
return clean_complete;
@@ -3876,13 +4022,14 @@ static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
bool clean_complete = true;
int pkts_cleaned = 0;
int i, budget_per_q;
+ int nid;
/* We attempt to distribute budget to each Rx queue fairly, but don't
* allow the budget to go below 1 because that would exit polling early.
*/
budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
for (i = 0; i < num_rxq; i++) {
- struct idpf_queue *rxq = q_vec->rx[i];
+ struct idpf_rx_queue *rxq = q_vec->rx[i];
int pkts_cleaned_per_q;
pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q);
@@ -3893,8 +4040,10 @@ static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
}
*cleaned = pkts_cleaned;
+ nid = numa_mem_id();
+
for (i = 0; i < q_vec->num_bufq; i++)
- idpf_rx_clean_refillq_all(q_vec->bufq[i]);
+ idpf_rx_clean_refillq_all(q_vec->bufq[i], nid);
return clean_complete;
}
@@ -3937,8 +4086,8 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
* queues virtchnl message, as the interrupts will be disabled after
* that
*/
- if (unlikely(q_vector->num_txq && test_bit(__IDPF_Q_POLL_MODE,
- q_vector->tx[0]->flags)))
+ if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE,
+ q_vector->tx[0])))
return budget;
else
return work_done;
@@ -3952,27 +4101,28 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
*/
static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
{
+ bool split = idpf_is_queue_model_split(vport->rxq_model);
u16 num_txq_grp = vport->num_txq_grp;
- int i, j, qv_idx, bufq_vidx = 0;
struct idpf_rxq_group *rx_qgrp;
struct idpf_txq_group *tx_qgrp;
- struct idpf_queue *q, *bufq;
- u16 q_index;
+ u32 i, qv_idx, q_index;
for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) {
u16 num_rxq;
+ if (qv_idx >= vport->num_q_vectors)
+ qv_idx = 0;
+
rx_qgrp = &vport->rxq_grps[i];
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (split)
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++) {
- if (qv_idx >= vport->num_q_vectors)
- qv_idx = 0;
+ for (u32 j = 0; j < num_rxq; j++) {
+ struct idpf_rx_queue *q;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (split)
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
@@ -3980,52 +4130,53 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
q_index = q->q_vector->num_rxq;
q->q_vector->rx[q_index] = q;
q->q_vector->num_rxq++;
- qv_idx++;
+
+ if (split)
+ q->napi = &q->q_vector->napi;
}
- if (idpf_is_queue_model_split(vport->rxq_model)) {
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ if (split) {
+ for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ struct idpf_buf_queue *bufq;
+
bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
- bufq->q_vector = &vport->q_vectors[bufq_vidx];
+ bufq->q_vector = &vport->q_vectors[qv_idx];
q_index = bufq->q_vector->num_bufq;
bufq->q_vector->bufq[q_index] = bufq;
bufq->q_vector->num_bufq++;
}
- if (++bufq_vidx >= vport->num_q_vectors)
- bufq_vidx = 0;
}
+
+ qv_idx++;
}
+ split = idpf_is_queue_model_split(vport->txq_model);
+
for (i = 0, qv_idx = 0; i < num_txq_grp; i++) {
u16 num_txq;
+ if (qv_idx >= vport->num_q_vectors)
+ qv_idx = 0;
+
tx_qgrp = &vport->txq_grps[i];
num_txq = tx_qgrp->num_txq;
- if (idpf_is_queue_model_split(vport->txq_model)) {
- if (qv_idx >= vport->num_q_vectors)
- qv_idx = 0;
+ for (u32 j = 0; j < num_txq; j++) {
+ struct idpf_tx_queue *q;
- q = tx_qgrp->complq;
+ q = tx_qgrp->txqs[j];
q->q_vector = &vport->q_vectors[qv_idx];
- q_index = q->q_vector->num_txq;
- q->q_vector->tx[q_index] = q;
- q->q_vector->num_txq++;
- qv_idx++;
- } else {
- for (j = 0; j < num_txq; j++) {
- if (qv_idx >= vport->num_q_vectors)
- qv_idx = 0;
+ q->q_vector->tx[q->q_vector->num_txq++] = q;
+ }
- q = tx_qgrp->txqs[j];
- q->q_vector = &vport->q_vectors[qv_idx];
- q_index = q->q_vector->num_txq;
- q->q_vector->tx[q_index] = q;
- q->q_vector->num_txq++;
+ if (split) {
+ struct idpf_compl_queue *q = tx_qgrp->complq;
- qv_idx++;
- }
+ q->q_vector = &vport->q_vectors[qv_idx];
+ q->q_vector->complq[q->q_vector->num_complq++] = q;
}
+
+ qv_idx++;
}
}
@@ -4086,7 +4237,7 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
/* only set affinity_mask if the CPU is online */
if (cpu_online(v_idx))
- cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+ cpumask_set_cpu(v_idx, q_vector->affinity_mask);
}
}
@@ -4101,18 +4252,22 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
{
u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
struct idpf_q_vector *q_vector;
- int v_idx, err;
+ u32 complqs_per_vector, v_idx;
vport->q_vectors = kcalloc(vport->num_q_vectors,
sizeof(struct idpf_q_vector), GFP_KERNEL);
if (!vport->q_vectors)
return -ENOMEM;
- txqs_per_vector = DIV_ROUND_UP(vport->num_txq, vport->num_q_vectors);
- rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq, vport->num_q_vectors);
+ txqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
+ vport->num_q_vectors);
+ rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq_grp,
+ vport->num_q_vectors);
bufqs_per_vector = vport->num_bufqs_per_qgrp *
DIV_ROUND_UP(vport->num_rxq_grp,
vport->num_q_vectors);
+ complqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
+ vport->num_q_vectors);
for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
q_vector = &vport->q_vectors[v_idx];
@@ -4126,32 +4281,33 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
- q_vector->tx = kcalloc(txqs_per_vector,
- sizeof(struct idpf_queue *),
+ if (!zalloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
+ goto error;
+
+ q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),
GFP_KERNEL);
- if (!q_vector->tx) {
- err = -ENOMEM;
+ if (!q_vector->tx)
goto error;
- }
- q_vector->rx = kcalloc(rxqs_per_vector,
- sizeof(struct idpf_queue *),
+ q_vector->rx = kcalloc(rxqs_per_vector, sizeof(*q_vector->rx),
GFP_KERNEL);
- if (!q_vector->rx) {
- err = -ENOMEM;
+ if (!q_vector->rx)
goto error;
- }
if (!idpf_is_queue_model_split(vport->rxq_model))
continue;
q_vector->bufq = kcalloc(bufqs_per_vector,
- sizeof(struct idpf_queue *),
+ sizeof(*q_vector->bufq),
GFP_KERNEL);
- if (!q_vector->bufq) {
- err = -ENOMEM;
+ if (!q_vector->bufq)
+ goto error;
+
+ q_vector->complq = kcalloc(complqs_per_vector,
+ sizeof(*q_vector->complq),
+ GFP_KERNEL);
+ if (!q_vector->complq)
goto error;
- }
}
return 0;
@@ -4159,7 +4315,7 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
error:
idpf_vport_intr_rel(vport);
- return err;
+ return -ENOMEM;
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 551391e20464..6215dbee5546 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -4,10 +4,13 @@
#ifndef _IDPF_TXRX_H_
#define _IDPF_TXRX_H_
-#include <net/page_pool/helpers.h>
+#include <linux/dim.h>
+
+#include <net/libeth/cache.h>
#include <net/tcp.h>
#include <net/netdev_queues.h>
+#include "idpf_lan_txrx.h"
#include "virtchnl2_lan_desc.h"
#define IDPF_LARGE_MAX_Q 256
@@ -83,7 +86,7 @@
do { \
if (unlikely(++(ntc) == (rxq)->desc_count)) { \
ntc = 0; \
- change_bit(__IDPF_Q_GEN_CHK, (rxq)->flags); \
+ idpf_queue_change(GEN_CHK, rxq); \
} \
} while (0)
@@ -93,16 +96,10 @@ do { \
idx = 0; \
} while (0)
-#define IDPF_RX_HDR_SIZE 256
-#define IDPF_RX_BUF_2048 2048
-#define IDPF_RX_BUF_4096 4096
#define IDPF_RX_BUF_STRIDE 32
#define IDPF_RX_BUF_POST_STRIDE 16
#define IDPF_LOW_WATERMARK 64
-/* Size of header buffer specifically for header split */
-#define IDPF_HDR_BUF_SIZE 256
-#define IDPF_PACKET_HDR_PAD \
- (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN * 2)
+
#define IDPF_TX_TSO_MIN_MSS 88
/* Minimum number of descriptors between 2 descriptors with the RE bit set;
@@ -110,36 +107,17 @@ do { \
*/
#define IDPF_TX_SPLITQ_RE_MIN_GAP 64
-#define IDPF_RX_BI_BUFID_S 0
-#define IDPF_RX_BI_BUFID_M GENMASK(14, 0)
-#define IDPF_RX_BI_GEN_S 15
-#define IDPF_RX_BI_GEN_M BIT(IDPF_RX_BI_GEN_S)
+#define IDPF_RX_BI_GEN_M BIT(16)
+#define IDPF_RX_BI_BUFID_M GENMASK(15, 0)
+
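/* Illustrative only: a SW refill queue entry is now a plain u32 with the
 * buffer ID in bits 15:0 and the SW generation bit in bit 16, so consumers
 * can unpack it as idpf_rx_clean_refillq() does:
 *
 *	buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
 *	gen    = !!(refill_desc & IDPF_RX_BI_GEN_M);
 */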
#define IDPF_RXD_EOF_SPLITQ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
#define IDPF_RXD_EOF_SINGLEQ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M
-#define IDPF_SINGLEQ_RX_BUF_DESC(rxq, i) \
- (&(((struct virtchnl2_singleq_rx_buf_desc *)((rxq)->desc_ring))[i]))
-#define IDPF_SPLITQ_RX_BUF_DESC(rxq, i) \
- (&(((struct virtchnl2_splitq_rx_buf_desc *)((rxq)->desc_ring))[i]))
-#define IDPF_SPLITQ_RX_BI_DESC(rxq, i) ((((rxq)->ring))[i])
-
-#define IDPF_BASE_TX_DESC(txq, i) \
- (&(((struct idpf_base_tx_desc *)((txq)->desc_ring))[i]))
-#define IDPF_BASE_TX_CTX_DESC(txq, i) \
- (&(((struct idpf_base_tx_ctx_desc *)((txq)->desc_ring))[i]))
-#define IDPF_SPLITQ_TX_COMPLQ_DESC(txcq, i) \
- (&(((struct idpf_splitq_tx_compl_desc *)((txcq)->desc_ring))[i]))
-
-#define IDPF_FLEX_TX_DESC(txq, i) \
- (&(((union idpf_tx_flex_desc *)((txq)->desc_ring))[i]))
-#define IDPF_FLEX_TX_CTX_DESC(txq, i) \
- (&(((struct idpf_flex_tx_ctx_desc *)((txq)->desc_ring))[i]))
-
#define IDPF_DESC_UNUSED(txq) \
((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
(txq)->next_to_clean - (txq)->next_to_use - 1)
-#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->buf_stack.top)
+#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->stash->buf_stack.top)
#define IDPF_TX_BUF_RSV_LOW(txq) (IDPF_TX_BUF_RSV_UNUSED(txq) < \
(txq)->desc_count >> 2)
@@ -315,16 +293,7 @@ struct idpf_rx_extracted {
#define IDPF_TX_MAX_DESC_DATA_ALIGNED \
ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE)
-#define IDPF_RX_DMA_ATTR \
- (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
-#define IDPF_RX_DESC(rxq, i) \
- (&(((union virtchnl2_rx_desc *)((rxq)->desc_ring))[i]))
-
-struct idpf_rx_buf {
- struct page *page;
- unsigned int page_offset;
- u16 truesize;
-};
+#define idpf_rx_buf libeth_fqe
#define IDPF_RX_MAX_PTYPE_PROTO_IDS 32
#define IDPF_RX_MAX_PTYPE_SZ (sizeof(struct virtchnl2_ptype) + \
@@ -348,72 +317,6 @@ struct idpf_rx_buf {
#define IDPF_RX_MAX_BASE_PTYPE 256
#define IDPF_INVALID_PTYPE_ID 0xFFFF
-/* Packet type non-ip values */
-enum idpf_rx_ptype_l2 {
- IDPF_RX_PTYPE_L2_RESERVED = 0,
- IDPF_RX_PTYPE_L2_MAC_PAY2 = 1,
- IDPF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
- IDPF_RX_PTYPE_L2_FIP_PAY2 = 3,
- IDPF_RX_PTYPE_L2_OUI_PAY2 = 4,
- IDPF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
- IDPF_RX_PTYPE_L2_LLDP_PAY2 = 6,
- IDPF_RX_PTYPE_L2_ECP_PAY2 = 7,
- IDPF_RX_PTYPE_L2_EVB_PAY2 = 8,
- IDPF_RX_PTYPE_L2_QCN_PAY2 = 9,
- IDPF_RX_PTYPE_L2_EAPOL_PAY2 = 10,
- IDPF_RX_PTYPE_L2_ARP = 11,
-};
-
-enum idpf_rx_ptype_outer_ip {
- IDPF_RX_PTYPE_OUTER_L2 = 0,
- IDPF_RX_PTYPE_OUTER_IP = 1,
-};
-
-#define IDPF_RX_PTYPE_TO_IPV(ptype, ipv) \
- (((ptype)->outer_ip == IDPF_RX_PTYPE_OUTER_IP) && \
- ((ptype)->outer_ip_ver == (ipv)))
-
-enum idpf_rx_ptype_outer_ip_ver {
- IDPF_RX_PTYPE_OUTER_NONE = 0,
- IDPF_RX_PTYPE_OUTER_IPV4 = 1,
- IDPF_RX_PTYPE_OUTER_IPV6 = 2,
-};
-
-enum idpf_rx_ptype_outer_fragmented {
- IDPF_RX_PTYPE_NOT_FRAG = 0,
- IDPF_RX_PTYPE_FRAG = 1,
-};
-
-enum idpf_rx_ptype_tunnel_type {
- IDPF_RX_PTYPE_TUNNEL_NONE = 0,
- IDPF_RX_PTYPE_TUNNEL_IP_IP = 1,
- IDPF_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
- IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
- IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
-};
-
-enum idpf_rx_ptype_tunnel_end_prot {
- IDPF_RX_PTYPE_TUNNEL_END_NONE = 0,
- IDPF_RX_PTYPE_TUNNEL_END_IPV4 = 1,
- IDPF_RX_PTYPE_TUNNEL_END_IPV6 = 2,
-};
-
-enum idpf_rx_ptype_inner_prot {
- IDPF_RX_PTYPE_INNER_PROT_NONE = 0,
- IDPF_RX_PTYPE_INNER_PROT_UDP = 1,
- IDPF_RX_PTYPE_INNER_PROT_TCP = 2,
- IDPF_RX_PTYPE_INNER_PROT_SCTP = 3,
- IDPF_RX_PTYPE_INNER_PROT_ICMP = 4,
- IDPF_RX_PTYPE_INNER_PROT_TIMESYNC = 5,
-};
-
-enum idpf_rx_ptype_payload_layer {
- IDPF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
- IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
- IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
- IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
-};
-
enum idpf_tunnel_state {
IDPF_PTYPE_TUNNEL_IP = BIT(0),
IDPF_PTYPE_TUNNEL_IP_GRENAT = BIT(1),
@@ -421,22 +324,9 @@ enum idpf_tunnel_state {
};
struct idpf_ptype_state {
- bool outer_ip;
- bool outer_frag;
- u8 tunnel_state;
-};
-
-struct idpf_rx_ptype_decoded {
- u32 ptype:10;
- u32 known:1;
- u32 outer_ip:1;
- u32 outer_ip_ver:2;
- u32 outer_frag:1;
- u32 tunnel_type:3;
- u32 tunnel_end_prot:2;
- u32 tunnel_end_frag:1;
- u32 inner_prot:4;
- u32 payload_layer:3;
+ bool outer_ip:1;
+ bool outer_frag:1;
+ u8 tunnel_state:6;
};
/**
@@ -452,23 +342,37 @@ struct idpf_rx_ptype_decoded {
* to 1 and knows that reading a gen bit of 1 in any
* descriptor on the initial pass of the ring indicates a
* writeback. It also flips on every ring wrap.
- * @__IDPF_RFLQ_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW bit
- * and RFLGQ_GEN is the SW bit.
+ * @__IDPF_Q_RFL_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW
+ * bit and Q_RFL_GEN is the SW bit.
* @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
* @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
* @__IDPF_Q_POLL_MODE: Enable poll mode
+ * @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
+ * @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
* @__IDPF_Q_FLAGS_NBITS: Must be last
*/
enum idpf_queue_flags_t {
__IDPF_Q_GEN_CHK,
- __IDPF_RFLQ_GEN_CHK,
+ __IDPF_Q_RFL_GEN_CHK,
__IDPF_Q_FLOW_SCH_EN,
__IDPF_Q_SW_MARKER,
__IDPF_Q_POLL_MODE,
+ __IDPF_Q_CRC_EN,
+ __IDPF_Q_HSPLIT_EN,
__IDPF_Q_FLAGS_NBITS,
};
+#define idpf_queue_set(f, q) __set_bit(__IDPF_Q_##f, (q)->flags)
+#define idpf_queue_clear(f, q) __clear_bit(__IDPF_Q_##f, (q)->flags)
+#define idpf_queue_change(f, q) __change_bit(__IDPF_Q_##f, (q)->flags)
+#define idpf_queue_has(f, q) test_bit(__IDPF_Q_##f, (q)->flags)
+
+#define idpf_queue_has_clear(f, q) \
+ __test_and_clear_bit(__IDPF_Q_##f, (q)->flags)
+#define idpf_queue_assign(f, q, v) \
+ __assign_bit(__IDPF_Q_##f, (q)->flags, v)
+
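The helpers above replace the open-coded {set,test,clear}_bit() calls on q->flags used elsewhere in the driver (see the idpf_queue_set()/idpf_queue_clear() conversions later in this patch). A minimal, hypothetical usage sketch; the flag names come from enum idpf_queue_flags_t above and the function itself is illustrative only:

/* Hedged sketch: set, test and clear queue flags via the new helpers. */
static void example_toggle_queue_flags(struct idpf_tx_queue *txq)
{
	idpf_queue_set(SW_MARKER, txq);

	if (idpf_queue_has(POLL_MODE, txq))
		idpf_queue_clear(POLL_MODE, txq);
}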
/**
* struct idpf_vec_regs
* @dyn_ctl_reg: Dynamic control interrupt register offset
@@ -509,54 +413,68 @@ struct idpf_intr_reg {
/**
* struct idpf_q_vector
* @vport: Vport back pointer
- * @affinity_mask: CPU affinity mask
- * @napi: napi handler
- * @v_idx: Vector index
- * @intr_reg: See struct idpf_intr_reg
+ * @num_rxq: Number of RX queues
* @num_txq: Number of TX queues
+ * @num_bufq: Number of buffer queues
+ * @num_complq: number of completion queues
+ * @rx: Array of RX queues to service
* @tx: Array of TX queues to service
+ * @bufq: Array of buffer queues to service
+ * @complq: array of completion queues
+ * @intr_reg: See struct idpf_intr_reg
+ * @napi: napi handler
+ * @total_events: Number of interrupts processed
* @tx_dim: Data for TX net_dim algorithm
* @tx_itr_value: TX interrupt throttling rate
* @tx_intr_mode: Dynamic ITR or not
* @tx_itr_idx: TX ITR index
- * @num_rxq: Number of RX queues
- * @rx: Array of RX queues to service
* @rx_dim: Data for RX net_dim algorithm
* @rx_itr_value: RX interrupt throttling rate
* @rx_intr_mode: Dynamic ITR or not
* @rx_itr_idx: RX ITR index
- * @num_bufq: Number of buffer queues
- * @bufq: Array of buffer queues to service
- * @total_events: Number of interrupts processed
- * @name: Queue vector name
+ * @v_idx: Vector index
+ * @affinity_mask: CPU affinity mask
*/
struct idpf_q_vector {
+ __cacheline_group_begin_aligned(read_mostly);
struct idpf_vport *vport;
- cpumask_t affinity_mask;
- struct napi_struct napi;
- u16 v_idx;
- struct idpf_intr_reg intr_reg;
+ u16 num_rxq;
u16 num_txq;
- struct idpf_queue **tx;
+ u16 num_bufq;
+ u16 num_complq;
+ struct idpf_rx_queue **rx;
+ struct idpf_tx_queue **tx;
+ struct idpf_buf_queue **bufq;
+ struct idpf_compl_queue **complq;
+
+ struct idpf_intr_reg intr_reg;
+ __cacheline_group_end_aligned(read_mostly);
+
+ __cacheline_group_begin_aligned(read_write);
+ struct napi_struct napi;
+ u16 total_events;
+
struct dim tx_dim;
u16 tx_itr_value;
bool tx_intr_mode;
u32 tx_itr_idx;
- u16 num_rxq;
- struct idpf_queue **rx;
struct dim rx_dim;
u16 rx_itr_value;
bool rx_intr_mode;
u32 rx_itr_idx;
+ __cacheline_group_end_aligned(read_write);
- u16 num_bufq;
- struct idpf_queue **bufq;
+ __cacheline_group_begin_aligned(cold);
+ u16 v_idx;
- u16 total_events;
- char *name;
+ cpumask_var_t affinity_mask;
+ __cacheline_group_end_aligned(cold);
};
+libeth_cacheline_set_assert(struct idpf_q_vector, 104,
+ 424 + 2 * sizeof(struct dim),
+ 8 + sizeof(cpumask_var_t));
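libeth_cacheline_set_assert() above is a compile-time check that the read_mostly, read_write and cold groups declared with __cacheline_group_begin_aligned()/__cacheline_group_end_aligned() stay at their expected sizes. A rough, hypothetical sketch of what such a per-group assertion could look like; the zero-length marker field names are an assumption here, not something defined in this patch (offsetofend() comes from linux/stddef.h):

/* Hypothetical per-group size check, for illustration only. */
#define example_cacheline_group_assert(type, grp, sz)			\
	static_assert(offsetof(type, __cacheline_group_end__##grp) -	\
		      offsetofend(type, __cacheline_group_begin__##grp) <= (sz))

example_cacheline_group_assert(struct idpf_q_vector, read_mostly, 104);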
struct idpf_rx_queue_stats {
u64_stats_t packets;
@@ -583,11 +501,6 @@ struct idpf_cleaned_stats {
u32 bytes;
};
-union idpf_queue_stats {
- struct idpf_rx_queue_stats rx;
- struct idpf_tx_queue_stats tx;
-};
-
#define IDPF_ITR_DYNAMIC 1
#define IDPF_ITR_MAX 0x1FE0
#define IDPF_ITR_20K 0x0032
@@ -603,68 +516,123 @@ union idpf_queue_stats {
#define IDPF_DIM_DEFAULT_PROFILE_IX 1
/**
- * struct idpf_queue
- * @dev: Device back pointer for DMA mapping
- * @vport: Back pointer to associated vport
- * @txq_grp: See struct idpf_txq_group
- * @rxq_grp: See struct idpf_rxq_group
- * @idx: For buffer queue, it is used as group id, either 0 or 1. On clean,
- * buffer queue uses this index to determine which group of refill queues
- * to clean.
- * For TX queue, it is used as index to map between TX queue group and
- * hot path TX pointers stored in vport. Used in both singleq/splitq.
- * For RX queue, it is used to index to total RX queue across groups and
+ * struct idpf_txq_stash - Tx buffer stash for Flow-based scheduling mode
+ * @buf_stack: Stack of empty buffers to store buffer info for out of order
+ * buffer completions. See struct idpf_buf_lifo
+ * @sched_buf_hash: Hash table to store buffers
+ */
+struct idpf_txq_stash {
+ struct idpf_buf_lifo buf_stack;
+ DECLARE_HASHTABLE(sched_buf_hash, 12);
+} ____cacheline_aligned;
+
+/**
+ * struct idpf_rx_queue - software structure representing a receive queue
+ * @rx: universal receive descriptor array
+ * @single_buf: buffer descriptor array in singleq
+ * @desc_ring: virtual descriptor ring address
+ * @bufq_sets: Pointer to the array of buffer queues in splitq mode
+ * @napi: NAPI instance corresponding to this queue (splitq)
+ * @rx_buf: See struct &libeth_fqe
+ * @pp: Page pool pointer in singleq mode
+ * @netdev: &net_device corresponding to this queue
+ * @tail: Tail offset. Used for both queue models single and split.
+ * @flags: See enum idpf_queue_flags_t
+ * @idx: For RX queue, it is used to index to total RX queue across groups and
* used for skb reporting.
- * @tail: Tail offset. Used for both queue models single and split. In splitq
- * model relevant only for TX queue and RX queue.
- * @tx_buf: See struct idpf_tx_buf
- * @rx_buf: Struct with RX buffer related members
- * @rx_buf.buf: See struct idpf_rx_buf
- * @rx_buf.hdr_buf_pa: DMA handle
- * @rx_buf.hdr_buf_va: Virtual address
- * @pp: Page pool pointer
- * @skb: Pointer to the skb
- * @q_type: Queue type (TX, RX, TX completion, RX buffer)
- * @q_id: Queue id
* @desc_count: Number of descriptors
- * @next_to_use: Next descriptor to use. Relevant in both split & single txq
- * and bufq.
- * @next_to_clean: Next descriptor to clean. In split queue model, only
- * relevant to TX completion queue and RX queue.
- * @next_to_alloc: RX buffer to allocate at. Used only for RX. In splitq model
- * only relevant to RX queue.
- * @flags: See enum idpf_queue_flags_t
- * @q_stats: See union idpf_queue_stats
+ * @rxdids: Supported RX descriptor ids
+ * @rx_ptype_lkup: LUT of Rx ptypes
+ * @next_to_use: Next descriptor to use
+ * @next_to_clean: Next descriptor to clean
+ * @next_to_alloc: RX buffer to allocate at
+ * @skb: Pointer to the skb
+ * @truesize: data buffer truesize in singleq
* @stats_sync: See struct u64_stats_sync
- * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
- * the TX completion queue, it can be for any TXQ associated
- * with that completion queue. This means we can clean up to
- * N TXQs during a single call to clean the completion queue.
- * cleaned_bytes|pkts tracks the clean stats per TXQ during
- * that single call to clean the completion queue. By doing so,
- * we can update BQL with aggregate cleaned stats for each TXQ
- * only once at the end of the cleaning routine.
- * @cleaned_pkts: Number of packets cleaned for the above said case
- * @rx_hsplit_en: RX headsplit enable
+ * @q_stats: See union idpf_rx_queue_stats
+ * @q_id: Queue id
+ * @size: Length of descriptor ring in bytes
+ * @dma: Physical address of ring
+ * @q_vector: Backreference to associated vector
+ * @rx_buffer_low_watermark: RX buffer low watermark
* @rx_hbuf_size: Header buffer size
* @rx_buf_size: Buffer size
* @rx_max_pkt_size: RX max packet size
- * @rx_buf_stride: RX buffer stride
- * @rx_buffer_low_watermark: RX buffer low watermark
- * @rxdids: Supported RX descriptor ids
- * @q_vector: Backreference to associated vector
- * @size: Length of descriptor ring in bytes
- * @dma: Physical address of ring
- * @desc_ring: Descriptor ring memory
- * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
+ */
+struct idpf_rx_queue {
+ __cacheline_group_begin_aligned(read_mostly);
+ union {
+ union virtchnl2_rx_desc *rx;
+ struct virtchnl2_singleq_rx_buf_desc *single_buf;
+
+ void *desc_ring;
+ };
+ union {
+ struct {
+ struct idpf_bufq_set *bufq_sets;
+ struct napi_struct *napi;
+ };
+ struct {
+ struct libeth_fqe *rx_buf;
+ struct page_pool *pp;
+ };
+ };
+ struct net_device *netdev;
+ void __iomem *tail;
+
+ DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
+ u16 idx;
+ u16 desc_count;
+
+ u32 rxdids;
+ const struct libeth_rx_pt *rx_ptype_lkup;
+ __cacheline_group_end_aligned(read_mostly);
+
+ __cacheline_group_begin_aligned(read_write);
+ u16 next_to_use;
+ u16 next_to_clean;
+ u16 next_to_alloc;
+
+ struct sk_buff *skb;
+ u32 truesize;
+
+ struct u64_stats_sync stats_sync;
+ struct idpf_rx_queue_stats q_stats;
+ __cacheline_group_end_aligned(read_write);
+
+ __cacheline_group_begin_aligned(cold);
+ u32 q_id;
+ u32 size;
+ dma_addr_t dma;
+
+ struct idpf_q_vector *q_vector;
+
+ u16 rx_buffer_low_watermark;
+ u16 rx_hbuf_size;
+ u16 rx_buf_size;
+ u16 rx_max_pkt_size;
+ __cacheline_group_end_aligned(cold);
+};
+libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
+ 80 + sizeof(struct u64_stats_sync),
+ 32);
+
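The typed descriptor union at the top of the struct replaces cast-based accessors such as the removed IDPF_RX_DESC() macro: a descriptor can now be indexed directly through the @rx (or @single_buf) member. A hypothetical equivalent of the old macro on the new layout, for illustration only:

/* Hypothetical replacement for the removed cast-based IDPF_RX_DESC() macro. */
static union virtchnl2_rx_desc *example_rx_desc(const struct idpf_rx_queue *rxq,
						u32 i)
{
	return &rxq->rx[i];
}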
+/**
+ * struct idpf_tx_queue - software structure representing a transmit queue
+ * @base_tx: base Tx descriptor array
+ * @base_ctx: base Tx context descriptor array
+ * @flex_tx: flex Tx descriptor array
+ * @flex_ctx: flex Tx context descriptor array
+ * @desc_ring: virtual descriptor ring address
+ * @tx_buf: See struct idpf_tx_buf
+ * @txq_grp: See struct idpf_txq_group
+ * @dev: Device back pointer for DMA mapping
+ * @tail: Tail offset. Used for both queue models single and split
+ * @flags: See enum idpf_queue_flags_t
+ * @idx: For TX queue, it is used as index to map between TX queue group and
+ * hot path TX pointers stored in vport. Used in both singleq/splitq.
+ * @desc_count: Number of descriptors
* @tx_min_pkt_len: Min supported packet length
- * @num_completions: Only relevant for TX completion queue. It tracks the
- * number of completions received to compare against the
- * number of completions pending, as accumulated by the
- * TX queues.
- * @buf_stack: Stack of empty buffers to store buffer info for out of order
- * buffer completions. See struct idpf_buf_lifo.
- * @compl_tag_bufid_m: Completion tag buffer id mask
* @compl_tag_gen_s: Completion tag generation bit
* The format of the completion tag will change based on the TXQ
* descriptor ring size so that we can maintain roughly the same level
@@ -685,108 +653,238 @@ union idpf_queue_stats {
* --------------------------------
*
* This gives us 8*8160 = 65280 possible unique values.
+ * @netdev: &net_device corresponding to this queue
+ * @next_to_use: Next descriptor to use
+ * @next_to_clean: Next descriptor to clean
+ * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
+ * the TX completion queue, it can be for any TXQ associated
+ * with that completion queue. This means we can clean up to
+ * N TXQs during a single call to clean the completion queue.
+ * cleaned_bytes|pkts tracks the clean stats per TXQ during
+ * that single call to clean the completion queue. By doing so,
+ * we can update BQL with aggregate cleaned stats for each TXQ
+ * only once at the end of the cleaning routine.
+ * @clean_budget: singleq only, queue cleaning budget
+ * @cleaned_pkts: Number of packets cleaned for the above said case
+ * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
+ * @stash: Tx buffer stash for Flow-based scheduling mode
+ * @compl_tag_bufid_m: Completion tag buffer id mask
* @compl_tag_cur_gen: Used to keep track of current completion tag generation
* @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
- * @sched_buf_hash: Hash table to stores buffers
+ * @stats_sync: See struct u64_stats_sync
+ * @q_stats: See union idpf_tx_queue_stats
+ * @q_id: Queue id
+ * @size: Length of descriptor ring in bytes
+ * @dma: Physical address of ring
+ * @q_vector: Backreference to associated vector
*/
-struct idpf_queue {
- struct device *dev;
- struct idpf_vport *vport;
+struct idpf_tx_queue {
+ __cacheline_group_begin_aligned(read_mostly);
union {
- struct idpf_txq_group *txq_grp;
- struct idpf_rxq_group *rxq_grp;
+ struct idpf_base_tx_desc *base_tx;
+ struct idpf_base_tx_ctx_desc *base_ctx;
+ union idpf_tx_flex_desc *flex_tx;
+ struct idpf_flex_tx_ctx_desc *flex_ctx;
+
+ void *desc_ring;
};
- u16 idx;
+ struct idpf_tx_buf *tx_buf;
+ struct idpf_txq_group *txq_grp;
+ struct device *dev;
void __iomem *tail;
- union {
- struct idpf_tx_buf *tx_buf;
- struct {
- struct idpf_rx_buf *buf;
- dma_addr_t hdr_buf_pa;
- void *hdr_buf_va;
- } rx_buf;
- };
- struct page_pool *pp;
- struct sk_buff *skb;
- u16 q_type;
- u32 q_id;
+
+ DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
+ u16 idx;
u16 desc_count;
+ u16 tx_min_pkt_len;
+ u16 compl_tag_gen_s;
+
+ struct net_device *netdev;
+ __cacheline_group_end_aligned(read_mostly);
+
+ __cacheline_group_begin_aligned(read_write);
u16 next_to_use;
u16 next_to_clean;
- u16 next_to_alloc;
- DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
- union idpf_queue_stats q_stats;
+ union {
+ u32 cleaned_bytes;
+ u32 clean_budget;
+ };
+ u16 cleaned_pkts;
+
+ u16 tx_max_bufs;
+ struct idpf_txq_stash *stash;
+
+ u16 compl_tag_bufid_m;
+ u16 compl_tag_cur_gen;
+ u16 compl_tag_gen_max;
+
struct u64_stats_sync stats_sync;
+ struct idpf_tx_queue_stats q_stats;
+ __cacheline_group_end_aligned(read_write);
- u32 cleaned_bytes;
- u16 cleaned_pkts;
+ __cacheline_group_begin_aligned(cold);
+ u32 q_id;
+ u32 size;
+ dma_addr_t dma;
- bool rx_hsplit_en;
- u16 rx_hbuf_size;
- u16 rx_buf_size;
- u16 rx_max_pkt_size;
- u16 rx_buf_stride;
- u8 rx_buffer_low_watermark;
- u64 rxdids;
struct idpf_q_vector *q_vector;
- unsigned int size;
+ __cacheline_group_end_aligned(cold);
+};
+libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
+ 88 + sizeof(struct u64_stats_sync),
+ 24);
+
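To make the completion-tag layout documented above concrete: with an 8160-entry ring the buffer id needs 13 bits, leaving 3 generation bits in a 16-bit tag, which is where the 8 * 8160 = 65280 unique values come from. A hedged pack/unpack sketch built only from the mask/shift fields of struct idpf_tx_queue; these are not the driver's actual helpers:

/* Hypothetical helpers illustrating the GENERATION | IDX split. */
static u16 example_compl_tag_pack(const struct idpf_tx_queue *txq,
				  u16 gen, u16 bufid)
{
	return (gen << txq->compl_tag_gen_s) | (bufid & txq->compl_tag_bufid_m);
}

static u16 example_compl_tag_to_bufid(const struct idpf_tx_queue *txq, u16 tag)
{
	return tag & txq->compl_tag_bufid_m;
}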
+/**
+ * struct idpf_buf_queue - software structure representing a buffer queue
+ * @split_buf: buffer descriptor array
+ * @hdr_buf: &libeth_fqe for header buffers
+ * @hdr_pp: &page_pool for header buffers
+ * @buf: &libeth_fqe for data buffers
+ * @pp: &page_pool for data buffers
+ * @tail: Tail offset
+ * @flags: See enum idpf_queue_flags_t
+ * @desc_count: Number of descriptors
+ * @next_to_use: Next descriptor to use
+ * @next_to_clean: Next descriptor to clean
+ * @next_to_alloc: RX buffer to allocate at
+ * @hdr_truesize: truesize for buffer headers
+ * @truesize: truesize for data buffers
+ * @q_id: Queue id
+ * @size: Length of descriptor ring in bytes
+ * @dma: Physical address of ring
+ * @q_vector: Backreference to associated vector
+ * @rx_buffer_low_watermark: RX buffer low watermark
+ * @rx_hbuf_size: Header buffer size
+ * @rx_buf_size: Buffer size
+ */
+struct idpf_buf_queue {
+ __cacheline_group_begin_aligned(read_mostly);
+ struct virtchnl2_splitq_rx_buf_desc *split_buf;
+ struct libeth_fqe *hdr_buf;
+ struct page_pool *hdr_pp;
+ struct libeth_fqe *buf;
+ struct page_pool *pp;
+ void __iomem *tail;
+
+ DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
+ u32 desc_count;
+ __cacheline_group_end_aligned(read_mostly);
+
+ __cacheline_group_begin_aligned(read_write);
+ u32 next_to_use;
+ u32 next_to_clean;
+ u32 next_to_alloc;
+
+ u32 hdr_truesize;
+ u32 truesize;
+ __cacheline_group_end_aligned(read_write);
+
+ __cacheline_group_begin_aligned(cold);
+ u32 q_id;
+ u32 size;
dma_addr_t dma;
- void *desc_ring;
- u16 tx_max_bufs;
- u8 tx_min_pkt_len;
+ struct idpf_q_vector *q_vector;
- u32 num_completions;
+ u16 rx_buffer_low_watermark;
+ u16 rx_hbuf_size;
+ u16 rx_buf_size;
+ __cacheline_group_end_aligned(cold);
+};
+libeth_cacheline_set_assert(struct idpf_buf_queue, 64, 24, 32);
- struct idpf_buf_lifo buf_stack;
+/**
+ * struct idpf_compl_queue - software structure representing a completion queue
+ * @comp: completion descriptor array
+ * @txq_grp: See struct idpf_txq_group
+ * @flags: See enum idpf_queue_flags_t
+ * @desc_count: Number of descriptors
+ * @clean_budget: queue cleaning budget
+ * @netdev: &net_device corresponding to this queue
+ * @next_to_use: Next descriptor to use. Relevant in both split & single txq
+ * and bufq.
+ * @next_to_clean: Next descriptor to clean
+ * @num_completions: Only relevant for TX completion queue. It tracks the
+ * number of completions received to compare against the
+ * number of completions pending, as accumulated by the
+ * TX queues.
+ * @q_id: Queue id
+ * @size: Length of descriptor ring in bytes
+ * @dma: Physical address of ring
+ * @q_vector: Backreference to associated vector
+ */
+struct idpf_compl_queue {
+ __cacheline_group_begin_aligned(read_mostly);
+ struct idpf_splitq_tx_compl_desc *comp;
+ struct idpf_txq_group *txq_grp;
- u16 compl_tag_bufid_m;
- u16 compl_tag_gen_s;
+ DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
+ u32 desc_count;
- u16 compl_tag_cur_gen;
- u16 compl_tag_gen_max;
+ u32 clean_budget;
+ struct net_device *netdev;
+ __cacheline_group_end_aligned(read_mostly);
- DECLARE_HASHTABLE(sched_buf_hash, 12);
-} ____cacheline_internodealigned_in_smp;
+ __cacheline_group_begin_aligned(read_write);
+ u32 next_to_use;
+ u32 next_to_clean;
+
+ u32 num_completions;
+ __cacheline_group_end_aligned(read_write);
+
+ __cacheline_group_begin_aligned(cold);
+ u32 q_id;
+ u32 size;
+ dma_addr_t dma;
+
+ struct idpf_q_vector *q_vector;
+ __cacheline_group_end_aligned(cold);
+};
+libeth_cacheline_set_assert(struct idpf_compl_queue, 40, 16, 24);
/**
* struct idpf_sw_queue
- * @next_to_clean: Next descriptor to clean
- * @next_to_alloc: Buffer to allocate at
- * @flags: See enum idpf_queue_flags_t
* @ring: Pointer to the ring
+ * @flags: See enum idpf_queue_flags_t
* @desc_count: Descriptor count
- * @dev: Device back pointer for DMA mapping
+ * @next_to_use: Buffer to allocate at
+ * @next_to_clean: Next descriptor to clean
*
* Software queues are used in splitq mode to manage buffers between rxq
* producer and the bufq consumer. These are required in order to maintain a
* lockless buffer management system and are strictly software only constructs.
*/
struct idpf_sw_queue {
- u16 next_to_clean;
- u16 next_to_alloc;
+ __cacheline_group_begin_aligned(read_mostly);
+ u32 *ring;
+
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
- u16 *ring;
- u16 desc_count;
- struct device *dev;
-} ____cacheline_internodealigned_in_smp;
+ u32 desc_count;
+ __cacheline_group_end_aligned(read_mostly);
+
+ __cacheline_group_begin_aligned(read_write);
+ u32 next_to_use;
+ u32 next_to_clean;
+ __cacheline_group_end_aligned(read_write);
+};
+libeth_cacheline_group_assert(struct idpf_sw_queue, read_mostly, 24);
+libeth_cacheline_group_assert(struct idpf_sw_queue, read_write, 8);
+libeth_cacheline_struct_assert(struct idpf_sw_queue, 24, 8);
/**
* struct idpf_rxq_set
* @rxq: RX queue
- * @refillq0: Pointer to refill queue 0
- * @refillq1: Pointer to refill queue 1
+ * @refillq: pointers to refill queues
*
 * Splitq only. idpf_rxq_set associates an rxq with an array of refillqs.
* Each rxq needs a refillq to return used buffers back to the respective bufq.
* Bufqs then clean these refillqs for buffers to give to hardware.
*/
struct idpf_rxq_set {
- struct idpf_queue rxq;
- struct idpf_sw_queue *refillq0;
- struct idpf_sw_queue *refillq1;
+ struct idpf_rx_queue rxq;
+ struct idpf_sw_queue *refillq[IDPF_MAX_BUFQS_PER_RXQ_GRP];
};
/**
@@ -805,7 +903,7 @@ struct idpf_rxq_set {
* managed by at most two bufqs (depending on performance configuration).
*/
struct idpf_bufq_set {
- struct idpf_queue bufq;
+ struct idpf_buf_queue bufq;
int num_refillqs;
struct idpf_sw_queue *refillqs;
};
@@ -831,7 +929,7 @@ struct idpf_rxq_group {
union {
struct {
u16 num_rxq;
- struct idpf_queue *rxqs[IDPF_LARGE_MAX_Q];
+ struct idpf_rx_queue *rxqs[IDPF_LARGE_MAX_Q];
} singleq;
struct {
u16 num_rxq_sets;
@@ -846,6 +944,7 @@ struct idpf_rxq_group {
* @vport: Vport back pointer
* @num_txq: Number of TX queues associated
* @txqs: Array of TX queue pointers
+ * @stashes: array of OOO stashes for the queues
* @complq: Associated completion queue pointer, split queue only
* @num_completions_pending: Total number of completions pending for the
 * completion queue, accumulated for all TX queues
@@ -859,13 +958,26 @@ struct idpf_txq_group {
struct idpf_vport *vport;
u16 num_txq;
- struct idpf_queue *txqs[IDPF_LARGE_MAX_Q];
+ struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q];
+ struct idpf_txq_stash *stashes;
- struct idpf_queue *complq;
+ struct idpf_compl_queue *complq;
u32 num_completions_pending;
};
+static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)
+{
+ u32 cpu;
+
+ if (!q_vector)
+ return NUMA_NO_NODE;
+
+ cpu = cpumask_first(q_vector->affinity_mask);
+
+ return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE;
+}
+
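idpf_q_vector_to_mem() resolves the memory node of the vector's first affinitized CPU, falling back to NUMA_NO_NODE. A hedged sketch of the kind of node-local allocation a caller could build on it; the function and its use of kzalloc_node() are illustrative, not taken from the driver:

/* Hypothetical: allocate on the node closest to the vector's CPU. */
static void *example_alloc_on_vector_node(const struct idpf_q_vector *q_vector,
					  size_t size)
{
	return kzalloc_node(size, GFP_KERNEL, idpf_q_vector_to_mem(q_vector));
}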
/**
* idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag
* @size: transmit request size in bytes
@@ -921,60 +1033,6 @@ static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc,
idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size);
}
-/**
- * idpf_alloc_page - Allocate a new RX buffer from the page pool
- * @pool: page_pool to allocate from
- * @buf: metadata struct to populate with page info
- * @buf_size: 2K or 4K
- *
- * Returns &dma_addr_t to be passed to HW for Rx, %DMA_MAPPING_ERROR otherwise.
- */
-static inline dma_addr_t idpf_alloc_page(struct page_pool *pool,
- struct idpf_rx_buf *buf,
- unsigned int buf_size)
-{
- if (buf_size == IDPF_RX_BUF_2048)
- buf->page = page_pool_dev_alloc_frag(pool, &buf->page_offset,
- buf_size);
- else
- buf->page = page_pool_dev_alloc_pages(pool);
-
- if (!buf->page)
- return DMA_MAPPING_ERROR;
-
- buf->truesize = buf_size;
-
- return page_pool_get_dma_addr(buf->page) + buf->page_offset +
- pool->p.offset;
-}
-
-/**
- * idpf_rx_put_page - Return RX buffer page to pool
- * @rx_buf: RX buffer metadata struct
- */
-static inline void idpf_rx_put_page(struct idpf_rx_buf *rx_buf)
-{
- page_pool_put_page(rx_buf->page->pp, rx_buf->page,
- rx_buf->truesize, true);
- rx_buf->page = NULL;
-}
-
-/**
- * idpf_rx_sync_for_cpu - Synchronize DMA buffer
- * @rx_buf: RX buffer metadata struct
- * @len: frame length from descriptor
- */
-static inline void idpf_rx_sync_for_cpu(struct idpf_rx_buf *rx_buf, u32 len)
-{
- struct page *page = rx_buf->page;
- struct page_pool *pp = page->pp;
-
- dma_sync_single_range_for_cpu(pp->p.dev,
- page_pool_get_dma_addr(page),
- rx_buf->page_offset + pp->p.offset, len,
- page_pool_get_dma_dir(pp));
-}
-
int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
void idpf_vport_init_num_qs(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_msg);
@@ -991,35 +1049,27 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
void idpf_vport_intr_deinit(struct idpf_vport *vport);
int idpf_vport_intr_init(struct idpf_vport *vport);
void idpf_vport_intr_ena(struct idpf_vport *vport);
-enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded);
int idpf_config_rss(struct idpf_vport *vport);
int idpf_init_rss(struct idpf_vport *vport);
void idpf_deinit_rss(struct idpf_vport *vport);
int idpf_rx_bufs_init_all(struct idpf_vport *vport);
void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
unsigned int size);
-struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq,
- struct idpf_rx_buf *rx_buf,
- unsigned int size);
-bool idpf_init_rx_buf_hw_alloc(struct idpf_queue *rxq, struct idpf_rx_buf *buf);
-void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val);
-void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val,
+struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size);
+void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
bool xmit_more);
unsigned int idpf_size_to_txd_count(unsigned int size);
-netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb);
-void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
+netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
+void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
struct idpf_tx_buf *first, u16 ring_idx);
-unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
+unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
struct sk_buff *skb);
-bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
- unsigned int count);
-int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size);
+int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size);
void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
-netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
- struct net_device *netdev);
-netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb,
- struct net_device *netdev);
-bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rxq,
+netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
+ struct idpf_tx_queue *tx_q);
+netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev);
+bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
u16 cleaned_count);
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index a5f9b7a5effe..70986e12da28 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */
+#include <net/libeth/rx.h>
+
#include "idpf.h"
#include "idpf_virtchnl.h"
@@ -750,7 +752,7 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport)
int i;
for (i = 0; i < vport->num_txq; i++)
- set_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags);
+ idpf_queue_set(SW_MARKER, vport->txqs[i]);
event = wait_event_timeout(vport->sw_marker_wq,
test_and_clear_bit(IDPF_VPORT_SW_MARKER,
@@ -758,7 +760,7 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport)
msecs_to_jiffies(500));
for (i = 0; i < vport->num_txq; i++)
- clear_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
+ idpf_queue_clear(POLL_MODE, vport->txqs[i]);
if (event)
return 0;
@@ -1092,7 +1094,6 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
int num_regs, u32 q_type)
{
struct idpf_adapter *adapter = vport->adapter;
- struct idpf_queue *q;
int i, j, k = 0;
switch (q_type) {
@@ -1111,6 +1112,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
u16 num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
+ struct idpf_rx_queue *q;
+
q = rx_qgrp->singleq.rxqs[j];
q->tail = idpf_get_reg_addr(adapter,
reg_vals[k]);
@@ -1123,6 +1126,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
u8 num_bufqs = vport->num_bufqs_per_qgrp;
for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
+ struct idpf_buf_queue *q;
+
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
q->tail = idpf_get_reg_addr(adapter,
reg_vals[k]);
@@ -1253,12 +1258,12 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
vport_msg->vport_index = cpu_to_le16(idx);
- if (adapter->req_tx_splitq)
+ if (adapter->req_tx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
else
vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
- if (adapter->req_rx_splitq)
+ if (adapter->req_rx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
else
vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
@@ -1320,10 +1325,17 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
vport_msg = adapter->vport_params_recvd[vport->idx];
+ if (!IS_ENABLED(CONFIG_IDPF_SINGLEQ) &&
+ (vport_msg->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE ||
+ vport_msg->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)) {
+ pci_err(adapter->pdev, "singleq mode requested, but not compiled-in\n");
+ return -EOPNOTSUPP;
+ }
+
rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids);
tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids);
- if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+ if (idpf_is_queue_model_split(vport->rxq_model)) {
if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) {
dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
@@ -1333,7 +1345,7 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
vport->base_rxd = true;
}
- if (vport->txq_model != VIRTCHNL2_QUEUE_MODEL_SPLIT)
+ if (!idpf_is_queue_model_split(vport->txq_model))
return 0;
if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) {
@@ -1449,19 +1461,19 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
qi[k].model =
cpu_to_le16(vport->txq_model);
qi[k].type =
- cpu_to_le32(tx_qgrp->txqs[j]->q_type);
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
qi[k].ring_len =
cpu_to_le16(tx_qgrp->txqs[j]->desc_count);
qi[k].dma_ring_addr =
cpu_to_le64(tx_qgrp->txqs[j]->dma);
if (idpf_is_queue_model_split(vport->txq_model)) {
- struct idpf_queue *q = tx_qgrp->txqs[j];
+ struct idpf_tx_queue *q = tx_qgrp->txqs[j];
qi[k].tx_compl_queue_id =
cpu_to_le16(tx_qgrp->complq->q_id);
qi[k].relative_queue_id = cpu_to_le16(j);
- if (test_bit(__IDPF_Q_FLOW_SCH_EN, q->flags))
+ if (idpf_queue_has(FLOW_SCH_EN, q))
qi[k].sched_mode =
cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW);
else
@@ -1478,11 +1490,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
qi[k].model = cpu_to_le16(vport->txq_model);
- qi[k].type = cpu_to_le32(tx_qgrp->complq->q_type);
+ qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count);
qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma);
- if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags))
+ if (idpf_queue_has(FLOW_SCH_EN, tx_qgrp->complq))
sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
else
sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
@@ -1567,17 +1579,18 @@ static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
goto setup_rxqs;
for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
- struct idpf_queue *bufq =
+ struct idpf_buf_queue *bufq =
&rx_qgrp->splitq.bufq_sets[j].bufq;
qi[k].queue_id = cpu_to_le32(bufq->q_id);
qi[k].model = cpu_to_le16(vport->rxq_model);
- qi[k].type = cpu_to_le32(bufq->q_type);
+ qi[k].type =
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
qi[k].ring_len = cpu_to_le16(bufq->desc_count);
qi[k].dma_ring_addr = cpu_to_le64(bufq->dma);
qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size);
- qi[k].buffer_notif_stride = bufq->rx_buf_stride;
+ qi[k].buffer_notif_stride = IDPF_RX_BUF_STRIDE;
qi[k].rx_buffer_low_watermark =
cpu_to_le16(bufq->rx_buffer_low_watermark);
if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
@@ -1591,35 +1604,47 @@ setup_rxqs:
num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++, k++) {
- struct idpf_queue *rxq;
+ const struct idpf_bufq_set *sets;
+ struct idpf_rx_queue *rxq;
if (!idpf_is_queue_model_split(vport->rxq_model)) {
rxq = rx_qgrp->singleq.rxqs[j];
goto common_qi_fields;
}
+
rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
- qi[k].rx_bufq1_id =
- cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[0].bufq.q_id);
+ sets = rxq->bufq_sets;
+
+ /* In splitq mode, RXQ buffer size should be
+ * set to that of the first buffer queue
+ * associated with this RXQ.
+ */
+ rxq->rx_buf_size = sets[0].bufq.rx_buf_size;
+
+ qi[k].rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);
if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
qi[k].bufq2_ena = IDPF_BUFQ2_ENA;
qi[k].rx_bufq2_id =
- cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[1].bufq.q_id);
+ cpu_to_le16(sets[1].bufq.q_id);
}
qi[k].rx_buffer_low_watermark =
cpu_to_le16(rxq->rx_buffer_low_watermark);
if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
-common_qi_fields:
- if (rxq->rx_hsplit_en) {
+ rxq->rx_hbuf_size = sets[0].bufq.rx_hbuf_size;
+
+ if (idpf_queue_has(HSPLIT_EN, rxq)) {
qi[k].qflags |=
cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
qi[k].hdr_buffer_size =
cpu_to_le16(rxq->rx_hbuf_size);
}
+
+common_qi_fields:
qi[k].queue_id = cpu_to_le32(rxq->q_id);
qi[k].model = cpu_to_le16(vport->rxq_model);
- qi[k].type = cpu_to_le32(rxq->q_type);
+ qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
qi[k].ring_len = cpu_to_le16(rxq->desc_count);
qi[k].dma_ring_addr = cpu_to_le64(rxq->dma);
qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size);
@@ -1706,7 +1731,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
- qc[k].type = cpu_to_le32(tx_qgrp->txqs[j]->q_type);
+ qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
@@ -1720,7 +1745,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
for (i = 0; i < vport->num_txq_grp; i++, k++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- qc[k].type = cpu_to_le32(tx_qgrp->complq->q_type);
+ qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
@@ -1741,12 +1766,12 @@ setup_rx:
qc[k].start_queue_id =
cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id);
qc[k].type =
- cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_type);
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
} else {
qc[k].start_queue_id =
cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id);
qc[k].type =
- cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_type);
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
}
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
@@ -1761,10 +1786,11 @@ setup_rx:
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
- struct idpf_queue *q;
+ const struct idpf_buf_queue *q;
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
- qc[k].type = cpu_to_le32(q->q_type);
+ qc[k].type =
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
qc[k].start_queue_id = cpu_to_le32(q->q_id);
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
@@ -1849,7 +1875,8 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
- vqv[k].queue_type = cpu_to_le32(tx_qgrp->txqs[j]->q_type);
+ vqv[k].queue_type =
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
if (idpf_is_queue_model_split(vport->txq_model)) {
@@ -1879,14 +1906,15 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++, k++) {
- struct idpf_queue *rxq;
+ struct idpf_rx_queue *rxq;
if (idpf_is_queue_model_split(vport->rxq_model))
rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
rxq = rx_qgrp->singleq.rxqs[j];
- vqv[k].queue_type = cpu_to_le32(rxq->q_type);
+ vqv[k].queue_type =
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
vqv[k].queue_id = cpu_to_le32(rxq->q_id);
vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx);
vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx);
@@ -1975,7 +2003,7 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
* queues virtchnl message is sent
*/
for (i = 0; i < vport->num_txq; i++)
- set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
+ idpf_queue_set(POLL_MODE, vport->txqs[i]);
/* schedule the napi to receive all the marker packets */
local_bh_disable();
@@ -2469,39 +2497,52 @@ do_memcpy:
* @frag: fragmentation allowed
*
*/
-static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype,
+static void idpf_fill_ptype_lookup(struct libeth_rx_pt *ptype,
struct idpf_ptype_state *pstate,
bool ipv4, bool frag)
{
if (!pstate->outer_ip || !pstate->outer_frag) {
- ptype->outer_ip = IDPF_RX_PTYPE_OUTER_IP;
pstate->outer_ip = true;
if (ipv4)
- ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV4;
+ ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV4;
else
- ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV6;
+ ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV6;
if (frag) {
- ptype->outer_frag = IDPF_RX_PTYPE_FRAG;
+ ptype->outer_frag = LIBETH_RX_PT_FRAG;
pstate->outer_frag = true;
}
} else {
- ptype->tunnel_type = IDPF_RX_PTYPE_TUNNEL_IP_IP;
+ ptype->tunnel_type = LIBETH_RX_PT_TUNNEL_IP_IP;
pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP;
if (ipv4)
- ptype->tunnel_end_prot =
- IDPF_RX_PTYPE_TUNNEL_END_IPV4;
+ ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV4;
else
- ptype->tunnel_end_prot =
- IDPF_RX_PTYPE_TUNNEL_END_IPV6;
+ ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV6;
if (frag)
- ptype->tunnel_end_frag = IDPF_RX_PTYPE_FRAG;
+ ptype->tunnel_end_frag = LIBETH_RX_PT_FRAG;
}
}
+static void idpf_finalize_ptype_lookup(struct libeth_rx_pt *ptype)
+{
+ if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
+ ptype->inner_prot)
+ ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L4;
+ else if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
+ ptype->outer_ip)
+ ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L3;
+ else if (ptype->outer_ip == LIBETH_RX_PT_OUTER_L2)
+ ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L2;
+ else
+ ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_NONE;
+
+ libeth_rx_pt_gen_hash_type(ptype);
+}
+
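idpf_finalize_ptype_lookup() above promotes the generic L2 payload layer reported per protocol header to L3/L4 once an outer IP or inner protocol is known, then generates the skb hash type. A small, hypothetical illustration using only enum values that appear in this patch:

/* Hypothetical ptype as firmware would report TCP over IPv4. */
static const struct libeth_rx_pt example_pt = {
	.outer_ip	= LIBETH_RX_PT_OUTER_IPV4,
	.inner_prot	= LIBETH_RX_PT_INNER_TCP,
	.payload_layer	= LIBETH_RX_PT_PAYLOAD_L2,
};

/* idpf_finalize_ptype_lookup() would set .payload_layer to
 * LIBETH_RX_PT_PAYLOAD_L4 here, because .inner_prot is non-zero.
 */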
/**
* idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info
* @vport: virtual port data structure
@@ -2512,7 +2553,7 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
{
struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
- struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup;
+ struct libeth_rx_pt *ptype_lkup __free(kfree) = NULL;
int max_ptype, ptypes_recvd = 0, ptype_offset;
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vc_xn_params xn_params = {};
@@ -2520,12 +2561,17 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
ssize_t reply_sz;
int i, j, k;
+ if (vport->rx_ptype_lkup)
+ return 0;
+
if (idpf_is_queue_model_split(vport->rxq_model))
max_ptype = IDPF_RX_MAX_PTYPE;
else
max_ptype = IDPF_RX_MAX_BASE_PTYPE;
- memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup));
+ ptype_lkup = kcalloc(max_ptype, sizeof(*ptype_lkup), GFP_KERNEL);
+ if (!ptype_lkup)
+ return -ENOMEM;
get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
if (!get_ptype_info)
@@ -2583,16 +2629,13 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
/* 0xFFFF indicates end of ptypes */
if (le16_to_cpu(ptype->ptype_id_10) ==
IDPF_INVALID_PTYPE_ID)
- return 0;
+ goto out;
if (idpf_is_queue_model_split(vport->rxq_model))
k = le16_to_cpu(ptype->ptype_id_10);
else
k = ptype->ptype_id_8;
- if (ptype->proto_id_count)
- ptype_lkup[k].known = 1;
-
for (j = 0; j < ptype->proto_id_count; j++) {
id = le16_to_cpu(ptype->proto_id[j]);
switch (id) {
@@ -2600,18 +2643,18 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
if (pstate.tunnel_state ==
IDPF_PTYPE_TUNNEL_IP) {
ptype_lkup[k].tunnel_type =
- IDPF_RX_PTYPE_TUNNEL_IP_GRENAT;
+ LIBETH_RX_PT_TUNNEL_IP_GRENAT;
pstate.tunnel_state |=
IDPF_PTYPE_TUNNEL_IP_GRENAT;
}
break;
case VIRTCHNL2_PROTO_HDR_MAC:
ptype_lkup[k].outer_ip =
- IDPF_RX_PTYPE_OUTER_L2;
+ LIBETH_RX_PT_OUTER_L2;
if (pstate.tunnel_state ==
IDPF_TUN_IP_GRE) {
ptype_lkup[k].tunnel_type =
- IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC;
+ LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC;
pstate.tunnel_state |=
IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC;
}
@@ -2638,23 +2681,23 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
break;
case VIRTCHNL2_PROTO_HDR_UDP:
ptype_lkup[k].inner_prot =
- IDPF_RX_PTYPE_INNER_PROT_UDP;
+ LIBETH_RX_PT_INNER_UDP;
break;
case VIRTCHNL2_PROTO_HDR_TCP:
ptype_lkup[k].inner_prot =
- IDPF_RX_PTYPE_INNER_PROT_TCP;
+ LIBETH_RX_PT_INNER_TCP;
break;
case VIRTCHNL2_PROTO_HDR_SCTP:
ptype_lkup[k].inner_prot =
- IDPF_RX_PTYPE_INNER_PROT_SCTP;
+ LIBETH_RX_PT_INNER_SCTP;
break;
case VIRTCHNL2_PROTO_HDR_ICMP:
ptype_lkup[k].inner_prot =
- IDPF_RX_PTYPE_INNER_PROT_ICMP;
+ LIBETH_RX_PT_INNER_ICMP;
break;
case VIRTCHNL2_PROTO_HDR_PAY:
ptype_lkup[k].payload_layer =
- IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2;
+ LIBETH_RX_PT_PAYLOAD_L2;
break;
case VIRTCHNL2_PROTO_HDR_ICMPV6:
case VIRTCHNL2_PROTO_HDR_IPV6_EH:
@@ -2708,9 +2751,14 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
break;
}
}
+
+ idpf_finalize_ptype_lookup(&ptype_lkup[k]);
}
}
+out:
+ vport->rx_ptype_lkup = no_free_ptr(ptype_lkup);
+
return 0;
}
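The ptype table above is allocated under the scope-based cleanup attribute (__free(kfree), declared at the top of the function) and handed over with no_free_ptr() only on the success path, so every early return frees it automatically. A hedged, self-contained sketch of the same pattern; the struct and the fill loop are purely illustrative:

/* Minimal sketch of the __free()/no_free_ptr() ownership-transfer pattern. */
#include <linux/cleanup.h>
#include <linux/slab.h>

struct example_owner {
	u32 *table;
};

static int example_alloc_table(struct example_owner *owner, u32 n)
{
	u32 *table __free(kfree) = kcalloc(n, sizeof(*table), GFP_KERNEL);
	u32 i;

	if (!table)
		return -ENOMEM;

	for (i = 0; i < n; i++)
		table[i] = i;

	/* Transfer ownership; without no_free_ptr() the return would kfree(). */
	owner->table = no_free_ptr(table);

	return 0;
}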
@@ -3125,7 +3173,7 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size);
ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr);
- vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - IDPF_PACKET_HDR_PAD;
+ vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - LIBETH_RX_LL_LEN;
/* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */
memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS);
@@ -3242,7 +3290,6 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
int num_qids,
u32 q_type)
{
- struct idpf_queue *q;
int i, j, k = 0;
switch (q_type) {
@@ -3250,11 +3297,8 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
for (i = 0; i < vport->num_txq_grp; i++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) {
+ for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++)
tx_qgrp->txqs[j]->q_id = qids[k];
- tx_qgrp->txqs[j]->q_type =
- VIRTCHNL2_QUEUE_TYPE_TX;
- }
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX:
@@ -3268,12 +3312,13 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq && k < num_qids; j++, k++) {
+ struct idpf_rx_queue *q;
+
if (idpf_is_queue_model_split(vport->rxq_model))
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
q->q_id = qids[k];
- q->q_type = VIRTCHNL2_QUEUE_TYPE_RX;
}
}
break;
@@ -3282,8 +3327,6 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
tx_qgrp->complq->q_id = qids[k];
- tx_qgrp->complq->q_type =
- VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
@@ -3292,9 +3335,10 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
u8 num_bufqs = vport->num_bufqs_per_qgrp;
for (j = 0; j < num_bufqs && k < num_qids; j++, k++) {
+ struct idpf_buf_queue *q;
+
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
q->q_id = qids[k];
- q->q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
}
}
break;
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 394c1e0656b9..463c0d26b9d4 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -6,6 +6,6 @@
obj-$(CONFIG_IGB) += igb.o
-igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
- e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
- e1000_i210.o igb_ptp.o igb_hwmon.o
+igb-y := igb_main.o igb_ethtool.o e1000_82575.o \
+ e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
+ e1000_i210.o igb_ptp.o igb_hwmon.o
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 61d72250c0ed..06b9970dffad 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2381,7 +2381,7 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
static int igb_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct igb_adapter *adapter = netdev_priv(dev);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index fce2930ae6af..11be39f435f3 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -203,7 +203,6 @@ static const struct pci_error_handlers igb_err_handler = {
static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
-MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL v2");
@@ -9139,6 +9138,10 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return -EIO;
break;
case SIOCSMIIREG:
+ if (igb_write_phy_reg(&adapter->hw, data->reg_num & 0x1F,
+ data->val_in))
+ return -EIO;
+ break;
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/igbvf/Makefile b/drivers/net/ethernet/intel/igbvf/Makefile
index afd3e36eae75..902711d5e691 100644
--- a/drivers/net/ethernet/intel/igbvf/Makefile
+++ b/drivers/net/ethernet/intel/igbvf/Makefile
@@ -6,8 +6,4 @@
obj-$(CONFIG_IGBVF) += igbvf.o
-igbvf-objs := vf.o \
- mbx.o \
- ethtool.o \
- netdev.o
-
+igbvf-y := vf.o mbx.o ethtool.o netdev.o
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 7661edd7d0f2..925d7286a8ee 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -3001,7 +3001,6 @@ static void __exit igbvf_exit_module(void)
}
module_exit(igbvf_exit_module);
-MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile
index ebffd3054285..efc5e7983dad 100644
--- a/drivers/net/ethernet/intel/igc/Makefile
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -6,7 +6,7 @@
#
obj-$(CONFIG_IGC) += igc.o
-igc-$(CONFIG_IGC_LEDS) += igc_leds.o
-igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
-igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o igc_xdp.o
+igc-y := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
+ igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o igc_xdp.o
+igc-$(CONFIG_IGC_LEDS) += igc_leds.o
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 8b14c029eda1..c38b4d0f00ce 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -202,7 +202,6 @@ struct igc_adapter {
struct net_device *netdev;
struct ethtool_keee eee;
- u16 eee_advert;
unsigned long state;
unsigned int flags;
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 0cd2bd695db1..3d3ef4e1547c 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1559,7 +1559,7 @@ static int igc_ethtool_set_channels(struct net_device *netdev,
}
static int igc_ethtool_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct igc_adapter *adapter = netdev_priv(dev);
@@ -1636,10 +1636,6 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
edata->supported);
- if (hw->dev_spec._base.eee_enable)
- mii_eee_cap1_mod_linkmode_t(edata->advertised,
- adapter->eee_advert);
-
eeer = rd32(IGC_EEER);
/* EEE status on negotiated link */
@@ -1700,8 +1696,6 @@ static int igc_ethtool_set_eee(struct net_device *netdev,
return -EINVAL;
}
- adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised);
-
if (hw->dev_spec._base.eee_enable != edata->eee_enabled) {
hw->dev_spec._base.eee_enable = edata->eee_enabled;
adapter->flags |= IGC_FLAG_EEE;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 87b655b839c1..cb5c7b09e8a0 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -32,7 +32,6 @@
static int debug = -1;
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
module_param(debug, int, 0);
@@ -4976,9 +4975,6 @@ void igc_up(struct igc_adapter *adapter)
/* start the watchdog. */
hw->mac.get_link_status = true;
schedule_work(&adapter->watchdog_task);
-
- adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T |
- MDIO_EEE_2_5GT;
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 4fb0d9e3f2da..965e5ce1b326 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -6,10 +6,10 @@
obj-$(CONFIG_IXGBE) += ixgbe.o
-ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
- ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
- ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \
- ixgbe_xsk.o
+ixgbe-y := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
+ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
+ ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \
+ ixgbe_xsk.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 6e6e6f1847b6..4cac76254966 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -3170,7 +3170,7 @@ static int ixgbe_set_rxfh(struct net_device *netdev,
}
static int ixgbe_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 094653e81b97..8057cef61f39 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -162,7 +162,6 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile
index 186a4bb24fde..01d3e892f3fa 100644
--- a/drivers/net/ethernet/intel/ixgbevf/Makefile
+++ b/drivers/net/ethernet/intel/ixgbevf/Makefile
@@ -6,9 +6,5 @@
obj-$(CONFIG_IXGBEVF) += ixgbevf.o
-ixgbevf-objs := vf.o \
- mbx.o \
- ethtool.o \
- ixgbevf_main.o
+ixgbevf-y := vf.o mbx.o ethtool.o ixgbevf_main.o
ixgbevf-$(CONFIG_IXGBEVF_IPSEC) += ipsec.o
-
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index b938dc06045d..149911e3002a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -76,7 +76,6 @@ static const struct pci_device_id ixgbevf_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/intel/libeth/Makefile b/drivers/net/ethernet/intel/libeth/Makefile
index cb99203d1dd2..52492b081132 100644
--- a/drivers/net/ethernet/intel/libeth/Makefile
+++ b/drivers/net/ethernet/intel/libeth/Makefile
@@ -3,4 +3,4 @@
obj-$(CONFIG_LIBETH) += libeth.o
-libeth-objs += rx.o
+libeth-y := rx.o
diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c
index 6221b88c34ac..f20926669318 100644
--- a/drivers/net/ethernet/intel/libeth/rx.c
+++ b/drivers/net/ethernet/intel/libeth/rx.c
@@ -6,7 +6,7 @@
/* Rx buffer management */
/**
- * libeth_rx_hw_len - get the actual buffer size to be passed to HW
+ * libeth_rx_hw_len_mtu - get the actual buffer size to be passed to HW
* @pp: &page_pool_params of the netdev to calculate the size for
* @max_len: maximum buffer size for a single descriptor
*
@@ -14,7 +14,7 @@
* MTU the @dev has, HW required alignment, minimum and maximum allowed values,
* and system's page size.
*/
-static u32 libeth_rx_hw_len(const struct page_pool_params *pp, u32 max_len)
+static u32 libeth_rx_hw_len_mtu(const struct page_pool_params *pp, u32 max_len)
{
u32 len;
@@ -27,6 +27,118 @@ static u32 libeth_rx_hw_len(const struct page_pool_params *pp, u32 max_len)
}
/**
+ * libeth_rx_hw_len_truesize - get the short buffer size to be passed to HW
+ * @pp: &page_pool_params of the netdev to calculate the size for
+ * @max_len: maximum buffer size for a single descriptor
+ * @truesize: desired truesize for the buffers
+ *
+ * Return: HW-writeable length per one buffer to pass it to the HW ignoring the
+ * MTU and closest to the passed truesize. Can be used for "short" buffer
+ * queues to fragment pages more efficiently.
+ */
+static u32 libeth_rx_hw_len_truesize(const struct page_pool_params *pp,
+ u32 max_len, u32 truesize)
+{
+ u32 min, len;
+
+ min = SKB_HEAD_ALIGN(pp->offset + LIBETH_RX_BUF_STRIDE);
+ truesize = clamp(roundup_pow_of_two(truesize), roundup_pow_of_two(min),
+ PAGE_SIZE << LIBETH_RX_PAGE_ORDER);
+
+ len = SKB_WITH_OVERHEAD(truesize - pp->offset);
+ len = ALIGN_DOWN(len, LIBETH_RX_BUF_STRIDE) ? : LIBETH_RX_BUF_STRIDE;
+ len = min3(len, ALIGN_DOWN(max_len ? : U32_MAX, LIBETH_RX_BUF_STRIDE),
+ pp->max_len);
+
+ return len;
+}
+
+/**
+ * libeth_rx_page_pool_params - calculate params with the stack overhead
+ * @fq: buffer queue to calculate the size for
+ * @pp: &page_pool_params of the netdev
+ *
+ * Set the PP params to fill all needed stack overhead (headroom, tailroom) and
+ * both the HW buffer length and the truesize for all types of buffers. For
+ * "short" buffers, truesize never exceeds the "wanted" one; for the rest,
+ * it can be up to the page size.
+ *
+ * Return: true on success, false on invalid input params.
+ */
+static bool libeth_rx_page_pool_params(struct libeth_fq *fq,
+ struct page_pool_params *pp)
+{
+ pp->offset = LIBETH_SKB_HEADROOM;
+ /* HW-writeable / syncable length per one page */
+ pp->max_len = LIBETH_RX_PAGE_LEN(pp->offset);
+
+ /* HW-writeable length per buffer */
+ switch (fq->type) {
+ case LIBETH_FQE_MTU:
+ fq->buf_len = libeth_rx_hw_len_mtu(pp, fq->buf_len);
+ break;
+ case LIBETH_FQE_SHORT:
+ fq->buf_len = libeth_rx_hw_len_truesize(pp, fq->buf_len,
+ fq->truesize);
+ break;
+ case LIBETH_FQE_HDR:
+ fq->buf_len = ALIGN(LIBETH_MAX_HEAD, LIBETH_RX_BUF_STRIDE);
+ break;
+ default:
+ return false;
+ }
+
+ /* Buffer size to allocate */
+ fq->truesize = roundup_pow_of_two(SKB_HEAD_ALIGN(pp->offset +
+ fq->buf_len));
+
+ return true;
+}
+
+/**
+ * libeth_rx_page_pool_params_zc - calculate params without the stack overhead
+ * @fq: buffer queue to calculate the size for
+ * @pp: &page_pool_params of the netdev
+ *
+ * Set the PP params to exclude the stack overhead and both the buffer length
+ * and the truesize, which are equal for the data buffers. Note that this
+ * requires separate header buffers to be always active and account the
+ * overhead.
+ * With the MTU == ``PAGE_SIZE``, this allows the kernel to enable the zerocopy
+ * mode.
+ *
+ * Return: true on success, false on invalid input params.
+ */
+static bool libeth_rx_page_pool_params_zc(struct libeth_fq *fq,
+ struct page_pool_params *pp)
+{
+ u32 mtu, max;
+
+ pp->offset = 0;
+ pp->max_len = PAGE_SIZE << LIBETH_RX_PAGE_ORDER;
+
+ switch (fq->type) {
+ case LIBETH_FQE_MTU:
+ mtu = READ_ONCE(pp->netdev->mtu);
+ break;
+ case LIBETH_FQE_SHORT:
+ mtu = fq->truesize;
+ break;
+ default:
+ return false;
+ }
+
+ mtu = roundup_pow_of_two(mtu);
+ max = min(rounddown_pow_of_two(fq->buf_len ? : U32_MAX),
+ pp->max_len);
+
+ fq->buf_len = clamp(mtu, LIBETH_RX_BUF_STRIDE, max);
+ fq->truesize = fq->buf_len;
+
+ return true;
+}
+
+/**
* libeth_rx_fq_create - create a PP with the default libeth settings
* @fq: buffer queue struct to fill
* @napi: &napi_struct covering this PP (no usage outside its poll loops)
@@ -44,19 +156,17 @@ int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi)
.netdev = napi->dev,
.napi = napi,
.dma_dir = DMA_FROM_DEVICE,
- .offset = LIBETH_SKB_HEADROOM,
};
struct libeth_fqe *fqes;
struct page_pool *pool;
+ bool ret;
- /* HW-writeable / syncable length per one page */
- pp.max_len = LIBETH_RX_PAGE_LEN(pp.offset);
-
- /* HW-writeable length per buffer */
- fq->buf_len = libeth_rx_hw_len(&pp, fq->buf_len);
- /* Buffer size to allocate */
- fq->truesize = roundup_pow_of_two(SKB_HEAD_ALIGN(pp.offset +
- fq->buf_len));
+ if (!fq->hsplit)
+ ret = libeth_rx_page_pool_params(fq, &pp);
+ else
+ ret = libeth_rx_page_pool_params_zc(fq, &pp);
+ if (!ret)
+ return -EINVAL;
pool = page_pool_create(&pp);
if (IS_ERR(pool))
@@ -145,6 +255,5 @@ EXPORT_SYMBOL_NS_GPL(libeth_rx_pt_gen_hash_type, LIBETH);
/* Module */
-MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Common Ethernet library");
MODULE_LICENSE("GPL");
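With the header split support added above, a caller of libeth_rx_fq_create() chooses between the MTU-based and the zerocopy-oriented parameter calculation via the queue's hsplit flag. A hedged usage sketch limited to the fields this patch touches (type, hsplit, buf_len, truesize); error handling and the remaining libeth_fq setup are intentionally omitted:

/* Hypothetical header buffer queue setup on top of this patch's API. */
static int example_hdr_fq_create(struct libeth_fq *fq, struct napi_struct *napi)
{
	fq->type = LIBETH_FQE_HDR;	/* fixed-length header buffers */
	fq->hsplit = false;		/* keep stack headroom/tailroom accounted */

	return libeth_rx_fq_create(fq, napi);
}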
diff --git a/drivers/net/ethernet/intel/libie/Makefile b/drivers/net/ethernet/intel/libie/Makefile
index bf42c5aeeedd..ffd27fab916a 100644
--- a/drivers/net/ethernet/intel/libie/Makefile
+++ b/drivers/net/ethernet/intel/libie/Makefile
@@ -3,4 +3,4 @@
obj-$(CONFIG_LIBIE) += libie.o
-libie-objs += rx.o
+libie-y := rx.o
diff --git a/drivers/net/ethernet/intel/libie/rx.c b/drivers/net/ethernet/intel/libie/rx.c
index 38201ee1e891..aceb8d8813c4 100644
--- a/drivers/net/ethernet/intel/libie/rx.c
+++ b/drivers/net/ethernet/intel/libie/rx.c
@@ -118,7 +118,6 @@ const struct libeth_rx_pt libie_rx_pt_lut[LIBIE_RX_PT_NUM] = {
};
EXPORT_SYMBOL_NS_GPL(libie_rx_pt_lut, LIBIE);
-MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Ethernet common library");
MODULE_IMPORT_NS(LIBETH);
MODULE_LICENSE("GPL");