author	Linus Torvalds <torvalds@linux-foundation.org>	2024-07-16 19:28:34 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2024-07-16 19:28:34 -0700
commit	51835949dda3783d4639cfa74ce13a3c9829de00 (patch)
tree	2b593de5eba6ecc73f7c58fc65fdaffae45c7323 /drivers/net/ethernet/mellanox
parent	0434dbe32053d07d658165be681505120c6b1abc (diff)
parent	77ae5e5b00720372af2860efdc4bc652ac682696 (diff)
Merge tag 'net-next-6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from Jakub Kicinski:
 "Not much excitement - a handful of large patchsets (devmem among them) did not make it in time.

  Core & protocols:

   - Use local_lock in addition to local_bh_disable() to protect per-CPU resources in networking, a step closer for local_bh_disable() not to act as a big lock on PREEMPT_RT

   - Use flex array for netdevice priv area, ensure its cache alignment

   - Add a sysctl knob to allow user to specify a default rto_min at socket init time. Bit of a big hammer but multiple companies were independently carrying such patch downstream so clearly it's useful

   - Support scheduling transmission of packets based on CLOCK_TAI

   - Un-pin TCP TIMEWAIT timer to avoid it firing on CPUs later cordoned off using cpusets

   - Support multiple L2TPv3 UDP tunnels using the same 5-tuple address

   - Allow configuration of multipath hash seed, to both allow synchronizing hashing of two routers, and prevent partial accidental sync

   - Improve TCP compliance with RFC 9293 for simultaneous connect()

   - Support sending NAT keepalives in IPsec ESP in UDP states. Userspace IKE daemon had to do this before, but the kernel can better keep track of it

   - Support sending supervision HSR frames with MAC addresses stored in ProxyNodeTable when RedBox (i.e. HSR-SAN) is enabled

   - Introduce IPPROTO_SMC for selecting SMC when socket is created

   - Allow UDP GSO transmit from devices with no checksum offload

   - openvswitch: add packet sampling via psample, separating the sampled traffic from "upcall" packets sent to user space for forwarding

   - nf_tables: shrink memory consumption for transaction objects

  Things we sprinkled into general kernel code:

   - Power Sequencing subsystem (used by Qualcomm Bluetooth driver for QCA6390) [ Already merged separately - Linus ]

   - Add IRQ information in sysfs for auxiliary bus

   - Introduce guard definition for local_lock

   - Add aligned flavor of __cacheline_group_{begin, end}() markings for grouping fields in structures

  BPF:

   - Notify user space (via epoll) when a struct_ops object is getting detached/unregistered

   - Add new kfuncs for a generic, open-coded bits iterator

   - Enable BPF programs to declare arrays of kptr, bpf_rb_root, and bpf_list_head

   - Support resilient split BTF which cuts down on duplication and makes BTF as compact as possible WRT BTF from modules

   - Add support for dumping kfunc prototypes from BTF which enables both detecting as well as dumping compilable prototypes for kfuncs

   - riscv64 BPF JIT improvements in particular to add 12-argument support for BPF trampolines and to utilize bpf_prog_pack for the latter

   - Add the capability to offload the netfilter flowtable in XDP layer through kfuncs

  Driver API:

   - Allow users to configure IRQ thresholds between which automatic IRQ moderation can choose

   - Expand Power Sourcing (PoE) status with power, class and failure reason. Support setting power limits

   - Track additional RSS contexts in the core, make sure configuration changes don't break them

   - Support IPsec crypto offload for IPv6 ESP and IPv4 UDP-encapsulated ESP data paths

   - Support updating firmware on SFP modules

  Tests and tooling:

   - mptcp: use net/lib.sh to manage netns

   - TCP-AO and TCP-MD5: replace debug prints used by tests with tracepoints

   - openvswitch: make test self-contained (don't depend on OvS CLI tools)

  Drivers:

   - Ethernet high-speed NICs:
      - Broadcom (bnxt):
         - increase the max total outstanding PTP TX packets to 4
         - add timestamping statistics support
         - implement netdev_queue_mgmt_ops
         - support new RSS context API
      - Intel (100G, ice, idpf):
         - implement FEC statistics and dumping signal quality indicators
         - support E825C products (with 56Gbps PHYs)
      - nVidia/Mellanox:
         - support HW-GRO
         - mlx4/mlx5: support per-queue statistics via netlink
         - obey the max number of EQs setting in sub-functions
      - AMD/Solarflare:
         - support new RSS context API
      - AMD/Pensando:
         - ionic: rework fix for doorbell miss to lower overhead and skip it on new HW
      - Wangxun:
         - txgbe: support Flow Director perfect filters

   - Ethernet NICs consumer, embedded and virtual:
      - Add driver for Tehuti Networks TN40xx chips
      - Add driver for Meta's internal NIC chips
      - Add driver for Ethernet MAC on Airoha EN7581 SoCs
      - Add driver for Renesas Ethernet-TSN devices
      - Google cloud vNIC:
         - flow steering support
      - Microsoft vNIC:
         - support page sizes other than 4KB on ARM64
      - vmware vNIC:
         - support latency measurement (update to version 9)
      - VirtIO net:
         - support for Byte Queue Limits
         - support configuring thresholds for automatic IRQ moderation
         - support for AF_XDP Rx zero-copy
      - Synopsys (stmmac):
         - support for STM32MP13 SoC
         - let platforms select the right PCS implementation
      - TI:
         - icssg-prueth: add multicast filtering support
         - icssg-prueth: enable PTP timestamping and PPS
      - Renesas:
         - ravb: improve Rx performance 30-400% by using page pool, threaded NAPI and timer-based IRQ coalescing
         - ravb: add MII support for R-Car V4M
      - Cadence (macb):
         - macb: add ARP support to Wake-On-LAN
      - Cortina:
         - use phylib for RX and TX pause configuration

   - Ethernet switches:
      - nVidia/Mellanox:
         - support configuration of multipath hash seed
         - report more accurate max MTU
         - use page_pool to improve Rx performance
      - MediaTek:
         - mt7530: add support for bridge port isolation
      - Qualcomm:
         - qca8k: add support for bridge port isolation
      - Microchip:
         - lan9371/2: add 100BaseTX PHY support
      - NXP:
         - vsc73xx: implement VLAN operations

   - Ethernet PHYs:
      - aquantia: enable support for aqr115c
      - aquantia: add support for PHY LEDs
      - realtek: add support for rtl8224 2.5Gbps PHY
      - xpcs: add memory-mapped device support
      - add BroadR-Reach link mode and support in Broadcom's PHY driver

   - CAN:
      - add document for ISO 15765-2 protocol support
      - mcp251xfd: workaround for erratum DS80000789E, use timestamps to catch when device returns incorrect FIFO status

   - WiFi:
      - mac80211/cfg80211:
         - parse Transmit Power Envelope (TPE) data in mac80211 instead of in drivers
         - improvements for 6 GHz regulatory flexibility
         - multi-link improvements
         - support multiple radios per wiphy
         - remove DEAUTH_NEED_MGD_TX_PREP flag
      - Intel (iwlwifi):
         - bump FW API to 91 for BZ/SC devices
         - report 64-bit radiotap timestamp
         - enable P2P low latency by default
         - handle Transmit Power Envelope (TPE) advertised by AP
         - remove support for older FW for new devices
         - fast resume (keeping the device configured)
         - mvm: re-enable Multi-Link Operation (MLO)
         - aggregation (A-MSDU) optimizations
      - MediaTek (mt76):
         - mt7925 Multi-Link Operation (MLO) support
      - Qualcomm (ath10k):
         - LED support for various chipsets
      - Qualcomm (ath12k):
         - remove unsupported Tx monitor handling
         - support channel 2 in 6 GHz band
         - support Spatial Multiplexing Power Save (SMPS) in 6 GHz band
         - support multiple BSSID (MBSSID) and Enhanced Multi-BSSID Advertisements (EMA)
         - support dynamic VLAN
         - add panic handler for resetting the firmware state
         - DebugFS support for datapath statistics
         - WCN7850: support for Wake on WLAN
      - Microchip (wilc1000):
         - read MAC address during probe to make it visible to user space
         - suspend/resume improvements
      - TI (wl18xx):
         - support newer firmware versions
      - RealTek (rtw89):
         - preparation for RTL8852BE-VT support
         - Wake on WLAN support for WiFi 6 chips
         - 36-bit PCI DMA support
      - RealTek (rtlwifi):
         - RTL8192DU support
      - Broadcom (brcmfmac):
         - Management Frame Protection support (to enable WPA3)

   - Bluetooth:
      - qualcomm: use the power sequencer for QCA6390
      - btusb: mediatek: add ISO data transmission functions
      - hci_bcm4377: add BCM4388 support
      - btintel: add support for BlazarU core
      - btintel: add support for Whale Peak2
      - btnxpuart: add support for AW693 A1 chipset
      - btnxpuart: add support for IW615 chipset
      - btusb: add Realtek RTL8852BE support ID 0x13d3:0x3591"

* tag 'net-next-6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1589 commits)
  eth: fbnic: Fix spelling mistake "tiggerring" -> "triggering"
  tcp: Replace strncpy() with strscpy()
  wifi: ath12k: fix build vs old compiler
  tcp: Don't access uninit tcp_rsk(req)->ao_keyid in tcp_create_openreq_child().
  eth: fbnic: Write the TCAM tables used for RSS control and Rx to host
  eth: fbnic: Add L2 address programming
  eth: fbnic: Add basic Rx handling
  eth: fbnic: Add basic Tx handling
  eth: fbnic: Add link detection
  eth: fbnic: Add initial messaging to notify FW of our presence
  eth: fbnic: Implement Rx queue alloc/start/stop/free
  eth: fbnic: Implement Tx queue alloc/start/stop/free
  eth: fbnic: Allocate a netdevice and napi vectors with queues
  eth: fbnic: Add FW communication mechanism
  eth: fbnic: Add message parsing for FW messages
  eth: fbnic: Add register init to set PCIe/Ethernet device config
  eth: fbnic: Allocate core device specific structures and devlink interface
  eth: fbnic: Add scaffolding for Meta's NIC driver
  PCI: Add Meta Platforms vendor ID
  net/sched: cls_flower: propagate tca[TCA_OPTIONS] to NL_REQ_ATTR_CHECK
  ...
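One of the core items above, IPPROTO_SMC, is directly visible to applications: it selects SMC (with fallback to TCP when the peer cannot do SMC) at socket creation time, without switching the address family to AF_SMC. A minimal user-space sketch; the fallback define only covers pre-6.11 uapi headers, whose in.h lacks the constant:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPPROTO_SMC
#define IPPROTO_SMC 256	/* value from the 6.11 uapi; only needed with older headers */
#endif

int main(void)
{
	/* AF_INET + SOCK_STREAM + IPPROTO_SMC asks for an SMC socket,
	 * per the "Introduce IPPROTO_SMC" item above. */
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SMC);

	if (fd < 0)
		perror("socket");
	return 0;
}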
Diffstat (limited to 'drivers/net/ethernet/mellanox')
 drivers/net/ethernet/mellanox/mlx4/en_cq.c | 14
 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 61
 drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 74
 drivers/net/ethernet/mellanox/mlx4/en_rx.c | 4
 drivers/net/ethernet/mellanox/mlx4/main.c | 6
 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2
 drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2
 drivers/net/ethernet/mellanox/mlx5/core/en.h | 26
 drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 13
 drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 12
 drivers/net/ethernet/mellanox/mlx5/core/en/qos.c | 13
 drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 189
 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 6
 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 56
 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 6
 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 6
 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 5
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 224
 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 211
 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 9
 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 6
 drivers/net/ethernet/mellanox/mlx5/core/eq.c | 13
 drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c | 2
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 3
 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 15
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 4
 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 2
 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 8
 drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c | 18
 drivers/net/ethernet/mellanox/mlx5/core/main.c | 2
 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 6
 drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h | 12
 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 24
 drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c | 12
 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 5
 drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 5
 drivers/net/ethernet/mellanox/mlx5/core/wc.c | 434
 drivers/net/ethernet/mellanox/mlxsw/Kconfig | 1
 drivers/net/ethernet/mellanox/mlxsw/core_env.c | 57
 drivers/net/ethernet/mellanox/mlxsw/core_env.h | 6
 drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 51
 drivers/net/ethernet/mellanox/mlxsw/item.h | 4
 drivers/net/ethernet/mellanox/mlxsw/minimal.c | 19
 drivers/net/ethernet/mellanox/mlxsw/pci.c | 326
 drivers/net/ethernet/mellanox/mlxsw/port.h | 3
 drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 31
 drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 3
 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c | 20
 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c | 2
 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c | 13
 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h | 9
 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 8
 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 8
 drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c | 17
 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c | 4
 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h | 10
 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 6
 57 files changed, 1629 insertions(+), 479 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 1184ac5751e1..461cc2c79c71 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -126,6 +126,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
cq_idx = cq_idx % priv->rx_ring_num;
rx_cq = priv->rx_cq[cq_idx];
cq->vector = rx_cq->vector;
+ irq = mlx4_eq_get_irq(mdev->dev, cq->vector);
}
if (cq->type == RX)
@@ -142,18 +143,23 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
if (err)
goto free_eq;
+ cq->cq_idx = cq_idx;
cq->mcq.event = mlx4_en_cq_event;
switch (cq->type) {
case TX:
cq->mcq.comp = mlx4_en_tx_irq;
netif_napi_add_tx(cq->dev, &cq->napi, mlx4_en_poll_tx_cq);
+ netif_napi_set_irq(&cq->napi, irq);
napi_enable(&cq->napi);
+ netif_queue_set_napi(cq->dev, cq_idx, NETDEV_QUEUE_TYPE_TX, &cq->napi);
break;
case RX:
cq->mcq.comp = mlx4_en_rx_irq;
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq);
+ netif_napi_set_irq(&cq->napi, irq);
napi_enable(&cq->napi);
+ netif_queue_set_napi(cq->dev, cq_idx, NETDEV_QUEUE_TYPE_RX, &cq->napi);
break;
case TX_XDP:
/* nothing regarding napi, it's shared with rx ring */
@@ -189,6 +195,14 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
if (cq->type != TX_XDP) {
+ enum netdev_queue_type qtype;
+
+ if (cq->type == RX)
+ qtype = NETDEV_QUEUE_TYPE_RX;
+ else
+ qtype = NETDEV_QUEUE_TYPE_TX;
+
+ netif_queue_set_napi(cq->dev, cq->cq_idx, qtype, NULL);
napi_disable(&cq->napi);
netif_napi_del(&cq->napi);
}
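The hunks above tie each mlx4 CQ's NAPI context to its IRQ and queue index, which is what lets the core netdev netlink interface report the NAPI/queue/IRQ mapping. The pattern generalizes; a condensed, driver-agnostic sketch (my_rx_queue_* and my_poll are hypothetical names, and the RX-only shape is for brevity):

/* Link NAPI to its IRQ and queue around activation... */
static void my_rx_queue_activate(struct net_device *dev,
				 struct napi_struct *napi, int irq, int qid)
{
	netif_napi_add(dev, napi, my_poll);	/* my_poll: hypothetical poll fn */
	netif_napi_set_irq(napi, irq);		/* record the IRQ<->NAPI binding */
	napi_enable(napi);
	netif_queue_set_napi(dev, qid, NETDEV_QUEUE_TYPE_RX, napi);
}

/* ...and unlink in the reverse order on teardown, as
 * mlx4_en_deactivate_cq() above does. */
static void my_rx_queue_deactivate(struct net_device *dev,
				   struct napi_struct *napi, int qid)
{
	netif_queue_set_napi(dev, qid, NETDEV_QUEUE_TYPE_RX, NULL);
	napi_disable(napi);
	netif_napi_del(napi);
}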
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 619e1c3ef7f9..943d6918c2ec 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -450,7 +450,6 @@ static void mlx4_en_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *data)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- int index = 0;
int i, strings = 0;
struct bitmap_iterator it;
@@ -459,10 +458,10 @@ static void mlx4_en_get_strings(struct net_device *dev,
switch (stringset) {
case ETH_SS_TEST:
for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
- strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
+ ethtool_puts(&data, mlx4_en_test_names[i]);
if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
for (; i < MLX4_EN_NUM_SELF_TEST; i++)
- strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
+ ethtool_puts(&data, mlx4_en_test_names[i]);
break;
case ETH_SS_STATS:
@@ -470,74 +469,56 @@ static void mlx4_en_get_strings(struct net_device *dev,
for (i = 0; i < NUM_MAIN_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < NUM_PORT_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < NUM_PF_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < NUM_FLOW_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < NUM_PKT_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < NUM_XDP_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < NUM_PHY_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < priv->tx_ring_num[TX]; i++) {
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "tx%d_packets", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "tx%d_bytes", i);
+ ethtool_sprintf(&data, "tx%d_packets", i);
+ ethtool_sprintf(&data, "tx%d_bytes", i);
}
for (i = 0; i < priv->rx_ring_num; i++) {
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_packets", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_bytes", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_dropped", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_xdp_drop", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_xdp_redirect", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_xdp_redirect_fail", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_xdp_tx", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_xdp_tx_full", i);
+ ethtool_sprintf(&data, "rx%d_packets", i);
+ ethtool_sprintf(&data, "rx%d_bytes", i);
+ ethtool_sprintf(&data, "rx%d_dropped", i);
+ ethtool_sprintf(&data, "rx%d_xdp_drop", i);
+ ethtool_sprintf(&data, "rx%d_xdp_redirect", i);
+ ethtool_sprintf(&data, "rx%d_xdp_redirect_fail", i);
+ ethtool_sprintf(&data, "rx%d_xdp_tx", i);
+ ethtool_sprintf(&data, "rx%d_xdp_tx_full", i);
}
break;
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < ARRAY_SIZE(mlx4_en_priv_flags); i++)
- strcpy(data + i * ETH_GSTRING_LEN,
- mlx4_en_priv_flags[i]);
+ ethtool_puts(&data, mlx4_en_priv_flags[i]);
break;
}
@@ -1903,7 +1884,7 @@ out:
}
static int mlx4_en_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
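The string-table conversion above works because ethtool_puts() and ethtool_sprintf() each fill one ETH_GSTRING_LEN-sized slot and advance the caller's cursor, removing the need for the hand-maintained index that was deleted. A minimal sketch of a .get_strings handler in that style (the stat names and the ring count of 4 are hypothetical):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static void my_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	ethtool_puts(&data, "rx_csum_ok");	/* advances data by ETH_GSTRING_LEN */
	for (i = 0; i < 4; i++)			/* 4 rings, hypothetical */
		ethtool_sprintf(&data, "rx%d_packets", i);
}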
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 4c089cfa027a..281b34af0bb4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -43,6 +43,7 @@
#include <net/vxlan.h>
#include <net/devlink.h>
#include <net/rps.h>
+#include <net/netdev_queues.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
@@ -2073,6 +2074,7 @@ static void mlx4_en_clear_stats(struct net_device *dev)
priv->rx_ring[i]->csum_ok = 0;
priv->rx_ring[i]->csum_none = 0;
priv->rx_ring[i]->csum_complete = 0;
+ priv->rx_ring[i]->alloc_fail = 0;
}
}
@@ -3099,6 +3101,77 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
last_i += NUM_PHY_STATS;
}
+static void mlx4_get_queue_stats_rx(struct net_device *dev, int i,
+ struct netdev_queue_stats_rx *stats)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ const struct mlx4_en_rx_ring *ring;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ if (!priv->port_up || mlx4_is_master(priv->mdev->dev))
+ goto out_unlock;
+
+ ring = priv->rx_ring[i];
+ stats->packets = READ_ONCE(ring->packets);
+ stats->bytes = READ_ONCE(ring->bytes);
+ stats->alloc_fail = READ_ONCE(ring->alloc_fail);
+
+out_unlock:
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static void mlx4_get_queue_stats_tx(struct net_device *dev, int i,
+ struct netdev_queue_stats_tx *stats)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ const struct mlx4_en_tx_ring *ring;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ if (!priv->port_up || mlx4_is_master(priv->mdev->dev))
+ goto out_unlock;
+
+ ring = priv->tx_ring[TX][i];
+ stats->packets = READ_ONCE(ring->packets);
+ stats->bytes = READ_ONCE(ring->bytes);
+
+out_unlock:
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static void mlx4_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ spin_lock_bh(&priv->stats_lock);
+
+ if (!priv->port_up || mlx4_is_master(priv->mdev->dev))
+ goto out_unlock;
+
+ if (priv->rx_ring_num) {
+ rx->packets = 0;
+ rx->bytes = 0;
+ rx->alloc_fail = 0;
+ }
+
+ if (priv->tx_ring_num[TX]) {
+ tx->packets = 0;
+ tx->bytes = 0;
+ }
+
+out_unlock:
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static const struct netdev_stat_ops mlx4_stat_ops = {
+ .get_queue_stats_rx = mlx4_get_queue_stats_rx,
+ .get_queue_stats_tx = mlx4_get_queue_stats_tx,
+ .get_base_stats = mlx4_get_base_stats,
+};
+
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_port_profile *prof)
{
@@ -3262,6 +3335,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
+ dev->stat_ops = &mlx4_stat_ops;
dev->ethtool_ops = &mlx4_en_ethtool_ops;
/*
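Registering mlx4_stat_ops plugs the driver into the netdev queue-stats interface: the core combines get_base_stats() (totals for queues that are not currently visible) with the per-queue callbacks over the active queue range, so sums stay monotonic across ring reconfiguration. A sketch of that consumer-side summation, under my reading of the contract (the real logic lives in the netdev netlink code and treats unsupported counters more carefully):

static u64 total_rx_packets(struct net_device *dev)
{
	const struct netdev_stat_ops *ops = dev->stat_ops;
	struct netdev_queue_stats_rx base = {}, qrx;
	struct netdev_queue_stats_tx tx_unused = {};
	u64 total;
	int i;

	ops->get_base_stats(dev, &base, &tx_unused);
	total = base.packets;
	for (i = 0; i < dev->real_num_rx_queues; i++) {
		memset(&qrx, 0, sizeof(qrx));
		ops->get_queue_stats_rx(dev, i, &qrx);
		total += qrx.packets;
	}
	return total;
}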
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 8328df8645d5..15c57e9517e9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -82,8 +82,10 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
for (i = 0; i < priv->num_frags; i++, frags++) {
if (!frags->page) {
- if (mlx4_alloc_page(priv, frags, gfp))
+ if (mlx4_alloc_page(priv, frags, gfp)) {
+ ring->alloc_fail++;
return -ENOMEM;
+ }
ring->rx_alloc_pages++;
}
rx_desc->data[i].addr = cpu_to_be64(frags->dma +
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 98688e4dbec5..febeadfdd5a5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -169,12 +169,6 @@ module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default "
"1 for IB, 2 for Ethernet");
-struct mlx4_port_config {
- struct list_head list;
- enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
- struct pci_dev *pdev;
-};
-
static atomic_t pf_loading = ATOMIC_INIT(0);
static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index efe3f97b874f..28b70dcc652e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -355,6 +355,7 @@ struct mlx4_en_rx_ring {
unsigned long xdp_tx;
unsigned long xdp_tx_full;
unsigned long dropped;
+ unsigned long alloc_fail;
int hwtstamp_rx_filter;
cpumask_var_t affinity_mask;
struct xdp_rxq_info xdp_rxq;
@@ -379,6 +380,7 @@ struct mlx4_en_cq {
#define MLX4_EN_OPCODE_ERROR 0x1e
const struct cpumask *aff_mask;
+ int cq_idx;
};
struct mlx4_en_port_profile {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 76dc5a9b9648..1289475e7be7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -17,7 +17,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o diag/reporter_vnic.o \
- fw_reset.o qos.o lib/tout.o lib/aso.o
+ fw_reset.o qos.o lib/tout.o lib/aso.o wc.o
#
# Netdev basic
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index e85fb71bf0b4..5fd82c67b6ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -80,6 +80,7 @@ struct page_pool;
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define MLX5E_RX_MAX_HEAD (256)
+#define MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE (8)
#define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9)
#define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
#define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64)
@@ -146,25 +147,6 @@ struct page_pool;
#define MLX5E_TX_XSK_POLL_BUDGET 64
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
-#define MLX5E_KLM_UMR_WQE_SZ(sgl_len)\
- (sizeof(struct mlx5e_umr_wqe) +\
- (sizeof(struct mlx5_klm) * (sgl_len)))
-
-#define MLX5E_KLM_UMR_WQEBBS(klm_entries) \
- (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_BB))
-
-#define MLX5E_KLM_UMR_DS_CNT(klm_entries)\
- (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_DS))
-
-#define MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size)\
- (((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_klm))
-
-#define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\
- ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT)
-
-#define MLX5E_MAX_KLM_PER_WQE(mdev) \
- MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))
-
#define mlx5e_state_dereference(priv, p) \
rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock))
@@ -885,6 +867,8 @@ struct mlx5e_priv {
/* priv data path fields - start */
struct mlx5e_selq selq;
struct mlx5e_txqsq **txq2sq;
+ struct mlx5e_sq_stats **txq2sq_stats;
+
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx_dp dcbx_dp;
#endif
@@ -1014,7 +998,7 @@ void mlx5e_build_ptys2ethtool_map(void);
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode);
-void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close);
+void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
@@ -1207,7 +1191,7 @@ int mlx5e_set_per_queue_coalesce(struct net_device *dev, u32 queue,
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
struct ethtool_flash *flash);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 4d6225e0eec7..1e8b7d330701 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -154,6 +154,19 @@ struct mlx5e_tc_table *mlx5e_fs_get_tc(struct mlx5e_flow_steering *fs);
struct mlx5e_l2_table *mlx5e_fs_get_l2(struct mlx5e_flow_steering *fs);
struct mlx5_flow_namespace *mlx5e_fs_get_ns(struct mlx5e_flow_steering *fs, bool egress);
void mlx5e_fs_set_ns(struct mlx5e_flow_steering *fs, struct mlx5_flow_namespace *ns, bool egress);
+
+static inline bool mlx5e_fs_has_arfs(struct net_device *netdev)
+{
+ return IS_ENABLED(CONFIG_MLX5_EN_ARFS) &&
+ netdev->hw_features & NETIF_F_NTUPLE;
+}
+
+static inline bool mlx5e_fs_want_arfs(struct net_device *netdev)
+{
+ return IS_ENABLED(CONFIG_MLX5_EN_ARFS) &&
+ netdev->features & NETIF_F_NTUPLE;
+}
+
#ifdef CONFIG_MLX5_EN_RXNFC
struct mlx5e_ethtool_steering *mlx5e_fs_get_ethtool(struct mlx5e_flow_steering *fs);
#endif
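The two predicates answer different questions: mlx5e_fs_has_arfs() checks hw_features, i.e. whether the device is capable of aRFS at all, while mlx5e_fs_want_arfs() checks features, i.e. whether the user currently has ntuple filtering enabled. Schematically (paraphrasing the call sites converted later in this series):

/*
 * Table lifetime follows capability:
 *	mlx5e_arfs_create_tables(fs, rx_res, mlx5e_fs_has_arfs(netdev));
 *
 * Runtime quiesce follows the active feature bit:
 *	if (opened && mlx5e_fs_want_arfs(netdev))
 *		mlx5e_arfs_disable(fs);
 */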
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index ec819dfc98be..6c9ccccca81e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -1071,18 +1071,18 @@ static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param)
{
- int max_num_of_umr_per_wqe, max_hd_per_wqe, max_klm_per_umr, rest;
+ int max_num_of_umr_per_wqe, max_hd_per_wqe, max_ksm_per_umr, rest;
void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
u32 wqebbs;
- max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev);
+ max_ksm_per_umr = MLX5E_MAX_KSM_PER_WQE(mdev);
max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
- max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr;
- rest = max_hd_per_wqe % max_klm_per_umr;
- wqebbs = MLX5E_KLM_UMR_WQEBBS(max_klm_per_umr) * max_num_of_umr_per_wqe;
+ max_num_of_umr_per_wqe = max_hd_per_wqe / max_ksm_per_umr;
+ rest = max_hd_per_wqe % max_ksm_per_umr;
+ wqebbs = MLX5E_KSM_UMR_WQEBBS(max_ksm_per_umr) * max_num_of_umr_per_wqe;
if (rest)
- wqebbs += MLX5E_KLM_UMR_WQEBBS(rest);
+ wqebbs += MLX5E_KSM_UMR_WQEBBS(rest);
wqebbs *= wq_size;
return wqebbs;
}
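The sizing above splits the per-WQE header budget into whole KSM UMRs plus one remainder UMR. Worked through with hypothetical numbers, say max_hd_per_wqe = 1100 headers and max_ksm_per_umr = 256 entries:

/*
 * full UMRs = 1100 / 256 = 4
 * remainder = 1100 % 256 = 76
 * wqebbs    = 4 * MLX5E_KSM_UMR_WQEBBS(256) + MLX5E_KSM_UMR_WQEBBS(76)
 *
 * and the result is then scaled by the RQ size: wqebbs *= wq_size.
 */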
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 6743806b8480..f0744a45db92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -170,6 +170,7 @@ int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id)
mlx5e_tx_disable_queue(netdev_get_tx_queue(priv->netdev, qid));
priv->txq2sq[qid] = sq;
+ priv->txq2sq_stats[qid] = sq->stats;
/* Make the change to txq2sq visible before the queue is started.
* As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
@@ -186,6 +187,7 @@ int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id)
void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
{
struct mlx5e_txqsq *sq;
+ u16 txq_ix;
sq = mlx5e_get_qos_sq(priv, qid);
if (!sq) /* Handle the case when the SQ failed to open. */
@@ -194,7 +196,10 @@ void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
qos_dbg(sq->mdev, "Deactivate QoS SQ qid %u\n", qid);
mlx5e_deactivate_txqsq(sq);
- priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL;
+ txq_ix = mlx5e_qid_from_qos(&priv->channels, qid);
+
+ priv->txq2sq[txq_ix] = NULL;
+ priv->txq2sq_stats[txq_ix] = NULL;
/* Make the change to txq2sq visible before the queue is started again.
* As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
@@ -325,6 +330,7 @@ void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
{
struct mlx5e_params *params = &c->priv->channels.params;
struct mlx5e_txqsq __rcu **qos_sqs;
+ u16 txq_ix;
int i;
qos_sqs = mlx5e_state_dereference(c->priv, c->qos_sqs);
@@ -342,8 +348,11 @@ void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
qos_dbg(c->mdev, "Deactivate QoS SQ qid %u\n", qid);
mlx5e_deactivate_txqsq(sq);
+ txq_ix = mlx5e_qid_from_qos(&c->priv->channels, qid);
+
/* The queue is disabled, no synchronization with datapath is needed. */
- c->priv->txq2sq[mlx5e_qid_from_qos(&c->priv->channels, qid)] = NULL;
+ c->priv->txq2sq[txq_ix] = NULL;
+ c->priv->txq2sq_stats[txq_ix] = NULL;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index fadfa8b50beb..8cf8ba2622f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -69,6 +69,8 @@ struct mlx5_tc_ct_priv {
struct rhashtable ct_tuples_nat_ht;
struct mlx5_flow_table *ct;
struct mlx5_flow_table *ct_nat;
+ struct mlx5_flow_group *ct_nat_miss_group;
+ struct mlx5_flow_handle *ct_nat_miss_rule;
struct mlx5e_post_act *post_act;
struct mutex control_lock; /* guards parallel adds/dels */
struct mapping_ctx *zone_mapping;
@@ -141,6 +143,8 @@ struct mlx5_ct_counter {
enum {
MLX5_CT_ENTRY_FLAG_VALID,
+ MLX5_CT_ENTRY_IN_CT_TABLE,
+ MLX5_CT_ENTRY_IN_CT_NAT_TABLE,
};
struct mlx5_ct_entry {
@@ -198,9 +202,15 @@ static const struct rhashtable_params tuples_nat_ht_params = {
};
static bool
-mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
+mlx5_tc_ct_entry_in_ct_table(struct mlx5_ct_entry *entry)
{
- return !!(entry->tuple_nat_node.next);
+ return test_bit(MLX5_CT_ENTRY_IN_CT_TABLE, &entry->flags);
+}
+
+static bool
+mlx5_tc_ct_entry_in_ct_nat_table(struct mlx5_ct_entry *entry)
+{
+ return test_bit(MLX5_CT_ENTRY_IN_CT_NAT_TABLE, &entry->flags);
}
static int
@@ -526,8 +536,10 @@ static void
mlx5_tc_ct_entry_del_rules(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_entry *entry)
{
- mlx5_tc_ct_entry_del_rule(ct_priv, entry, true);
- mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+ if (mlx5_tc_ct_entry_in_ct_nat_table(entry))
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, true);
+ if (mlx5_tc_ct_entry_in_ct_table(entry))
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
atomic_dec(&ct_priv->debugfs.stats.offloaded);
}
@@ -814,7 +826,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
&zone_rule->mh,
zone_restore_id,
nat,
- mlx5_tc_ct_entry_has_nat(entry));
+ mlx5_tc_ct_entry_in_ct_nat_table(entry));
if (err) {
ct_dbg("Failed to create ct entry mod hdr");
goto err_mod_hdr;
@@ -888,7 +900,7 @@ mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv,
*old_attr = *attr;
err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, &mh, zone_restore_id,
- nat, mlx5_tc_ct_entry_has_nat(entry));
+ nat, mlx5_tc_ct_entry_in_ct_nat_table(entry));
if (err) {
ct_dbg("Failed to create ct entry mod hdr");
goto err_mod_hdr;
@@ -957,11 +969,13 @@ static void mlx5_tc_ct_entry_remove_from_tuples(struct mlx5_ct_entry *entry)
{
struct mlx5_tc_ct_priv *ct_priv = entry->ct_priv;
- rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
- &entry->tuple_nat_node,
- tuples_nat_ht_params);
- rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
- tuples_ht_params);
+ if (mlx5_tc_ct_entry_in_ct_nat_table(entry))
+ rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
+ &entry->tuple_nat_node,
+ tuples_nat_ht_params);
+ if (mlx5_tc_ct_entry_in_ct_table(entry))
+ rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
+ tuples_ht_params);
}
static void mlx5_tc_ct_entry_del(struct mlx5_ct_entry *entry)
@@ -1100,21 +1114,26 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
return err;
}
- err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false,
- zone_restore_id);
- if (err)
- goto err_orig;
+ if (mlx5_tc_ct_entry_in_ct_table(entry)) {
+ err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false,
+ zone_restore_id);
+ if (err)
+ goto err_orig;
+ }
- err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true,
- zone_restore_id);
- if (err)
- goto err_nat;
+ if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) {
+ err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true,
+ zone_restore_id);
+ if (err)
+ goto err_nat;
+ }
atomic_inc(&ct_priv->debugfs.stats.offloaded);
return 0;
err_nat:
- mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+ if (mlx5_tc_ct_entry_in_ct_table(entry))
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
err_orig:
mlx5_tc_ct_counter_put(ct_priv, entry);
return err;
@@ -1126,17 +1145,21 @@ mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_entry *entry,
u8 zone_restore_id)
{
- int err;
+ int err = 0;
- err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false,
- zone_restore_id);
- if (err)
- return err;
+ if (mlx5_tc_ct_entry_in_ct_table(entry)) {
+ err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false,
+ zone_restore_id);
+ if (err)
+ return err;
+ }
- err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true,
- zone_restore_id);
- if (err)
- mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+ if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) {
+ err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true,
+ zone_restore_id);
+ if (err && mlx5_tc_ct_entry_in_ct_table(entry))
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+ }
return err;
}
@@ -1224,18 +1247,24 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
if (err)
goto err_entries;
- err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_ht,
- &entry->tuple_node,
- tuples_ht_params);
- if (err)
- goto err_tuple;
-
if (memcmp(&entry->tuple, &entry->tuple_nat, sizeof(entry->tuple))) {
err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_nat_ht,
&entry->tuple_nat_node,
tuples_nat_ht_params);
if (err)
goto err_tuple_nat;
+
+ set_bit(MLX5_CT_ENTRY_IN_CT_NAT_TABLE, &entry->flags);
+ }
+
+ if (!mlx5_tc_ct_entry_in_ct_nat_table(entry)) {
+ err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_ht,
+ &entry->tuple_node,
+ tuples_ht_params);
+ if (err)
+ goto err_tuple;
+
+ set_bit(MLX5_CT_ENTRY_IN_CT_TABLE, &entry->flags);
}
spin_unlock_bh(&ct_priv->ht_lock);
@@ -1251,17 +1280,10 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
err_rules:
spin_lock_bh(&ct_priv->ht_lock);
- if (mlx5_tc_ct_entry_has_nat(entry))
- rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
- &entry->tuple_nat_node, tuples_nat_ht_params);
-err_tuple_nat:
- rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
- &entry->tuple_node,
- tuples_ht_params);
err_tuple:
- rhashtable_remove_fast(&ft->ct_entries_ht,
- &entry->node,
- cts_ht_params);
+ mlx5_tc_ct_entry_remove_from_tuples(entry);
+err_tuple_nat:
+ rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
err_entries:
spin_unlock_bh(&ct_priv->ht_lock);
err_set:
@@ -2149,6 +2171,76 @@ mlx5_ct_tc_remove_dbgfs(struct mlx5_tc_ct_priv *ct_priv)
debugfs_remove_recursive(ct_priv->debugfs.root);
}
+static struct mlx5_flow_handle *
+tc_ct_add_miss_rule(struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act act = {};
+
+ act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
+ act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = next_ft;
+
+ return mlx5_add_flow_rules(ft, NULL, &act, &dest, 1);
+}
+
+static int
+tc_ct_add_ct_table_miss_rule(struct mlx5_flow_table *from,
+ struct mlx5_flow_table *to,
+ struct mlx5_flow_group **miss_group,
+ struct mlx5_flow_handle **miss_rule)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *group;
+ struct mlx5_flow_handle *rule;
+ unsigned int max_fte = from->max_fte;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ /* create miss group */
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
+ max_fte - 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ max_fte - 1);
+ group = mlx5_create_flow_group(from, flow_group_in);
+ if (IS_ERR(group)) {
+ err = PTR_ERR(group);
+ goto err_miss_grp;
+ }
+
+ /* add miss rule to next fdb */
+ rule = tc_ct_add_miss_rule(from, to);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_miss_rule;
+ }
+
+ *miss_group = group;
+ *miss_rule = rule;
+ kvfree(flow_group_in);
+ return 0;
+
+err_miss_rule:
+ mlx5_destroy_flow_group(group);
+err_miss_grp:
+ kvfree(flow_group_in);
+ return err;
+}
+
+static void
+tc_ct_del_ct_table_miss_rule(struct mlx5_flow_group *miss_group,
+ struct mlx5_flow_handle *miss_rule)
+{
+ mlx5_del_flow_rules(miss_rule);
+ mlx5_destroy_flow_group(miss_group);
+}
+
#define INIT_ERR_PREFIX "tc ct offload init failed"
struct mlx5_tc_ct_priv *
@@ -2212,6 +2304,12 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
goto err_ct_nat_tbl;
}
+ err = tc_ct_add_ct_table_miss_rule(ct_priv->ct_nat, ct_priv->ct,
+ &ct_priv->ct_nat_miss_group,
+ &ct_priv->ct_nat_miss_rule);
+ if (err)
+ goto err_ct_zone_ht;
+
ct_priv->post_act = post_act;
mutex_init(&ct_priv->control_lock);
if (rhashtable_init(&ct_priv->zone_ht, &zone_params))
@@ -2273,6 +2371,7 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
ct_priv->fs_ops->destroy(ct_priv->fs);
kfree(ct_priv->fs);
+ tc_ct_del_ct_table_miss_rule(ct_priv->ct_nat_miss_group, ct_priv->ct_nat_miss_rule);
mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
mlx5_chains_destroy_global_table(chains, ct_priv->ct);
mapping_destroy(ct_priv->zone_mapping);
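Taken together, the tc_ct changes insert each offloaded entry only into the table(s) its tuple actually needs, and the new miss group in ct_nat (occupying the last two flow-table entries) forwards lookup misses on to the plain ct table. The resulting packet walk, schematically (a sketch of the lookup order, not driver code):

/*
 * packet -> ct_nat table
 *	tuple matches a NAT entry?
 *	  yes: apply NAT mod-hdr, restore zone, continue to post-act
 *	  no:  falls into the miss group (last two FTEs)
 *	       -> forwarded to the ct table
 *		    tuple matches a plain entry?
 *		      yes: restore zone, continue to post-act
 *		      no:  table default miss
 */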
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 8dfb57f712b0..721f35e59757 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -850,6 +850,12 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
flow_rule_match_enc_control(rule, &match);
addr_type = match.key->addr_type;
+ if (flow_rule_has_enc_control_flags(match.mask->flags,
+ extack)) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
/* For tunnel addr_type used same key id`s as for non-tunnel */
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_match_ipv4_addrs match;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 879d698b6119..5ec468268d1a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -6,6 +6,8 @@
#include "en.h"
#include <linux/indirect_call_wrapper.h>
+#include <net/ip6_checksum.h>
+#include <net/tcp.h>
#define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
@@ -34,6 +36,25 @@
#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
+#define MLX5E_KSM_UMR_WQE_SZ(sgl_len)\
+ (sizeof(struct mlx5e_umr_wqe) +\
+ (sizeof(struct mlx5_ksm) * (sgl_len)))
+
+#define MLX5E_KSM_UMR_WQEBBS(ksm_entries) \
+ (DIV_ROUND_UP(MLX5E_KSM_UMR_WQE_SZ(ksm_entries), MLX5_SEND_WQE_BB))
+
+#define MLX5E_KSM_UMR_DS_CNT(ksm_entries)\
+ (DIV_ROUND_UP(MLX5E_KSM_UMR_WQE_SZ(ksm_entries), MLX5_SEND_WQE_DS))
+
+#define MLX5E_KSM_MAX_ENTRIES_PER_WQE(wqe_size)\
+ (((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_ksm))
+
+#define MLX5E_KSM_ENTRIES_PER_WQE(wqe_size)\
+ ALIGN_DOWN(MLX5E_KSM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT)
+
+#define MLX5E_MAX_KSM_PER_WQE(mdev) \
+ MLX5E_KSM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))
+
static inline
ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_ts)
{
@@ -460,6 +481,41 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
}
}
+static inline void
+mlx5e_swp_encap_csum_partial(struct mlx5_core_dev *mdev, struct sk_buff *skb, bool tunnel)
+{
+ const struct iphdr *ip = tunnel ? inner_ip_hdr(skb) : ip_hdr(skb);
+ const struct ipv6hdr *ip6;
+ struct tcphdr *th;
+ struct udphdr *uh;
+ int len;
+
+ if (!MLX5_CAP_ETH(mdev, swp_csum_l4_partial) || !skb_is_gso(skb))
+ return;
+
+ if (skb_is_gso_tcp(skb)) {
+ th = inner_tcp_hdr(skb);
+ len = skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb);
+
+ if (ip->version == 4) {
+ th->check = ~tcp_v4_check(len, ip->saddr, ip->daddr, 0);
+ } else {
+ ip6 = tunnel ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+ th->check = ~tcp_v6_check(len, &ip6->saddr, &ip6->daddr, 0);
+ }
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ uh = (struct udphdr *)skb_inner_transport_header(skb);
+ len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
+
+ if (ip->version == 4) {
+ uh->check = ~udp_v4_check(len, ip->saddr, ip->daddr, 0);
+ } else {
+ ip6 = tunnel ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+ uh->check = ~udp_v6_check(len, &ip6->saddr, &ip6->daddr, 0);
+ }
+ }
+}
+
#define MLX5E_STOP_ROOM(wqebbs) ((wqebbs) * 2 - 1)
static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
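Rough numbers for the KSM macros above, assuming sizes for illustration only (sizeof(struct mlx5e_umr_wqe) == 128, sizeof(struct mlx5_ksm) == 16, MLX5_SEND_WQE_BB == 64, MLX5_SEND_WQE_DS == 16; the authoritative values come from the mlx5 headers):

/*
 * MLX5E_KSM_UMR_WQE_SZ(256) = 128 + 256 * 16         = 4224 bytes
 * MLX5E_KSM_UMR_WQEBBS(256) = DIV_ROUND_UP(4224, 64) = 66 basic blocks
 * MLX5E_KSM_UMR_DS_CNT(256) = DIV_ROUND_UP(4224, 16) = 264 data segments
 */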
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 359050f0b54d..3cc640669247 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -116,6 +116,7 @@ static inline bool
mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg)
{
+ struct mlx5_core_dev *mdev = sq->mdev;
u8 inner_ipproto;
if (!mlx5e_ipsec_eseg_meta(eseg))
@@ -125,9 +126,12 @@ mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
inner_ipproto = xfrm_offload(skb)->inner_ipproto;
if (inner_ipproto) {
eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
- if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP)
+ if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP) {
+ mlx5e_swp_encap_csum_partial(mdev, skb, true);
eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
+ }
} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ mlx5e_swp_encap_csum_partial(mdev, skb, false);
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial_inner++;
}
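mlx5e_swp_encap_csum_partial() (added in txrx.h above) seeds the inner L4 checksum with the complement of the pseudo-header checksum over the per-segment payload length, leaving HW to complete the sum for each GSO segment. For TCP/IPv4 the stored value reduces to the standard helper; a restatement of the existing API, not new driver code:

#include <linux/ip.h>
#include <net/tcp.h>

static __sum16 swp_tcp4_partial_seed(const struct iphdr *ip, int len)
{
	/* tcp_v4_check(len, s, d, 0) == csum_tcpudp_magic(s, d, len,
	 * IPPROTO_TCP, 0): a checksum over {saddr, daddr, proto, len}. */
	return ~tcp_v4_check(len, ip->saddr, ip->daddr, 0);
}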
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 3320f12ba2db..279dcb54af14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -525,7 +525,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- arfs_enabled = opened && (priv->netdev->features & NETIF_F_NTUPLE);
+ arfs_enabled = opened && mlx5e_fs_want_arfs(priv->netdev);
if (arfs_enabled)
mlx5e_arfs_disable(priv->fs);
@@ -1658,7 +1658,7 @@ static int mlx5e_set_pauseparam(struct net_device *netdev,
}
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1682,7 +1682,7 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
}
static int mlx5e_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct mlx5e_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 8c5b291a171f..05058710d2c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -1307,8 +1307,7 @@ int mlx5e_create_flow_steering(struct mlx5e_flow_steering *fs,
return -EOPNOTSUPP;
mlx5e_fs_set_ns(fs, ns, false);
- err = mlx5e_arfs_create_tables(fs, rx_res,
- !!(netdev->hw_features & NETIF_F_NTUPLE));
+ err = mlx5e_arfs_create_tables(fs, rx_res, mlx5e_fs_has_arfs(netdev));
if (err) {
fs_err(fs, "Failed to create arfs tables, err=%d\n", err);
netdev->hw_features &= ~NETIF_F_NTUPLE;
@@ -1355,7 +1354,7 @@ err_destroy_ttc_table:
err_destroy_inner_ttc_table:
mlx5e_destroy_inner_ttc_table(fs);
err_destroy_arfs_tables:
- mlx5e_arfs_destroy_tables(fs, !!(netdev->hw_features & NETIF_F_NTUPLE));
+ mlx5e_arfs_destroy_tables(fs, mlx5e_fs_has_arfs(netdev));
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index eedbcba22689..6f686fabed44 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -39,6 +39,7 @@
#include <linux/debugfs.h>
#include <linux/if_bridge.h>
#include <linux/filter.h>
+#include <net/netdev_queues.h>
#include <net/page_pool/types.h>
#include <net/pkt_sched.h>
#include <net/xdp_sock_drv.h>
@@ -74,6 +75,27 @@
#include "lib/devcom.h"
#include "lib/sd.h"
+static bool mlx5e_hw_gro_supported(struct mlx5_core_dev *mdev)
+{
+ if (!MLX5_CAP_GEN(mdev, shampo))
+ return false;
+
+ /* Our HW-GRO implementation relies on "KSM Mkey" for
+ * SHAMPO headers buffer mapping
+ */
+ if (!MLX5_CAP_GEN(mdev, fixed_buffer_size))
+ return false;
+
+ if (!MLX5_CAP_GEN_2(mdev, min_mkey_log_entity_size_fixed_buffer_valid))
+ return false;
+
+ if (MLX5_CAP_GEN_2(mdev, min_mkey_log_entity_size_fixed_buffer) >
+ MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE)
+ return false;
+
+ return true;
+}
+
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode)
{
@@ -504,8 +526,8 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
return err;
}
-static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev,
- u64 nentries,
+static int mlx5e_create_umr_ksm_mkey(struct mlx5_core_dev *mdev,
+ u64 nentries, u8 log_entry_size,
u32 *umr_mkey)
{
int inlen;
@@ -525,12 +547,13 @@ static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev,
MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET(mkc, mkc, lw, 1);
MLX5_SET(mkc, mkc, lr, 1);
- MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KSM);
mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
MLX5_SET(mkc, mkc, translations_octword_size, nentries);
- MLX5_SET(mkc, mkc, length64, 1);
+ MLX5_SET(mkc, mkc, log_page_size, log_entry_size);
+ MLX5_SET64(mkc, mkc, len, nentries << log_entry_size);
err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
kvfree(in);
@@ -565,14 +588,16 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq
static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
struct mlx5e_rq *rq)
{
- u32 max_klm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size));
+ u32 max_ksm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size));
- if (max_klm_size < rq->mpwqe.shampo->hd_per_wq) {
- mlx5_core_err(mdev, "max klm list size 0x%x is smaller than shampo header buffer list size 0x%x\n",
- max_klm_size, rq->mpwqe.shampo->hd_per_wq);
+ if (max_ksm_size < rq->mpwqe.shampo->hd_per_wq) {
+ mlx5_core_err(mdev, "max ksm list size 0x%x is smaller than shampo header buffer list size 0x%x\n",
+ max_ksm_size, rq->mpwqe.shampo->hd_per_wq);
return -EINVAL;
}
- return mlx5e_create_umr_klm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq,
+
+ return mlx5e_create_umr_ksm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq,
+ MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE,
&rq->mpwqe.shampo->mkey);
}
@@ -1208,15 +1233,6 @@ void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq)
head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
}
- if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
- u16 len;
-
- len = (rq->mpwqe.shampo->pi - rq->mpwqe.shampo->ci) &
- (rq->mpwqe.shampo->hd_per_wq - 1);
- mlx5e_shampo_dealloc_hd(rq, len, rq->mpwqe.shampo->ci, false);
- rq->mpwqe.shampo->pi = rq->mpwqe.shampo->ci;
- }
-
rq->mpwqe.actual_wq_head = wq->head;
rq->mpwqe.umr_in_progress = 0;
rq->mpwqe.umr_completed = 0;
@@ -1244,8 +1260,7 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
}
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
- mlx5e_shampo_dealloc_hd(rq, rq->mpwqe.shampo->hd_per_wq,
- 0, true);
+ mlx5e_shampo_dealloc_hd(rq);
} else {
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
u16 missing = mlx5_wq_cyc_missing(wq);
@@ -3111,6 +3126,7 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
struct mlx5e_txqsq *sq = &c->sq[tc];
priv->txq2sq[sq->txq_ix] = sq;
+ priv->txq2sq_stats[sq->txq_ix] = sq->stats;
}
}
@@ -3125,6 +3141,7 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq;
priv->txq2sq[sq->txq_ix] = sq;
+ priv->txq2sq_stats[sq->txq_ix] = sq->stats;
}
out:
@@ -4259,13 +4276,19 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
#define MLX5E_HANDLE_FEATURE(feature, handler) \
mlx5e_handle_feature(netdev, &oper_features, feature, handler)
- err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
- err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
+ if (features & (NETIF_F_GRO_HW | NETIF_F_LRO)) {
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
+ } else {
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
+ }
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_hw_tc);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
- err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
#ifdef CONFIG_MLX5_EN_ARFS
err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
@@ -4890,7 +4913,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
}
out:
- /* Disable CSUM and GSO if the udp dport is not offloaded by HW */
+ /* Disable CSUM and GSO if skb cannot be offloaded by HW */
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
@@ -5276,6 +5299,136 @@ static bool mlx5e_tunnel_any_tx_proto_supported(struct mlx5_core_dev *mdev)
return (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev));
}
+static void mlx5e_get_queue_stats_rx(struct net_device *dev, int i,
+ struct netdev_queue_stats_rx *stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_channel_stats *channel_stats;
+ struct mlx5e_rq_stats *xskrq_stats;
+ struct mlx5e_rq_stats *rq_stats;
+
+ ASSERT_RTNL();
+ if (mlx5e_is_uplink_rep(priv))
+ return;
+
+ channel_stats = priv->channel_stats[i];
+ xskrq_stats = &channel_stats->xskrq;
+ rq_stats = &channel_stats->rq;
+
+ stats->packets = rq_stats->packets + xskrq_stats->packets;
+ stats->bytes = rq_stats->bytes + xskrq_stats->bytes;
+ stats->alloc_fail = rq_stats->buff_alloc_err +
+ xskrq_stats->buff_alloc_err;
+}
+
+static void mlx5e_get_queue_stats_tx(struct net_device *dev, int i,
+ struct netdev_queue_stats_tx *stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_sq_stats *sq_stats;
+
+ ASSERT_RTNL();
+ /* no special case needed for ptp htb etc since txq2sq_stats is kept up
+ * to date for active sq_stats, otherwise get_base_stats takes care of
+ * inactive sqs.
+ */
+ sq_stats = priv->txq2sq_stats[i];
+ stats->packets = sq_stats->packets;
+ stats->bytes = sq_stats->bytes;
+}
+
+static void mlx5e_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_ptp *ptp_channel;
+ int i, tc;
+
+ ASSERT_RTNL();
+ if (!mlx5e_is_uplink_rep(priv)) {
+ rx->packets = 0;
+ rx->bytes = 0;
+ rx->alloc_fail = 0;
+
+ for (i = priv->channels.params.num_channels; i < priv->stats_nch; i++) {
+ struct netdev_queue_stats_rx rx_i = {0};
+
+ mlx5e_get_queue_stats_rx(dev, i, &rx_i);
+
+ rx->packets += rx_i.packets;
+ rx->bytes += rx_i.bytes;
+ rx->alloc_fail += rx_i.alloc_fail;
+ }
+
+ /* always report PTP RX stats from base as there is no
+ * corresponding channel to report them under in
+ * mlx5e_get_queue_stats_rx.
+ */
+ if (priv->rx_ptp_opened) {
+ struct mlx5e_rq_stats *rq_stats = &priv->ptp_stats.rq;
+
+ rx->packets += rq_stats->packets;
+ rx->bytes += rq_stats->bytes;
+ }
+ }
+
+ tx->packets = 0;
+ tx->bytes = 0;
+
+ for (i = 0; i < priv->stats_nch; i++) {
+ struct mlx5e_channel_stats *channel_stats = priv->channel_stats[i];
+
+ /* handle two cases:
+ *
+ * 1. channels which are active. In this case,
+ * report only deactivated TCs on these channels.
+ *
+ * 2. channels which were deactivated
+ * (i > priv->channels.params.num_channels)
+ * must have all of their TCs [0 .. priv->max_opened_tc)
+ * examined because deactivated channels will not be in the
+ * range of [0..real_num_tx_queues) and will not have their
+ * stats reported by mlx5e_get_queue_stats_tx.
+ */
+ if (i < priv->channels.params.num_channels)
+ tc = mlx5e_get_dcb_num_tc(&priv->channels.params);
+ else
+ tc = 0;
+
+ for (; tc < priv->max_opened_tc; tc++) {
+ struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[tc];
+
+ tx->packets += sq_stats->packets;
+ tx->bytes += sq_stats->bytes;
+ }
+ }
+
+ /* if PTP TX was opened at some point and has since either:
+ * - been shutdown and set to NULL, or
+ * - simply disabled (bit unset)
+ *
+ * report stats directly from the ptp_stats structures as these queues
+ * are now unavailable and there is no txq index to retrieve these
+ * stats via calls to mlx5e_get_queue_stats_tx.
+ */
+ ptp_channel = priv->channels.ptp;
+ if (priv->tx_ptp_opened && (!ptp_channel || !test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state))) {
+ for (tc = 0; tc < priv->max_opened_tc; tc++) {
+ struct mlx5e_sq_stats *sq_stats = &priv->ptp_stats.sq[tc];
+
+ tx->packets += sq_stats->packets;
+ tx->bytes += sq_stats->bytes;
+ }
+ }
+}
+
+static const struct netdev_stat_ops mlx5e_stat_ops = {
+ .get_queue_stats_rx = mlx5e_get_queue_stats_rx,
+ .get_queue_stats_tx = mlx5e_get_queue_stats_tx,
+ .get_base_stats = mlx5e_get_base_stats,
+};
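
The three callbacks above implement the netdev per-queue statistics interface: get_queue_stats_rx/tx report counters for queues that are currently active, while get_base_stats reports everything not attributable to an active queue (deactivated channels, PTP queues), so that base plus the sum over active queues stays constant across reconfiguration. A minimal standalone model of that accounting invariant follows; the array sizes and counter values are hypothetical, not taken from the driver.

#include <stdio.h>

/* Illustrative sketch only: models the "base + active queues = total"
 * contract of netdev_stat_ops. Values are made up.
 */
#define STATS_NCH 8	/* stats persist for every channel ever opened */

static unsigned long long pkts[STATS_NCH] = { 10, 20, 30, 40, 5, 6, 7, 8 };

static unsigned long long queue_stats(int i)
{
	return pkts[i];	/* per-queue callback: active queues only */
}

static unsigned long long base_stats(int num_active)
{
	unsigned long long sum = 0;

	/* base callback: sum channels that are no longer active */
	for (int i = num_active; i < STATS_NCH; i++)
		sum += pkts[i];
	return sum;
}

int main(void)
{
	for (int active = 1; active <= STATS_NCH; active++) {
		unsigned long long total = base_stats(active);

		for (int i = 0; i < active; i++)
			total += queue_stats(i);
		/* total is invariant regardless of how many queues are active */
		printf("active=%d total=%llu\n", active, total);
	}
	return 0;
}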
+
static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -5293,6 +5446,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->watchdog_timeo = 15 * HZ;
+ netdev->stat_ops = &mlx5e_stat_ops;
netdev->ethtool_ops = &mlx5e_ethtool_ops;
netdev->vlan_features |= NETIF_F_SG;
@@ -5331,6 +5485,11 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+ if (mlx5e_hw_gro_supported(mdev) &&
+ mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT,
+ MLX5E_MPWRQ_UMR_MODE_ALIGNED))
+ netdev->hw_features |= NETIF_F_GRO_HW;
+
if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
netdev->hw_enc_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
@@ -5397,8 +5556,10 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
#endif
-#ifdef CONFIG_MLX5_EN_ARFS
+#if IS_ENABLED(CONFIG_MLX5_EN_ARFS)
netdev->hw_features |= NETIF_F_NTUPLE;
+#elif IS_ENABLED(CONFIG_MLX5_EN_RXNFC)
+ netdev->features |= NETIF_F_NTUPLE;
#endif
}
@@ -5572,7 +5733,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
err_tc_nic_cleanup:
mlx5e_tc_nic_cleanup(priv);
err_destroy_flow_steering:
- mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
+ mlx5e_destroy_flow_steering(priv->fs, mlx5e_fs_has_arfs(priv->netdev),
priv->profile);
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
@@ -5588,7 +5749,7 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
mlx5e_accel_cleanup_rx(priv);
mlx5e_tc_nic_cleanup(priv);
- mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
+ mlx5e_destroy_flow_steering(priv->fs, mlx5e_fs_has_arfs(priv->netdev),
priv->profile);
mlx5e_rx_res_destroy(priv->rx_res);
priv->rx_res = NULL;
@@ -5823,9 +5984,13 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
if (!priv->txq2sq)
goto err_destroy_workqueue;
+ priv->txq2sq_stats = kcalloc_node(num_txqs, sizeof(*priv->txq2sq_stats), GFP_KERNEL, node);
+ if (!priv->txq2sq_stats)
+ goto err_free_txq2sq;
+
priv->tx_rates = kcalloc_node(num_txqs, sizeof(*priv->tx_rates), GFP_KERNEL, node);
if (!priv->tx_rates)
- goto err_free_txq2sq;
+ goto err_free_txq2sq_stats;
priv->channel_stats =
kcalloc_node(nch, sizeof(*priv->channel_stats), GFP_KERNEL, node);
@@ -5836,6 +6001,8 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
err_free_tx_rates:
kfree(priv->tx_rates);
+err_free_txq2sq_stats:
+ kfree(priv->txq2sq_stats);
err_free_txq2sq:
kfree(priv->txq2sq);
err_destroy_workqueue:
@@ -5859,6 +6026,7 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
kvfree(priv->channel_stats[i]);
kfree(priv->channel_stats);
kfree(priv->tx_rates);
+ kfree(priv->txq2sq_stats);
kfree(priv->txq2sq);
destroy_workqueue(priv->wq);
mlx5e_selq_cleanup(&priv->selq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index b5333da20e8a..225da8d691fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -523,15 +523,23 @@ mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinf
static inline void
mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
- struct page *page, u32 frag_offset, u32 len,
+ struct mlx5e_frag_page *frag_page,
+ u32 frag_offset, u32 len,
unsigned int truesize)
{
- dma_addr_t addr = page_pool_get_dma_addr(page);
+ dma_addr_t addr = page_pool_get_dma_addr(frag_page->page);
+ u8 next_frag = skb_shinfo(skb)->nr_frags;
dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
rq->buff.map_dir);
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- page, frag_offset, len, truesize);
+
+ if (skb_can_coalesce(skb, next_frag, frag_page->page, frag_offset)) {
+ skb_coalesce_rx_frag(skb, next_frag - 1, len, truesize);
+ } else {
+ frag_page->frags++;
+ skb_add_rx_frag(skb, next_frag, frag_page->page,
+ frag_offset, len, truesize);
+ }
}
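
The reworked mlx5e_add_skb_frag first asks skb_can_coalesce() whether the new chunk is contiguous with the last fragment on the same page; if so it grows that fragment via skb_coalesce_rx_frag() instead of consuming a new slot, and notably does not take another page-fragment reference. A standalone model of that decision, with a simplified fragment array standing in for skb_shared_info:

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the coalesce-or-append decision; the types are simplified
 * stand-ins, not kernel structures.
 */
struct frag {
	int page;	/* page identifier */
	unsigned off;	/* offset within the page */
	unsigned len;
};

static struct frag frags[16];
static int nr_frags;

static bool can_coalesce(int page, unsigned off)
{
	if (!nr_frags)
		return false;

	struct frag *last = &frags[nr_frags - 1];

	return last->page == page && last->off + last->len == off;
}

static void add_frag(int page, unsigned off, unsigned len)
{
	if (can_coalesce(page, off))
		frags[nr_frags - 1].len += len;	/* grow the last fragment */
	else
		frags[nr_frags++] = (struct frag){ page, off, len };
}

int main(void)
{
	add_frag(1, 0, 512);
	add_frag(1, 512, 512);	/* contiguous on same page: coalesced */
	add_frag(2, 0, 256);	/* different page: new fragment slot */
	printf("nr_frags=%d first_len=%u\n", nr_frags, frags[0].len);
	return 0;
}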
static inline void
@@ -619,25 +627,25 @@ static int bitmap_find_window(unsigned long *bitmap, int len,
return min(len, count);
}
-static void build_klm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
- __be32 key, u16 offset, u16 klm_len, u16 wqe_bbs)
+static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
+ __be32 key, u16 offset, u16 ksm_len)
{
- memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_klms));
+ memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_ksms));
umr_wqe->ctrl.opmod_idx_opcode =
cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_UMR);
umr_wqe->ctrl.umr_mkey = key;
umr_wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
- | MLX5E_KLM_UMR_DS_CNT(klm_len));
+ | MLX5E_KSM_UMR_DS_CNT(ksm_len));
umr_wqe->uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
- umr_wqe->uctrl.xlt_octowords = cpu_to_be16(klm_len);
+ umr_wqe->uctrl.xlt_octowords = cpu_to_be16(ksm_len);
umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
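
The KLM-to-KSM rename is not cosmetic: a KSM mkey describes equally sized memory blocks, so each inline entry carries only a key and an address, with the block length coming from the mkey itself rather than a per-entry byte count. Since all SHAMPO headers share one size, the bcount field the driver used to program per entry (MLX5E_RX_MAX_HEAD) becomes redundant. The following layout sketch is modelled on my reading of the mlx5 definitions and is for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Illustrative entry layouts, modelled on the mlx5 KLM/KSM formats. */
struct klm_entry {		/* KLM: variable-sized blocks */
	uint32_t bcount;	/* per-entry byte count */
	uint32_t key;
	uint64_t va;
};

struct ksm_entry {		/* KSM: fixed-size blocks */
	uint32_t reserved;	/* block size comes from the mkey instead */
	uint32_t key;
	uint64_t va;
};

int main(void)
{
	/* Same footprint per entry; KSM just drops the per-entry length. */
	printf("klm=%zu ksm=%zu\n", sizeof(struct klm_entry),
	       sizeof(struct ksm_entry));
	return 0;
}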
static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
struct mlx5e_icosq *sq,
- u16 klm_entries, u16 index)
+ u16 ksm_entries, u16 index)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
u16 entries, pi, header_offset, err, wqe_bbs, new_entries;
@@ -650,20 +658,20 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
int headroom, i;
headroom = rq->buff.headroom;
- new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1));
- entries = ALIGN(klm_entries, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT);
- wqe_bbs = MLX5E_KLM_UMR_WQEBBS(entries);
+ new_entries = ksm_entries - (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1));
+ entries = ALIGN(ksm_entries, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT);
+ wqe_bbs = MLX5E_KSM_UMR_WQEBBS(entries);
pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
- build_klm_umr(sq, umr_wqe, shampo->key, index, entries, wqe_bbs);
+ build_ksm_umr(sq, umr_wqe, shampo->key, index, entries);
frag_page = &shampo->pages[page_index];
for (i = 0; i < entries; i++, index++) {
dma_info = &shampo->info[index];
- if (i >= klm_entries || (index < shampo->pi && shampo->pi - index <
- MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT))
- goto update_klm;
+ if (i >= ksm_entries || (index < shampo->pi && shampo->pi - index <
+ MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT))
+ goto update_ksm;
header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
if (!(header_offset & (PAGE_SIZE - 1))) {
@@ -683,12 +691,11 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
dma_info->frag_page = frag_page;
}
-update_klm:
- umr_wqe->inline_klms[i].bcount =
- cpu_to_be32(MLX5E_RX_MAX_HEAD);
- umr_wqe->inline_klms[i].key = cpu_to_be32(lkey);
- umr_wqe->inline_klms[i].va =
- cpu_to_be64(dma_info->addr + headroom);
+update_ksm:
+ umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
+ .key = cpu_to_be32(lkey),
+ .va = cpu_to_be64(dma_info->addr + headroom),
+ };
}
sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
@@ -720,37 +727,37 @@ err_unmap:
static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u16 klm_entries, num_wqe, index, entries_before;
+ u16 ksm_entries, num_wqe, index, entries_before;
struct mlx5e_icosq *sq = rq->icosq;
- int i, err, max_klm_entries, len;
+ int i, err, max_ksm_entries, len;
- max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev);
- klm_entries = bitmap_find_window(shampo->bitmap,
+ max_ksm_entries = MLX5E_MAX_KSM_PER_WQE(rq->mdev);
+ ksm_entries = bitmap_find_window(shampo->bitmap,
shampo->hd_per_wqe,
shampo->hd_per_wq, shampo->pi);
- if (!klm_entries)
+ if (!ksm_entries)
return 0;
- klm_entries += (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1));
- index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT);
+ ksm_entries += (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1));
+ index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT);
entries_before = shampo->hd_per_wq - index;
- if (unlikely(entries_before < klm_entries))
- num_wqe = DIV_ROUND_UP(entries_before, max_klm_entries) +
- DIV_ROUND_UP(klm_entries - entries_before, max_klm_entries);
+ if (unlikely(entries_before < ksm_entries))
+ num_wqe = DIV_ROUND_UP(entries_before, max_ksm_entries) +
+ DIV_ROUND_UP(ksm_entries - entries_before, max_ksm_entries);
else
- num_wqe = DIV_ROUND_UP(klm_entries, max_klm_entries);
+ num_wqe = DIV_ROUND_UP(ksm_entries, max_ksm_entries);
for (i = 0; i < num_wqe; i++) {
- len = (klm_entries > max_klm_entries) ? max_klm_entries :
- klm_entries;
+ len = (ksm_entries > max_ksm_entries) ? max_ksm_entries :
+ ksm_entries;
if (unlikely(index + len > shampo->hd_per_wq))
len = shampo->hd_per_wq - index;
err = mlx5e_build_shampo_hd_umr(rq, sq, len, index);
if (unlikely(err))
return err;
index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1);
- klm_entries -= len;
+ ksm_entries -= len;
}
return 0;
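
The loop above splits the pending KSM entries into UMR WQEs of at most max_ksm_entries each, with one wrinkle: a single WQE may not cross the end of the header ring, so when fewer than ksm_entries slots remain before the wrap point, the count is split into "before wrap" and "after wrap" parts. A small standalone model of the WQE-count computation, using made-up numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Sketch: how many UMR WQEs are needed for `entries` KSM slots when one
 * WQE holds at most `max_per_wqe` and may not cross the ring wrap,
 * which is `before_wrap` slots away.
 */
static int num_umr_wqes(int entries, int max_per_wqe, int before_wrap)
{
	if (before_wrap < entries)
		return DIV_ROUND_UP(before_wrap, max_per_wqe) +
		       DIV_ROUND_UP(entries - before_wrap, max_per_wqe);
	return DIV_ROUND_UP(entries, max_per_wqe);
}

int main(void)
{
	/* hypothetical numbers, chosen only to exercise both branches */
	printf("%d\n", num_umr_wqes(100, 64, 200));	/* no wrap: 2 WQEs */
	printf("%d\n", num_umr_wqes(100, 64, 30));	/* wrap: 1 + 2 WQEs */
	return 0;
}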
@@ -839,44 +846,28 @@ err:
return err;
}
-/* This function is responsible to dealloc SHAMPO header buffer.
- * close == true specifies that we are in the middle of closing RQ operation so
- * we go over all the entries and if they are not in use we free them,
- * otherwise we only go over a specific range inside the header buffer that are
- * not in use.
- */
-void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close)
+static void
+mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- struct mlx5e_frag_page *deleted_page = NULL;
- int hd_per_wq = shampo->hd_per_wq;
- struct mlx5e_dma_info *hd_info;
- int i, index = start;
-
- for (i = 0; i < len; i++, index++) {
- if (index == hd_per_wq)
- index = 0;
-
- if (close && !test_bit(index, shampo->bitmap))
- continue;
+ u64 addr = shampo->info[header_index].addr;
- hd_info = &shampo->info[index];
- hd_info->addr = ALIGN_DOWN(hd_info->addr, PAGE_SIZE);
- if (hd_info->frag_page && hd_info->frag_page != deleted_page) {
- deleted_page = hd_info->frag_page;
- mlx5e_page_release_fragmented(rq, hd_info->frag_page);
- }
+ if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
+ struct mlx5e_dma_info *dma_info = &shampo->info[header_index];
- hd_info->frag_page = NULL;
+ dma_info->addr = ALIGN_DOWN(addr, PAGE_SIZE);
+ mlx5e_page_release_fragmented(rq, dma_info->frag_page);
}
+ clear_bit(header_index, shampo->bitmap);
+}
- if (start + len > hd_per_wq) {
- len -= hd_per_wq - start;
- bitmap_clear(shampo->bitmap, start, hd_per_wq - start);
- start = 0;
- }
+void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq)
+{
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ int i;
- bitmap_clear(shampo->bitmap, start, len);
+ for_each_set_bit(i, shampo->bitmap, rq->mpwqe.shampo->hd_per_wq)
+ mlx5e_free_rx_shampo_hd_entry(rq, i);
}
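
Replacing the range-based dealloc with for_each_set_bit lets close-time cleanup visit only header entries that are actually still in use, instead of walking a window and testing each index. A userspace equivalent of iterating the set bits of a word-based bitmap (the real for_each_set_bit uses find_next_bit to skip zero runs efficiently):

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

/* Sketch of for_each_set_bit: visit only the set bits of a bitmap. */
static void visit_set_bits(const unsigned long *bitmap, int nbits,
			   void (*fn)(int))
{
	for (int i = 0; i < nbits; i++)
		if (bitmap[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
			fn(i);
}

static void free_entry(int idx)
{
	printf("freeing header entry %d\n", idx);
}

int main(void)
{
	unsigned long bitmap[1] = { 0 };

	bitmap[0] |= 1UL << 3;	/* pretend entries 3 and 7 are in use */
	bitmap[0] |= 1UL << 7;
	visit_set_bits(bitmap, BITS_PER_LONG, free_entry);
	return 0;
}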
static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
@@ -1191,9 +1182,8 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
check = csum_partial(tcp, tcp->doff * 4,
csum_unfold((__force __sum16)cqe->check_sum));
/* Almost done, don't forget the pseudo header */
- tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
- tot_len - sizeof(struct iphdr),
- IPPROTO_TCP, check);
+ tcp->check = tcp_v4_check(tot_len - sizeof(struct iphdr),
+ ipv4->saddr, ipv4->daddr, check);
} else {
u16 payload_len = tot_len - sizeof(struct ipv6hdr);
struct ipv6hdr *ipv6 = ip_p;
@@ -1208,8 +1198,8 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
check = csum_partial(tcp, tcp->doff * 4,
csum_unfold((__force __sum16)cqe->check_sum));
/* Almost done, don't forget the pseudo header */
- tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
- IPPROTO_TCP, check);
+ tcp->check = tcp_v6_check(payload_len, &ipv6->saddr,
+ &ipv6->daddr, check);
}
}
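
The checksum change above is a readability cleanup: tcp_v4_check() and tcp_v6_check() are thin wrappers that pin the protocol argument to IPPROTO_TCP, so the computed pseudo-header checksum is unchanged. A sketch of the wrapper shape, with kernel types replaced by userspace stand-ins so it compiles on its own (the real checksum arithmetic lives in the kernel and is stubbed out here):

#include <stdint.h>

/* Stand-ins for kernel types, for illustration only. */
typedef uint16_t __sum16;
typedef uint32_t __be32;
typedef uint32_t __wsum;
#define IPPROTO_TCP 6

/* Stub for the real pseudo-header checksum routine. */
static __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
				 unsigned short len, unsigned char proto,
				 __wsum base)
{
	(void)saddr; (void)daddr; (void)len; (void)proto; (void)base;
	return 0;	/* the real arithmetic is in the kernel */
}

/* The wrapper merely fixes proto to IPPROTO_TCP; that is the whole cleanup. */
static __sum16 tcp_v4_check(int len, __be32 saddr, __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

int main(void)
{
	return tcp_v4_check(0, 0, 0, 0);
}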
@@ -1612,9 +1602,7 @@ static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5e_rq_stats *stats = rq->stats;
stats->packets++;
- stats->gro_packets++;
stats->bytes += cqe_bcnt;
- stats->gro_bytes += cqe_bcnt;
if (NAPI_GRO_CB(skb)->count != 1)
return;
mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
@@ -1964,30 +1952,24 @@ const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
#endif
static void
-mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
- struct mlx5e_frag_page *frag_page,
- u32 data_bcnt, u32 data_offset)
+mlx5e_shampo_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
+ struct mlx5e_frag_page *frag_page,
+ u32 data_bcnt, u32 data_offset)
{
net_prefetchw(skb->data);
- while (data_bcnt) {
+ do {
/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt);
- unsigned int truesize;
-
- if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
- truesize = pg_consumed_bytes;
- else
- truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
+ unsigned int truesize = pg_consumed_bytes;
- frag_page->frags++;
- mlx5e_add_skb_frag(rq, skb, frag_page->page, data_offset,
+ mlx5e_add_skb_frag(rq, skb, frag_page, data_offset,
pg_consumed_bytes, truesize);
data_bcnt -= pg_consumed_bytes;
data_offset = 0;
frag_page++;
- }
+ } while (data_bcnt);
}
static struct sk_buff *
@@ -2212,8 +2194,8 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
/* build SKB around header */
dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir);
- prefetchw(hdr);
- prefetch(data);
+ net_prefetchw(hdr);
+ net_prefetch(data);
skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
if (unlikely(!skb))
@@ -2230,7 +2212,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return NULL;
}
- prefetchw(skb->data);
+ net_prefetchw(skb->data);
mlx5e_copy_skb_header(rq, skb, head->frag_page->page, head->addr,
head_offset + rx_headroom,
rx_headroom, head_size);
@@ -2261,12 +2243,19 @@ mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
{
struct sk_buff *skb = rq->hw_gro_data->skb;
struct mlx5e_rq_stats *stats = rq->stats;
+ u16 gro_count = NAPI_GRO_CB(skb)->count;
- stats->gro_skbs++;
if (likely(skb_shinfo(skb)->nr_frags))
mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz);
- if (NAPI_GRO_CB(skb)->count > 1)
+ if (gro_count > 1) {
+ stats->gro_skbs++;
+ stats->gro_packets += gro_count;
+ stats->gro_bytes += skb->data_len + skb_headlen(skb) * gro_count;
+
mlx5e_shampo_update_hdr(rq, cqe, match);
+ } else {
+ skb_shinfo(skb)->gso_size = 0;
+ }
napi_gro_receive(rq->cq.napi, skb);
rq->hw_gro_data->skb = NULL;
}
@@ -2279,21 +2268,6 @@ mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
}
-static void
-mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
-{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u64 addr = shampo->info[header_index].addr;
-
- if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
- struct mlx5e_dma_info *dma_info = &shampo->info[header_index];
-
- dma_info->addr = ALIGN_DOWN(addr, PAGE_SIZE);
- mlx5e_page_release_fragmented(rq, dma_info->frag_page);
- }
- bitmap_clear(shampo->bitmap, header_index, 1);
-}
-
static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
u16 data_bcnt = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size;
@@ -2327,8 +2301,6 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
goto mpwrq_cqe_out;
}
- stats->gro_match_packets += match;
-
if (*skb && (!match || !(mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt)))) {
match = false;
mlx5e_shampo_flush_skb(rq, cqe, match);
@@ -2359,21 +2331,30 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
}
if (likely(head_size)) {
- struct mlx5e_frag_page *frag_page;
+ if (data_bcnt) {
+ struct mlx5e_frag_page *frag_page;
- frag_page = &wi->alloc_units.frag_pages[page_idx];
- mlx5e_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
+ frag_page = &wi->alloc_units.frag_pages[page_idx];
+ mlx5e_shampo_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
+ } else {
+ stats->hds_nodata_packets++;
+ stats->hds_nodata_bytes += head_size;
+ }
}
mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
- if (flush)
+ if (flush && rq->hw_gro_data->skb)
mlx5e_shampo_flush_skb(rq, cqe, match);
free_hd_entry:
- mlx5e_free_rx_shampo_hd_entry(rq, header_index);
+ if (likely(head_size))
+ mlx5e_free_rx_shampo_hd_entry(rq, header_index);
mpwrq_cqe_out:
if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
return;
+ if (unlikely(!cstrides))
+ return;
+
wq = &rq->mpwqe.wq;
wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index e1ed214e8651..e7a3290a708a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -141,8 +141,9 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_match_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
@@ -343,8 +344,9 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_gro_packets += rq_stats->gro_packets;
s->rx_gro_bytes += rq_stats->gro_bytes;
s->rx_gro_skbs += rq_stats->gro_skbs;
- s->rx_gro_match_packets += rq_stats->gro_match_packets;
s->rx_gro_large_hds += rq_stats->gro_large_hds;
+ s->rx_hds_nodata_packets += rq_stats->hds_nodata_packets;
+ s->rx_hds_nodata_bytes += rq_stats->hds_nodata_bytes;
s->rx_ecn_mark += rq_stats->ecn_mark;
s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
s->rx_csum_none += rq_stats->csum_none;
@@ -2057,8 +2059,9 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_match_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 650732288616..4c5858c1dd82 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -153,8 +153,9 @@ struct mlx5e_sw_stats {
u64 rx_gro_packets;
u64 rx_gro_bytes;
u64 rx_gro_skbs;
- u64 rx_gro_match_packets;
u64 rx_gro_large_hds;
+ u64 rx_hds_nodata_packets;
+ u64 rx_hds_nodata_bytes;
u64 rx_mcast_packets;
u64 rx_ecn_mark;
u64 rx_removed_vlan_packets;
@@ -352,8 +353,9 @@ struct mlx5e_rq_stats {
u64 gro_packets;
u64 gro_bytes;
u64 gro_skbs;
- u64 gro_match_packets;
u64 gro_large_hds;
+ u64 hds_nodata_packets;
+ u64 hds_nodata_bytes;
u64 mcast_packets;
u64 ecn_mark;
u64 removed_vlan_packets;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index ac1565c0c8af..cb7e7e4104af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -714,7 +714,7 @@ err2:
err1:
mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
- mlx5_ctrl_irq_release(table->ctrl_irq);
+ mlx5_ctrl_irq_release(dev, table->ctrl_irq);
return err;
}
@@ -730,7 +730,7 @@ static void destroy_async_eqs(struct mlx5_core_dev *dev)
cleanup_async_eq(dev, &table->cmd_eq, "cmd");
mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
- mlx5_ctrl_irq_release(table->ctrl_irq);
+ mlx5_ctrl_irq_release(dev, table->ctrl_irq);
}
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
@@ -918,7 +918,7 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
af_desc.is_managed = 1;
cpumask_copy(&af_desc.mask, cpu_online_mask);
cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
- irq = mlx5_irq_affinity_request(pool, &af_desc);
+ irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
if (IS_ERR(irq))
return PTR_ERR(irq);
@@ -1187,7 +1187,6 @@ static int get_num_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *eq_table = dev->priv.eq_table;
int max_dev_eqs;
- int max_eqs_sf;
int num_eqs;
/* If ethernet is disabled we use just a single completion vector to
@@ -1202,7 +1201,11 @@ static int get_num_eqs(struct mlx5_core_dev *dev)
num_eqs = min_t(int, mlx5_irq_table_get_num_comp(eq_table->irq_table),
max_dev_eqs - MLX5_MAX_ASYNC_EQS);
if (mlx5_core_is_sf(dev)) {
- max_eqs_sf = min_t(int, MLX5_COMP_EQS_PER_SF,
+ int max_eqs_sf = MLX5_CAP_GEN_2(dev, sf_eq_usage) ?
+ MLX5_CAP_GEN_2(dev, max_num_eqs_24b) :
+ MLX5_COMP_EQS_PER_SF;
+
+ max_eqs_sf = min_t(int, max_eqs_sf,
mlx5_irq_table_get_sfs_vec(eq_table->irq_table));
num_eqs = min_t(int, num_eqs, max_eqs_sf);
}
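
For sub-functions, the EQ budget now honors the 24-bit max_num_eqs capability when firmware advertises sf_eq_usage, falling back to the compile-time per-SF default otherwise, and is still clamped by the IRQ vectors actually reserved for SFs. The selection logic modelled standalone; the capability values below are hypothetical:

#include <stdio.h>

#define MLX5_COMP_EQS_PER_SF 8	/* illustrative default, see the driver */

static int min_int(int a, int b) { return a < b ? a : b; }

/* Sketch: pick the SF completion-EQ budget from firmware capabilities. */
static int sf_max_eqs(int sf_eq_usage, int max_num_eqs_24b, int sfs_vec)
{
	int max = sf_eq_usage ? max_num_eqs_24b : MLX5_COMP_EQS_PER_SF;

	return min_int(max, sfs_vec);	/* never exceed reserved IRQ vectors */
}

int main(void)
{
	printf("%d\n", sf_max_eqs(1, 24, 16));	/* cap honored, clamped to 16 */
	printf("%d\n", sf_max_eqs(0, 24, 16));	/* fallback default: 8 */
	return 0;
}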
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index d2ebe56c3977..20146a2dc7f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -531,7 +531,7 @@ static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
switch (type) {
case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
return MLX5_CAP_QOS(dev, esw_element_type) &
- ELEMENT_TYPE_CAP_MASK_TASR;
+ ELEMENT_TYPE_CAP_MASK_TSAR;
case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
return MLX5_CAP_QOS(dev, esw_element_type) &
ELEMENT_TYPE_CAP_MASK_VPORT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 88745dc6aed5..578466d69f21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -223,6 +223,7 @@ struct mlx5_vport {
u16 vport;
bool enabled;
+ bool max_eqs_set;
enum mlx5_eswitch_vport_event enabled_events;
int index;
struct mlx5_devlink_port *dl_port;
@@ -579,6 +580,8 @@ int mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port,
int mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port,
u32 max_io_eqs,
struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_max_io_eqs_set_sf_default(struct devlink_port *port,
+ struct netlink_ext_ack *extack);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 72949cb85244..768199d2255a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -68,6 +68,7 @@
#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
#define MLX5_ESW_MAX_CTRL_EQS 4
+#define MLX5_ESW_DEFAULT_SF_COMP_EQS 8
static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
@@ -4676,13 +4677,25 @@ mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port, u32 max_io_eqs,
hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
MLX5_SET(cmd_hca_cap_2, hca_caps, max_num_eqs_24b, max_eqs);
+ if (mlx5_esw_is_sf_vport(esw, vport_num))
+ MLX5_SET(cmd_hca_cap_2, hca_caps, sf_eq_usage, 1);
+
err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2);
if (err)
NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA caps");
-
+ vport->max_eqs_set = true;
out:
mutex_unlock(&esw->state_lock);
kfree(query_ctx);
return err;
}
+
+int
+mlx5_devlink_port_fn_max_io_eqs_set_sf_default(struct devlink_port *port,
+ struct netlink_ext_ack *extack)
+{
+ return mlx5_devlink_port_fn_max_io_eqs_set(port,
+ MLX5_ESW_DEFAULT_SF_COMP_EQS,
+ extack);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 32cdacc34a0d..a47d6419160d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -3353,9 +3353,9 @@ static int mlx5_fs_mode_get(struct devlink *devlink, u32 id,
struct mlx5_core_dev *dev = devlink_priv(devlink);
if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS)
- strcpy(ctx->val.vstr, "smfs");
+ strscpy(ctx->val.vstr, "smfs", sizeof(ctx->val.vstr));
else
- strcpy(ctx->val.vstr, "dmfs");
+ strscpy(ctx->val.vstr, "dmfs", sizeof(ctx->val.vstr));
return 0;
}
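
strscpy() is the preferred bounded replacement for strcpy(): it never writes past the destination, always NUL-terminates, and returns the copied length or -E2BIG on truncation. A minimal userspace rendition of those semantics (the kernel version is optimized; this is only a behavioral sketch):

#include <stdio.h>
#include <string.h>

#define E2BIG_ERR (-7)

/* Sketch of strscpy() semantics. */
static long my_strscpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (!size)
		return E2BIG_ERR;
	if (len >= size) {
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return E2BIG_ERR;	/* truncated, but still terminated */
	}
	memcpy(dst, src, len + 1);
	return (long)len;
}

int main(void)
{
	char buf[5];

	printf("%ld %s\n", my_strscpy(buf, "smfs", sizeof(buf)), buf);
	printf("%ld %s\n", my_strscpy(buf, "toolong", sizeof(buf)), buf);
	return 0;
}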
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 779d92b762d3..905bdbaffb9a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -136,7 +136,7 @@ static int mlx5i_get_coalesce(struct net_device *netdev,
}
static int mlx5i_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 8e0404c0d1ca..0979d672d47f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -372,7 +372,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
mlx5e_fs_set_ns(priv->fs, ns, false);
err = mlx5e_arfs_create_tables(priv->fs, priv->rx_res,
- !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
+ mlx5e_fs_has_arfs(priv->netdev));
if (err) {
netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
err);
@@ -391,8 +391,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
return 0;
err_destroy_arfs_tables:
- mlx5e_arfs_destroy_tables(priv->fs,
- !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
+ mlx5e_arfs_destroy_tables(priv->fs, mlx5e_fs_has_arfs(priv->netdev));
return err;
}
@@ -400,8 +399,7 @@ err_destroy_arfs_tables:
static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
mlx5e_destroy_ttc_table(priv->fs);
- mlx5e_arfs_destroy_tables(priv->fs,
- !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
+ mlx5e_arfs_destroy_tables(priv->fs, mlx5e_fs_has_arfs(priv->netdev));
mlx5e_ethtool_cleanup_steering(priv->fs);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
index 612e666ec263..f7b01b3f0cba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -112,15 +112,18 @@ irq_pool_find_least_loaded(struct mlx5_irq_pool *pool, const struct cpumask *req
/**
* mlx5_irq_affinity_request - request an IRQ according to the given mask.
+ * @dev: mlx5 core device which is requesting the IRQ.
* @pool: IRQ pool to request from.
* @af_desc: affinity descriptor for this IRQ.
*
* This function returns a pointer to IRQ, or ERR_PTR in case of error.
*/
struct mlx5_irq *
-mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
+mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool,
+ struct irq_affinity_desc *af_desc)
{
struct mlx5_irq *least_loaded_irq, *new_irq;
+ int ret;
mutex_lock(&pool->lock);
least_loaded_irq = irq_pool_find_least_loaded(pool, &af_desc->mask);
@@ -153,6 +156,16 @@ out:
mlx5_irq_read_locked(least_loaded_irq) / MLX5_EQ_REFS_PER_IRQ);
unlock:
mutex_unlock(&pool->lock);
+ if (mlx5_irq_pool_is_sf_pool(pool)) {
+ ret = auxiliary_device_sysfs_irq_add(mlx5_sf_coredev_to_adev(dev),
+ mlx5_irq_get_irq(least_loaded_irq));
+ if (ret) {
+ mlx5_core_err(dev, "Failed to create sysfs entry for irq %d, ret = %d\n",
+ mlx5_irq_get_irq(least_loaded_irq), ret);
+ mlx5_irq_put(least_loaded_irq);
+ least_loaded_irq = ERR_PTR(ret);
+ }
+ }
return least_loaded_irq;
}
@@ -164,6 +177,9 @@ void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *i
cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
synchronize_irq(pci_irq_vector(pool->dev->pdev,
mlx5_irq_get_index(irq)));
+ if (mlx5_irq_pool_is_sf_pool(pool))
+ auxiliary_device_sysfs_irq_remove(mlx5_sf_coredev_to_adev(dev),
+ mlx5_irq_get_irq(irq));
if (mlx5_irq_put(irq))
if (pool->irqs_per_cpu)
cpu_put(pool, cpu);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 459a836a5d9c..527da58c7953 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1819,6 +1819,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
mutex_init(&dev->intf_state_mutex);
lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key);
mutex_init(&dev->mlx5e_res.uplink_netdev_lock);
+ mutex_init(&dev->wc_state_lock);
mutex_init(&priv->bfregs.reg_head.lock);
mutex_init(&priv->bfregs.wc_head.lock);
@@ -1916,6 +1917,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
mutex_destroy(&priv->alloc_mutex);
mutex_destroy(&priv->bfregs.wc_head.lock);
mutex_destroy(&priv->bfregs.reg_head.lock);
+ mutex_destroy(&dev->wc_state_lock);
mutex_destroy(&dev->mlx5e_res.uplink_netdev_lock);
mutex_destroy(&dev->intf_state_mutex);
lockdep_unregister_key(&dev->lock_key);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index a7fd18888b6e..62c770b0eaa8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -320,6 +320,12 @@ static inline bool mlx5_core_is_sf(const struct mlx5_core_dev *dev)
return dev->coredev_type == MLX5_COREDEV_SF;
}
+static inline struct auxiliary_device *
+mlx5_sf_coredev_to_adev(struct mlx5_core_dev *mdev)
+{
+ return container_of(mdev->device, struct auxiliary_device, dev);
+}
+
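
mlx5_sf_coredev_to_adev() leans on the classic container_of idiom: given a pointer to an embedded member (here, the struct device inside an auxiliary_device), recover the enclosing structure by subtracting the member's offset. A self-contained illustration with simplified stand-in types (the kernel macro additionally type-checks via typeof):

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of, as used by mlx5_sf_coredev_to_adev. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device { int id; };

struct auxiliary_device {	/* simplified stand-in for the kernel struct */
	int irq_count;
	struct device dev;	/* embedded member we hold a pointer to */
};

int main(void)
{
	struct auxiliary_device adev = { .irq_count = 3, .dev = { .id = 7 } };
	struct device *d = &adev.dev;

	/* Recover the outer structure from the embedded pointer. */
	struct auxiliary_device *back =
		container_of(d, struct auxiliary_device, dev);

	printf("irq_count=%d\n", back->irq_count);
	return 0;
}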
int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx);
void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
int mlx5_init_one(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
index 1088114e905d..0881e961d8b1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -25,7 +25,7 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int devfn,
int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs);
struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev);
-void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq);
+void mlx5_ctrl_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *ctrl_irq);
struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
struct irq_affinity_desc *af_desc,
struct cpu_rmap **rmap);
@@ -36,13 +36,15 @@ int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq);
int mlx5_irq_get_index(struct mlx5_irq *irq);
+int mlx5_irq_get_irq(const struct mlx5_irq *irq);
struct mlx5_irq_pool;
#ifdef CONFIG_MLX5_SF
struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
struct cpumask *used_cpus, u16 vecidx);
-struct mlx5_irq *mlx5_irq_affinity_request(struct mlx5_irq_pool *pool,
- struct irq_affinity_desc *af_desc);
+struct mlx5_irq *
+mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool,
+ struct irq_affinity_desc *af_desc);
void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq);
#else
static inline
@@ -53,7 +55,8 @@ struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
}
static inline struct mlx5_irq *
-mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
+mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool,
+ struct irq_affinity_desc *af_desc)
{
return ERR_PTR(-EOPNOTSUPP);
}
@@ -61,6 +64,7 @@ mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *
static inline
void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
{
+ mlx5_irq_release_vector(irq);
}
#endif
#endif /* __MLX5_IRQ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 401d39069680..81a9232a03e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -16,6 +16,7 @@
#endif
#define MLX5_SFS_PER_CTRL_IRQ 64
+#define MLX5_MAX_MSIX_PER_SF 256
#define MLX5_IRQ_CTRL_SF_MAX 8
/* min num of vectors for SFs to be enabled */
#define MLX5_IRQ_VEC_COMP_BASE_SF 2
@@ -367,6 +368,11 @@ struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)
return irq->mask;
}
+int mlx5_irq_get_irq(const struct mlx5_irq *irq)
+{
+ return irq->map.virq;
+}
+
int mlx5_irq_get_index(struct mlx5_irq *irq)
{
return irq->map.index;
@@ -440,11 +446,12 @@ static void _mlx5_irq_release(struct mlx5_irq *irq)
/**
* mlx5_ctrl_irq_release - release a ctrl IRQ back to the system.
+ * @dev: mlx5 device that is releasing the IRQ.
* @ctrl_irq: ctrl IRQ to be released.
*/
-void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq)
+void mlx5_ctrl_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *ctrl_irq)
{
- _mlx5_irq_release(ctrl_irq);
+ mlx5_irq_affinity_irq_release(dev, ctrl_irq);
}
/**
@@ -473,7 +480,7 @@ struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
/* Allocate the IRQ in index 0. The vector was already allocated */
irq = irq_pool_request_vector(pool, 0, &af_desc, NULL);
} else {
- irq = mlx5_irq_affinity_request(pool, &af_desc);
+ irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
}
return irq;
@@ -589,8 +596,6 @@ static void irq_pool_free(struct mlx5_irq_pool *pool)
static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec)
{
struct mlx5_irq_table *table = dev->priv.irq_table;
- int num_sf_ctrl_by_msix;
- int num_sf_ctrl_by_sfs;
int num_sf_ctrl;
int err;
@@ -608,10 +613,8 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec)
}
/* init sf_ctrl_pool */
- num_sf_ctrl_by_msix = DIV_ROUND_UP(sf_vec, MLX5_COMP_EQS_PER_SF);
- num_sf_ctrl_by_sfs = DIV_ROUND_UP(mlx5_sf_max_functions(dev),
- MLX5_SFS_PER_CTRL_IRQ);
- num_sf_ctrl = min_t(int, num_sf_ctrl_by_msix, num_sf_ctrl_by_sfs);
+ num_sf_ctrl = DIV_ROUND_UP(mlx5_sf_max_functions(dev),
+ MLX5_SFS_PER_CTRL_IRQ);
num_sf_ctrl = min_t(int, MLX5_IRQ_CTRL_SF_MAX, num_sf_ctrl);
table->sf_ctrl_pool = irq_pool_alloc(dev, pcif_vec, num_sf_ctrl,
"mlx5_sf_ctrl",
@@ -726,8 +729,7 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev)
total_vec = pcif_vec;
if (mlx5_sf_max_functions(dev))
- total_vec += MLX5_IRQ_CTRL_SF_MAX +
- MLX5_COMP_EQS_PER_SF * mlx5_sf_max_functions(dev);
+ total_vec += MLX5_MAX_MSIX_PER_SF * mlx5_sf_max_functions(dev);
total_vec = min_t(int, total_vec, pci_msix_vec_count(dev->pdev));
pcif_vec = min_t(int, pcif_vec, pci_msix_vec_count(dev->pdev));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index 6c11e075cab0..a96be98be032 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -161,6 +161,7 @@ int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port,
static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
struct netlink_ext_ack *extack)
{
+ struct mlx5_vport *vport;
int err;
if (mlx5_sf_is_active(sf))
@@ -170,6 +171,13 @@ static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
return -EBUSY;
}
+ vport = mlx5_devlink_port_vport_get(&sf->dl_port.dl_port);
+ if (!vport->max_eqs_set && MLX5_CAP_GEN_2(dev, max_num_eqs_24b)) {
+ err = mlx5_devlink_port_fn_max_io_eqs_set_sf_default(&sf->dl_port.dl_port,
+ extack);
+ if (err)
+ return err;
+ }
err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id);
if (err)
return err;
@@ -318,7 +326,11 @@ int mlx5_devlink_sf_port_new(struct devlink *devlink,
static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
+ struct mlx5_vport *vport;
+
mutex_lock(&table->sf_state_lock);
+ vport = mlx5_devlink_port_vport_get(&sf->dl_port.dl_port);
+ vport->max_eqs_set = false;
mlx5_sf_function_id_erase(table, sf);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 81eff6c410ce..7618c6147f86 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -1379,6 +1379,11 @@ int mlx5dr_cmd_create_modify_header_arg(struct mlx5_core_dev *dev,
void mlx5dr_cmd_destroy_modify_header_arg(struct mlx5_core_dev *dev,
u32 obj_id);
+int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id,
+ u8 *dw_selectors, u8 *byte_selectors,
+ u8 *match_mask, u32 *definer_id);
+void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id);
+
struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
enum mlx5dr_icm_type icm_type);
void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 89fced86936f..3ac7dc67509f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -153,11 +153,6 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action);
u32 mlx5dr_action_get_pkt_reformat_id(struct mlx5dr_action *action);
-int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id,
- u8 *dw_selectors, u8 *byte_selectors,
- u8 *match_mask, u32 *definer_id);
-void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id);
-
static inline bool
mlx5dr_is_supported(struct mlx5_core_dev *dev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wc.c b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
new file mode 100644
index 000000000000..1bed75eca97d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/io.h>
+#include <linux/mlx5/transobj.h>
+#include "lib/clock.h"
+#include "mlx5_core.h"
+#include "wq.h"
+
+#define TEST_WC_NUM_WQES 255
+#define TEST_WC_LOG_CQ_SZ (order_base_2(TEST_WC_NUM_WQES))
+#define TEST_WC_SQ_LOG_WQ_SZ TEST_WC_LOG_CQ_SZ
+#define TEST_WC_POLLING_MAX_TIME_JIFFIES msecs_to_jiffies(100)
+
+struct mlx5_wc_cq {
+ /* data path - accessed per cqe */
+ struct mlx5_cqwq wq;
+
+ /* data path - accessed per napi poll */
+ struct mlx5_core_cq mcq;
+
+ /* control */
+ struct mlx5_core_dev *mdev;
+ struct mlx5_wq_ctrl wq_ctrl;
+};
+
+struct mlx5_wc_sq {
+ /* data path */
+ u16 cc;
+ u16 pc;
+
+ /* read only */
+ struct mlx5_wq_cyc wq;
+ u32 sqn;
+
+ /* control path */
+ struct mlx5_wq_ctrl wq_ctrl;
+
+ struct mlx5_wc_cq cq;
+ struct mlx5_sq_bfreg bfreg;
+};
+
+static int mlx5_wc_create_cqwq(struct mlx5_core_dev *mdev, void *cqc,
+ struct mlx5_wc_cq *cq)
+{
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ struct mlx5_wq_param param = {};
+ int err;
+ u32 i;
+
+ err = mlx5_cqwq_create(mdev, &param, cqc, &cq->wq, &cq->wq_ctrl);
+ if (err)
+ return err;
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+
+ for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+
+ cqe->op_own = 0xf1;
+ }
+
+ cq->mdev = mdev;
+
+ return 0;
+}
+
+static int create_wc_cq(struct mlx5_wc_cq *cq, void *cqc_data)
+{
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ struct mlx5_core_dev *mdev = cq->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ int err, inlen, eqn;
+ void *in, *cqc;
+
+ err = mlx5_comp_eqn_get(mdev, 0, &eqn);
+ if (err)
+ return err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+
+ memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));
+
+ mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+ MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+ err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5_wc_create_cq(struct mlx5_core_dev *mdev, struct mlx5_wc_cq *cq)
+{
+ void *cqc;
+ int err;
+
+ cqc = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
+ if (!cqc)
+ return -ENOMEM;
+
+ MLX5_SET(cqc, cqc, log_cq_size, TEST_WC_LOG_CQ_SZ);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
+ MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
+
+ err = mlx5_wc_create_cqwq(mdev, cqc, cq);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create wc cq wq, err=%d\n", err);
+ goto err_create_cqwq;
+ }
+
+ err = create_wc_cq(cq, cqc);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create wc cq, err=%d\n", err);
+ goto err_create_cq;
+ }
+
+ kvfree(cqc);
+ return 0;
+
+err_create_cq:
+ mlx5_wq_destroy(&cq->wq_ctrl);
+err_create_cqwq:
+ kvfree(cqc);
+ return err;
+}
+
+static void mlx5_wc_destroy_cq(struct mlx5_wc_cq *cq)
+{
+ mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int create_wc_sq(struct mlx5_core_dev *mdev, void *sqc_data,
+ struct mlx5_wc_sq *sq)
+{
+ void *in, *sqc, *wq;
+ int inlen, err;
+ u8 ts_format;
+
+ inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+ sizeof(u64) * sq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
+ MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
+
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+
+ ts_format = mlx5_is_real_time_sq(mdev) ?
+ MLX5_TIMESTAMP_FORMAT_REAL_TIME :
+ MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
+ MLX5_SET(sqc, sqc, ts_format, ts_format);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, uar_page, sq->bfreg.index);
+ MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create wc sq, err=%d\n", err);
+ goto err_create_sq;
+ }
+
+ memset(in, 0, MLX5_ST_SZ_BYTES(modify_sq_in));
+ MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
+
+ err = mlx5_core_modify_sq(mdev, sq->sqn, in);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to set wc sq(sqn=0x%x) ready, err=%d\n",
+ sq->sqn, err);
+ goto err_modify_sq;
+ }
+
+ kvfree(in);
+ return 0;
+
+err_modify_sq:
+ mlx5_core_destroy_sq(mdev, sq->sqn);
+err_create_sq:
+ kvfree(in);
+ return err;
+}
+
+static int mlx5_wc_create_sq(struct mlx5_core_dev *mdev, struct mlx5_wc_sq *sq)
+{
+ struct mlx5_wq_param param = {};
+ void *sqc_data, *wq;
+ int err;
+
+ sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
+ if (!sqc_data)
+ return -ENOMEM;
+
+ wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
+ MLX5_SET(wq, wq, log_wq_sz, TEST_WC_SQ_LOG_WQ_SZ);
+
+ err = mlx5_wq_cyc_create(mdev, &param, wq, &sq->wq, &sq->wq_ctrl);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create wc sq wq, err=%d\n", err);
+ goto err_create_wq_cyc;
+ }
+
+ err = create_wc_sq(mdev, sqc_data, sq);
+ if (err)
+ goto err_create_sq;
+
+ mlx5_core_dbg(mdev, "wc sq->sqn = 0x%x created\n", sq->sqn);
+
+ kvfree(sqc_data);
+ return 0;
+
+err_create_sq:
+ mlx5_wq_destroy(&sq->wq_ctrl);
+err_create_wq_cyc:
+ kvfree(sqc_data);
+ return err;
+}
+
+static void mlx5_wc_destroy_sq(struct mlx5_wc_sq *sq)
+{
+ mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn);
+ mlx5_wq_destroy(&sq->wq_ctrl);
+}
+
+static void mlx5_wc_post_nop(struct mlx5_wc_sq *sq, bool signaled)
+{
+ int buf_size = (1 << MLX5_CAP_GEN(sq->cq.mdev, log_bf_reg_size)) / 2;
+ struct mlx5_wqe_ctrl_seg *ctrl;
+ __be32 mmio_wqe[16] = {};
+ u16 pi;
+
+ pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
+ memset(ctrl, 0, sizeof(*ctrl));
+ ctrl->opmod_idx_opcode =
+ cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_NOP);
+ ctrl->qpn_ds =
+ cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
+ DIV_ROUND_UP(sizeof(struct mlx5_wqe_ctrl_seg), MLX5_SEND_WQE_DS));
+ if (signaled)
+ ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
+
+ memcpy(mmio_wqe, ctrl, sizeof(*ctrl));
+ ((struct mlx5_wqe_ctrl_seg *)&mmio_wqe)->fm_ce_se |=
+ MLX5_WQE_CTRL_CQ_UPDATE;
+
+ /* ensure wqe is visible to device before updating doorbell record */
+ dma_wmb();
+
+ sq->pc++;
+ sq->wq.db[MLX5_SND_DBR] = cpu_to_be32(sq->pc);
+
+ /* ensure doorbell record is visible to device before ringing the
+ * doorbell
+ */
+ wmb();
+
+ __iowrite64_copy(sq->bfreg.map + sq->bfreg.offset, mmio_wqe,
+ sizeof(mmio_wqe) / 8);
+
+ sq->bfreg.offset ^= buf_size;
+}
+
+static int mlx5_wc_poll_cq(struct mlx5_wc_sq *sq)
+{
+ struct mlx5_wc_cq *cq = &sq->cq;
+ struct mlx5_cqe64 *cqe;
+
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+ if (!cqe)
+ return -ETIMEDOUT;
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ mlx5_cqwq_pop(&cq->wq);
+
+ if (get_cqe_opcode(cqe) == MLX5_CQE_REQ) {
+ int wqe_counter = be16_to_cpu(cqe->wqe_counter);
+ struct mlx5_core_dev *mdev = cq->mdev;
+
+ if (wqe_counter == TEST_WC_NUM_WQES - 1)
+ mdev->wc_state = MLX5_WC_STATE_UNSUPPORTED;
+ else
+ mdev->wc_state = MLX5_WC_STATE_SUPPORTED;
+
+ mlx5_core_dbg(mdev, "wc wqe_counter = 0x%x\n", wqe_counter);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ sq->cc++;
+
+ return 0;
+}
+
+static void mlx5_core_test_wc(struct mlx5_core_dev *mdev)
+{
+ unsigned long expires;
+ struct mlx5_wc_sq *sq;
+ int i, err;
+
+ if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
+ return;
+
+ sq = kzalloc(sizeof(*sq), GFP_KERNEL);
+ if (!sq)
+ return;
+
+ err = mlx5_alloc_bfreg(mdev, &sq->bfreg, true, false);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to alloc bfreg for wc, err=%d\n", err);
+ goto err_alloc_bfreg;
+ }
+
+ err = mlx5_wc_create_cq(mdev, &sq->cq);
+ if (err)
+ goto err_create_cq;
+
+ err = mlx5_wc_create_sq(mdev, sq);
+ if (err)
+ goto err_create_sq;
+
+ for (i = 0; i < TEST_WC_NUM_WQES - 1; i++)
+ mlx5_wc_post_nop(sq, false);
+
+ mlx5_wc_post_nop(sq, true);
+
+ expires = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES;
+ do {
+ err = mlx5_wc_poll_cq(sq);
+ if (err)
+ usleep_range(2, 10);
+ } while (mdev->wc_state == MLX5_WC_STATE_UNINITIALIZED &&
+ time_is_after_jiffies(expires));
+
+ mlx5_wc_destroy_sq(sq);
+
+err_create_sq:
+ mlx5_wc_destroy_cq(&sq->cq);
+err_create_cq:
+ mlx5_free_bfreg(mdev, &sq->bfreg);
+err_alloc_bfreg:
+ kfree(sq);
+}
+
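
The test's verdict hinges on which completion arrives first. Every BlueFlame MMIO copy requests a CQE, but only the final WQE requests one through the doorbell record in host memory; so if the first CQE reports the last wqe_counter, none of the write-combined copies reached the device and WC is deemed unsupported. A sketch of that classification, which reflects my reading of the code above:

#include <stdio.h>

#define TEST_WC_NUM_WQES 255

enum wc_state { WC_UNINITIALIZED, WC_UNSUPPORTED, WC_SUPPORTED };

/* Sketch: classify WC support from the first completed wqe_counter. */
static enum wc_state classify(int wqe_counter)
{
	/* Only the last WQE is signaled via the doorbell record; any
	 * earlier completion can only come from a BlueFlame MMIO copy.
	 */
	return wqe_counter == TEST_WC_NUM_WQES - 1 ? WC_UNSUPPORTED
						   : WC_SUPPORTED;
}

int main(void)
{
	printf("%d\n", classify(0));			/* WC works */
	printf("%d\n", classify(TEST_WC_NUM_WQES - 1));	/* WC writes lost */
	return 0;
}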
+bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_core_dev *parent = NULL;
+
+ if (!MLX5_CAP_GEN(mdev, bf)) {
+ mlx5_core_dbg(mdev, "BlueFlame not supported\n");
+ goto out;
+ }
+
+ if (!MLX5_CAP_GEN(mdev, log_max_sq)) {
+ mlx5_core_dbg(mdev, "SQ not supported\n");
+ goto out;
+ }
+
+ if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
+		/* No locking needed: the WC test runs only once for the
+		 * whole device and has already been done.
+		 */
+ goto out;
+
+ mutex_lock(&mdev->wc_state_lock);
+
+ if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
+ goto unlock;
+
+#ifdef CONFIG_MLX5_SF
+ if (mlx5_core_is_sf(mdev))
+ parent = mdev->priv.parent_mdev;
+#endif
+
+ if (parent) {
+ mutex_lock(&parent->wc_state_lock);
+
+ mlx5_core_test_wc(parent);
+
+ mlx5_core_dbg(mdev, "parent set wc_state=%d\n",
+ parent->wc_state);
+ mdev->wc_state = parent->wc_state;
+
+ mutex_unlock(&parent->wc_state_lock);
+ }
+
+ mlx5_core_test_wc(mdev);
+
+unlock:
+ mutex_unlock(&mdev->wc_state_lock);
+out:
+ mlx5_core_dbg(mdev, "wc_state=%d\n", mdev->wc_state);
+
+ return mdev->wc_state == MLX5_WC_STATE_SUPPORTED;
+}
+EXPORT_SYMBOL(mlx5_wc_support_get);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index a510bf2cff2f..74f7e27b490f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -33,6 +33,7 @@ config MLXSW_CORE_THERMAL
config MLXSW_PCI
tristate "PCI bus implementation for Mellanox Technologies Switch ASICs"
depends on PCI && HAS_IOMEM && MLXSW_CORE
+ select PAGE_POOL
default m
help
This is PCI bus implementation for Mellanox Technologies Switch ASICs.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 6c06b0592760..294e758f1067 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -513,6 +513,63 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_env_get_module_eeprom_by_page);
+int
+mlxsw_env_set_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ u32 bytes_written = 0;
+ u16 device_addr;
+ int err;
+
+ if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot write to EEPROM of a module on an inactive line card");
+ return -EIO;
+ }
+
+ err = mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "EEPROM is not equipped on port module type");
+ return err;
+ }
+
+ device_addr = page->offset;
+
+ while (bytes_written < page->length) {
+ char mcia_pl[MLXSW_REG_MCIA_LEN];
+ char eeprom_tmp[128] = {};
+ u8 size;
+
+ size = min_t(u8, page->length - bytes_written,
+ mlxsw_env->max_eeprom_len);
+
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, page->page,
+ device_addr + bytes_written, size,
+ page->i2c_address);
+ mlxsw_reg_mcia_bank_number_set(mcia_pl, page->bank);
+ memcpy(eeprom_tmp, page->data + bytes_written, size);
+ mlxsw_reg_mcia_eeprom_memcpy_to(mcia_pl, eeprom_tmp);
+
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcia), mcia_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to access module's EEPROM");
+ return err;
+ }
+
+ err = mlxsw_env_mcia_status_process(mcia_pl, extack);
+ if (err)
+ return err;
+
+ bytes_written += size;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_env_set_module_eeprom_by_page);
+
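
Because a single MCIA register access moves at most max_eeprom_len bytes, the write above is issued as a chunked loop: take the next min(remaining, max) bytes, advance the device address by what was written, and stop on the first error. The pattern reduced to a standalone loop; the transfer limit is hypothetical:

#include <stdio.h>
#include <string.h>

#define MAX_XFER 48	/* hypothetical per-transaction limit */

/* Sketch of the chunked-write loop used for module EEPROM pages. */
static int write_eeprom(unsigned dev_addr, const char *data, size_t len)
{
	size_t written = 0;

	(void)data;
	while (written < len) {
		size_t size = len - written;

		if (size > MAX_XFER)
			size = MAX_XFER;
		/* a real implementation issues one register write here */
		printf("write %zu bytes at 0x%x\n", size,
		       dev_addr + (unsigned)written);
		written += size;
	}
	return 0;
}

int main(void)
{
	char page[100];

	memset(page, 0xab, sizeof(page));
	return write_eeprom(0x80, page, sizeof(page));
}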
static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 slot_index,
u8 module)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
index a197e3ae069c..e4ff17869400 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
@@ -28,6 +28,12 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
const struct ethtool_module_eeprom *page,
struct netlink_ext_ack *extack);
+int
+mlxsw_env_set_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack);
+
int mlxsw_env_reset_module(struct net_device *netdev,
struct mlxsw_core *mlxsw_core, u8 slot_index,
u8 module, u32 *flags);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 5c511e1a8efa..d61478c0c632 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -100,6 +100,12 @@ static const struct mlxsw_cooling_states default_cooling_states[] = {
struct mlxsw_thermal;
+struct mlxsw_thermal_cooling_device {
+ struct mlxsw_thermal *thermal;
+ struct thermal_cooling_device *cdev;
+ unsigned int idx;
+};
+
struct mlxsw_thermal_module {
struct mlxsw_thermal *parent;
struct thermal_zone_device *tzdev;
@@ -123,7 +129,7 @@ struct mlxsw_thermal {
const struct mlxsw_bus_info *bus_info;
struct thermal_zone_device *tzdev;
int polling_delay;
- struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
+ struct mlxsw_thermal_cooling_device cdevs[MLXSW_MFCR_PWMS_MAX];
struct thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
struct mlxsw_cooling_states cooling_states[MLXSW_THERMAL_NUM_TRIPS];
struct mlxsw_thermal_area line_cards[];
@@ -147,7 +153,7 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal,
int i;
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
- if (thermal->cdevs[i] == cdev)
+ if (thermal->cdevs[i].cdev == cdev)
return i;
/* Allow mlxsw thermal zone binding to an external cooling device */
@@ -352,17 +358,14 @@ static int mlxsw_thermal_get_cur_state(struct thermal_cooling_device *cdev,
unsigned long *p_state)
{
- struct mlxsw_thermal *thermal = cdev->devdata;
+ struct mlxsw_thermal_cooling_device *mlxsw_cdev = cdev->devdata;
+ struct mlxsw_thermal *thermal = mlxsw_cdev->thermal;
struct device *dev = thermal->bus_info->dev;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
- int err, idx;
u8 duty;
+ int err;
- idx = mlxsw_get_cooling_device_idx(thermal, cdev);
- if (idx < 0)
- return idx;
-
- mlxsw_reg_mfsc_pack(mfsc_pl, idx, 0);
+ mlxsw_reg_mfsc_pack(mfsc_pl, mlxsw_cdev->idx, 0);
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
if (err) {
dev_err(dev, "Failed to query PWM duty\n");
@@ -378,22 +381,19 @@ static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
- struct mlxsw_thermal *thermal = cdev->devdata;
+ struct mlxsw_thermal_cooling_device *mlxsw_cdev = cdev->devdata;
+ struct mlxsw_thermal *thermal = mlxsw_cdev->thermal;
struct device *dev = thermal->bus_info->dev;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
- int idx;
int err;
if (state > MLXSW_THERMAL_MAX_STATE)
return -EINVAL;
- idx = mlxsw_get_cooling_device_idx(thermal, cdev);
- if (idx < 0)
- return idx;
-
/* Normalize the state to the valid speed range. */
state = max_t(unsigned long, MLXSW_THERMAL_MIN_STATE, state);
- mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state));
+ mlxsw_reg_mfsc_pack(mfsc_pl, mlxsw_cdev->idx,
+ mlxsw_state_to_duty(state));
err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
if (err) {
dev_err(dev, "Failed to write PWM duty\n");
@@ -753,17 +753,21 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
}
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) {
if (pwm_active & BIT(i)) {
+ struct mlxsw_thermal_cooling_device *mlxsw_cdev;
struct thermal_cooling_device *cdev;
+ mlxsw_cdev = &thermal->cdevs[i];
+ mlxsw_cdev->thermal = thermal;
+ mlxsw_cdev->idx = i;
cdev = thermal_cooling_device_register("mlxsw_fan",
- thermal,
+ mlxsw_cdev,
&mlxsw_cooling_ops);
if (IS_ERR(cdev)) {
err = PTR_ERR(cdev);
dev_err(dev, "Failed to register cooling device\n");
goto err_thermal_cooling_device_register;
}
- thermal->cdevs[i] = cdev;
+ mlxsw_cdev->cdev = cdev;
}
}
@@ -824,8 +828,7 @@ err_thermal_modules_init:
err_thermal_zone_device_register:
err_thermal_cooling_device_register:
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
- if (thermal->cdevs[i])
- thermal_cooling_device_unregister(thermal->cdevs[i]);
+ thermal_cooling_device_unregister(thermal->cdevs[i].cdev);
err_reg_write:
err_reg_query:
kfree(thermal);
@@ -847,12 +850,8 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal)
thermal->tzdev = NULL;
}

- for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) {
- if (thermal->cdevs[i]) {
- thermal_cooling_device_unregister(thermal->cdevs[i]);
- thermal->cdevs[i] = NULL;
- }
- }
+ for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
+ thermal_cooling_device_unregister(thermal->cdevs[i].cdev);

kfree(thermal);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index cfafbeb42586..a619a0736bd1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -218,6 +218,10 @@ __mlxsw_item_bit_array_offset(const struct mlxsw_item *item,
}

max_index = (item->size.bytes << 3) / item->element_size - 1;
+ if (WARN_ONCE(index > max_index,
+ "name=%s,index=%u,max_index=%u\n", item->name, index,
+ max_index))
+ index = 0;
be_index = max_index - index;
offset = be_index * item->element_size >> 3;
in_byte_index = index % (BITS_PER_BYTE / item->element_size);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index f0ceb196a6ce..828c65036a4c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -140,6 +140,20 @@ mlxsw_m_get_module_eeprom_by_page(struct net_device *netdev,
page, extack);
}

+static int
+mlxsw_m_set_module_eeprom_by_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+ return mlxsw_env_set_module_eeprom_by_page(core,
+ mlxsw_m_port->slot_index,
+ mlxsw_m_port->module,
+ page, extack);
+}
+
static int mlxsw_m_reset(struct net_device *netdev, u32 *flags)
{
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
@@ -181,6 +195,7 @@ static const struct ethtool_ops mlxsw_m_port_ethtool_ops = {
.get_module_info = mlxsw_m_get_module_info,
.get_module_eeprom = mlxsw_m_get_module_eeprom,
.get_module_eeprom_by_page = mlxsw_m_get_module_eeprom_by_page,
+ .set_module_eeprom_by_page = mlxsw_m_set_module_eeprom_by_page,
.reset = mlxsw_m_reset,
.get_module_power_mode = mlxsw_m_get_module_power_mode,
.set_module_power_mode = mlxsw_m_set_module_power_mode,
@@ -702,8 +717,8 @@ static struct mlxsw_driver mlxsw_m_driver = {
};

static const struct i2c_device_id mlxsw_m_i2c_id[] = {
- { "mlxsw_minimal", 0},
- { },
+ { "mlxsw_minimal" },
+ { }
};
static struct i2c_driver mlxsw_m_i2c_driver = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index c0ced4d315f3..060e5b939211 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -13,6 +13,7 @@
#include <linux/if_vlan.h>
#include <linux/log2.h>
#include <linux/string.h>
+#include <net/page_pool/helpers.h>

#include "pci_hw.h"
#include "pci.h"
@@ -61,15 +62,11 @@ struct mlxsw_pci_mem_item {
};

struct mlxsw_pci_queue_elem_info {
+ struct page *pages[MLXSW_PCI_WQE_SG_ENTRIES];
char *elem; /* pointer to actual dma mapped element mem chunk */
- union {
- struct {
- struct sk_buff *skb;
- } sdq;
- struct {
- struct sk_buff *skb;
- } rdq;
- } u;
+ struct {
+ struct sk_buff *skb;
+ } sdq;
};

struct mlxsw_pci_queue {
@@ -88,10 +85,14 @@ struct mlxsw_pci_queue {
enum mlxsw_pci_cqe_v v;
struct mlxsw_pci_queue *dq;
struct napi_struct napi;
+ struct page_pool *page_pool;
} cq;
struct {
struct tasklet_struct tasklet;
} eq;
+ struct {
+ struct mlxsw_pci_queue *cq;
+ } rdq;
} u;
};
@@ -110,6 +111,7 @@ struct mlxsw_pci {
bool cff_support;
enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode;
enum mlxsw_cmd_mbox_config_profile_flood_mode flood_mode;
+ u8 num_sg_entries; /* Number of scatter/gather entries for packets. */
struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
u32 doorbell_offset;
struct mlxsw_core *core;
@@ -335,6 +337,29 @@ static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}
+#define MLXSW_PCI_SKB_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
+
+#define MLXSW_PCI_RX_BUF_SW_OVERHEAD \
+ (MLXSW_PCI_SKB_HEADROOM + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+static void
+mlxsw_pci_wqe_rx_frag_set(struct mlxsw_pci *mlxsw_pci, struct page *page,
+ char *wqe, int index, size_t frag_len)
+{
+ dma_addr_t mapaddr;
+
+ mapaddr = page_pool_get_dma_addr(page);
+
+ if (index == 0) {
+ mapaddr += MLXSW_PCI_SKB_HEADROOM;
+ frag_len = frag_len - MLXSW_PCI_RX_BUF_SW_OVERHEAD;
+ }
+
+ mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
+ mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
+}
+
static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
int index, char *frag_data, size_t frag_len,
int direction)
@@ -364,43 +389,140 @@ static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
}
-static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
- struct mlxsw_pci_queue_elem_info *elem_info,
- gfp_t gfp)
+static struct sk_buff *mlxsw_pci_rdq_build_skb(struct page *pages[],
+ u16 byte_count)
{
- size_t buf_len = MLXSW_PORT_MAX_MTU;
- char *wqe = elem_info->elem;
+ unsigned int linear_data_size;
struct sk_buff *skb;
- int err;
+ int page_index = 0;
+ bool linear_only;
+ void *data;
+
+ data = page_address(pages[page_index]);
+ net_prefetch(data);
+
+ skb = napi_build_skb(data, PAGE_SIZE);
+ if (unlikely(!skb))
+ return ERR_PTR(-ENOMEM);
+
+ linear_only = byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD <= PAGE_SIZE;
+ linear_data_size = linear_only ? byte_count :
+ PAGE_SIZE -
+ MLXSW_PCI_RX_BUF_SW_OVERHEAD;
+
+ skb_reserve(skb, MLXSW_PCI_SKB_HEADROOM);
+ skb_put(skb, linear_data_size);
+
+ if (linear_only)
+ return skb;
+
+ byte_count -= linear_data_size;
+ page_index++;
+
+ while (byte_count > 0) {
+ unsigned int frag_size;
+ struct page *page;
+
+ page = pages[page_index];
+ frag_size = min(byte_count, PAGE_SIZE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ page, 0, frag_size, PAGE_SIZE);
+ byte_count -= frag_size;
+ page_index++;
+ }
- skb = __netdev_alloc_skb_ip_align(NULL, buf_len, gfp);
- if (!skb)
+ return skb;
+}
+
+static int mlxsw_pci_rdq_page_alloc(struct mlxsw_pci_queue *q,
+ struct mlxsw_pci_queue_elem_info *elem_info,
+ int index)
+{
+ struct mlxsw_pci_queue *cq = q->u.rdq.cq;
+ char *wqe = elem_info->elem;
+ struct page *page;
+
+ page = page_pool_dev_alloc_pages(cq->u.cq.page_pool);
+ if (unlikely(!page))
return -ENOMEM;
- err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
- buf_len, DMA_FROM_DEVICE);
- if (err)
- goto err_frag_map;
+ mlxsw_pci_wqe_rx_frag_set(q->pci, page, wqe, index, PAGE_SIZE);
+ elem_info->pages[index] = page;
+ return 0;
+}
+
+static void mlxsw_pci_rdq_page_free(struct mlxsw_pci_queue *q,
+ struct mlxsw_pci_queue_elem_info *elem_info,
+ int index)
+{
+ struct mlxsw_pci_queue *cq = q->u.rdq.cq;
+
+ page_pool_put_page(cq->u.cq.page_pool, elem_info->pages[index], -1,
+ false);
+}
+
+static u8 mlxsw_pci_num_sg_entries_get(u16 byte_count)
+{
+ return DIV_ROUND_UP(byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD,
+ PAGE_SIZE);
+}
+
+static int
+mlxsw_pci_elem_info_pages_ref_store(const struct mlxsw_pci_queue *q,
+ const struct mlxsw_pci_queue_elem_info *el,
+ u16 byte_count, struct page *pages[],
+ u8 *p_num_sg_entries)
+{
+ u8 num_sg_entries;
+ int i;
+
+ num_sg_entries = mlxsw_pci_num_sg_entries_get(byte_count);
+ if (WARN_ON_ONCE(num_sg_entries > q->pci->num_sg_entries))
+ return -EINVAL;
+
+ for (i = 0; i < num_sg_entries; i++)
+ pages[i] = el->pages[i];
+
+ *p_num_sg_entries = num_sg_entries;
+ return 0;
+}
+
+static int
+mlxsw_pci_rdq_pages_alloc(struct mlxsw_pci_queue *q,
+ struct mlxsw_pci_queue_elem_info *elem_info,
+ u8 num_sg_entries)
+{
+ struct page *old_pages[MLXSW_PCI_WQE_SG_ENTRIES];
+ struct mlxsw_pci_queue *cq = q->u.rdq.cq;
+ int i, err;
+
+ for (i = 0; i < num_sg_entries; i++) {
+ old_pages[i] = elem_info->pages[i];
+ err = mlxsw_pci_rdq_page_alloc(q, elem_info, i);
+ if (err) {
+ dev_err_ratelimited(&q->pci->pdev->dev, "Failed to alloc page\n");
+ goto err_page_alloc;
+ }
+ }
- elem_info->u.rdq.skb = skb;
return 0;
-err_frag_map:
- dev_kfree_skb_any(skb);
+err_page_alloc:
+ for (i--; i >= 0; i--)
+ page_pool_recycle_direct(cq->u.cq.page_pool, old_pages[i]);
+
return err;
}

-static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
- struct mlxsw_pci_queue_elem_info *elem_info)
+static void
+mlxsw_pci_rdq_pages_recycle(struct mlxsw_pci_queue *q, struct page *pages[],
+ u8 num_sg_entries)
{
- struct sk_buff *skb;
- char *wqe;
-
- skb = elem_info->u.rdq.skb;
- wqe = elem_info->elem;
+ struct mlxsw_pci_queue *cq = q->u.rdq.cq;
+ int i;
- mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
+ for (i = 0; i < num_sg_entries; i++)
+ page_pool_recycle_direct(cq->u.cq.page_pool, pages[i]);
}

static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
@@ -410,7 +532,7 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
u8 sdq_count = mlxsw_pci->num_sdqs;
struct mlxsw_pci_queue *cq;
u8 cq_num;
- int i;
+ int i, j;
int err;

q->producer_counter = 0;
@@ -434,15 +556,19 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
cq = mlxsw_pci_cq_get(mlxsw_pci, cq_num);
cq->u.cq.dq = q;
+ q->u.rdq.cq = cq;
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

for (i = 0; i < q->count; i++) {
elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
BUG_ON(!elem_info);
- err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_KERNEL);
- if (err)
- goto rollback;
+
+ for (j = 0; j < mlxsw_pci->num_sg_entries; j++) {
+ err = mlxsw_pci_rdq_page_alloc(q, elem_info, j);
+ if (err)
+ goto rollback;
+ }
/* Everything is set up, ring doorbell to pass elem to HW */
q->producer_counter++;
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
@@ -453,8 +579,11 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
rollback:
for (i--; i >= 0; i--) {
elem_info = mlxsw_pci_queue_elem_info_get(q, i);
- mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+ for (j--; j >= 0; j--)
+ mlxsw_pci_rdq_page_free(q, elem_info, j);
+ j = mlxsw_pci->num_sg_entries;
}
+ q->u.rdq.cq = NULL;
cq->u.cq.dq = NULL;
mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
@@ -465,12 +594,13 @@ static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q)
{
struct mlxsw_pci_queue_elem_info *elem_info;
- int i;
+ int i, j;
mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
for (i = 0; i < q->count; i++) {
elem_info = mlxsw_pci_queue_elem_info_get(q, i);
- mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+ for (j = 0; j < mlxsw_pci->num_sg_entries; j++)
+ mlxsw_pci_rdq_page_free(q, elem_info, j);
}
}
@@ -515,7 +645,7 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q,
u16 consumer_counter_limit,
enum mlxsw_pci_cqe_v cqe_v,
- char *cqe)
+ char *cqe, int budget)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
struct mlxsw_pci_queue_elem_info *elem_info;
@@ -526,8 +656,8 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
spin_lock(&q->lock);
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
- tx_info = mlxsw_skb_cb(elem_info->u.sdq.skb)->tx_info;
- skb = elem_info->u.sdq.skb;
+ tx_info = mlxsw_skb_cb(elem_info->sdq.skb)->tx_info;
+ skb = elem_info->sdq.skb;
wqe = elem_info->elem;
for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
@@ -541,8 +671,8 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
}
if (skb)
- dev_kfree_skb_any(skb);
- elem_info->u.sdq.skb = NULL;
+ napi_consume_skb(skb, budget);
+ elem_info->sdq.skb = NULL;
if (q->consumer_counter++ != consumer_counter_limit)
dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
@@ -604,27 +734,40 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
+ struct page *pages[MLXSW_PCI_WQE_SG_ENTRIES];
struct mlxsw_pci_queue_elem_info *elem_info;
struct mlxsw_rx_info rx_info = {};
- char wqe[MLXSW_PCI_WQE_SIZE];
struct sk_buff *skb;
+ u8 num_sg_entries;
u16 byte_count;
int err;

elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
- skb = elem_info->u.rdq.skb;
- memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE);
if (q->consumer_counter++ != consumer_counter_limit)
dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
- err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_ATOMIC);
- if (err) {
- dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
+ byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
+ if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
+ byte_count -= ETH_FCS_LEN;
+
+ err = mlxsw_pci_elem_info_pages_ref_store(q, elem_info, byte_count,
+ pages, &num_sg_entries);
+ if (err)
+ goto out;
+
+ err = mlxsw_pci_rdq_pages_alloc(q, elem_info, num_sg_entries);
+ if (err)
+ goto out;
+
+ skb = mlxsw_pci_rdq_build_skb(pages, byte_count);
+ if (IS_ERR(skb)) {
+ dev_err_ratelimited(&pdev->dev, "Failed to build skb for RDQ\n");
+ mlxsw_pci_rdq_pages_recycle(q, pages, num_sg_entries);
goto out;
}

- mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+ skb_mark_for_recycle(skb);
if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
rx_info.is_lag = true;
@@ -657,10 +800,6 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
- byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
- if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
- byte_count -= ETH_FCS_LEN;
- skb_put(skb, byte_count);
mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
out:
@@ -785,7 +924,7 @@ static int mlxsw_pci_napi_poll_cq_tx(struct napi_struct *napi, int budget)
mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
- wqe_counter, q->u.cq.v, ncqe);
+ wqe_counter, q->u.cq.v, ncqe, budget);
work_done++;
}
@@ -832,19 +971,51 @@ static void mlxsw_pci_cq_napi_setup(struct mlxsw_pci_queue *q,
mlxsw_pci_napi_poll_cq_rx);
break;
}
-
- napi_enable(&q->u.cq.napi);
}

static void mlxsw_pci_cq_napi_teardown(struct mlxsw_pci_queue *q)
{
- napi_disable(&q->u.cq.napi);
netif_napi_del(&q->u.cq.napi);
}

+static int mlxsw_pci_cq_page_pool_init(struct mlxsw_pci_queue *q,
+ enum mlxsw_pci_cq_type cq_type)
+{
+ struct page_pool_params pp_params = {};
+ struct mlxsw_pci *mlxsw_pci = q->pci;
+ struct page_pool *page_pool;
+
+ if (cq_type != MLXSW_PCI_CQ_RDQ)
+ return 0;
+
+ pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.pool_size = MLXSW_PCI_WQE_COUNT * mlxsw_pci->num_sg_entries;
+ pp_params.nid = dev_to_node(&mlxsw_pci->pdev->dev);
+ pp_params.dev = &mlxsw_pci->pdev->dev;
+ pp_params.napi = &q->u.cq.napi;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+
+ page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(page_pool))
+ return PTR_ERR(page_pool);
+
+ q->u.cq.page_pool = page_pool;
+ return 0;
+}
+
+static void mlxsw_pci_cq_page_pool_fini(struct mlxsw_pci_queue *q,
+ enum mlxsw_pci_cq_type cq_type)
+{
+ if (cq_type != MLXSW_PCI_CQ_RDQ)
+ return;
+
+ page_pool_destroy(q->u.cq.page_pool);
+}
+
static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
+ enum mlxsw_pci_cq_type cq_type = mlxsw_pci_cq_type(mlxsw_pci, q);
int i;
int err;
@@ -874,15 +1045,29 @@ static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
if (err)
return err;
- mlxsw_pci_cq_napi_setup(q, mlxsw_pci_cq_type(mlxsw_pci, q));
+ mlxsw_pci_cq_napi_setup(q, cq_type);
+
+ err = mlxsw_pci_cq_page_pool_init(q, cq_type);
+ if (err)
+ goto err_page_pool_init;
+
+ napi_enable(&q->u.cq.napi);
mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
return 0;
+
+err_page_pool_init:
+ mlxsw_pci_cq_napi_teardown(q);
+ return err;
}
static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q)
{
+ enum mlxsw_pci_cq_type cq_type = mlxsw_pci_cq_type(mlxsw_pci, q);
+
+ napi_disable(&q->u.cq.napi);
+ mlxsw_pci_cq_page_pool_fini(q, cq_type);
mlxsw_pci_cq_napi_teardown(q);
mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}
@@ -1599,6 +1784,7 @@ static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci,
{
struct pci_dev *pdev = mlxsw_pci->pdev;
char mrsr_pl[MLXSW_REG_MRSR_LEN];
+ struct pci_dev *bridge;
int err;

if (!pci_reset_sbr_supported) {
@@ -1615,6 +1801,9 @@ static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci,
sbr:
device_lock_assert(&pdev->dev);

+ bridge = pci_upstream_bridge(pdev);
+ if (bridge)
+ pci_cfg_access_lock(bridge);
pci_cfg_access_lock(pdev);
pci_save_state(pdev);
@@ -1624,6 +1813,8 @@ sbr:
pci_restore_state(pdev);
pci_cfg_access_unlock(pdev);
+ if (bridge)
+ pci_cfg_access_unlock(bridge);
return err;
}
@@ -1703,6 +1894,17 @@ static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
pci_free_irq_vectors(mlxsw_pci->pdev);
}

+static void mlxsw_pci_num_sg_entries_set(struct mlxsw_pci *mlxsw_pci)
+{
+ u8 num_sg_entries;
+
+ num_sg_entries = mlxsw_pci_num_sg_entries_get(MLXSW_PORT_MAX_MTU);
+ mlxsw_pci->num_sg_entries = min(num_sg_entries,
+ MLXSW_PCI_WQE_SG_ENTRIES);
+
+ WARN_ON(num_sg_entries > MLXSW_PCI_WQE_SG_ENTRIES);
+}
+
static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
const struct mlxsw_config_profile *profile,
struct mlxsw_res *res)
@@ -1825,6 +2027,8 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_requery_resources;
+ mlxsw_pci_num_sg_entries_set(mlxsw_pci);
+
err = mlxsw_pci_napi_devs_init(mlxsw_pci);
if (err)
goto err_napi_devs_init;
@@ -1931,7 +2135,7 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
goto unlock;
}
mlxsw_skb_cb(skb)->tx_info = *tx_info;
- elem_info->u.sdq.skb = skb;
+ elem_info->sdq.skb = skb;
wqe = elem_info->elem;
mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index ac4d4ea51597..0a73b1a4526e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -6,7 +6,8 @@
#include <linux/types.h>
-#define MLXSW_PORT_MAX_MTU 10000
+#define MLXSW_PORT_MAX_MTU (10 * 1024)
+#define MLXSW_PORT_ETH_FRAME_HDR (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
#define MLXSW_PORT_DEFAULT_VID 1
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 030ed71f945d..f064789f3240 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -405,29 +405,12 @@ static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
mlxsw_sp_port->dev->dev_addr);
}

-static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char pmtu_pl[MLXSW_REG_PMTU_LEN];
- int err;
-
- mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
- if (err)
- return err;
-
- *p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
- return 0;
-}
-
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pmtu_pl[MLXSW_REG_PMTU_LEN];

- mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
- if (mtu > mlxsw_sp_port->max_mtu)
- return -EINVAL;
+ mtu += MLXSW_PORT_ETH_FRAME_HDR;
mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
@@ -1697,8 +1680,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

- dev->min_mtu = 0;
- dev->max_mtu = ETH_MAX_MTU;
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = MLXSW_PORT_MAX_MTU - MLXSW_PORT_ETH_FRAME_HDR;

/* Each packet needs to have a Tx header (metadata) on top of all other
* headers.
@@ -1727,13 +1710,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
goto err_max_speed_get;
}
- err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
- mlxsw_sp_port->local_port);
- goto err_port_max_mtu_get;
- }
-
err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
@@ -1877,7 +1853,6 @@ err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
-err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 3beb5d0847ab..8d3c61287696 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -238,7 +238,7 @@ struct mlxsw_sp_ptp_ops {
struct hwtstamp_config *config);
void (*shaper_work)(struct work_struct *work);
int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int (*get_stats_count)(void);
void (*get_stats_strings)(u8 **p);
void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -359,7 +359,6 @@ struct mlxsw_sp_port {
u16 egr_types;
struct mlxsw_sp_ptp_port_stats stats;
} ptp;
- int max_mtu;
u32 max_speed;
struct mlxsw_sp_hdroom *hdroom;
u64 module_overheat_initial_val;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
index 4b713832fdd5..07cb1e26ca3e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
@@ -391,7 +391,8 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- lkey_id = aregion->ops->lkey_id_get(aregion, aentry->enc_key, erp_id);
+ lkey_id = aregion->ops->lkey_id_get(aregion, aentry->ht_key.enc_key,
+ erp_id);
if (IS_ERR(lkey_id))
return PTR_ERR(lkey_id);
aentry->lkey_id = lkey_id;
@@ -399,7 +400,7 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block);
mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE,
priority, region->tcam_region_info,
- aentry->enc_key, erp_id,
+ aentry->ht_key.enc_key, erp_id,
aentry->delta_info.start,
aentry->delta_info.mask,
aentry->delta_info.value,
@@ -428,7 +429,7 @@ mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0,
region->tcam_region_info,
- aentry->enc_key, erp_id,
+ aentry->ht_key.enc_key, erp_id,
aentry->delta_info.start,
aentry->delta_info.mask,
aentry->delta_info.value,
@@ -457,7 +458,7 @@ mlxsw_sp_acl_atcam_region_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block);
mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_UPDATE,
priority, region->tcam_region_info,
- aentry->enc_key, erp_id,
+ aentry->ht_key.enc_key, erp_id,
aentry->delta_info.start,
aentry->delta_info.mask,
aentry->delta_info.value,
@@ -480,26 +481,23 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
int err;

mlxsw_afk_encode(afk, region->key_info, &rulei->values,
- aentry->ht_key.full_enc_key, mask);
+ aentry->ht_key.enc_key, mask);
erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, false);
if (IS_ERR(erp_mask))
return PTR_ERR(erp_mask);
aentry->erp_mask = erp_mask;
aentry->ht_key.erp_id = mlxsw_sp_acl_erp_mask_erp_id(erp_mask);
- memcpy(aentry->enc_key, aentry->ht_key.full_enc_key,
- sizeof(aentry->enc_key));
/* Compute all needed delta information and clear the delta bits
- * from the encrypted key.
+ * from the encoded key.
*/
delta = mlxsw_sp_acl_erp_delta(aentry->erp_mask);
aentry->delta_info.start = mlxsw_sp_acl_erp_delta_start(delta);
aentry->delta_info.mask = mlxsw_sp_acl_erp_delta_mask(delta);
aentry->delta_info.value =
- mlxsw_sp_acl_erp_delta_value(delta,
- aentry->ht_key.full_enc_key);
- mlxsw_sp_acl_erp_delta_clear(delta, aentry->enc_key);
+ mlxsw_sp_acl_erp_delta_value(delta, aentry->ht_key.enc_key);
+ mlxsw_sp_acl_erp_delta_clear(delta, aentry->ht_key.enc_key);
/* Add rule to the list of A-TCAM rules, assuming this
* rule is intended to A-TCAM. In case this rule does
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
index 95f63fcf4ba1..a54eedb69a3f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
@@ -249,7 +249,7 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
memcpy(chunk + pad_bytes, &erp_region_id,
sizeof(erp_region_id));
memcpy(chunk + key_offset,
- &aentry->enc_key[chunk_key_offsets[chunk_index]],
+ &aentry->ht_key.enc_key[chunk_key_offsets[chunk_index]],
chunk_key_len);
chunk += chunk_len;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
index d231f4d2888b..9eee229303cc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
@@ -1217,18 +1217,6 @@ static bool mlxsw_sp_acl_erp_delta_check(void *priv, const void *parent_obj,
return err ? false : true;
}
-static int mlxsw_sp_acl_erp_hints_obj_cmp(const void *obj1, const void *obj2)
-{
- const struct mlxsw_sp_acl_erp_key *key1 = obj1;
- const struct mlxsw_sp_acl_erp_key *key2 = obj2;
-
- /* For hints purposes, two objects are considered equal
- * in case the masks are the same. Does not matter what
- * the "ctcam" value is.
- */
- return memcmp(key1->mask, key2->mask, sizeof(key1->mask));
-}
-
static void *mlxsw_sp_acl_erp_delta_create(void *priv, void *parent_obj,
void *obj)
{
@@ -1308,7 +1296,6 @@ static void mlxsw_sp_acl_erp_root_destroy(void *priv, void *root_priv)
static const struct objagg_ops mlxsw_sp_acl_erp_objagg_ops = {
.obj_size = sizeof(struct mlxsw_sp_acl_erp_key),
.delta_check = mlxsw_sp_acl_erp_delta_check,
- .hints_obj_cmp = mlxsw_sp_acl_erp_hints_obj_cmp,
.delta_create = mlxsw_sp_acl_erp_delta_create,
.delta_destroy = mlxsw_sp_acl_erp_delta_destroy,
.root_create = mlxsw_sp_acl_erp_root_create,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index 79a1d8606512..010204f73ea4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -167,9 +167,9 @@ struct mlxsw_sp_acl_atcam_region {
};
struct mlxsw_sp_acl_atcam_entry_ht_key {
- char full_enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded
- * key.
- */
+ char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key, minus
+ * delta bits.
+ */
u8 erp_id;
};
@@ -181,9 +181,6 @@ struct mlxsw_sp_acl_atcam_entry {
struct rhash_head ht_node;
struct list_head list; /* Member in entries_list */
struct mlxsw_sp_acl_atcam_entry_ht_key ht_key;
- char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key,
- * minus delta bits.
- */
struct {
u16 start;
u8 mask;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index ba090262e27e..2c0cfa79d138 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -399,11 +399,13 @@ void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_hdroom *hdroom)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ unsigned int max_mtu = mlxsw_sp_port->dev->max_mtu;
u16 reserve_cells;
int i;

+ max_mtu += MLXSW_PORT_ETH_FRAME_HDR;
/* Internal buffer. */
- reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, mlxsw_sp_port->max_mtu,
+ reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, max_mtu,
mlxsw_sp_port->max_speed);
reserve_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, reserve_cells);
hdroom->int_buf.reserve_cells = reserve_cells;
@@ -613,7 +615,9 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
/* Buffer 9 is used for control traffic. */
- size9 = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, mlxsw_sp_port->max_mtu);
+ size9 = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port,
+ mlxsw_sp_port->dev->max_mtu +
+ MLXSW_PORT_ETH_FRAME_HDR);
hdroom.bufs.buf[9].size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size9);
return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom, true);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index ca80af06465f..fa6eddd27ecf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -283,7 +283,7 @@ static u64 mlxsw_sp_dpipe_table_erif_size_get(void *priv)
return MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
}
-static struct devlink_dpipe_table_ops mlxsw_sp_erif_ops = {
+static const struct devlink_dpipe_table_ops mlxsw_sp_erif_ops = {
.matches_dump = mlxsw_sp_dpipe_table_erif_matches_dump,
.actions_dump = mlxsw_sp_dpipe_table_erif_actions_dump,
.entries_dump = mlxsw_sp_dpipe_table_erif_entries_dump,
@@ -734,7 +734,7 @@ static u64 mlxsw_sp_dpipe_table_host4_size_get(void *priv)
return mlxsw_sp_dpipe_table_host_size_get(mlxsw_sp, AF_INET);
}
-static struct devlink_dpipe_table_ops mlxsw_sp_host4_ops = {
+static const struct devlink_dpipe_table_ops mlxsw_sp_host4_ops = {
.matches_dump = mlxsw_sp_dpipe_table_host4_matches_dump,
.actions_dump = mlxsw_sp_dpipe_table_host_actions_dump,
.entries_dump = mlxsw_sp_dpipe_table_host4_entries_dump,
@@ -811,7 +811,7 @@ static u64 mlxsw_sp_dpipe_table_host6_size_get(void *priv)
return mlxsw_sp_dpipe_table_host_size_get(mlxsw_sp, AF_INET6);
}
-static struct devlink_dpipe_table_ops mlxsw_sp_host6_ops = {
+static const struct devlink_dpipe_table_ops mlxsw_sp_host6_ops = {
.matches_dump = mlxsw_sp_dpipe_table_host6_matches_dump,
.actions_dump = mlxsw_sp_dpipe_table_host_actions_dump,
.entries_dump = mlxsw_sp_dpipe_table_host6_entries_dump,
@@ -1230,7 +1230,7 @@ mlxsw_sp_dpipe_table_adj_size_get(void *priv)
return size;
}
-static struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = {
+static const struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = {
.matches_dump = mlxsw_sp_dpipe_table_adj_matches_dump,
.actions_dump = mlxsw_sp_dpipe_table_adj_actions_dump,
.entries_dump = mlxsw_sp_dpipe_table_adj_entries_dump,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index a755b0a901d3..2bed8c86b7cf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -1068,7 +1068,21 @@ mlxsw_sp_get_module_eeprom_by_page(struct net_device *dev,
}
static int
-mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info)
+mlxsw_sp_set_module_eeprom_by_page(struct net_device *dev,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
+ u8 module = mlxsw_sp_port->mapping.module;
+
+ return mlxsw_env_set_module_eeprom_by_page(mlxsw_sp->core, slot_index,
+ module, page, extack);
+}
+
+static int
+mlxsw_sp_get_ts_info(struct net_device *netdev, struct kernel_ethtool_ts_info *info)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -1256,6 +1270,7 @@ const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.get_module_info = mlxsw_sp_get_module_info,
.get_module_eeprom = mlxsw_sp_get_module_eeprom,
.get_module_eeprom_by_page = mlxsw_sp_get_module_eeprom_by_page,
+ .set_module_eeprom_by_page = mlxsw_sp_set_module_eeprom_by_page,
.get_ts_info = mlxsw_sp_get_ts_info,
.get_eth_phy_stats = mlxsw_sp_get_eth_phy_stats,
.get_eth_mac_stats = mlxsw_sp_get_eth_mac_stats,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index cbb6c75a6620..5b174cb95eb8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -1276,7 +1276,7 @@ int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp);
@@ -1661,7 +1661,7 @@ err_get_message_types:
}
int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index a8b88230959a..769095d4932d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -11,7 +11,7 @@ struct mlxsw_sp;
struct mlxsw_sp_port;
struct mlxsw_sp_ptp_clock;
-static inline int mlxsw_sp_ptp_get_ts_info_noptp(struct ethtool_ts_info *info)
+static inline int mlxsw_sp_ptp_get_ts_info_noptp(struct kernel_ethtool_ts_info *info)
{
info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE;
@@ -50,7 +50,7 @@ int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
void mlxsw_sp1_ptp_shaper_work(struct work_struct *work);
int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int mlxsw_sp1_get_stats_count(void);
void mlxsw_sp1_get_stats_strings(u8 **p);
@@ -84,7 +84,7 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct hwtstamp_config *config);
int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
struct mlxsw_sp_port *mlxsw_sp_port,
@@ -152,7 +152,7 @@ static inline void mlxsw_sp1_ptp_shaper_work(struct work_struct *work)
}
static inline int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
return mlxsw_sp_ptp_get_ts_info_noptp(info);
}
@@ -227,7 +227,7 @@ mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
return mlxsw_sp_ptp_get_ts_info_noptp(info);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 40ba314fbc72..800dfb64ec83 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -11450,12 +11450,16 @@ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
bool old_inc_parsing_depth, new_inc_parsing_depth;
struct mlxsw_sp_mp_hash_config config = {};
+ struct net *net = mlxsw_sp_net(mlxsw_sp);
char recr2_pl[MLXSW_REG_RECR2_LEN];
unsigned long bit;
u32 seed;
int err;

- seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
+ seed = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed).user_seed;
+ if (!seed)
+ seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
+
mlxsw_reg_recr2_pack(recr2_pl, seed);
mlxsw_sp_mp4_hash_init(mlxsw_sp, &config);
mlxsw_sp_mp6_hash_init(mlxsw_sp, &config);