author     Linus Torvalds <torvalds@linux-foundation.org>  2024-07-16 19:28:34 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-07-16 19:28:34 -0700
commit     51835949dda3783d4639cfa74ce13a3c9829de00 (patch)
tree       2b593de5eba6ecc73f7c58fc65fdaffae45c7323 /drivers/net/ethernet/renesas
parent     0434dbe32053d07d658165be681505120c6b1abc (diff)
parent     77ae5e5b00720372af2860efdc4bc652ac682696 (diff)
Merge tag 'net-next-6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from Jakub Kicinski:
 "Not much excitement - a handful of large patchsets (devmem among them) did not make it in time.

  Core & protocols:
   - Use local_lock in addition to local_bh_disable() to protect per-CPU resources in networking, a step closer for local_bh_disable() not to act as a big lock on PREEMPT_RT
   - Use flex array for netdevice priv area, ensure its cache alignment
   - Add a sysctl knob to allow user to specify a default rto_min at socket init time. Bit of a big hammer but multiple companies were independently carrying such patch downstream so clearly it's useful
   - Support scheduling transmission of packets based on CLOCK_TAI
   - Un-pin TCP TIMEWAIT timer to avoid it firing on CPUs later cordoned off using cpusets
   - Support multiple L2TPv3 UDP tunnels using the same 5-tuple address
   - Allow configuration of multipath hash seed, to both allow synchronizing hashing of two routers, and preventing partial accidental sync
   - Improve TCP compliance with RFC 9293 for simultaneous connect()
   - Support sending NAT keepalives in IPsec ESP in UDP states. Userspace IKE daemon had to do this before, but the kernel can better keep track of it
   - Support sending supervision HSR frames with MAC addresses stored in ProxyNodeTable when RedBox (i.e. HSR-SAN) is enabled
   - Introduce IPPROTO_SMC for selecting SMC when socket is created
   - Allow UDP GSO transmit from devices with no checksum offload
   - openvswitch: add packet sampling via psample, separating the sampled traffic from "upcall" packets sent to user space for forwarding
   - nf_tables: shrink memory consumption for transaction objects

  Things we sprinkled into general kernel code:
   - Power Sequencing subsystem (used by Qualcomm Bluetooth driver for QCA6390) [ Already merged separately - Linus ]
   - Add IRQ information in sysfs for auxiliary bus
   - Introduce guard definition for local_lock
   - Add aligned flavor of __cacheline_group_{begin, end}() markings for grouping fields in structures

  BPF:
   - Notify user space (via epoll) when a struct_ops object is getting detached/unregistered
   - Add new kfuncs for a generic, open-coded bits iterator
   - Enable BPF programs to declare arrays of kptr, bpf_rb_root, and bpf_list_head
   - Support resilient split BTF which cuts down on duplication and makes BTF as compact as possible WRT BTF from modules
   - Add support for dumping kfunc prototypes from BTF which enables both detecting as well as dumping compilable prototypes for kfuncs
   - riscv64 BPF JIT improvements in particular to add 12-argument support for BPF trampolines and to utilize bpf_prog_pack for the latter
   - Add the capability to offload the netfilter flowtable in XDP layer through kfuncs

  Driver API:
   - Allow users to configure IRQ thresholds between which automatic IRQ moderation can choose
   - Expand Power Sourcing (PoE) status with power, class and failure reason. Support setting power limits
   - Track additional RSS contexts in the core, make sure configuration changes don't break them
   - Support IPsec crypto offload for IPv6 ESP and IPv4 UDP-encapsulated ESP data paths
   - Support updating firmware on SFP modules

  Tests and tooling:
   - mptcp: use net/lib.sh to manage netns
   - TCP-AO and TCP-MD5: replace debug prints used by tests with tracepoints
   - openvswitch: make test self-contained (don't depend on OvS CLI tools)

  Drivers:
   - Ethernet high-speed NICs:
      - Broadcom (bnxt):
         - increase the max total outstanding PTP TX packets to 4
         - add timestamping statistics support
         - implement netdev_queue_mgmt_ops
         - support new RSS context API
      - Intel (100G, ice, idpf):
         - implement FEC statistics and dumping signal quality indicators
         - support E825C products (with 56Gbps PHYs)
      - nVidia/Mellanox:
         - support HW-GRO
         - mlx4/mlx5: support per-queue statistics via netlink
         - obey the max number of EQs setting in sub-functions
      - AMD/Solarflare:
         - support new RSS context API
      - AMD/Pensando:
         - ionic: rework fix for doorbell miss to lower overhead and skip it on new HW
      - Wangxun:
         - txgbe: support Flow Director perfect filters
   - Ethernet NICs consumer, embedded and virtual:
      - Add driver for Tehuti Networks TN40xx chips
      - Add driver for Meta's internal NIC chips
      - Add driver for Ethernet MAC on Airoha EN7581 SoCs
      - Add driver for Renesas Ethernet-TSN devices
      - Google cloud vNIC:
         - flow steering support
      - Microsoft vNIC:
         - support page sizes other than 4KB on ARM64
      - vmware vNIC:
         - support latency measurement (update to version 9)
      - VirtIO net:
         - support for Byte Queue Limits
         - support configuring thresholds for automatic IRQ moderation
         - support for AF_XDP Rx zero-copy
      - Synopsys (stmmac):
         - support for STM32MP13 SoC
         - let platforms select the right PCS implementation
      - TI:
         - icssg-prueth: add multicast filtering support
         - icssg-prueth: enable PTP timestamping and PPS
      - Renesas:
         - ravb: improve Rx performance 30-400% by using page pool, threaded NAPI and timer-based IRQ coalescing
         - ravb: add MII support for R-Car V4M
      - Cadence (macb):
         - macb: add ARP support to Wake-On-LAN
      - Cortina:
         - use phylib for RX and TX pause configuration
   - Ethernet switches:
      - nVidia/Mellanox:
         - support configuration of multipath hash seed
         - report more accurate max MTU
         - use page_pool to improve Rx performance
      - MediaTek:
         - mt7530: add support for bridge port isolation
      - Qualcomm:
         - qca8k: add support for bridge port isolation
      - Microchip:
         - lan9371/2: add 100BaseTX PHY support
      - NXP:
         - vsc73xx: implement VLAN operations
   - Ethernet PHYs:
      - aquantia: enable support for aqr115c
      - aquantia: add support for PHY LEDs
      - realtek: add support for rtl8224 2.5Gbps PHY
      - xpcs: add memory-mapped device support
      - add BroadR-Reach link mode and support in Broadcom's PHY driver
   - CAN:
      - add document for ISO 15765-2 protocol support
      - mcp251xfd: workaround for erratum DS80000789E, use timestamps to catch when device returns incorrect FIFO status
   - WiFi:
      - mac80211/cfg80211:
         - parse Transmit Power Envelope (TPE) data in mac80211 instead of in drivers
         - improvements for 6 GHz regulatory flexibility
         - multi-link improvements
         - support multiple radios per wiphy
         - remove DEAUTH_NEED_MGD_TX_PREP flag
      - Intel (iwlwifi):
         - bump FW API to 91 for BZ/SC devices
         - report 64-bit radiotap timestamp
         - enable P2P low latency by default
         - handle Transmit Power Envelope (TPE) advertised by AP
         - remove support for older FW for new devices
         - fast resume (keeping the device configured)
         - mvm: re-enable Multi-Link Operation (MLO)
         - aggregation (A-MSDU) optimizations
      - MediaTek (mt76):
         - mt7925 Multi-Link Operation (MLO) support
      - Qualcomm (ath10k):
         - LED support for various chipsets
      - Qualcomm (ath12k):
         - remove unsupported Tx monitor handling
         - support channel 2 in 6 GHz band
         - support Spatial Multiplexing Power Save (SMPS) in 6 GHz band
         - support multiple BSSID (MBSSID) and Enhanced Multi-BSSID Advertisements (EMA)
         - support dynamic VLAN
         - add panic handler for resetting the firmware state
         - DebugFS support for datapath statistics
         - WCN7850: support for Wake on WLAN
      - Microchip (wilc1000):
         - read MAC address during probe to make it visible to user space
         - suspend/resume improvements
      - TI (wl18xx):
         - support newer firmware versions
      - RealTek (rtw89):
         - preparation for RTL8852BE-VT support
         - Wake on WLAN support for WiFi 6 chips
         - 36-bit PCI DMA support
      - RealTek (rtlwifi):
         - RTL8192DU support
      - Broadcom (brcmfmac):
         - Management Frame Protection support (to enable WPA3)
   - Bluetooth:
      - qualcomm: use the power sequencer for QCA6390
      - btusb: mediatek: add ISO data transmission functions
      - hci_bcm4377: add BCM4388 support
      - btintel: add support for BlazarU core
      - btintel: add support for Whale Peak2
      - btnxpuart: add support for AW693 A1 chipset
      - btnxpuart: add support for IW615 chipset
      - btusb: add Realtek RTL8852BE support ID 0x13d3:0x3591"

* tag 'net-next-6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1589 commits)
  eth: fbnic: Fix spelling mistake "tiggerring" -> "triggering"
  tcp: Replace strncpy() with strscpy()
  wifi: ath12k: fix build vs old compiler
  tcp: Don't access uninit tcp_rsk(req)->ao_keyid in tcp_create_openreq_child().
  eth: fbnic: Write the TCAM tables used for RSS control and Rx to host
  eth: fbnic: Add L2 address programming
  eth: fbnic: Add basic Rx handling
  eth: fbnic: Add basic Tx handling
  eth: fbnic: Add link detection
  eth: fbnic: Add initial messaging to notify FW of our presence
  eth: fbnic: Implement Rx queue alloc/start/stop/free
  eth: fbnic: Implement Tx queue alloc/start/stop/free
  eth: fbnic: Allocate a netdevice and napi vectors with queues
  eth: fbnic: Add FW communication mechanism
  eth: fbnic: Add message parsing for FW messages
  eth: fbnic: Add register init to set PCIe/Ethernet device config
  eth: fbnic: Allocate core device specific structures and devlink interface
  eth: fbnic: Add scaffolding for Meta's NIC driver
  PCI: Add Meta Platforms vendor ID
  net/sched: cls_flower: propagate tca[TCA_OPTIONS] to NL_REQ_ATTR_CHECK
  ...
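The ravb item above ("improve Rx performance 30-400% by using page pool, threaded NAPI and timer-based IRQ coalescing") accounts for most of the diffstat below. As orientation for reading those hunks, here is a minimal sketch of the page_pool buffer lifecycle the conversion adopts; all foo_* names and the struct layout are hypothetical, while page_pool_alloc(), page_pool_get_dma_addr(), page_pool_put_full_page(), napi_build_skb() and skb_mark_for_recycle() are the real kernel APIs used in ravb_main.c below.

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>

struct foo_rx_buffer {			/* hypothetical, mirrors ravb_rx_buffer */
	struct page *page;
	unsigned int offset;
};

struct foo_rx_desc {			/* hypothetical minimal descriptor */
	__le32 dptr;
};

struct foo_priv {			/* hypothetical driver state */
	struct device *dev;
	struct page_pool *pool;		/* created with PP_FLAG_DMA_MAP */
	struct napi_struct napi;
	struct foo_rx_buffer *rx_buffers;
	struct foo_rx_desc *rx_desc;
	unsigned int rx_buffer_size;	/* data + SKB_DATA_ALIGN(sizeof(struct
					 * skb_shared_info)) tailroom */
};

/* Refill path: take a (possibly recycled) page fragment from the pool.
 * The pool performed dma_map_page() once when the page first entered it,
 * so only a per-buffer sync is needed here. */
static int foo_alloc_rx_buffer(struct foo_priv *priv, unsigned int entry)
{
	struct foo_rx_buffer *buf = &priv->rx_buffers[entry];
	unsigned int size = priv->rx_buffer_size;
	dma_addr_t dma_addr;

	buf->page = page_pool_alloc(priv->pool, &buf->offset, &size,
				    GFP_ATOMIC);
	if (unlikely(!buf->page))
		return -ENOMEM;

	dma_addr = page_pool_get_dma_addr(buf->page) + buf->offset;
	dma_sync_single_for_device(priv->dev, dma_addr, size, DMA_FROM_DEVICE);
	priv->rx_desc[entry].dptr = cpu_to_le32(dma_addr);
	return 0;
}

/* Completion path: wrap the received data in an skb without copying. */
static void foo_receive(struct foo_priv *priv, unsigned int entry, u16 len)
{
	struct foo_rx_buffer *buf = &priv->rx_buffers[entry];
	struct sk_buff *skb;

	dma_sync_single_for_cpu(priv->dev,
				page_pool_get_dma_addr(buf->page) + buf->offset,
				len, DMA_FROM_DEVICE);
	skb = napi_build_skb(page_address(buf->page) + buf->offset,
			     priv->rx_buffer_size);
	if (unlikely(!skb)) {
		page_pool_put_full_page(priv->pool, buf->page, true);
		return;
	}
	skb_mark_for_recycle(skb);	/* kfree_skb() returns the page to the pool */
	skb_put(skb, len);
	napi_gro_receive(&priv->napi, skb);
	buf->page = NULL;		/* consumed; the refill path allocates anew */
}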
Diffstat (limited to 'drivers/net/ethernet/renesas')
-rw-r--r--  drivers/net/ethernet/renesas/Kconfig      |   11
-rw-r--r--  drivers/net/ethernet/renesas/Makefile     |    2
-rw-r--r--  drivers/net/ethernet/renesas/ravb.h       |   15
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c  |  524
-rw-r--r--  drivers/net/ethernet/renesas/rswitch.c    |    2
-rw-r--r--  drivers/net/ethernet/renesas/rtsn.c       | 1391
-rw-r--r--  drivers/net/ethernet/renesas/rtsn.h       |  464
7 files changed, 2181 insertions(+), 228 deletions(-)
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index b03fae7a0f72..9b7559c88bee 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -33,6 +33,7 @@ config RAVB
select CRC32
select MII
select MDIO_BITBANG
+ select PAGE_POOL
select PHYLIB
select RESET_CONTROLLER
help
@@ -58,4 +59,14 @@ config RENESAS_GEN4_PTP
help
Renesas R-Car Gen4 gPTP device driver.
+config RTSN
+ tristate "Renesas Ethernet-TSN support"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on PTP_1588_CLOCK
+ select CRC32
+ select PHYLIB
+ select RENESAS_GEN4_PTP
+ help
+ Renesas Ethernet-TSN device driver.
+
endif # NET_VENDOR_RENESAS
diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile
index 9070acfd6aaf..f65fc76f8b4d 100644
--- a/drivers/net/ethernet/renesas/Makefile
+++ b/drivers/net/ethernet/renesas/Makefile
@@ -11,3 +11,5 @@ obj-$(CONFIG_RAVB) += ravb.o
obj-$(CONFIG_RENESAS_ETHER_SWITCH) += rswitch.o
obj-$(CONFIG_RENESAS_GEN4_PTP) += rcar_gen4_ptp.o
+
+obj-$(CONFIG_RTSN) += rtsn.o
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index b48935ec7e28..9893c91af105 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -19,6 +19,7 @@
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/ptp_clock_kernel.h>
+#include <net/page_pool/types.h>
#define BE_TX_RING_SIZE 64 /* TX ring size for Best Effort */
#define BE_RX_RING_SIZE 1024 /* RX ring size for Best Effort */
@@ -257,6 +258,7 @@ enum APSR_BIT {
APSR_CMSW = 0x00000010,
APSR_RDM = 0x00002000,
APSR_TDM = 0x00004000,
+ APSR_MIISELECT = 0x01000000, /* R-Car V4M only */
};
/* RCR */
@@ -1039,7 +1041,7 @@ struct ravb_ptp {
};
struct ravb_hw_info {
- bool (*receive)(struct net_device *ndev, int *quota, int q);
+ int (*receive)(struct net_device *ndev, int budget, int q);
void (*set_rate)(struct net_device *ndev);
int (*set_feature)(struct net_device *ndev, netdev_features_t features);
int (*dmac_init)(struct net_device *ndev);
@@ -1051,9 +1053,10 @@ struct ravb_hw_info {
int stats_len;
u32 tccr_mask;
u32 rx_max_frame_size;
- u32 rx_max_desc_use;
+ u32 rx_buffer_size;
u32 rx_desc_size;
unsigned aligned_tx: 1;
+ unsigned coalesce_irqs:1; /* Needs software IRQ coalescing */
/* hardware features */
unsigned internal_delay:1; /* AVB-DMAC has internal delays */
@@ -1070,6 +1073,11 @@ struct ravb_hw_info {
unsigned half_duplex:1; /* E-MAC supports half duplex mode */
};
+struct ravb_rx_buffer {
+ struct page *page;
+ unsigned int offset;
+};
+
struct ravb_private {
struct net_device *ndev;
struct platform_device *pdev;
@@ -1093,7 +1101,8 @@ struct ravb_private {
struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
void *tx_align[NUM_TX_QUEUE];
struct sk_buff *rx_1st_skb;
- struct sk_buff **rx_skb[NUM_RX_QUEUE];
+ struct page_pool *rx_pool[NUM_RX_QUEUE];
+ struct ravb_rx_buffer *rx_buffers[NUM_RX_QUEUE];
struct sk_buff **tx_skb[NUM_TX_QUEUE];
u32 rx_over_errors;
u32 rx_fifo_errors;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 4d100283c30f..c02fb296bf7d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -30,6 +30,7 @@
#include <linux/reset.h>
#include <linux/math64.h>
#include <net/ip.h>
+#include <net/page_pool/helpers.h>
#include "ravb.h"
@@ -113,25 +114,6 @@ static void ravb_set_rate_rcar(struct net_device *ndev)
}
}
-static struct sk_buff *
-ravb_alloc_skb(struct net_device *ndev, const struct ravb_hw_info *info,
- gfp_t gfp_mask)
-{
- struct sk_buff *skb;
- u32 reserve;
-
- skb = __netdev_alloc_skb(ndev, info->rx_max_frame_size + RAVB_ALIGN - 1,
- gfp_mask);
- if (!skb)
- return NULL;
-
- reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
- if (reserve)
- skb_reserve(skb, RAVB_ALIGN - reserve);
-
- return skb;
-}
-
/* Get MAC address from the MAC address registers
*
* Ethernet AVB device doesn't have ROM for MAC address.
@@ -257,21 +239,10 @@ static void ravb_rx_ring_free(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int ring_size;
- unsigned int i;
if (!priv->rx_ring[q].raw)
return;
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- struct ravb_rx_desc *desc = ravb_rx_get_desc(priv, q, i);
-
- if (!dma_mapping_error(ndev->dev.parent,
- le32_to_cpu(desc->dptr)))
- dma_unmap_single(ndev->dev.parent,
- le32_to_cpu(desc->dptr),
- priv->info->rx_max_frame_size,
- DMA_FROM_DEVICE);
- }
ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].raw,
priv->rx_desc_dma[q]);
@@ -298,13 +269,16 @@ static void ravb_ring_free(struct net_device *ndev, int q)
priv->tx_ring[q] = NULL;
}
- /* Free RX skb ringbuffer */
- if (priv->rx_skb[q]) {
- for (i = 0; i < priv->num_rx_ring[q]; i++)
- dev_kfree_skb(priv->rx_skb[q][i]);
+ /* Free RX buffers */
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ if (priv->rx_buffers[q][i].page)
+ page_pool_put_page(priv->rx_pool[q],
+ priv->rx_buffers[q][i].page,
+ 0, true);
}
- kfree(priv->rx_skb[q]);
- priv->rx_skb[q] = NULL;
+ kfree(priv->rx_buffers[q]);
+ priv->rx_buffers[q] = NULL;
+ page_pool_destroy(priv->rx_pool[q]);
/* Free aligned TX buffers */
kfree(priv->tx_align[q]);
@@ -317,35 +291,64 @@ static void ravb_ring_free(struct net_device *ndev, int q)
priv->tx_skb[q] = NULL;
}
-static void ravb_rx_ring_format(struct net_device *ndev, int q)
+static int
+ravb_alloc_rx_buffer(struct net_device *ndev, int q, u32 entry, gfp_t gfp_mask,
+ struct ravb_rx_desc *rx_desc)
{
struct ravb_private *priv = netdev_priv(ndev);
- struct ravb_rx_desc *rx_desc;
- unsigned int rx_ring_size;
+ const struct ravb_hw_info *info = priv->info;
+ struct ravb_rx_buffer *rx_buff;
dma_addr_t dma_addr;
- unsigned int i;
+ unsigned int size;
- rx_ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q];
- memset(priv->rx_ring[q].raw, 0, rx_ring_size);
- /* Build RX ring buffer */
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- /* RX descriptor */
- rx_desc = ravb_rx_get_desc(priv, q, i);
- rx_desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
- dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
- priv->info->rx_max_frame_size,
- DMA_FROM_DEVICE);
+ rx_buff = &priv->rx_buffers[q][entry];
+ size = info->rx_buffer_size;
+ rx_buff->page = page_pool_alloc(priv->rx_pool[q], &rx_buff->offset,
+ &size, gfp_mask);
+ if (unlikely(!rx_buff->page)) {
/* We just set the data size to 0 for a failed mapping which
* should prevent DMA from happening...
*/
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- rx_desc->ds_cc = cpu_to_le16(0);
- rx_desc->dptr = cpu_to_le32(dma_addr);
+ rx_desc->ds_cc = cpu_to_le16(0);
+ return -ENOMEM;
+ }
+
+ dma_addr = page_pool_get_dma_addr(rx_buff->page) + rx_buff->offset;
+ dma_sync_single_for_device(ndev->dev.parent, dma_addr,
+ info->rx_buffer_size, DMA_FROM_DEVICE);
+ rx_desc->dptr = cpu_to_le32(dma_addr);
+
+ /* The end of the RX buffer is used to store skb shared data, so we need
+ * to ensure that the hardware leaves enough space for this.
+ */
+ rx_desc->ds_cc = cpu_to_le16(info->rx_buffer_size -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) -
+ ETH_FCS_LEN + sizeof(__sum16));
+ return 0;
+}
+
+static u32
+ravb_rx_ring_refill(struct net_device *ndev, int q, u32 count, gfp_t gfp_mask)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct ravb_rx_desc *rx_desc;
+ u32 i, entry;
+
+ for (i = 0; i < count; i++) {
+ entry = (priv->dirty_rx[q] + i) % priv->num_rx_ring[q];
+ rx_desc = ravb_rx_get_desc(priv, q, entry);
+
+ if (!priv->rx_buffers[q][entry].page) {
+ if (unlikely(ravb_alloc_rx_buffer(ndev, q, entry,
+ gfp_mask, rx_desc)))
+ break;
+ }
+ /* Descriptor type must be set after all the above writes */
+ dma_wmb();
rx_desc->die_dt = DT_FEMPTY;
}
- rx_desc = ravb_rx_get_desc(priv, q, i);
- rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
- rx_desc->die_dt = DT_LINKFIX; /* type */
+
+ return i;
}
/* Format skb and descriptor buffer for Ethernet AVB */
@@ -353,6 +356,7 @@ static void ravb_ring_format(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int num_tx_desc = priv->num_tx_desc;
+ struct ravb_rx_desc *rx_desc;
struct ravb_tx_desc *tx_desc;
struct ravb_desc *desc;
unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
@@ -364,7 +368,13 @@ static void ravb_ring_format(struct net_device *ndev, int q)
priv->dirty_rx[q] = 0;
priv->dirty_tx[q] = 0;
- ravb_rx_ring_format(ndev, q);
+ /* Regular RX descriptors have already been initialized by
+ * ravb_rx_ring_refill(), we just need to initialize the final link
+ * descriptor.
+ */
+ rx_desc = ravb_rx_get_desc(priv, q, priv->num_rx_ring[q]);
+ rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
+ rx_desc->die_dt = DT_LINKFIX; /* type */
memset(priv->tx_ring[q], 0, tx_ring_size);
/* Build TX ring buffer */
@@ -408,26 +418,47 @@ static void *ravb_alloc_rx_desc(struct net_device *ndev, int q)
static int ravb_ring_init(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
unsigned int num_tx_desc = priv->num_tx_desc;
+ struct page_pool_params params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP,
+ .pool_size = priv->num_rx_ring[q],
+ .nid = NUMA_NO_NODE,
+ .dev = ndev->dev.parent,
+ .dma_dir = DMA_FROM_DEVICE,
+ };
unsigned int ring_size;
- struct sk_buff *skb;
- unsigned int i;
+ u32 num_filled;
+
+ /* Allocate RX page pool and buffers */
+ priv->rx_pool[q] = page_pool_create(&params);
+ if (IS_ERR(priv->rx_pool[q]))
+ goto error;
- /* Allocate RX and TX skb rings */
- priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
- sizeof(*priv->rx_skb[q]), GFP_KERNEL);
+ /* Allocate RX buffers */
+ priv->rx_buffers[q] = kcalloc(priv->num_rx_ring[q],
+ sizeof(*priv->rx_buffers[q]), GFP_KERNEL);
+ if (!priv->rx_buffers[q])
+ goto error;
+
+ /* Allocate TX skb rings */
priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
sizeof(*priv->tx_skb[q]), GFP_KERNEL);
- if (!priv->rx_skb[q] || !priv->tx_skb[q])
+ if (!priv->tx_skb[q])
goto error;
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- skb = ravb_alloc_skb(ndev, info, GFP_KERNEL);
- if (!skb)
- goto error;
- priv->rx_skb[q][i] = skb;
- }
+ /* Allocate all RX descriptors. */
+ if (!ravb_alloc_rx_desc(ndev, q))
+ goto error;
+
+ /* Populate RX ring buffer. */
+ priv->dirty_rx[q] = 0;
+ ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q];
+ memset(priv->rx_ring[q].raw, 0, ring_size);
+ num_filled = ravb_rx_ring_refill(ndev, q, priv->num_rx_ring[q],
+ GFP_KERNEL);
+ if (num_filled != priv->num_rx_ring[q])
+ goto error;
if (num_tx_desc > 1) {
/* Allocate rings for the aligned buffers */
@@ -437,12 +468,6 @@ static int ravb_ring_init(struct net_device *ndev, int q)
goto error;
}
- /* Allocate all RX descriptors. */
- if (!ravb_alloc_rx_desc(ndev, q))
- goto error;
-
- priv->dirty_rx[q] = 0;
-
/* Allocate all TX descriptors. */
ring_size = sizeof(struct ravb_tx_desc) *
(priv->num_tx_ring[q] * num_tx_desc + 1);
@@ -554,6 +579,16 @@ static void ravb_emac_init_rcar(struct net_device *ndev)
ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
}
+static void ravb_emac_init_rcar_gen4(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ bool mii = priv->phy_interface == PHY_INTERFACE_MODE_MII;
+
+ ravb_modify(ndev, APSR, APSR_MIISELECT, mii ? APSR_MIISELECT : 0);
+
+ ravb_emac_init_rcar(ndev);
+}
+
/* E-MAC init function */
static void ravb_emac_init(struct net_device *ndev)
{
@@ -706,7 +741,9 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
static void ravb_rx_csum_gbeth(struct sk_buff *skb)
{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
__wsum csum_ip_hdr, csum_proto;
+ skb_frag_t *last_frag;
u8 *hw_csum;
/* The hardware checksum status is contained in sizeof(__sum16) * 2 = 4
@@ -716,12 +753,24 @@ static void ravb_rx_csum_gbeth(struct sk_buff *skb)
if (unlikely(skb->len < sizeof(__sum16) * 2))
return;
- hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
+ if (skb_is_nonlinear(skb)) {
+ last_frag = &shinfo->frags[shinfo->nr_frags - 1];
+ hw_csum = skb_frag_address(last_frag) +
+ skb_frag_size(last_frag);
+ } else {
+ hw_csum = skb_tail_pointer(skb);
+ }
+
+ hw_csum -= sizeof(__sum16);
csum_proto = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
hw_csum -= sizeof(__sum16);
csum_ip_hdr = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
- skb_trim(skb, skb->len - 2 * sizeof(__sum16));
+
+ if (skb_is_nonlinear(skb))
+ skb_frag_size_sub(last_frag, 2 * sizeof(__sum16));
+ else
+ skb_trim(skb, skb->len - 2 * sizeof(__sum16));
/* TODO: IPV6 Rx checksum */
if (skb->protocol == htons(ETH_P_IP) && !csum_ip_hdr && !csum_proto)
@@ -743,30 +792,14 @@ static void ravb_rx_csum(struct sk_buff *skb)
skb_trim(skb, skb->len - sizeof(__sum16));
}
-static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry,
- struct ravb_rx_desc *desc)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- struct sk_buff *skb;
-
- skb = priv->rx_skb[RAVB_BE][entry];
- priv->rx_skb[RAVB_BE][entry] = NULL;
- dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- ALIGN(priv->info->rx_max_frame_size, 16),
- DMA_FROM_DEVICE);
-
- return skb;
-}
-
/* Packet receive function for Gigabit Ethernet */
-static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
+static int ravb_rx_gbeth(struct net_device *ndev, int budget, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct net_device_stats *stats;
struct ravb_rx_desc *desc;
struct sk_buff *skb;
- dma_addr_t dma_addr;
int rx_packets = 0;
u8 desc_status;
u16 desc_len;
@@ -781,7 +814,7 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
entry = priv->cur_rx[q] % priv->num_rx_ring[q];
desc = &priv->rx_ring[q].desc[entry];
- if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+ if (rx_packets == budget || desc->die_dt == DT_FEMPTY)
break;
/* Descriptor type must be checked before all other reads */
@@ -807,87 +840,110 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
if (desc_status & MSC_CEEF)
stats->rx_missed_errors++;
} else {
+ struct ravb_rx_buffer *rx_buff;
+ void *rx_addr;
+
+ rx_buff = &priv->rx_buffers[q][entry];
+ rx_addr = page_address(rx_buff->page) + rx_buff->offset;
die_dt = desc->die_dt & 0xF0;
+ dma_sync_single_for_cpu(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ desc_len, DMA_FROM_DEVICE);
+
switch (die_dt) {
case DT_FSINGLE:
- skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_put(skb, desc_len);
- skb->protocol = eth_type_trans(skb, ndev);
- if (ndev->features & NETIF_F_RXCSUM)
- ravb_rx_csum_gbeth(skb);
- napi_gro_receive(&priv->napi[q], skb);
- rx_packets++;
- stats->rx_bytes += desc_len;
- break;
case DT_FSTART:
- priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_put(priv->rx_1st_skb, desc_len);
+ /* Start of packet: Set initial data length. */
+ skb = napi_build_skb(rx_addr,
+ info->rx_buffer_size);
+ if (unlikely(!skb)) {
+ stats->rx_errors++;
+ page_pool_put_page(priv->rx_pool[q],
+ rx_buff->page, 0,
+ true);
+ goto refill;
+ }
+ skb_mark_for_recycle(skb);
+ skb_put(skb, desc_len);
+
+ /* Save this skb if the packet spans multiple
+ * descriptors.
+ */
+ if (die_dt == DT_FSTART)
+ priv->rx_1st_skb = skb;
break;
+
case DT_FMID:
- skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_copy_to_linear_data_offset(priv->rx_1st_skb,
- priv->rx_1st_skb->len,
- skb->data,
- desc_len);
- skb_put(priv->rx_1st_skb, desc_len);
- dev_kfree_skb(skb);
- break;
case DT_FEND:
- skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_copy_to_linear_data_offset(priv->rx_1st_skb,
- priv->rx_1st_skb->len,
- skb->data,
- desc_len);
- skb_put(priv->rx_1st_skb, desc_len);
- dev_kfree_skb(skb);
- priv->rx_1st_skb->protocol =
- eth_type_trans(priv->rx_1st_skb, ndev);
+ /* Continuing a packet: Add this buffer as an RX
+ * frag.
+ */
+
+ /* rx_1st_skb will be NULL if napi_build_skb()
+ * failed for the first descriptor of a
+ * multi-descriptor packet.
+ */
+ if (unlikely(!priv->rx_1st_skb)) {
+ stats->rx_errors++;
+ page_pool_put_page(priv->rx_pool[q],
+ rx_buff->page, 0,
+ true);
+
+ /* We may find a DT_FSINGLE or DT_FSTART
+ * descriptor in the queue which we can
+ * process, so don't give up yet.
+ */
+ continue;
+ }
+ skb_add_rx_frag(priv->rx_1st_skb,
+ skb_shinfo(priv->rx_1st_skb)->nr_frags,
+ rx_buff->page, rx_buff->offset,
+ desc_len, info->rx_buffer_size);
+
+ /* Set skb to point at the whole packet so that
+ * we only need one code path for finishing a
+ * packet.
+ */
+ skb = priv->rx_1st_skb;
+ }
+
+ switch (die_dt) {
+ case DT_FSINGLE:
+ case DT_FEND:
+ /* Finishing a packet: Determine protocol &
+ * checksum, hand off to NAPI and update our
+ * stats.
+ */
+ skb->protocol = eth_type_trans(skb, ndev);
if (ndev->features & NETIF_F_RXCSUM)
- ravb_rx_csum_gbeth(priv->rx_1st_skb);
- stats->rx_bytes += priv->rx_1st_skb->len;
- napi_gro_receive(&priv->napi[q],
- priv->rx_1st_skb);
+ ravb_rx_csum_gbeth(skb);
+ stats->rx_bytes += skb->len;
+ napi_gro_receive(&priv->napi[q], skb);
rx_packets++;
- break;
+
+ /* Clear rx_1st_skb so that it will only be
+ * non-NULL when valid.
+ */
+ priv->rx_1st_skb = NULL;
}
+
+ /* Mark this RX buffer as consumed. */
+ rx_buff->page = NULL;
}
}
+refill:
/* Refill the RX ring buffers. */
- for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
- entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q].desc[entry];
- desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
-
- if (!priv->rx_skb[q][entry]) {
- skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC);
- if (!skb)
- break;
- dma_addr = dma_map_single(ndev->dev.parent,
- skb->data,
- priv->info->rx_max_frame_size,
- DMA_FROM_DEVICE);
- skb_checksum_none_assert(skb);
- /* We just set the data size to 0 for a failed mapping
- * which should prevent DMA from happening...
- */
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- desc->ds_cc = cpu_to_le16(0);
- desc->dptr = cpu_to_le32(dma_addr);
- priv->rx_skb[q][entry] = skb;
- }
- /* Descriptor type must be set after all the above writes */
- dma_wmb();
- desc->die_dt = DT_FEMPTY;
- }
+ priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q,
+ priv->cur_rx[q] - priv->dirty_rx[q],
+ GFP_ATOMIC);
stats->rx_packets += rx_packets;
- *quota -= rx_packets;
- return *quota == 0;
+ return rx_packets;
}
/* Packet receive function for Ethernet AVB */
-static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
+static int ravb_rx_rcar(struct net_device *ndev, int budget, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
@@ -895,7 +951,6 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
struct ravb_ex_rx_desc *desc;
unsigned int limit, i;
struct sk_buff *skb;
- dma_addr_t dma_addr;
struct timespec64 ts;
int rx_packets = 0;
u8 desc_status;
@@ -906,7 +961,7 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
entry = priv->cur_rx[q] % priv->num_rx_ring[q];
desc = &priv->rx_ring[q].ex_desc[entry];
- if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+ if (rx_packets == budget || desc->die_dt == DT_FEMPTY)
break;
/* Descriptor type must be checked before all other reads */
@@ -934,12 +989,23 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
stats->rx_missed_errors++;
} else {
u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
-
- skb = priv->rx_skb[q][entry];
- priv->rx_skb[q][entry] = NULL;
- dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- priv->info->rx_max_frame_size,
- DMA_FROM_DEVICE);
+ struct ravb_rx_buffer *rx_buff;
+ void *rx_addr;
+
+ rx_buff = &priv->rx_buffers[q][entry];
+ rx_addr = page_address(rx_buff->page) + rx_buff->offset;
+ dma_sync_single_for_cpu(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ pkt_len, DMA_FROM_DEVICE);
+
+ skb = napi_build_skb(rx_addr, info->rx_buffer_size);
+ if (unlikely(!skb)) {
+ stats->rx_errors++;
+ page_pool_put_page(priv->rx_pool[q],
+ rx_buff->page, 0, true);
+ break;
+ }
+ skb_mark_for_recycle(skb);
get_ts &= (q == RAVB_NC) ?
RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
@@ -961,48 +1027,28 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
napi_gro_receive(&priv->napi[q], skb);
rx_packets++;
stats->rx_bytes += pkt_len;
+
+ /* Mark this RX buffer as consumed. */
+ rx_buff->page = NULL;
}
}
/* Refill the RX ring buffers. */
- for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
- entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q].ex_desc[entry];
- desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
-
- if (!priv->rx_skb[q][entry]) {
- skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC);
- if (!skb)
- break; /* Better luck next round. */
- dma_addr = dma_map_single(ndev->dev.parent, skb->data,
- priv->info->rx_max_frame_size,
- DMA_FROM_DEVICE);
- skb_checksum_none_assert(skb);
- /* We just set the data size to 0 for a failed mapping
- * which should prevent DMA from happening...
- */
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- desc->ds_cc = cpu_to_le16(0);
- desc->dptr = cpu_to_le32(dma_addr);
- priv->rx_skb[q][entry] = skb;
- }
- /* Descriptor type must be set after all the above writes */
- dma_wmb();
- desc->die_dt = DT_FEMPTY;
- }
+ priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q,
+ priv->cur_rx[q] - priv->dirty_rx[q],
+ GFP_ATOMIC);
stats->rx_packets += rx_packets;
- *quota -= rx_packets;
- return *quota == 0;
+ return rx_packets;
}
/* Packet receive function for Ethernet AVB */
-static bool ravb_rx(struct net_device *ndev, int *quota, int q)
+static int ravb_rx(struct net_device *ndev, int budget, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- return info->receive(ndev, quota, q);
+ return info->receive(ndev, budget, q);
}
static void ravb_rcv_snd_disable(struct net_device *ndev)
@@ -1319,13 +1365,12 @@ static int ravb_poll(struct napi_struct *napi, int budget)
unsigned long flags;
int q = napi - priv->napi;
int mask = BIT(q);
- int quota = budget;
- bool unmask;
+ int work_done;
/* Processing RX Descriptor Ring */
/* Clear RX interrupt */
ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
- unmask = !ravb_rx(ndev, &quota, q);
+ work_done = ravb_rx(ndev, budget, q);
/* Processing TX Descriptor Ring */
spin_lock_irqsave(&priv->lock, flags);
@@ -1344,24 +1389,20 @@ static int ravb_poll(struct napi_struct *napi, int budget)
if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
- if (!unmask)
- goto out;
-
- napi_complete(napi);
-
- /* Re-enable RX/TX interrupts */
- spin_lock_irqsave(&priv->lock, flags);
- if (!info->irq_en_dis) {
- ravb_modify(ndev, RIC0, mask, mask);
- ravb_modify(ndev, TIC, mask, mask);
- } else {
- ravb_write(ndev, mask, RIE0);
- ravb_write(ndev, mask, TIE);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ /* Re-enable RX/TX interrupts */
+ spin_lock_irqsave(&priv->lock, flags);
+ if (!info->irq_en_dis) {
+ ravb_modify(ndev, RIC0, mask, mask);
+ ravb_modify(ndev, TIC, mask, mask);
+ } else {
+ ravb_write(ndev, mask, RIE0);
+ ravb_write(ndev, mask, TIE);
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
}
- spin_unlock_irqrestore(&priv->lock, flags);
-out:
- return budget - quota;
+ return work_done;
}
static void ravb_set_duplex_gbeth(struct net_device *ndev)
@@ -1696,7 +1737,7 @@ static int ravb_set_ringparam(struct net_device *ndev,
}
static int ravb_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *hw_info = priv->info;
@@ -2621,6 +2662,28 @@ static int ravb_mdio_release(struct ravb_private *priv)
return 0;
}
+static const struct ravb_hw_info ravb_gen2_hw_info = {
+ .receive = ravb_rx_rcar,
+ .set_rate = ravb_set_rate_rcar,
+ .set_feature = ravb_set_features_rcar,
+ .dmac_init = ravb_dmac_init_rcar,
+ .emac_init = ravb_emac_init_rcar,
+ .gstrings_stats = ravb_gstrings_stats,
+ .gstrings_size = sizeof(ravb_gstrings_stats),
+ .net_hw_features = NETIF_F_RXCSUM,
+ .net_features = NETIF_F_RXCSUM,
+ .stats_len = ARRAY_SIZE(ravb_gstrings_stats),
+ .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .rx_max_frame_size = SZ_2K,
+ .rx_buffer_size = SZ_2K +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+ .rx_desc_size = sizeof(struct ravb_ex_rx_desc),
+ .aligned_tx = 1,
+ .gptp = 1,
+ .nc_queues = 1,
+ .magic_pkt = 1,
+};
+
static const struct ravb_hw_info ravb_gen3_hw_info = {
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
@@ -2634,7 +2697,8 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
.rx_max_frame_size = SZ_2K,
- .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+ .rx_buffer_size = SZ_2K +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
.internal_delay = 1,
.tx_counters = 1,
@@ -2645,12 +2709,12 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
.magic_pkt = 1,
};
-static const struct ravb_hw_info ravb_gen2_hw_info = {
+static const struct ravb_hw_info ravb_gen4_hw_info = {
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
.set_feature = ravb_set_features_rcar,
.dmac_init = ravb_dmac_init_rcar,
- .emac_init = ravb_emac_init_rcar,
+ .emac_init = ravb_emac_init_rcar_gen4,
.gstrings_stats = ravb_gstrings_stats,
.gstrings_size = sizeof(ravb_gstrings_stats),
.net_hw_features = NETIF_F_RXCSUM,
@@ -2658,10 +2722,14 @@ static const struct ravb_hw_info ravb_gen2_hw_info = {
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
.rx_max_frame_size = SZ_2K,
- .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+ .rx_buffer_size = SZ_2K +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
- .aligned_tx = 1,
- .gptp = 1,
+ .internal_delay = 1,
+ .tx_counters = 1,
+ .multi_irqs = 1,
+ .irq_en_dis = 1,
+ .ccc_gac = 1,
.nc_queues = 1,
.magic_pkt = 1,
};
@@ -2679,7 +2747,8 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
.rx_max_frame_size = SZ_2K,
- .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+ .rx_buffer_size = SZ_2K +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
.multi_irqs = 1,
.err_mgmt_irqs = 1,
@@ -2702,9 +2771,10 @@ static const struct ravb_hw_info gbeth_hw_info = {
.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
.tccr_mask = TCCR_TSRQ0,
.rx_max_frame_size = SZ_8K,
- .rx_max_desc_use = 4080,
+ .rx_buffer_size = SZ_2K,
.rx_desc_size = sizeof(struct ravb_rx_desc),
.aligned_tx = 1,
+ .coalesce_irqs = 1,
.tx_counters = 1,
.carrier_counters = 1,
.half_duplex = 1,
@@ -2716,7 +2786,7 @@ static const struct of_device_id ravb_match_table[] = {
{ .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
{ .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
{ .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
- { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen3_hw_info },
+ { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen4_hw_info },
{ .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info },
{ .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
{ }
@@ -2981,6 +3051,12 @@ static int ravb_probe(struct platform_device *pdev)
if (info->nc_queues)
netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll);
+ if (info->coalesce_irqs) {
+ netdev_sw_irq_coalesce_default_on(ndev);
+ if (num_present_cpus() == 1)
+ dev_set_threaded(ndev, true);
+ }
+
/* Network device register */
error = register_netdev(ndev);
if (error)
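The ravb_poll() rework in the hunks above moves the driver onto the standard NAPI contract: the receive handler consumes at most `budget` packets and returns the count, and interrupts are re-enabled only when the poll finishes under budget and napi_complete_done() accepts the completion. A condensed sketch of that contract, assuming hypothetical foo_* helpers (napi_complete_done() and container_of() are the real kernel APIs):

#include <linux/netdevice.h>

static int foo_rx(struct foo_priv *priv, int budget);	/* hypothetical */
static void foo_enable_irqs(struct foo_priv *priv);	/* hypothetical */

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int work_done = foo_rx(priv, budget);	/* at most budget packets */

	/* Returning work_done == budget asks the core to poll again soon.
	 * napi_complete_done() may also refuse the completion (e.g. while
	 * busy polling), in which case interrupts must stay disabled. */
	if (work_done < budget && napi_complete_done(napi, work_done))
		foo_enable_irqs(priv);

	return work_done;
}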
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 24c90d8f5a44..ff50e20856ec 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -1809,7 +1809,7 @@ static const struct net_device_ops rswitch_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static int rswitch_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
+static int rswitch_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
{
struct rswitch_device *rdev = netdev_priv(ndev);
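The one-line rswitch change above, like the matching ravb_get_ts_info() hunk earlier, is the rename of struct ethtool_ts_info to struct kernel_ethtool_ts_info from this cycle's ethtool timestamping rework; the callback bodies are unchanged. For reference, a minimal sketch of a .get_ts_info implementation against the renamed struct, with a hypothetical foo_ prefix and PHC index:

#include <linux/ethtool.h>
#include <linux/net_tstamp.h>

static int foo_get_ts_info(struct net_device *ndev,
			   struct kernel_ethtool_ts_info *info)
{
	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
	info->phc_index = 0;	/* hypothetical: the driver's PTP clock index */
	return 0;
}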
diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c
new file mode 100644
index 000000000000..577227c007ab
--- /dev/null
+++ b/drivers/net/ethernet/renesas/rtsn.c
@@ -0,0 +1,1391 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Renesas Ethernet-TSN device driver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ * Copyright (C) 2023 Niklas Söderlund <niklas.soderlund@ragnatech.se>
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#include "rtsn.h"
+#include "rcar_gen4_ptp.h"
+
+struct rtsn_private {
+ struct net_device *ndev;
+ struct platform_device *pdev;
+ void __iomem *base;
+ struct rcar_gen4_ptp_private *ptp_priv;
+ struct clk *clk;
+ struct reset_control *reset;
+
+ u32 num_tx_ring;
+ u32 num_rx_ring;
+ u32 tx_desc_bat_size;
+ dma_addr_t tx_desc_bat_dma;
+ struct rtsn_desc *tx_desc_bat;
+ u32 rx_desc_bat_size;
+ dma_addr_t rx_desc_bat_dma;
+ struct rtsn_desc *rx_desc_bat;
+ dma_addr_t tx_desc_dma;
+ dma_addr_t rx_desc_dma;
+ struct rtsn_ext_desc *tx_ring;
+ struct rtsn_ext_ts_desc *rx_ring;
+ struct sk_buff **tx_skb;
+ struct sk_buff **rx_skb;
+ spinlock_t lock; /* Register access lock */
+ u32 cur_tx;
+ u32 dirty_tx;
+ u32 cur_rx;
+ u32 dirty_rx;
+ u8 ts_tag;
+ struct napi_struct napi;
+ struct rtnl_link_stats64 stats;
+
+ struct mii_bus *mii;
+ phy_interface_t iface;
+ int link;
+ int speed;
+
+ int tx_data_irq;
+ int rx_data_irq;
+};
+
+static u32 rtsn_read(struct rtsn_private *priv, enum rtsn_reg reg)
+{
+ return ioread32(priv->base + reg);
+}
+
+static void rtsn_write(struct rtsn_private *priv, enum rtsn_reg reg, u32 data)
+{
+ iowrite32(data, priv->base + reg);
+}
+
+static void rtsn_modify(struct rtsn_private *priv, enum rtsn_reg reg,
+ u32 clear, u32 set)
+{
+ rtsn_write(priv, reg, (rtsn_read(priv, reg) & ~clear) | set);
+}
+
+static int rtsn_reg_wait(struct rtsn_private *priv, enum rtsn_reg reg,
+ u32 mask, u32 expected)
+{
+ u32 val;
+
+ return readl_poll_timeout(priv->base + reg, val,
+ (val & mask) == expected,
+ RTSN_INTERVAL_US, RTSN_TIMEOUT_US);
+}
+
+static void rtsn_ctrl_data_irq(struct rtsn_private *priv, bool enable)
+{
+ if (enable) {
+ rtsn_write(priv, TDIE0, TDIE_TDID_TDX(TX_CHAIN_IDX));
+ rtsn_write(priv, RDIE0, RDIE_RDID_RDX(RX_CHAIN_IDX));
+ } else {
+ rtsn_write(priv, TDID0, TDIE_TDID_TDX(TX_CHAIN_IDX));
+ rtsn_write(priv, RDID0, RDIE_RDID_RDX(RX_CHAIN_IDX));
+ }
+}
+
+static void rtsn_get_timestamp(struct rtsn_private *priv, struct timespec64 *ts)
+{
+ struct rcar_gen4_ptp_private *ptp_priv = priv->ptp_priv;
+
+ ptp_priv->info.gettime64(&ptp_priv->info, ts);
+}
+
+static int rtsn_tx_free(struct net_device *ndev, bool free_txed_only)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ struct rtsn_ext_desc *desc;
+ struct sk_buff *skb;
+ int free_num = 0;
+ int entry, size;
+
+ for (; priv->cur_tx - priv->dirty_tx > 0; priv->dirty_tx++) {
+ entry = priv->dirty_tx % priv->num_tx_ring;
+ desc = &priv->tx_ring[entry];
+ if (free_txed_only && (desc->die_dt & DT_MASK) != DT_FEMPTY)
+ break;
+
+ dma_rmb();
+ size = le16_to_cpu(desc->info_ds) & TX_DS;
+ skb = priv->tx_skb[entry];
+ if (skb) {
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct timespec64 ts;
+
+ rtsn_get_timestamp(priv, &ts);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+ dma_unmap_single(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ size, DMA_TO_DEVICE);
+ dev_kfree_skb_any(priv->tx_skb[entry]);
+ free_num++;
+
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += size;
+ }
+
+ desc->die_dt = DT_EEMPTY;
+ }
+
+ desc = &priv->tx_ring[priv->num_tx_ring];
+ desc->die_dt = DT_LINK;
+
+ return free_num;
+}
+
+static int rtsn_rx(struct net_device *ndev, int budget)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ unsigned int ndescriptors;
+ unsigned int rx_packets;
+ unsigned int i;
+ bool get_ts;
+
+ get_ts = priv->ptp_priv->tstamp_rx_ctrl &
+ RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
+
+ ndescriptors = priv->dirty_rx + priv->num_rx_ring - priv->cur_rx;
+ rx_packets = 0;
+ for (i = 0; i < ndescriptors; i++) {
+ const unsigned int entry = priv->cur_rx % priv->num_rx_ring;
+ struct rtsn_ext_ts_desc *desc = &priv->rx_ring[entry];
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u16 pkt_len;
+
+ /* Stop processing descriptors if budget is consumed. */
+ if (rx_packets >= budget)
+ break;
+
+ /* Stop processing descriptors on first empty. */
+ if ((desc->die_dt & DT_MASK) == DT_FEMPTY)
+ break;
+
+ dma_rmb();
+ pkt_len = le16_to_cpu(desc->info_ds) & RX_DS;
+
+ skb = priv->rx_skb[entry];
+ priv->rx_skb[entry] = NULL;
+ dma_addr = le32_to_cpu(desc->dptr);
+ dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+
+ /* Get timestamp if enabled. */
+ if (get_ts) {
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct timespec64 ts;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
+ ts.tv_sec = (u64)le32_to_cpu(desc->ts_sec);
+ ts.tv_nsec = le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
+
+ shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
+ }
+
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&priv->napi, skb);
+
+ /* Update statistics. */
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += pkt_len;
+
+ /* Update counters. */
+ priv->cur_rx++;
+ rx_packets++;
+ }
+
+ /* Refill the RX ring buffers */
+ for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
+ const unsigned int entry = priv->dirty_rx % priv->num_rx_ring;
+ struct rtsn_ext_ts_desc *desc = &priv->rx_ring[entry];
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+
+ desc->info_ds = cpu_to_le16(PKT_BUF_SZ);
+
+ if (!priv->rx_skb[entry]) {
+ skb = napi_alloc_skb(&priv->napi,
+ PKT_BUF_SZ + RTSN_ALIGN - 1);
+ if (!skb)
+ break;
+ skb_reserve(skb, NET_IP_ALIGN);
+ dma_addr = dma_map_single(ndev->dev.parent, skb->data,
+ le16_to_cpu(desc->info_ds),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ desc->info_ds = cpu_to_le16(0);
+ desc->dptr = cpu_to_le32(dma_addr);
+ skb_checksum_none_assert(skb);
+ priv->rx_skb[entry] = skb;
+ }
+
+ dma_wmb();
+ desc->die_dt = DT_FEMPTY | D_DIE;
+ }
+
+ priv->rx_ring[priv->num_rx_ring].die_dt = DT_LINK;
+
+ return rx_packets;
+}
+
+static int rtsn_poll(struct napi_struct *napi, int budget)
+{
+ struct rtsn_private *priv;
+ struct net_device *ndev;
+ unsigned long flags;
+ int work_done;
+
+ ndev = napi->dev;
+ priv = netdev_priv(ndev);
+
+ /* Processing RX Descriptor Ring */
+ work_done = rtsn_rx(ndev, budget);
+
+ /* Processing TX Descriptor Ring */
+ spin_lock_irqsave(&priv->lock, flags);
+ rtsn_tx_free(ndev, true);
+ netif_wake_subqueue(ndev, 0);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Re-enable TX/RX interrupts */
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ spin_lock_irqsave(&priv->lock, flags);
+ rtsn_ctrl_data_irq(priv, true);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ return work_done;
+}
+
+static int rtsn_desc_alloc(struct rtsn_private *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+ unsigned int i;
+
+ priv->tx_desc_bat_size = sizeof(struct rtsn_desc) * TX_NUM_CHAINS;
+ priv->tx_desc_bat = dma_alloc_coherent(dev, priv->tx_desc_bat_size,
+ &priv->tx_desc_bat_dma,
+ GFP_KERNEL);
+
+ if (!priv->tx_desc_bat)
+ return -ENOMEM;
+
+ for (i = 0; i < TX_NUM_CHAINS; i++)
+ priv->tx_desc_bat[i].die_dt = DT_EOS;
+
+ priv->rx_desc_bat_size = sizeof(struct rtsn_desc) * RX_NUM_CHAINS;
+ priv->rx_desc_bat = dma_alloc_coherent(dev, priv->rx_desc_bat_size,
+ &priv->rx_desc_bat_dma,
+ GFP_KERNEL);
+
+ if (!priv->rx_desc_bat)
+ return -ENOMEM;
+
+ for (i = 0; i < RX_NUM_CHAINS; i++)
+ priv->rx_desc_bat[i].die_dt = DT_EOS;
+
+ return 0;
+}
+
+static void rtsn_desc_free(struct rtsn_private *priv)
+{
+ if (priv->tx_desc_bat)
+ dma_free_coherent(&priv->pdev->dev, priv->tx_desc_bat_size,
+ priv->tx_desc_bat, priv->tx_desc_bat_dma);
+ priv->tx_desc_bat = NULL;
+
+ if (priv->rx_desc_bat)
+ dma_free_coherent(&priv->pdev->dev, priv->rx_desc_bat_size,
+ priv->rx_desc_bat, priv->rx_desc_bat_dma);
+ priv->rx_desc_bat = NULL;
+}
+
+static void rtsn_chain_free(struct rtsn_private *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+
+ dma_free_coherent(dev,
+ sizeof(struct rtsn_ext_desc) * (priv->num_tx_ring + 1),
+ priv->tx_ring, priv->tx_desc_dma);
+ priv->tx_ring = NULL;
+
+ dma_free_coherent(dev,
+ sizeof(struct rtsn_ext_ts_desc) * (priv->num_rx_ring + 1),
+ priv->rx_ring, priv->rx_desc_dma);
+ priv->rx_ring = NULL;
+
+ kfree(priv->tx_skb);
+ priv->tx_skb = NULL;
+
+ kfree(priv->rx_skb);
+ priv->rx_skb = NULL;
+}
+
+static int rtsn_chain_init(struct rtsn_private *priv, int tx_size, int rx_size)
+{
+ struct net_device *ndev = priv->ndev;
+ struct sk_buff *skb;
+ int i;
+
+ priv->num_tx_ring = tx_size;
+ priv->num_rx_ring = rx_size;
+
+ priv->tx_skb = kcalloc(tx_size, sizeof(*priv->tx_skb), GFP_KERNEL);
+ priv->rx_skb = kcalloc(rx_size, sizeof(*priv->rx_skb), GFP_KERNEL);
+
+ if (!priv->rx_skb || !priv->tx_skb)
+ goto error;
+
+ for (i = 0; i < rx_size; i++) {
+ skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RTSN_ALIGN - 1);
+ if (!skb)
+ goto error;
+ skb_reserve(skb, NET_IP_ALIGN);
+ priv->rx_skb[i] = skb;
+ }
+
+ /* Allocate TX, RX descriptors */
+ priv->tx_ring = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(struct rtsn_ext_desc) * (tx_size + 1),
+ &priv->tx_desc_dma, GFP_KERNEL);
+ priv->rx_ring = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(struct rtsn_ext_ts_desc) * (rx_size + 1),
+ &priv->rx_desc_dma, GFP_KERNEL);
+
+ if (!priv->tx_ring || !priv->rx_ring)
+ goto error;
+
+ return 0;
+error:
+ rtsn_chain_free(priv);
+
+ return -ENOMEM;
+}
+
+static void rtsn_chain_format(struct rtsn_private *priv)
+{
+ struct net_device *ndev = priv->ndev;
+ struct rtsn_ext_ts_desc *rx_desc;
+ struct rtsn_ext_desc *tx_desc;
+ struct rtsn_desc *bat_desc;
+ dma_addr_t dma_addr;
+ unsigned int i;
+
+ priv->cur_tx = 0;
+ priv->cur_rx = 0;
+ priv->dirty_rx = 0;
+ priv->dirty_tx = 0;
+
+ /* TX */
+ memset(priv->tx_ring, 0, sizeof(*tx_desc) * priv->num_tx_ring);
+ for (i = 0, tx_desc = priv->tx_ring; i < priv->num_tx_ring; i++, tx_desc++)
+ tx_desc->die_dt = DT_EEMPTY | D_DIE;
+
+ tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma);
+ tx_desc->die_dt = DT_LINK;
+
+ bat_desc = &priv->tx_desc_bat[TX_CHAIN_IDX];
+ bat_desc->die_dt = DT_LINK;
+ bat_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma);
+
+ /* RX */
+ memset(priv->rx_ring, 0, sizeof(*rx_desc) * priv->num_rx_ring);
+ for (i = 0, rx_desc = priv->rx_ring; i < priv->num_rx_ring; i++, rx_desc++) {
+ dma_addr = dma_map_single(ndev->dev.parent,
+ priv->rx_skb[i]->data, PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ if (!dma_mapping_error(ndev->dev.parent, dma_addr))
+ rx_desc->info_ds = cpu_to_le16(PKT_BUF_SZ);
+ rx_desc->dptr = cpu_to_le32((u32)dma_addr);
+ rx_desc->die_dt = DT_FEMPTY | D_DIE;
+ }
+ rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma);
+ rx_desc->die_dt = DT_LINK;
+
+ bat_desc = &priv->rx_desc_bat[RX_CHAIN_IDX];
+ bat_desc->die_dt = DT_LINK;
+ bat_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma);
+}
+
+static int rtsn_dmac_init(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = rtsn_chain_init(priv, TX_CHAIN_SIZE, RX_CHAIN_SIZE);
+ if (ret)
+ return ret;
+
+ rtsn_chain_format(priv);
+
+ return 0;
+}
+
+static enum rtsn_mode rtsn_read_mode(struct rtsn_private *priv)
+{
+ return (rtsn_read(priv, OSR) & OSR_OPS) >> 1;
+}
+
+static int rtsn_wait_mode(struct rtsn_private *priv, enum rtsn_mode mode)
+{
+ unsigned int i;
+
+ /* Need to busy loop as mode changes can happen in atomic context. */
+ for (i = 0; i < RTSN_TIMEOUT_US / RTSN_INTERVAL_US; i++) {
+ if (rtsn_read_mode(priv) == mode)
+ return 0;
+
+ udelay(RTSN_INTERVAL_US);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int rtsn_change_mode(struct rtsn_private *priv, enum rtsn_mode mode)
+{
+ int ret;
+
+ rtsn_write(priv, OCR, mode);
+ ret = rtsn_wait_mode(priv, mode);
+ if (ret)
+ netdev_err(priv->ndev, "Failed to switch operation mode\n");
+ return ret;
+}
+
+static int rtsn_get_data_irq_status(struct rtsn_private *priv)
+{
+ u32 val;
+
+ val = rtsn_read(priv, TDIS0) | TDIS_TDS(TX_CHAIN_IDX);
+ val |= rtsn_read(priv, RDIS0) | RDIS_RDS(RX_CHAIN_IDX);
+
+ return val;
+}
+
+static irqreturn_t rtsn_irq(int irq, void *dev_id)
+{
+ struct rtsn_private *priv = dev_id;
+ int ret = IRQ_NONE;
+
+ spin_lock(&priv->lock);
+
+ if (rtsn_get_data_irq_status(priv)) {
+ /* Clear TX/RX irq status */
+ rtsn_write(priv, TDIS0, TDIS_TDS(TX_CHAIN_IDX));
+ rtsn_write(priv, RDIS0, RDIS_RDS(RX_CHAIN_IDX));
+
+ if (napi_schedule_prep(&priv->napi)) {
+ /* Disable TX/RX interrupts */
+ rtsn_ctrl_data_irq(priv, false);
+
+ __napi_schedule(&priv->napi);
+ }
+
+ ret = IRQ_HANDLED;
+ }
+
+ spin_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int rtsn_request_irq(unsigned int irq, irq_handler_t handler,
+ unsigned long flags, struct rtsn_private *priv,
+ const char *ch)
+{
+ char *name;
+ int ret;
+
+ name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL, "%s:%s",
+ priv->ndev->name, ch);
+ if (!name)
+ return -ENOMEM;
+
+ ret = request_irq(irq, handler, flags, name, priv);
+ if (ret)
+ netdev_err(priv->ndev, "Cannot request IRQ %s\n", name);
+
+ return ret;
+}
+
+static void rtsn_free_irqs(struct rtsn_private *priv)
+{
+ free_irq(priv->tx_data_irq, priv);
+ free_irq(priv->rx_data_irq, priv);
+}
+
+static int rtsn_request_irqs(struct rtsn_private *priv)
+{
+ int ret;
+
+ priv->rx_data_irq = platform_get_irq_byname(priv->pdev, "rx");
+ if (priv->rx_data_irq < 0)
+ return priv->rx_data_irq;
+
+ priv->tx_data_irq = platform_get_irq_byname(priv->pdev, "tx");
+ if (priv->tx_data_irq < 0)
+ return priv->tx_data_irq;
+
+ ret = rtsn_request_irq(priv->tx_data_irq, rtsn_irq, 0, priv, "tx");
+ if (ret)
+ return ret;
+
+ ret = rtsn_request_irq(priv->rx_data_irq, rtsn_irq, 0, priv, "rx");
+ if (ret) {
+ free_irq(priv->tx_data_irq, priv);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtsn_reset(struct rtsn_private *priv)
+{
+ reset_control_reset(priv->reset);
+ mdelay(1);
+
+ return rtsn_wait_mode(priv, OCR_OPC_DISABLE);
+}
+
+static int rtsn_axibmi_init(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = rtsn_reg_wait(priv, RR, RR_RST, RR_RST_COMPLETE);
+ if (ret)
+ return ret;
+
+ /* Set AXIWC */
+ rtsn_write(priv, AXIWC, AXIWC_DEFAULT);
+
+ /* Set AXIRC */
+ rtsn_write(priv, AXIRC, AXIRC_DEFAULT);
+
+ /* TX Descriptor chain setting */
+ rtsn_write(priv, TATLS0, TATLS0_TEDE | TATLS0_TATEN(TX_CHAIN_IDX));
+ rtsn_write(priv, TATLS1, priv->tx_desc_bat_dma + TX_CHAIN_ADDR_OFFSET);
+ rtsn_write(priv, TATLR, TATLR_TATL);
+
+ ret = rtsn_reg_wait(priv, TATLR, TATLR_TATL, 0);
+ if (ret)
+ return ret;
+
+ /* RX Descriptor chain setting */
+ rtsn_write(priv, RATLS0,
+ RATLS0_RETS | RATLS0_REDE | RATLS0_RATEN(RX_CHAIN_IDX));
+ rtsn_write(priv, RATLS1, priv->rx_desc_bat_dma + RX_CHAIN_ADDR_OFFSET);
+ rtsn_write(priv, RATLR, RATLR_RATL);
+
+ ret = rtsn_reg_wait(priv, RATLR, RATLR_RATL, 0);
+ if (ret)
+ return ret;
+
+ /* Enable TX/RX interrupts */
+ rtsn_ctrl_data_irq(priv, true);
+
+ return 0;
+}
+
+static void rtsn_mhd_init(struct rtsn_private *priv)
+{
+ /* TX General setting */
+ rtsn_write(priv, TGC1, TGC1_STTV_DEFAULT | TGC1_TQTM_SFM);
+ rtsn_write(priv, TMS0, TMS_MFS_MAX);
+
+ /* RX Filter IP */
+ rtsn_write(priv, CFCR0, CFCR_SDID(RX_CHAIN_IDX));
+ rtsn_write(priv, FMSCR, FMSCR_FMSIE(RX_CHAIN_IDX));
+}
+
+static int rtsn_get_phy_params(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = of_get_phy_mode(priv->pdev->dev.of_node, &priv->iface);
+ if (ret)
+ return ret;
+
+ switch (priv->iface) {
+ case PHY_INTERFACE_MODE_MII:
+ priv->speed = 100;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ priv->speed = 1000;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void rtsn_set_phy_interface(struct rtsn_private *priv)
+{
+ u32 val;
+
+ switch (priv->iface) {
+ case PHY_INTERFACE_MODE_MII:
+ val = MPIC_PIS_MII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val = MPIC_PIS_GMII;
+ break;
+ default:
+ return;
+ }
+
+ rtsn_modify(priv, MPIC, MPIC_PIS_MASK, val);
+}
+
+static void rtsn_set_rate(struct rtsn_private *priv)
+{
+ u32 val;
+
+ switch (priv->speed) {
+ case 10:
+ val = MPIC_LSC_10M;
+ break;
+ case 100:
+ val = MPIC_LSC_100M;
+ break;
+ case 1000:
+ val = MPIC_LSC_1G;
+ break;
+ default:
+ return;
+ }
+
+ rtsn_modify(priv, MPIC, MPIC_LSC_MASK, val);
+}
+
+static int rtsn_rmac_init(struct rtsn_private *priv)
+{
+ const u8 *mac_addr = priv->ndev->dev_addr;
+ int ret;
+
+ /* Set MAC address */
+ rtsn_write(priv, MRMAC0, (mac_addr[0] << 8) | mac_addr[1]);
+ rtsn_write(priv, MRMAC1, (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5]);
+
+ /* Set xMII type */
+ rtsn_set_phy_interface(priv);
+ rtsn_set_rate(priv);
+
+ /* Enable MII */
+ rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
+ MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT);
+
+ /* Link verification */
+ rtsn_modify(priv, MLVC, MLVC_PLV, MLVC_PLV);
+ ret = rtsn_reg_wait(priv, MLVC, MLVC_PLV, 0);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int rtsn_hw_init(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = rtsn_reset(priv);
+ if (ret)
+ return ret;
+
+ /* Change to CONFIG mode */
+ ret = rtsn_change_mode(priv, OCR_OPC_CONFIG);
+ if (ret)
+ return ret;
+
+ ret = rtsn_axibmi_init(priv);
+ if (ret)
+ return ret;
+
+ rtsn_mhd_init(priv);
+
+ ret = rtsn_rmac_init(priv);
+ if (ret)
+ return ret;
+
+ ret = rtsn_change_mode(priv, OCR_OPC_DISABLE);
+ if (ret)
+ return ret;
+
+ /* Change to OPERATION mode */
+ ret = rtsn_change_mode(priv, OCR_OPC_OPERATION);
+
+ return ret;
+}
+
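+/* Common MDIO accessor: program the PHY and register addresses into MPSM,
+ * set PSME to start the transfer, wait for PSME to clear and, for reads,
+ * extract the result from the data field of the same register.
+ */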
+static int rtsn_mii_access(struct mii_bus *bus, bool read, int phyad,
+ int regad, u16 data)
+{
+ struct rtsn_private *priv = bus->priv;
+ u32 val;
+ int ret;
+
+ val = MPSM_PDA(phyad) | MPSM_PRA(regad) | MPSM_PSME;
+
+ if (!read)
+ val |= MPSM_PSMAD | MPSM_PRD_SET(data);
+
+ rtsn_write(priv, MPSM, val);
+
+ ret = rtsn_reg_wait(priv, MPSM, MPSM_PSME, 0);
+ if (ret)
+ return ret;
+
+ if (read)
+ ret = MPSM_PRD_GET(rtsn_read(priv, MPSM));
+
+ return ret;
+}
+
+static int rtsn_mii_read(struct mii_bus *bus, int addr, int regnum)
+{
+ return rtsn_mii_access(bus, true, addr, regnum, 0);
+}
+
+static int rtsn_mii_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+{
+ return rtsn_mii_access(bus, false, addr, regnum, val);
+}
+
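+/* Allocate and register the MDIO bus described by the "mdio" subnode. The
+ * MAC is reset and moved to CONFIG mode first so that the MDIO clock and
+ * hold time in MPIC can be programmed before the first bus access.
+ */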
+static int rtsn_mdio_alloc(struct rtsn_private *priv)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
+ struct device_node *mdio_node;
+ struct mii_bus *mii;
+ int ret;
+
+ mii = mdiobus_alloc();
+ if (!mii)
+ return -ENOMEM;
+
+ mdio_node = of_get_child_by_name(dev->of_node, "mdio");
+ if (!mdio_node) {
+ ret = -ENODEV;
+ goto out_free_bus;
+ }
+
+ /* Enter config mode before registering the MDIO bus */
+ ret = rtsn_reset(priv);
+ if (ret)
+ goto out_free_bus;
+
+ ret = rtsn_change_mode(priv, OCR_OPC_CONFIG);
+ if (ret)
+ goto out_free_bus;
+
+ rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
+ MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT);
+
+ /* Register the MDIO bus */
+ mii->name = "rtsn_mii";
+ snprintf(mii->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
+ mii->priv = priv;
+ mii->read = rtsn_mii_read;
+ mii->write = rtsn_mii_write;
+ mii->parent = dev;
+
+ ret = of_mdiobus_register(mii, mdio_node);
+ of_node_put(mdio_node);
+ if (ret)
+ goto out_free_bus;
+
+ priv->mii = mii;
+
+ return 0;
+
+out_free_bus:
+ mdiobus_free(mii);
+ return ret;
+}
+
+static void rtsn_mdio_free(struct rtsn_private *priv)
+{
+ mdiobus_unregister(priv->mii);
+ mdiobus_free(priv->mii);
+ priv->mii = NULL;
+}
+
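+/* phylib link change callback. Changing the line speed requires a
+ * round-trip to CONFIG mode, so record the new link state under the
+ * private lock and reprogram the rate before restoring the original mode.
+ */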
+static void rtsn_adjust_link(struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+ bool new_state = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (phydev->link) {
+ if (phydev->speed != priv->speed) {
+ new_state = true;
+ priv->speed = phydev->speed;
+ }
+
+ if (!priv->link) {
+ new_state = true;
+ priv->link = phydev->link;
+ }
+ } else if (priv->link) {
+ new_state = true;
+ priv->link = 0;
+ priv->speed = 0;
+ }
+
+ if (new_state) {
+ /* The hardware must transition to CONFIG mode before it can be
+ * reconfigured, and then back to the original mode. Any change
+ * to or from CONFIG or OPERATION mode must pass through DISABLE
+ * to stop Rx/Tx.
+ */
+ enum rtsn_mode orgmode = rtsn_read_mode(priv);
+
+ /* Transition to CONFIG mode */
+ if (orgmode != OCR_OPC_CONFIG) {
+ if (orgmode != OCR_OPC_DISABLE &&
+ rtsn_change_mode(priv, OCR_OPC_DISABLE))
+ goto out;
+ if (rtsn_change_mode(priv, OCR_OPC_CONFIG))
+ goto out;
+ }
+
+ rtsn_set_rate(priv);
+
+ /* Transition to original mode */
+ if (orgmode != OCR_OPC_CONFIG) {
+ if (rtsn_change_mode(priv, OCR_OPC_DISABLE))
+ goto out;
+ if (orgmode != OCR_OPC_DISABLE &&
+ rtsn_change_mode(priv, orgmode))
+ goto out;
+ }
+ }
+out:
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (new_state)
+ phy_print_status(phydev);
+}
+
+static int rtsn_phy_init(struct rtsn_private *priv)
+{
+ struct device_node *np = priv->ndev->dev.parent->of_node;
+ struct phy_device *phydev;
+ struct device_node *phy;
+
+ priv->link = 0;
+
+ phy = of_parse_phandle(np, "phy-handle", 0);
+ if (!phy)
+ return -ENOENT;
+
+ phydev = of_phy_connect(priv->ndev, phy, rtsn_adjust_link, 0,
+ priv->iface);
+ of_node_put(phy);
+ if (!phydev)
+ return -ENOENT;
+
+ /* Only support full-duplex mode */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+
+ phy_attached_info(phydev);
+
+ return 0;
+}
+
+static void rtsn_phy_deinit(struct rtsn_private *priv)
+{
+ phy_disconnect(priv->ndev->phydev);
+ priv->ndev->phydev = NULL;
+}
+
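+/* Bring-up helper for ndo_open: descriptors, DMA chains, hardware, PHY
+ * and finally interrupts, with the error path unwinding in reverse order.
+ */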
+static int rtsn_init(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = rtsn_desc_alloc(priv);
+ if (ret)
+ return ret;
+
+ ret = rtsn_dmac_init(priv);
+ if (ret)
+ goto error_free_desc;
+
+ ret = rtsn_hw_init(priv);
+ if (ret)
+ goto error_free_chain;
+
+ ret = rtsn_phy_init(priv);
+ if (ret)
+ goto error_free_chain;
+
+ ret = rtsn_request_irqs(priv);
+ if (ret)
+ goto error_free_phy;
+
+ return 0;
+error_free_phy:
+ rtsn_phy_deinit(priv);
+error_free_chain:
+ rtsn_chain_free(priv);
+error_free_desc:
+ rtsn_desc_free(priv);
+ return ret;
+}
+
+static void rtsn_deinit(struct rtsn_private *priv)
+{
+ rtsn_free_irqs(priv);
+ rtsn_phy_deinit(priv);
+ rtsn_chain_free(priv);
+ rtsn_desc_free(priv);
+}
+
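+/* Select the MAC address, in order of preference: the address found in the
+ * device tree, a valid address already present in MRMAC0/1 (e.g. set by
+ * the bootloader), or failing both, a random address.
+ */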
+static void rtsn_parse_mac_address(struct device_node *np,
+ struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ u8 addr[ETH_ALEN];
+ u32 mrmac0;
+ u32 mrmac1;
+
+ /* Try to read address from Device Tree. */
+ if (!of_get_mac_address(np, addr)) {
+ eth_hw_addr_set(ndev, addr);
+ return;
+ }
+
+ /* Try to read address from device. */
+ mrmac0 = rtsn_read(priv, MRMAC0);
+ mrmac1 = rtsn_read(priv, MRMAC1);
+
+ addr[0] = (mrmac0 >> 8) & 0xff;
+ addr[1] = (mrmac0 >> 0) & 0xff;
+ addr[2] = (mrmac1 >> 24) & 0xff;
+ addr[3] = (mrmac1 >> 16) & 0xff;
+ addr[4] = (mrmac1 >> 8) & 0xff;
+ addr[5] = (mrmac1 >> 0) & 0xff;
+
+ if (is_valid_ether_addr(addr)) {
+ eth_hw_addr_set(ndev, addr);
+ return;
+ }
+
+ /* Fall back to a random address */
+ eth_hw_addr_random(ndev);
+}
+
+static int rtsn_open(struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ int ret;
+
+ napi_enable(&priv->napi);
+
+ ret = rtsn_init(priv);
+ if (ret) {
+ napi_disable(&priv->napi);
+ return ret;
+ }
+
+ phy_start(ndev->phydev);
+
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int rtsn_stop(struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+
+ phy_stop(priv->ndev->phydev);
+ napi_disable(&priv->napi);
+ rtsn_change_mode(priv, OCR_OPC_DISABLE);
+ rtsn_deinit(priv);
+
+ return 0;
+}
+
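+/* Transmit path. Each frame is sent as a single extended descriptor: the
+ * (possibly padded) skb is DMA-mapped, the descriptor is filled in and
+ * marked DT_FSINGLE with the descriptor interrupt enabled, and the
+ * transfer is kicked off by setting the chain bit in TRCR0.
+ */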
+static netdev_tx_t rtsn_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ struct rtsn_ext_desc *desc;
+ int ret = NETDEV_TX_OK;
+ unsigned long flags;
+ dma_addr_t dma_addr;
+ int entry;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Drop the packet if it won't fit in a single descriptor;
+ * the skb must be freed here since NETDEV_TX_OK is returned.
+ */
+ if (skb->len >= TX_DS) {
+ priv->stats.tx_dropped++;
+ priv->stats.tx_errors++;
+ dev_kfree_skb_any(skb);
+ goto out;
+ }
+
+ if (priv->cur_tx - priv->dirty_tx > priv->num_tx_ring) {
+ netif_stop_subqueue(ndev, 0);
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ if (skb_put_padto(skb, ETH_ZLEN))
+ goto out;
+
+ dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
+ dev_kfree_skb_any(skb);
+ goto out;
+ }
+
+ entry = priv->cur_tx % priv->num_tx_ring;
+ priv->tx_skb[entry] = skb;
+ desc = &priv->tx_ring[entry];
+ desc->dptr = cpu_to_le32(dma_addr);
+ desc->info_ds = cpu_to_le16(skb->len);
+ desc->info1 = cpu_to_le64(skb->len);
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->ts_tag++;
+ desc->info_ds |= cpu_to_le16(TXC);
+ desc->info = priv->ts_tag;
+ }
+
+ skb_tx_timestamp(skb);
+ dma_wmb();
+
+ desc->die_dt = DT_FSINGLE | D_DIE;
+ priv->cur_tx++;
+
+ /* Start xmit */
+ rtsn_write(priv, TRCR0, BIT(TX_CHAIN_IDX));
+out:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return ret;
+}
+
+static void rtsn_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+
+ *storage = priv->stats;
+}
+
+static int rtsn_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ if (!netif_running(ndev))
+ return -ENODEV;
+
+ return phy_do_ioctl_running(ndev, ifr, cmd);
+}
+
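+/* Report the current hardware timestamping state by translating the
+ * RCAR_GEN4_*TSTAMP_* control bits kept in the PTP private data back into
+ * a generic kernel_hwtstamp_config.
+ */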
+static int rtsn_hwtstamp_get(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct rcar_gen4_ptp_private *ptp_priv;
+ struct rtsn_private *priv;
+
+ if (!netif_running(ndev))
+ return -ENODEV;
+
+ priv = netdev_priv(ndev);
+ ptp_priv = priv->ptp_priv;
+
+ config->flags = 0;
+
+ config->tx_type =
+ ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+
+ switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
+ case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ break;
+ case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ }
+
+ return 0;
+}
+
+static int rtsn_hwtstamp_set(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct rcar_gen4_ptp_private *ptp_priv;
+ struct rtsn_private *priv;
+ u32 tstamp_rx_ctrl;
+ u32 tstamp_tx_ctrl;
+
+ if (!netif_running(ndev))
+ return -ENODEV;
+
+ priv = netdev_priv(ndev);
+ ptp_priv = priv->ptp_priv;
+
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tstamp_tx_ctrl = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tstamp_rx_ctrl = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED |
+ RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
+ break;
+ default:
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED |
+ RCAR_GEN4_RXTSTAMP_TYPE_ALL;
+ break;
+ }
+
+ ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
+ ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
+
+ return 0;
+}
+
+static const struct net_device_ops rtsn_netdev_ops = {
+ .ndo_open = rtsn_open,
+ .ndo_stop = rtsn_stop,
+ .ndo_start_xmit = rtsn_start_xmit,
+ .ndo_get_stats64 = rtsn_get_stats64,
+ .ndo_eth_ioctl = rtsn_do_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_hwtstamp_set = rtsn_hwtstamp_set,
+ .ndo_hwtstamp_get = rtsn_hwtstamp_get,
+};
+
+static int rtsn_get_ts_info(struct net_device *ndev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+
+ info->phc_index = ptp_clock_index(priv->ptp_priv->clock);
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static const struct ethtool_ops rtsn_ethtool_ops = {
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = rtsn_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static const struct of_device_id rtsn_match_table[] = {
+ { .compatible = "renesas,r8a779g0-ethertsn", },
+ { /* Sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, rtsn_match_table);
+
+static int rtsn_probe(struct platform_device *pdev)
+{
+ struct rtsn_private *priv;
+ struct net_device *ndev;
+ struct resource *res;
+ int ret;
+
+ ndev = alloc_etherdev_mqs(sizeof(struct rtsn_private), TX_NUM_CHAINS,
+ RX_NUM_CHAINS);
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ priv->pdev = pdev;
+ priv->ndev = ndev;
+ priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
+ if (!priv->ptp_priv) {
+ ret = -ENOMEM;
+ goto error_free;
+ }
+
+ spin_lock_init(&priv->lock);
+ platform_set_drvdata(pdev, priv);
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto error_free;
+ }
+
+ priv->reset = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->reset)) {
+ ret = PTR_ERR(priv->reset);
+ goto error_free;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tsnes");
+ if (!res) {
+ dev_err(&pdev->dev, "Can't find tsnes resource\n");
+ ret = -EINVAL;
+ goto error_free;
+ }
+
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto error_free;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ ndev->features = NETIF_F_RXCSUM;
+ ndev->hw_features = NETIF_F_RXCSUM;
+ ndev->base_addr = res->start;
+ ndev->netdev_ops = &rtsn_netdev_ops;
+ ndev->ethtool_ops = &rtsn_ethtool_ops;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gptp");
+ if (!res) {
+ dev_err(&pdev->dev, "Can't find gptp resource\n");
+ ret = -EINVAL;
+ goto error_free;
+ }
+
+ priv->ptp_priv->addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->ptp_priv->addr)) {
+ ret = PTR_ERR(priv->ptp_priv->addr);
+ goto error_free;
+ }
+
+ ret = rtsn_get_phy_params(priv);
+ if (ret)
+ goto error_free;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ netif_napi_add(ndev, &priv->napi, rtsn_poll);
+
+ rtsn_parse_mac_address(pdev->dev.of_node, ndev);
+
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+
+ device_set_wakeup_capable(&pdev->dev, 1);
+
+ ret = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
+ clk_get_rate(priv->clk));
+ if (ret)
+ goto error_pm;
+
+ ret = rtsn_mdio_alloc(priv);
+ if (ret)
+ goto error_ptp;
+
+ ret = register_netdev(ndev);
+ if (ret)
+ goto error_mdio;
+
+ netdev_info(ndev, "MAC address %pM\n", ndev->dev_addr);
+
+ return 0;
+
+error_mdio:
+ rtsn_mdio_free(priv);
+error_ptp:
+ rcar_gen4_ptp_unregister(priv->ptp_priv);
+error_pm:
+ netif_napi_del(&priv->napi);
+ rtsn_change_mode(priv, OCR_OPC_DISABLE);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+error_free:
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static int rtsn_remove(struct platform_device *pdev)
+{
+ struct rtsn_private *priv = platform_get_drvdata(pdev);
+
+ unregister_netdev(priv->ndev);
+ rtsn_mdio_free(priv);
+ rcar_gen4_ptp_unregister(priv->ptp_priv);
+ rtsn_change_mode(priv, OCR_OPC_DISABLE);
+ netif_napi_del(&priv->napi);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ free_netdev(priv->ndev);
+
+ return 0;
+}
+
+static struct platform_driver rtsn_driver = {
+ .probe = rtsn_probe,
+ .remove = rtsn_remove,
+ .driver = {
+ .name = "rtsn",
+ .of_match_table = rtsn_match_table,
+ }
+};
+module_platform_driver(rtsn_driver);
+
+MODULE_AUTHOR("Phong Hoang, Niklas Söderlund");
+MODULE_DESCRIPTION("Renesas Ethernet-TSN device driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/renesas/rtsn.h b/drivers/net/ethernet/renesas/rtsn.h
new file mode 100644
index 000000000000..3183e80d7e6b
--- /dev/null
+++ b/drivers/net/ethernet/renesas/rtsn.h
@@ -0,0 +1,478 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Renesas Ethernet-TSN device driver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ * Copyright (C) 2023 Niklas Söderlund <niklas.soderlund@ragnatech.se>
+ */
+
+#ifndef __RTSN_H__
+#define __RTSN_H__
+
+#include <linux/types.h>
+
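+/* Base offsets of the four register blocks; these match the bit-definition
+ * groups further down: AXIBMI, MHD, RMAC System and RMAC.
+ */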
+#define AXIBMI 0x0000
+#define TSNMHD 0x1000
+#define RMSO 0x2000
+#define RMRO 0x3800
+
+enum rtsn_reg {
+ AXIWC = AXIBMI + 0x0000,
+ AXIRC = AXIBMI + 0x0004,
+ TDPC0 = AXIBMI + 0x0010,
+ TFT = AXIBMI + 0x0090,
+ TATLS0 = AXIBMI + 0x00a0,
+ TATLS1 = AXIBMI + 0x00a4,
+ TATLR = AXIBMI + 0x00a8,
+ RATLS0 = AXIBMI + 0x00b0,
+ RATLS1 = AXIBMI + 0x00b4,
+ RATLR = AXIBMI + 0x00b8,
+ TSA0 = AXIBMI + 0x00c0,
+ TSS0 = AXIBMI + 0x00c4,
+ TRCR0 = AXIBMI + 0x0140,
+ RIDAUAS0 = AXIBMI + 0x0180,
+ RR = AXIBMI + 0x0200,
+ TATS = AXIBMI + 0x0210,
+ TATSR0 = AXIBMI + 0x0214,
+ TATSR1 = AXIBMI + 0x0218,
+ TATSR2 = AXIBMI + 0x021c,
+ RATS = AXIBMI + 0x0220,
+ RATSR0 = AXIBMI + 0x0224,
+ RATSR1 = AXIBMI + 0x0228,
+ RATSR2 = AXIBMI + 0x022c,
+ RIDASM0 = AXIBMI + 0x0240,
+ RIDASAM0 = AXIBMI + 0x0244,
+ RIDACAM0 = AXIBMI + 0x0248,
+ EIS0 = AXIBMI + 0x0300,
+ EIE0 = AXIBMI + 0x0304,
+ EID0 = AXIBMI + 0x0308,
+ EIS1 = AXIBMI + 0x0310,
+ EIE1 = AXIBMI + 0x0314,
+ EID1 = AXIBMI + 0x0318,
+ TCEIS0 = AXIBMI + 0x0340,
+ TCEIE0 = AXIBMI + 0x0344,
+ TCEID0 = AXIBMI + 0x0348,
+ RFSEIS0 = AXIBMI + 0x04c0,
+ RFSEIE0 = AXIBMI + 0x04c4,
+ RFSEID0 = AXIBMI + 0x04c8,
+ RFEIS0 = AXIBMI + 0x0540,
+ RFEIE0 = AXIBMI + 0x0544,
+ RFEID0 = AXIBMI + 0x0548,
+ RCEIS0 = AXIBMI + 0x05c0,
+ RCEIE0 = AXIBMI + 0x05c4,
+ RCEID0 = AXIBMI + 0x05c8,
+ RIDAOIS = AXIBMI + 0x0640,
+ RIDAOIE = AXIBMI + 0x0644,
+ RIDAOID = AXIBMI + 0x0648,
+ TSFEIS = AXIBMI + 0x06c0,
+ TSFEIE = AXIBMI + 0x06c4,
+ TSFEID = AXIBMI + 0x06c8,
+ TSCEIS = AXIBMI + 0x06d0,
+ TSCEIE = AXIBMI + 0x06d4,
+ TSCEID = AXIBMI + 0x06d8,
+ DIS = AXIBMI + 0x0b00,
+ DIE = AXIBMI + 0x0b04,
+ DID = AXIBMI + 0x0b08,
+ TDIS0 = AXIBMI + 0x0b10,
+ TDIE0 = AXIBMI + 0x0b14,
+ TDID0 = AXIBMI + 0x0b18,
+ RDIS0 = AXIBMI + 0x0b90,
+ RDIE0 = AXIBMI + 0x0b94,
+ RDID0 = AXIBMI + 0x0b98,
+ TSDIS = AXIBMI + 0x0c10,
+ TSDIE = AXIBMI + 0x0c14,
+ TSDID = AXIBMI + 0x0c18,
+ GPOUT = AXIBMI + 0x6000,
+
+ OCR = TSNMHD + 0x0000,
+ OSR = TSNMHD + 0x0004,
+ SWR = TSNMHD + 0x0008,
+ SIS = TSNMHD + 0x000c,
+ GIS = TSNMHD + 0x0010,
+ GIE = TSNMHD + 0x0014,
+ GID = TSNMHD + 0x0018,
+ TIS1 = TSNMHD + 0x0020,
+ TIE1 = TSNMHD + 0x0024,
+ TID1 = TSNMHD + 0x0028,
+ TIS2 = TSNMHD + 0x0030,
+ TIE2 = TSNMHD + 0x0034,
+ TID2 = TSNMHD + 0x0038,
+ RIS = TSNMHD + 0x0040,
+ RIE = TSNMHD + 0x0044,
+ RID = TSNMHD + 0x0048,
+ TGC1 = TSNMHD + 0x0050,
+ TGC2 = TSNMHD + 0x0054,
+ TFS0 = TSNMHD + 0x0060,
+ TCF0 = TSNMHD + 0x0070,
+ TCR1 = TSNMHD + 0x0080,
+ TCR2 = TSNMHD + 0x0084,
+ TCR3 = TSNMHD + 0x0088,
+ TCR4 = TSNMHD + 0x008c,
+ TMS0 = TSNMHD + 0x0090,
+ TSR1 = TSNMHD + 0x00b0,
+ TSR2 = TSNMHD + 0x00b4,
+ TSR3 = TSNMHD + 0x00b8,
+ TSR4 = TSNMHD + 0x00bc,
+ TSR5 = TSNMHD + 0x00c0,
+ RGC = TSNMHD + 0x00d0,
+ RDFCR = TSNMHD + 0x00d4,
+ RCFCR = TSNMHD + 0x00d8,
+ REFCNCR = TSNMHD + 0x00dc,
+ RSR1 = TSNMHD + 0x00e0,
+ RSR2 = TSNMHD + 0x00e4,
+ RSR3 = TSNMHD + 0x00e8,
+ TCIS = TSNMHD + 0x01e0,
+ TCIE = TSNMHD + 0x01e4,
+ TCID = TSNMHD + 0x01e8,
+ TPTPC = TSNMHD + 0x01f0,
+ TTML = TSNMHD + 0x01f4,
+ TTJ = TSNMHD + 0x01f8,
+ TCC = TSNMHD + 0x0200,
+ TCS = TSNMHD + 0x0204,
+ TGS = TSNMHD + 0x020c,
+ TACST0 = TSNMHD + 0x0210,
+ TACST1 = TSNMHD + 0x0214,
+ TACST2 = TSNMHD + 0x0218,
+ TALIT0 = TSNMHD + 0x0220,
+ TALIT1 = TSNMHD + 0x0224,
+ TALIT2 = TSNMHD + 0x0228,
+ TAEN0 = TSNMHD + 0x0230,
+ TAEN1 = TSNMHD + 0x0234,
+ TASFE = TSNMHD + 0x0240,
+ TACLL0 = TSNMHD + 0x0250,
+ TACLL1 = TSNMHD + 0x0254,
+ TACLL2 = TSNMHD + 0x0258,
+ CACC = TSNMHD + 0x0260,
+ CCS = TSNMHD + 0x0264,
+ CAIV0 = TSNMHD + 0x0270,
+ CAUL0 = TSNMHD + 0x0290,
+ TOCST0 = TSNMHD + 0x0300,
+ TOCST1 = TSNMHD + 0x0304,
+ TOCST2 = TSNMHD + 0x0308,
+ TOLIT0 = TSNMHD + 0x0310,
+ TOLIT1 = TSNMHD + 0x0314,
+ TOLIT2 = TSNMHD + 0x0318,
+ TOEN0 = TSNMHD + 0x0320,
+ TOEN1 = TSNMHD + 0x0324,
+ TOSFE = TSNMHD + 0x0330,
+ TCLR0 = TSNMHD + 0x0340,
+ TCLR1 = TSNMHD + 0x0344,
+ TCLR2 = TSNMHD + 0x0348,
+ TSMS = TSNMHD + 0x0350,
+ COCC = TSNMHD + 0x0360,
+ COIV0 = TSNMHD + 0x03b0,
+ COUL0 = TSNMHD + 0x03d0,
+ QSTMACU0 = TSNMHD + 0x0400,
+ QSTMACD0 = TSNMHD + 0x0404,
+ QSTMAMU0 = TSNMHD + 0x0408,
+ QSTMAMD0 = TSNMHD + 0x040c,
+ QSFTVL0 = TSNMHD + 0x0410,
+ QSFTVLM0 = TSNMHD + 0x0414,
+ QSFTMSD0 = TSNMHD + 0x0418,
+ QSFTGMI0 = TSNMHD + 0x041c,
+ QSFTLS = TSNMHD + 0x0600,
+ QSFTLIS = TSNMHD + 0x0604,
+ QSFTLIE = TSNMHD + 0x0608,
+ QSFTLID = TSNMHD + 0x060c,
+ QSMSMC = TSNMHD + 0x0610,
+ QSGTMC = TSNMHD + 0x0614,
+ QSEIS = TSNMHD + 0x0618,
+ QSEIE = TSNMHD + 0x061c,
+ QSEID = TSNMHD + 0x0620,
+ QGACST0 = TSNMHD + 0x0630,
+ QGACST1 = TSNMHD + 0x0634,
+ QGACST2 = TSNMHD + 0x0638,
+ QGALIT1 = TSNMHD + 0x0640,
+ QGALIT2 = TSNMHD + 0x0644,
+ QGAEN0 = TSNMHD + 0x0648,
+ QGAEN1 = TSNMHD + 0x074c,
+ QGIGS = TSNMHD + 0x0650,
+ QGGC = TSNMHD + 0x0654,
+ QGATL0 = TSNMHD + 0x0664,
+ QGATL1 = TSNMHD + 0x0668,
+ QGATL2 = TSNMHD + 0x066c,
+ QGOCST0 = TSNMHD + 0x0670,
+ QGOCST1 = TSNMHD + 0x0674,
+ QGOCST2 = TSNMHD + 0x0678,
+ QGOLIT0 = TSNMHD + 0x067c,
+ QGOLIT1 = TSNMHD + 0x0680,
+ QGOLIT2 = TSNMHD + 0x0684,
+ QGOEN0 = TSNMHD + 0x0688,
+ QGOEN1 = TSNMHD + 0x068c,
+ QGTRO = TSNMHD + 0x0690,
+ QGTR1 = TSNMHD + 0x0694,
+ QGTR2 = TSNMHD + 0x0698,
+ QGFSMS = TSNMHD + 0x069c,
+ QTMIS = TSNMHD + 0x06e0,
+ QTMIE = TSNMHD + 0x06e4,
+ QTMID = TSNMHD + 0x06e8,
+ QMEC = TSNMHD + 0x0700,
+ QMMC = TSNMHD + 0x0704,
+ QRFDC = TSNMHD + 0x0708,
+ QYFDC = TSNMHD + 0x070c,
+ QVTCMC0 = TSNMHD + 0x0710,
+ QMCBSC0 = TSNMHD + 0x0750,
+ QMCIRC0 = TSNMHD + 0x0790,
+ QMEBSC0 = TSNMHD + 0x07d0,
+ QMEIRC0 = TSNMHD + 0x0710,
+ QMCFC = TSNMHD + 0x0850,
+ QMEIS = TSNMHD + 0x0860,
+ QMEIE = TSNMHD + 0x0864,
+ QMEID = TSNMHD + 0x086c,
+ QSMFC0 = TSNMHD + 0x0870,
+ QMSPPC0 = TSNMHD + 0x08b0,
+ QMSRPC0 = TSNMHD + 0x08f0,
+ QGPPC0 = TSNMHD + 0x0930,
+ QGRPC0 = TSNMHD + 0x0950,
+ QMDPC0 = TSNMHD + 0x0970,
+ QMGPC0 = TSNMHD + 0x09b0,
+ QMYPC0 = TSNMHD + 0x09f0,
+ QMRPC0 = TSNMHD + 0x0a30,
+ MQSTMACU = TSNMHD + 0x0a70,
+ MQSTMACD = TSNMHD + 0x0a74,
+ MQSTMAMU = TSNMHD + 0x0a78,
+ MQSTMAMD = TSNMHD + 0x0a7c,
+ MQSFTVL = TSNMHD + 0x0a80,
+ MQSFTVLM = TSNMHD + 0x0a84,
+ MQSFTMSD = TSNMHD + 0x0a88,
+ MQSFTGMI = TSNMHD + 0x0a8c,
+
+ CFCR0 = RMSO + 0x0800,
+ FMSCR = RMSO + 0x0c10,
+
+ MMC = RMRO + 0x0000,
+ MPSM = RMRO + 0x0010,
+ MPIC = RMRO + 0x0014,
+ MTFFC = RMRO + 0x0020,
+ MTPFC = RMRO + 0x0024,
+ MTATC0 = RMRO + 0x0040,
+ MRGC = RMRO + 0x0080,
+ MRMAC0 = RMRO + 0x0084,
+ MRMAC1 = RMRO + 0x0088,
+ MRAFC = RMRO + 0x008c,
+ MRSCE = RMRO + 0x0090,
+ MRSCP = RMRO + 0x0094,
+ MRSCC = RMRO + 0x0098,
+ MRFSCE = RMRO + 0x009c,
+ MRFSCP = RMRO + 0x00a0,
+ MTRC = RMRO + 0x00a4,
+ MPFC = RMRO + 0x0100,
+ MLVC = RMRO + 0x0340,
+ MEEEC = RMRO + 0x0350,
+ MLBC = RMRO + 0x0360,
+ MGMR = RMRO + 0x0400,
+ MMPFTCT = RMRO + 0x0410,
+ MAPFTCT = RMRO + 0x0414,
+ MPFRCT = RMRO + 0x0418,
+ MFCICT = RMRO + 0x041c,
+ MEEECT = RMRO + 0x0420,
+ MEIS = RMRO + 0x0500,
+ MEIE = RMRO + 0x0504,
+ MEID = RMRO + 0x0508,
+ MMIS0 = RMRO + 0x0510,
+ MMIE0 = RMRO + 0x0514,
+ MMID0 = RMRO + 0x0518,
+ MMIS1 = RMRO + 0x0520,
+ MMIE1 = RMRO + 0x0524,
+ MMID1 = RMRO + 0x0528,
+ MMIS2 = RMRO + 0x0530,
+ MMIE2 = RMRO + 0x0534,
+ MMID2 = RMRO + 0x0538,
+ MXMS = RMRO + 0x0600,
+};
+
+/* AXIBMI */
+#define RR_RATRR BIT(0)
+#define RR_TATRR BIT(1)
+#define RR_RST (RR_RATRR | RR_TATRR)
+#define RR_RST_COMPLETE 0x03
+
+#define AXIWC_DEFAULT 0xffff
+#define AXIRC_DEFAULT 0xffff
+
+#define TATLS0_TEDE BIT(1)
+#define TATLS0_TATEN_SHIFT 24
+#define TATLS0_TATEN(n) ((n) << TATLS0_TATEN_SHIFT)
+#define TATLR_TATL BIT(31)
+
+#define RATLS0_RETS BIT(2)
+#define RATLS0_REDE BIT(3)
+#define RATLS0_RATEN_SHIFT 24
+#define RATLS0_RATEN(n) ((n) << RATLS0_RATEN_SHIFT)
+#define RATLR_RATL BIT(31)
+
+#define DIE_DID_TDICX(n) BIT((n))
+#define DIE_DID_RDICX(n) BIT((n) + 8)
+#define TDIE_TDID_TDX(n) BIT(n)
+#define RDIE_RDID_RDX(n) BIT(n)
+#define TDIS_TDS(n) BIT(n)
+#define RDIS_RDS(n) BIT(n)
+
+/* MHD */
+#define OSR_OPS 0x07
+#define SWR_SWR BIT(0)
+
+#define TGC1_TQTM_SFM 0xff00
+#define TGC1_STTV_DEFAULT 0x03
+
+#define TMS_MFS_MAX 0x2800
+
+/* RMAC System */
+#define CFCR_SDID(n) ((n) << 16)
+#define FMSCR_FMSIE(n) ((n) << 0)
+
+/* RMAC */
+#define MPIC_PIS_MASK GENMASK(1, 0)
+#define MPIC_PIS_MII 0
+#define MPIC_PIS_RMII 0x01
+#define MPIC_PIS_GMII 0x02
+#define MPIC_PIS_RGMII 0x03
+#define MPIC_LSC_SHIFT 2
+#define MPIC_LSC_MASK GENMASK(3, MPIC_LSC_SHIFT)
+#define MPIC_LSC_10M (0 << MPIC_LSC_SHIFT)
+#define MPIC_LSC_100M (0x01 << MPIC_LSC_SHIFT)
+#define MPIC_LSC_1G (0x02 << MPIC_LSC_SHIFT)
+#define MPIC_PSMCS_SHIFT 16
+#define MPIC_PSMCS_MASK GENMASK(21, MPIC_PSMCS_SHIFT)
+#define MPIC_PSMCS_DEFAULT (0x0a << MPIC_PSMCS_SHIFT)
+#define MPIC_PSMHT_SHIFT 24
+#define MPIC_PSMHT_MASK GENMASK(26, MPIC_PSMHT_SHIFT)
+#define MPIC_PSMHT_DEFAULT (0x07 << MPIC_PSMHT_SHIFT)
+
+#define MLVC_PASE BIT(8)
+#define MLVC_PSE BIT(16)
+#define MLVC_PLV BIT(17)
+
+#define MPSM_PSME BIT(0)
+#define MPSM_PSMAD BIT(1)
+#define MPSM_PDA_SHIFT 3
+#define MPSM_PDA_MASK GENMASK(7, 3)
+#define MPSM_PDA(n) (((n) << MPSM_PDA_SHIFT) & MPSM_PDA_MASK)
+#define MPSM_PRA_SHIFT 8
+#define MPSM_PRA_MASK GENMASK(12, 8)
+#define MPSM_PRA(n) (((n) << MPSM_PRA_SHIFT) & MPSM_PRA_MASK)
+#define MPSM_PRD_SHIFT 16
+#define MPSM_PRD_SET(n) ((n) << MPSM_PRD_SHIFT)
+#define MPSM_PRD_GET(n) ((n) >> MPSM_PRD_SHIFT)
+
+#define GPOUT_RDM BIT(13)
+#define GPOUT_TDM BIT(14)
+
+/* RTSN */
+#define RTSN_INTERVAL_US 1000
+#define RTSN_TIMEOUT_US 1000000
+
+#define TX_NUM_CHAINS 1
+#define RX_NUM_CHAINS 1
+
+#define TX_CHAIN_SIZE 1024
+#define RX_CHAIN_SIZE 1024
+
+#define TX_CHAIN_IDX 0
+#define RX_CHAIN_IDX 0
+
+#define TX_CHAIN_ADDR_OFFSET (sizeof(struct rtsn_desc) * TX_CHAIN_IDX)
+#define RX_CHAIN_ADDR_OFFSET (sizeof(struct rtsn_desc) * RX_CHAIN_IDX)
+
+#define PKT_BUF_SZ 1584
+#define RTSN_ALIGN 128
+
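+/* Operating modes. The values double as the operation codes (OPC) written
+ * to OCR and read back through the OSR_OPS field.
+ */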
+enum rtsn_mode {
+ OCR_OPC_DISABLE,
+ OCR_OPC_CONFIG,
+ OCR_OPC_OPERATION,
+};
+
+/* Descriptors */
+enum RX_DS_CC_BIT {
+ RX_DS = 0x0fff, /* Data size */
+ RX_TR = 0x1000, /* Truncation indication */
+ RX_EI = 0x2000, /* Error indication */
+ RX_PS = 0xc000, /* Padding selection */
+};
+
+enum TX_FS_TAGL_BIT {
+ TX_DS = 0x0fff, /* Data size */
+ TX_TAGL = 0xf000, /* Frame tag LSBs */
+};
+
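+/* Descriptor types held in the upper bits of die_dt (see DT_MASK). A frame
+ * that fits in one descriptor uses DT_FSINGLE; the lower bits carry flags
+ * such as D_DIE, which enables the per-descriptor interrupt.
+ */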
+enum DIE_DT {
+ /* HW/SW arbitration */
+ DT_FEMPTY_IS = 0x10,
+ DT_FEMPTY_IC = 0x20,
+ DT_FEMPTY_ND = 0x30,
+ DT_FEMPTY = 0x40,
+ DT_FEMPTY_START = 0x50,
+ DT_FEMPTY_MID = 0x60,
+ DT_FEMPTY_END = 0x70,
+
+ /* Frame data */
+ DT_FSINGLE = 0x80,
+ DT_FSTART = 0x90,
+ DT_FMID = 0xa0,
+ DT_FEND = 0xb0,
+
+ /* Chain control */
+ DT_LEMPTY = 0xc0,
+ DT_EEMPTY = 0xd0,
+ DT_LINK = 0xe0,
+ DT_EOS = 0xf0,
+
+ DT_MASK = 0xf0,
+ D_DIE = 0x08,
+};
+
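+/* Descriptor layouts. All variants share the same leading fields (size and
+ * flags, tag, type, data pointer); the _ts variants append a hardware
+ * timestamp and the _ext variants a 64-bit info1 word. They are __packed
+ * as they describe the in-memory layout consumed by the DMA engine.
+ */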
+struct rtsn_desc {
+ __le16 info_ds;
+ __u8 info;
+ u8 die_dt;
+ __le32 dptr;
+} __packed;
+
+struct rtsn_ts_desc {
+ __le16 info_ds;
+ __u8 info;
+ u8 die_dt;
+ __le32 dptr;
+ __le32 ts_nsec;
+ __le32 ts_sec;
+} __packed;
+
+struct rtsn_ext_desc {
+ __le16 info_ds;
+ __u8 info;
+ u8 die_dt;
+ __le32 dptr;
+ __le64 info1;
+} __packed;
+
+struct rtsn_ext_ts_desc {
+ __le16 info_ds;
+ __u8 info;
+ u8 die_dt;
+ __le32 dptr;
+ __le64 info1;
+ __le32 ts_nsec;
+ __le32 ts_sec;
+} __packed;
+
+enum EXT_INFO_DS_BIT {
+ TXC = 0x4000,
+};
+
+#endif