author    Linus Torvalds <torvalds@linux-foundation.org>    2021-02-20 17:45:32 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2021-02-20 17:45:32 -0800
commit    51e6d17809c85e1934600ec4cdb85552e9bda254 (patch)
tree      917ecfba322f9c81fb2e936054462756515aebb8 /include/linux
parent    f40ddce88593482919761f74910f42f4b84c004b (diff)
parent    38b5133ad607ecdcc8d24906d1ac9cc8df41acd5 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from David Miller: "Here is what we have this merge window:

  1) Support SW steering for mlx5 ConnectX-6 Dx, from Yevgeny Kliteynik.
  2) Add RSS multi group support to octeontx2-pf driver, from Geetha Sowjanya.
  3) Add support for KS8851 PHY. From Marek Vasut.
  4) Add support for GarfieldPeak bluetooth controller from Kiran K.
  5) Add support for half-duplex tcan4x5x can controllers.
  6) Add batch skb rx processing to bcm63xx_enet, from Sieng Piaw Liew.
  7) Rework RX port offload infrastructure, particularly wrt UDP tunneling, from Jakub Kicinski.
  8) Add BCM72116 PHY support, from Florian Fainelli.
  9) Remove DSA specific notifiers, they are unnecessary. From Vladimir Oltean.
 10) Add support for picosecond rx delay in dwmac-meson8b chips. From Martin Blumenstingl.
 11) Support TSO on xfrm interfaces from Eyal Birger.
 12) Add support for MP_PRIO to mptcp stack, from Geliang Tang.
 13) Support BCM4908 integrated switch, from Rafał Miłecki.
 14) Support for directly accessing kernel module variables via module BTF info, from Andrii Nakryiko.
 15) Add DASH (Desktop and mobile Architecture for System Hardware) support to r8169 driver, from Heiner Kallweit.
 16) Add rx vlan filtering to dpaa2-eth, from Ionut-Robert Aron.
 17) Add support for 100base-X SFP devices, from Bjarni Jonasson.
 18) Support link aggregation in DSA, from Tobias Waldekranz.
 19) Support for bitwise atomics in bpf, from Brendan Jackman.
 20) SmartEEE support in at803x driver, from Russell King.
 21) Add support for flow based tunneling to GTP, from Pravin B Shelar.
 22) Allow arbitrary number of interconnects in ipa, from Alex Elder.
 23) TLS RX offload for bonding, from Tariq Toukan.
 24) RX decap offload support in mac80211, from Felix Fietkau.
 25) devlink health support in octeontx2-af, from George Cherian.
 26) Add TTL attr to SCM_TIMESTAMP_OPT_STATS, from Yousuk Seung.
 27) Delegated actions support in mptcp, from Paolo Abeni.
 28) Support receive timestamping when doing zerocopy tcp receive. From Arjun Roy.
 29) HTB offload support for mlx5, from Maxim Mikityanskiy.
 30) UDP GRO forwarding, from Maxim Mikityanskiy.
 31) TAPRIO offloading in dsa hellcreek driver, from Kurt Kanzenbach.
 32) Weighted random twos choice algorithm for ipvs, from Darby Payne.
 33) Fix netdev registration deadlock, from Johannes Berg.
 34) Various conversions to new tasklet api, from Emil Renner Berthing.
 35) Bulk skb allocations in veth, from Lorenzo Bianconi.
 36) New ethtool interface for lane setting, from Danielle Ratson.
 37) Offload failure notifications for routes, from Amit Cohen.
 38) BCM4908 support, from Rafał Miłecki.
 39) Support several new iwlwifi chips, from Ihab Zhaika.
 40) Flow director support for ipv6 in i40e, from Przemyslaw Patynowski.
 41) Support for mhi protocols, from Loic Poulain.
 42) Optimize bpf program stats.
 43) Implement RFC 6056, for better port randomization, from Eric Dumazet.
 44) hsr tag offloading support from George McCollister.
 45) Netpoll support in qede, from Bhaskar Upadhaya.
 46) 200G/400G speed support in bonding 3ad mode, from Nikolay Aleksandrov.
 47) Netlink event support in mptcp, from Florian Westphal.
 48) Better skbuff caching, from Alexander Lobakin.
 49) MRP (Media Redundancy Protocol) offloading in DSA and a few drivers, from Horatiu Vultur.
 50) mqprio support in mvneta, from Maxime Chevallier.
 51) Remove of_phy_attach, no longer needed, from Florian Fainelli"

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1766 commits)
  octeontx2-pf: Fix otx2_get_fecparam()
  octeontx2-pf: cn10k: Prevent harmless double shift bugs
  net: stmmac: Add PCI bus info to ethtool driver query output
  ptp: ptp_clockmatrix: clean-up - parenthesis around a == b are unnecessary
  ptp: ptp_clockmatrix: Simplify code - remove unnecessary `err` variable.
  ptp: ptp_clockmatrix: Coding style - tighten vertical spacing.
  ptp: ptp_clockmatrix: Clean-up dev_*() messages.
  ptp: ptp_clockmatrix: Remove unused header declarations.
  ptp: ptp_clockmatrix: Add alignment of 1 PPS to idtcm_perout_enable.
  ptp: ptp_clockmatrix: Add wait_for_sys_apll_dpll_lock.
  net: stmmac: dwmac-sun8i: Add a shutdown callback
  net: stmmac: dwmac-sun8i: Minor probe function cleanup
  net: stmmac: dwmac-sun8i: Use reset_control_reset
  net: stmmac: dwmac-sun8i: Remove unnecessary PHY power check
  net: stmmac: dwmac-sun8i: Return void from PHY unpower
  r8169: use macro pm_ptr
  net: mdio: Remove of_phy_attach()
  net: mscc: ocelot: select PACKING in the Kconfig
  net: re-solve some conflicts after net -> net-next merge
  net: dsa: tag_rtl4_a: Support also egress tags
  ...
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/atmdev.h  2
-rw-r--r--  include/linux/bpf-cgroup.h  101
-rw-r--r--  include/linux/bpf.h  92
-rw-r--r--  include/linux/bpf_verifier.h  8
-rw-r--r--  include/linux/brcmphy.h  25
-rw-r--r--  include/linux/btf.h  3
-rw-r--r--  include/linux/buildid.h  12
-rw-r--r--  include/linux/can/bittiming.h  44
-rw-r--r--  include/linux/can/dev.h  136
-rw-r--r--  include/linux/can/length.h  174
-rw-r--r--  include/linux/can/rx-offload.h  3
-rw-r--r--  include/linux/can/skb.h  80
-rw-r--r--  include/linux/dsa/8021q.h  14
-rw-r--r--  include/linux/dsa/brcm.h  16
-rw-r--r--  include/linux/dsa/ocelot.h  223
-rw-r--r--  include/linux/ethtool.h  5
-rw-r--r--  include/linux/filter.h  48
-rw-r--r--  include/linux/gfp.h  12
-rw-r--r--  include/linux/icmpv6.h  2
-rw-r--r--  include/linux/if_hsr.h  27
-rw-r--r--  include/linux/indirect_call_wrapper.h  8
-rw-r--r--  include/linux/ipv6.h  1
-rw-r--r--  include/linux/mdio.h  23
-rw-r--r--  include/linux/mhi.h  9
-rw-r--r--  include/linux/mlx5/device.h  14
-rw-r--r--  include/linux/mlx5/driver.h  46
-rw-r--r--  include/linux/mlx5/eswitch.h  29
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h  113
-rw-r--r--  include/linux/mm.h  2
-rw-r--r--  include/linux/netdev_features.h  13
-rw-r--r--  include/linux/netdevice.h  135
-rw-r--r--  include/linux/netfilter.h  2
-rw-r--r--  include/linux/netlink.h  6
-rw-r--r--  include/linux/of_mdio.h  10
-rw-r--r--  include/linux/phy.h  38
-rw-r--r--  include/linux/qed/qed_chain.h  2
-rw-r--r--  include/linux/remoteproc/qcom_rproc.h  4
-rw-r--r--  include/linux/skbuff.h  177
-rw-r--r--  include/linux/skmsg.h  1
-rw-r--r--  include/linux/soc/marvell/octeontx2/asm.h  8
-rw-r--r--  include/linux/soc/qcom/mdt_loader.h  35
-rw-r--r--  include/linux/ssb/ssb_driver_gige.h  14
-rw-r--r--  include/linux/stmmac.h  1
-rw-r--r--  include/linux/tcp.h  3
-rw-r--r--  include/linux/usb/cdc_ncm.h  2
45 files changed, 1330 insertions, 393 deletions
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index d7493016cd46..60cd25c0461b 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -207,7 +207,7 @@ struct atm_skb_data {
struct atm_vcc *vcc; /* ATM VCC */
unsigned long atm_options; /* ATM layer options */
unsigned int acct_truesize; /* truesize accounted to vcc */
-};
+} __packed;
#define VCC_HTABLE_SIZE 32
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 72e69a0e1e8c..c42e02b4d84b 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -23,8 +23,8 @@ struct ctl_table_header;
#ifdef CONFIG_CGROUP_BPF
-extern struct static_key_false cgroup_bpf_enabled_key;
-#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
+extern struct static_key_false cgroup_bpf_enabled_key[MAX_BPF_ATTACH_TYPE];
+#define cgroup_bpf_enabled(type) static_branch_unlikely(&cgroup_bpf_enabled_key[type])
DECLARE_PER_CPU(struct bpf_cgroup_storage*,
bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
@@ -125,7 +125,8 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk,
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
struct sockaddr *uaddr,
enum bpf_attach_type type,
- void *t_ctx);
+ void *t_ctx,
+ u32 *flags);
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
struct bpf_sock_ops_kern *sock_ops,
@@ -147,6 +148,10 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
int __user *optlen, int max_optlen,
int retval);
+int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
+ int optname, void *optval,
+ int *optlen, int retval);
+
static inline enum bpf_cgroup_storage_type cgroup_storage_type(
struct bpf_map *map)
{
@@ -185,7 +190,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(BPF_CGROUP_INET_INGRESS)) \
__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
BPF_CGROUP_INET_INGRESS); \
\
@@ -195,7 +200,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
+ if (cgroup_bpf_enabled(BPF_CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
typeof(sk) __sk = sk_to_full_sk(sk); \
if (sk_fullsock(__sk)) \
__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
@@ -207,7 +212,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_SK_PROG(sk, type) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) { \
+ if (cgroup_bpf_enabled(type)) { \
__ret = __cgroup_bpf_run_filter_sk(sk, type); \
} \
__ret; \
@@ -227,33 +232,53 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \
({ \
+ u32 __unused_flags; \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(type)) \
__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
- NULL); \
+ NULL, \
+ &__unused_flags); \
__ret; \
})
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \
({ \
+ u32 __unused_flags; \
int __ret = 0; \
- if (cgroup_bpf_enabled) { \
+ if (cgroup_bpf_enabled(type)) { \
lock_sock(sk); \
__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
- t_ctx); \
+ t_ctx, \
+ &__unused_flags); \
release_sock(sk); \
} \
__ret; \
})
-#define BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_BIND, NULL)
-
-#define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_BIND, NULL)
+/* BPF_CGROUP_INET4_BIND and BPF_CGROUP_INET6_BIND can return extra flags
+ * via upper bits of return code. The only flag that is supported
+ * (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check
+ * should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
+ */
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, bind_flags) \
+({ \
+ u32 __flags = 0; \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(type)) { \
+ lock_sock(sk); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
+ NULL, &__flags); \
+ release_sock(sk); \
+ if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \
+ *bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE; \
+ } \
+ __ret; \
+})
-#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
- sk->sk_prot->pre_connect)
+#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) \
+ ((cgroup_bpf_enabled(BPF_CGROUP_INET4_CONNECT) || \
+ cgroup_bpf_enabled(BPF_CGROUP_INET6_CONNECT)) && \
+ (sk)->sk_prot->pre_connect)
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)
@@ -297,7 +322,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS)) \
__ret = __cgroup_bpf_run_filter_sock_ops(sk, \
sock_ops, \
BPF_CGROUP_SOCK_OPS); \
@@ -307,7 +332,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled && (sock_ops)->sk) { \
+ if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS) && (sock_ops)->sk) { \
typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
if (__sk && sk_fullsock(__sk)) \
__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
@@ -320,7 +345,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(BPF_CGROUP_DEVICE)) \
__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
access, \
BPF_CGROUP_DEVICE); \
@@ -332,7 +357,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(BPF_CGROUP_SYSCTL)) \
__ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
buf, count, pos, \
BPF_CGROUP_SYSCTL); \
@@ -343,7 +368,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
kernel_optval) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(BPF_CGROUP_SETSOCKOPT)) \
__ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
optname, optval, \
optlen, \
@@ -354,7 +379,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
get_user(__ret, optlen); \
__ret; \
})
@@ -363,11 +388,24 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
max_optlen, retval) \
({ \
int __ret = retval; \
- if (cgroup_bpf_enabled) \
- __ret = __cgroup_bpf_run_filter_getsockopt(sock, level, \
- optname, optval, \
- optlen, max_optlen, \
- retval); \
+ if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
+ if (!(sock)->sk_prot->bpf_bypass_getsockopt || \
+ !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
+ tcp_bpf_bypass_getsockopt, \
+ level, optname)) \
+ __ret = __cgroup_bpf_run_filter_getsockopt( \
+ sock, level, optname, optval, optlen, \
+ max_optlen, retval); \
+ __ret; \
+})
+
+#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
+ optlen, retval) \
+({ \
+ int __ret = retval; \
+ if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
+ __ret = __cgroup_bpf_run_filter_getsockopt_kern( \
+ sock, level, optname, optval, optlen, retval); \
__ret; \
})
@@ -427,15 +465,14 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
return 0;
}
-#define cgroup_bpf_enabled (0)
+#define cgroup_bpf_enabled(type) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
@@ -452,6 +489,8 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
optlen, max_optlen, retval) ({ retval; })
+#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
+ optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
kernel_optval) ({ 0; })
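The new BPF_CGROUP_RUN_PROG_INET_BIND_LOCK() macro hands the program's auxiliary result bits back to the caller instead of only a verdict. Below is a minimal, hedged sketch of how a bind() path might consume that output; example_inet_bind(), example_do_bind() and BIND_WITH_LOCK are illustrative names outside this diff, while BIND_NO_CAP_NET_BIND_SERVICE is the flag the macro itself sets.

/* Hedged sketch: consuming the cgroup BPF bind hook's output flags. */
static int example_inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	u32 flags = BIND_WITH_LOCK;
	int err;

	/* The hook ORs BIND_NO_CAP_NET_BIND_SERVICE into 'flags' when the BPF
	 * program returned BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE in the upper
	 * bits of its result.
	 */
	err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr,
						 BPF_CGROUP_INET4_BIND, &flags);
	if (err)
		return err;

	/* A privileged-port check downstream would test
	 * flags & BIND_NO_CAP_NET_BIND_SERVICE before requiring
	 * CAP_NET_BIND_SERVICE.
	 */
	return example_do_bind(sk, uaddr, addr_len, flags);
}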
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 07cb5d15e743..cccaef1088ea 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -14,7 +14,6 @@
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
-#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
@@ -507,12 +506,6 @@ enum bpf_cgroup_storage_type {
*/
#define MAX_BPF_FUNC_ARGS 12
-struct bpf_prog_stats {
- u64 cnt;
- u64 nsecs;
- struct u64_stats_sync syncp;
-} __aligned(2 * sizeof(u64));
-
struct btf_func_model {
u8 ret_size;
u8 nr_args;
@@ -536,7 +529,7 @@ struct btf_func_model {
/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
* bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
*/
-#define BPF_MAX_TRAMP_PROGS 40
+#define BPF_MAX_TRAMP_PROGS 38
struct bpf_tramp_progs {
struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
@@ -568,10 +561,10 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
struct bpf_tramp_progs *tprogs,
void *orig_call);
/* these two functions are called from generated trampoline */
-u64 notrace __bpf_prog_enter(void);
+u64 notrace __bpf_prog_enter(struct bpf_prog *prog);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
-void notrace __bpf_prog_enter_sleepable(void);
-void notrace __bpf_prog_exit_sleepable(void);
+u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog);
+void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
struct bpf_ksym {
unsigned long start;
@@ -761,9 +754,15 @@ struct bpf_ctx_arg_aux {
u32 btf_id;
};
+struct btf_mod_pair {
+ struct btf *btf;
+ struct module *module;
+};
+
struct bpf_prog_aux {
atomic64_t refcnt;
u32 used_map_cnt;
+ u32 used_btf_cnt;
u32 max_ctx_offset;
u32 max_pkt_offset;
u32 max_tp_access;
@@ -802,6 +801,7 @@ struct bpf_prog_aux {
const struct bpf_prog_ops *ops;
struct bpf_map **used_maps;
struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
+ struct btf_mod_pair *used_btfs;
struct bpf_prog *prog;
struct user_struct *user;
u64 load_time; /* ns since boottime */
@@ -838,7 +838,6 @@ struct bpf_prog_aux {
u32 linfo_idx;
u32 num_exentries;
struct exception_table_entry *extable;
- struct bpf_prog_stats __percpu *stats;
union {
struct work_struct work;
struct rcu_head rcu;
@@ -1066,6 +1065,34 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
struct bpf_prog *include_prog,
struct bpf_prog_array **new_array);
+/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
+#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0)
+/* BPF program asks to set CN on the packet. */
+#define BPF_RET_SET_CN (1 << 0)
+
+#define BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, ret_flags) \
+ ({ \
+ struct bpf_prog_array_item *_item; \
+ struct bpf_prog *_prog; \
+ struct bpf_prog_array *_array; \
+ u32 _ret = 1; \
+ u32 func_ret; \
+ migrate_disable(); \
+ rcu_read_lock(); \
+ _array = rcu_dereference(array); \
+ _item = &_array->items[0]; \
+ while ((_prog = READ_ONCE(_item->prog))) { \
+ bpf_cgroup_storage_set(_item->cgroup_storage); \
+ func_ret = func(_prog, ctx); \
+ _ret &= (func_ret & 1); \
+ *(ret_flags) |= (func_ret >> 1); \
+ _item++; \
+ } \
+ rcu_read_unlock(); \
+ migrate_enable(); \
+ _ret; \
+ })
+
#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \
({ \
struct bpf_prog_array_item *_item; \
@@ -1113,25 +1140,11 @@ _out: \
*/
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \
({ \
- struct bpf_prog_array_item *_item; \
- struct bpf_prog *_prog; \
- struct bpf_prog_array *_array; \
- u32 ret; \
- u32 _ret = 1; \
- u32 _cn = 0; \
- migrate_disable(); \
- rcu_read_lock(); \
- _array = rcu_dereference(array); \
- _item = &_array->items[0]; \
- while ((_prog = READ_ONCE(_item->prog))) { \
- bpf_cgroup_storage_set(_item->cgroup_storage); \
- ret = func(_prog, ctx); \
- _ret &= (ret & 1); \
- _cn |= (ret & 2); \
- _item++; \
- } \
- rcu_read_unlock(); \
- migrate_enable(); \
+ u32 _flags = 0; \
+ bool _cn; \
+ u32 _ret; \
+ _ret = BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, &_flags); \
+ _cn = _flags & BPF_RET_SET_CN; \
if (_ret) \
_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \
else \
@@ -1206,8 +1219,6 @@ void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
-void __bpf_free_used_maps(struct bpf_prog_aux *aux,
- struct bpf_map **used_maps, u32 len);
void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
@@ -1271,6 +1282,11 @@ static inline bool bpf_allow_ptr_leaks(void)
return perfmon_capable();
}
+static inline bool bpf_allow_uninit_stack(void)
+{
+ return perfmon_capable();
+}
+
static inline bool bpf_allow_ptr_to_map_access(void)
{
return perfmon_capable();
@@ -1403,7 +1419,10 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
union bpf_attr __user *uattr);
+
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
+#endif
struct btf *bpf_get_btf_vmlinux(void);
@@ -1667,12 +1686,18 @@ bpf_base_func_proto(enum bpf_func_id func_id)
}
#endif /* CONFIG_BPF_SYSCALL */
+void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
+ struct btf_mod_pair *used_btfs, u32 len);
+
static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
enum bpf_prog_type type)
{
return bpf_prog_get_type_dev(ufd, type, false);
}
+void __bpf_free_used_maps(struct bpf_prog_aux *aux,
+ struct bpf_map **used_maps, u32 len);
+
bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
int bpf_prog_offload_compile(struct bpf_prog *prog);
@@ -1860,6 +1885,7 @@ extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
extern const struct bpf_func_proto bpf_sock_from_file_proto;
+extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
const struct bpf_func_proto *bpf_tracing_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);
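BPF_PROG_RUN_ARRAY_FLAGS() folds each program's 32-bit return value into a one-bit verdict (bit 0, AND-ed across programs) and a set of flags (the remaining bits, OR-ed after a right shift by one). The self-contained demonstration below redoes that arithmetic with plain integers instead of real BPF programs.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the accumulation in BPF_PROG_RUN_ARRAY_FLAGS: bit 0 of each
 * return value is the allow/deny verdict, the upper bits carry flags such
 * as BPF_RET_SET_CN (bit 1 of the raw return value, bit 0 after the shift).
 */
int main(void)
{
	uint32_t rets[] = { 0x1, 0x3, 0x1 };	/* three program return values */
	uint32_t verdict = 1, flags = 0;

	for (unsigned int i = 0; i < sizeof(rets) / sizeof(rets[0]); i++) {
		verdict &= rets[i] & 1;		/* any deny wins */
		flags |= rets[i] >> 1;		/* collect extra flags */
	}

	/* 0x3 above has bit 1 set, i.e. BPF_RET_SET_CN after the shift, so
	 * the egress wrapper would map an allowed packet to NET_XMIT_CN.
	 */
	printf("verdict=%u flags=%#x\n", verdict, flags);
	return 0;
}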
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index e941fe1484e5..971b33aca13d 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -195,7 +195,7 @@ struct bpf_func_state {
* 0 = main function, 1 = first callee.
*/
u32 frameno;
- /* subprog number == index within subprog_stack_depth
+ /* subprog number == index within subprog_info
* zero == main subprog
*/
u32 subprogno;
@@ -340,6 +340,7 @@ struct bpf_insn_aux_data {
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
+#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */
#define BPF_VERIFIER_TMP_LOG_SIZE 1024
@@ -398,9 +399,12 @@ struct bpf_verifier_env {
struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
struct bpf_verifier_state_list *free_list;
struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
+ struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTF's used by BPF program */
u32 used_map_cnt; /* number of used maps */
+ u32 used_btf_cnt; /* number of used BTF objects */
u32 id_gen; /* used to generate unique reg IDs */
bool allow_ptr_leaks;
+ bool allow_uninit_stack;
bool allow_ptr_to_map_access;
bool bpf_capable;
bool bypass_spec_v1;
@@ -467,6 +471,8 @@ bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
int check_ctx_reg(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int regno);
+int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+ u32 regno, u32 mem_size);
/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index d0bd226d6bd9..c2c2147dfeb8 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -31,6 +31,7 @@
#define PHY_ID_BCM89610 0x03625cd0
#define PHY_ID_BCM72113 0x35905310
+#define PHY_ID_BCM72116 0x35905350
#define PHY_ID_BCM7250 0xae025280
#define PHY_ID_BCM7255 0xae025120
#define PHY_ID_BCM7260 0xae025190
@@ -60,19 +61,11 @@
#define PHY_BCM_OUI_5 0x03625e00
#define PHY_BCM_OUI_6 0xae025000
-#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001
-#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002
-#define PHY_BCM_FLAGS_INTF_SGMII 0x00000010
-#define PHY_BCM_FLAGS_INTF_XAUI 0x00000020
-#define PHY_BRCM_WIRESPEED_ENABLE 0x00000100
-#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000200
-#define PHY_BRCM_RX_REFCLK_UNUSED 0x00000400
-#define PHY_BRCM_STD_IBND_DISABLE 0x00000800
-#define PHY_BRCM_EXT_IBND_RX_ENABLE 0x00001000
-#define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000
-#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000
-#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000
-#define PHY_BRCM_EN_MASTER_MODE 0x00010000
+#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000001
+#define PHY_BRCM_RX_REFCLK_UNUSED 0x00000002
+#define PHY_BRCM_CLEAR_RGMII_MODE 0x00000004
+#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00000008
+#define PHY_BRCM_EN_MASTER_MODE 0x00000010
/* Broadcom BCM7xxx specific workarounds */
#define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff)
@@ -136,6 +129,7 @@
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x07
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN 0x0010
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_EN 0x0080
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN 0x0100
#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
@@ -197,6 +191,7 @@
#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001
#define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002
#define BCM54XX_SHD_SCR3_TRDDAPD 0x0004
+#define BCM54XX_SHD_SCR3_RXCTXC_DIS 0x0100
/* 01010: Auto Power-Down */
#define BCM54XX_SHD_APD 0x0a
@@ -222,6 +217,9 @@
/* 11111: Mode Control Register */
#define BCM54XX_SHD_MODE 0x1f
#define BCM54XX_SHD_INTF_SEL_MASK GENMASK(2, 1) /* INTERF_SEL[1:0] */
+#define BCM54XX_SHD_INTF_SEL_RGMII 0x02
+#define BCM54XX_SHD_INTF_SEL_SGMII 0x04
+#define BCM54XX_SHD_INTF_SEL_GBIC 0x06
#define BCM54XX_SHD_MODE_1000BX BIT(0) /* Enable 1000-X registers */
/*
@@ -257,7 +255,6 @@
#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN (1 << 0)
#define BCM54810_SHD_CLK_CTL 0x3
#define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9)
-#define BCM54810_SHD_SCR3_TRDDAPD 0x0100
/* BCM54612E Registers */
#define BCM54612E_EXP_SPARE0 (MII_BCM54XX_EXP_SEL_ETC + 0x34)
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 4c200f5d242b..7fabf1428093 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -91,6 +91,9 @@ int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
int btf_get_fd_by_id(u32 id);
u32 btf_obj_id(const struct btf *btf);
bool btf_is_kernel(const struct btf *btf);
+bool btf_is_module(const struct btf *btf);
+struct module *btf_try_get_module(const struct btf *btf);
+u32 btf_nr_types(const struct btf *btf);
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
const struct btf_member *m,
u32 expected_offset, u32 expected_size);
diff --git a/include/linux/buildid.h b/include/linux/buildid.h
new file mode 100644
index 000000000000..40232f90db6e
--- /dev/null
+++ b/include/linux/buildid.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BUILDID_H
+#define _LINUX_BUILDID_H
+
+#include <linux/mm_types.h>
+
+#define BUILD_ID_SIZE_MAX 20
+
+int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
+ __u32 *size);
+
+#endif
diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h
new file mode 100644
index 000000000000..707575c668f4
--- /dev/null
+++ b/include/linux/can/bittiming.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2020 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ */
+
+#ifndef _CAN_BITTIMING_H
+#define _CAN_BITTIMING_H
+
+#include <linux/netdevice.h>
+#include <linux/can/netlink.h>
+
+#define CAN_SYNC_SEG 1
+
+#ifdef CONFIG_CAN_CALC_BITTIMING
+int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc);
+#else /* !CONFIG_CAN_CALC_BITTIMING */
+static inline int
+can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc)
+{
+ netdev_err(dev, "bit-timing calculation not available\n");
+ return -EINVAL;
+}
+#endif /* CONFIG_CAN_CALC_BITTIMING */
+
+int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc,
+ const u32 *bitrate_const,
+ const unsigned int bitrate_const_cnt);
+
+/*
+ * can_bit_time() - Duration of one bit
+ *
+ * Please refer to ISO 11898-1:2015, section 11.3.1.1 "Bit time" for
+ * additional information.
+ *
+ * Return: the number of time quanta in one bit.
+ */
+static inline unsigned int can_bit_time(const struct can_bittiming *bt)
+{
+ return CAN_SYNC_SEG + bt->prop_seg + bt->phase_seg1 + bt->phase_seg2;
+}
+
+#endif /* !_CAN_BITTIMING_H */
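can_bit_time() returns the bit length in time quanta; together with the bit rate prescaler (brp) and the controller clock this gives the nominal bit rate. The self-contained sketch below works through that arithmetic; the struct is a simplified stand-in for the relevant can_bittiming fields and the clock and segment values are assumed for illustration.

#include <stdio.h>

/* Simplified stand-in for the relevant struct can_bittiming fields. */
struct example_bittiming {
	unsigned int prop_seg, phase_seg1, phase_seg2, brp;
};

#define CAN_SYNC_SEG 1

static unsigned int example_can_bit_time(const struct example_bittiming *bt)
{
	return CAN_SYNC_SEG + bt->prop_seg + bt->phase_seg1 + bt->phase_seg2;
}

int main(void)
{
	/* Assumed values: 40 MHz CAN clock, brp 5, 1+5+1+1 = 8 tq per bit. */
	struct example_bittiming bt = { .prop_seg = 5, .phase_seg1 = 1,
					.phase_seg2 = 1, .brp = 5 };
	unsigned int clock_hz = 40000000;
	unsigned int tq_per_bit = example_can_bit_time(&bt);

	/* bitrate = clock / (brp * time quanta per bit) = 1 Mbit/s here */
	printf("%u tq/bit -> %u bit/s\n", tq_per_bit,
	       clock_hz / (bt.brp * tq_per_bit));
	return 0;
}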
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 197a79535cc2..ac4d83a1ab81 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -15,8 +15,10 @@
#define _CAN_DEV_H
#include <linux/can.h>
+#include <linux/can/bittiming.h>
#include <linux/can/error.h>
#include <linux/can/led.h>
+#include <linux/can/length.h>
#include <linux/can/netlink.h>
#include <linux/can/skb.h>
#include <linux/netdevice.h>
@@ -82,118 +84,6 @@ struct can_priv {
#endif
};
-#define CAN_SYNC_SEG 1
-
-/*
- * can_bit_time() - Duration of one bit
- *
- * Please refer to ISO 11898-1:2015, section 11.3.1.1 "Bit time" for
- * additional information.
- *
- * Return: the number of time quanta in one bit.
- */
-static inline unsigned int can_bit_time(const struct can_bittiming *bt)
-{
- return CAN_SYNC_SEG + bt->prop_seg + bt->phase_seg1 + bt->phase_seg2;
-}
-
-/*
- * can_cc_dlc2len(value) - convert a given data length code (dlc) of a
- * Classical CAN frame into a valid data length of max. 8 bytes.
- *
- * To be used in the CAN netdriver receive path to ensure conformance with
- * ISO 11898-1 Chapter 8.4.2.3 (DLC field)
- */
-#define can_cc_dlc2len(dlc) (min_t(u8, (dlc), CAN_MAX_DLEN))
-
-/* Check for outgoing skbs that have not been created by the CAN subsystem */
-static inline bool can_skb_headroom_valid(struct net_device *dev,
- struct sk_buff *skb)
-{
- /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */
- if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv)))
- return false;
-
- /* af_packet does not apply CAN skb specific settings */
- if (skb->ip_summed == CHECKSUM_NONE) {
- /* init headroom */
- can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- /* perform proper loopback on capable devices */
- if (dev->flags & IFF_ECHO)
- skb->pkt_type = PACKET_LOOPBACK;
- else
- skb->pkt_type = PACKET_HOST;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
- }
-
- return true;
-}
-
-/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
-static inline bool can_dropped_invalid_skb(struct net_device *dev,
- struct sk_buff *skb)
-{
- const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
-
- if (skb->protocol == htons(ETH_P_CAN)) {
- if (unlikely(skb->len != CAN_MTU ||
- cfd->len > CAN_MAX_DLEN))
- goto inval_skb;
- } else if (skb->protocol == htons(ETH_P_CANFD)) {
- if (unlikely(skb->len != CANFD_MTU ||
- cfd->len > CANFD_MAX_DLEN))
- goto inval_skb;
- } else
- goto inval_skb;
-
- if (!can_skb_headroom_valid(dev, skb))
- goto inval_skb;
-
- return false;
-
-inval_skb:
- kfree_skb(skb);
- dev->stats.tx_dropped++;
- return true;
-}
-
-static inline bool can_is_canfd_skb(const struct sk_buff *skb)
-{
- /* the CAN specific type of skb is identified by its data length */
- return skb->len == CANFD_MTU;
-}
-
-/* helper to get the data length code (DLC) for Classical CAN raw DLC access */
-static inline u8 can_get_cc_dlc(const struct can_frame *cf, const u32 ctrlmode)
-{
- /* return len8_dlc as dlc value only if all conditions apply */
- if ((ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC) &&
- (cf->len == CAN_MAX_DLEN) &&
- (cf->len8_dlc > CAN_MAX_DLEN && cf->len8_dlc <= CAN_MAX_RAW_DLC))
- return cf->len8_dlc;
-
- /* return the payload length as dlc value */
- return cf->len;
-}
-
-/* helper to set len and len8_dlc value for Classical CAN raw DLC access */
-static inline void can_frame_set_cc_len(struct can_frame *cf, const u8 dlc,
- const u32 ctrlmode)
-{
- /* the caller already ensured that dlc is a value from 0 .. 15 */
- if (ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC && dlc > CAN_MAX_DLEN)
- cf->len8_dlc = dlc;
-
- /* limit the payload length 'len' to CAN_MAX_DLEN */
- cf->len = can_cc_dlc2len(dlc);
-}
/* helper to define static CAN controller features at device creation time */
static inline void can_set_static_ctrlmode(struct net_device *dev,
@@ -210,11 +100,7 @@ static inline void can_set_static_ctrlmode(struct net_device *dev,
dev->mtu = CANFD_MTU;
}
-/* get data length from raw data length code (DLC) */
-u8 can_fd_dlc2len(u8 dlc);
-
-/* map the sanitized data length to an appropriate data length code */
-u8 can_fd_len2dlc(u8 len);
+void can_setup(struct net_device *dev);
struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
unsigned int txqs, unsigned int rxqs);
@@ -237,26 +123,18 @@ void unregister_candev(struct net_device *dev);
int can_restart_now(struct net_device *dev);
void can_bus_off(struct net_device *dev);
+const char *can_get_state_str(const enum can_state state);
void can_change_state(struct net_device *dev, struct can_frame *cf,
enum can_state tx_state, enum can_state rx_state);
-int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
- unsigned int idx);
-struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx,
- u8 *len_ptr);
-unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
-void can_free_echo_skb(struct net_device *dev, unsigned int idx);
-
#ifdef CONFIG_OF
void of_can_transceiver(struct net_device *dev);
#else
static inline void of_can_transceiver(struct net_device *dev) { }
#endif
-struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
-struct sk_buff *alloc_canfd_skb(struct net_device *dev,
- struct canfd_frame **cfd);
-struct sk_buff *alloc_can_err_skb(struct net_device *dev,
- struct can_frame **cf);
+extern struct rtnl_link_ops can_link_ops;
+int can_netlink_register(void);
+void can_netlink_unregister(void);
#endif /* !_CAN_DEV_H */
diff --git a/include/linux/can/length.h b/include/linux/can/length.h
new file mode 100644
index 000000000000..6995092b774e
--- /dev/null
+++ b/include/linux/can/length.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Oliver Hartkopp <socketcan@hartkopp.net>
+ * Copyright (C) 2020 Marc Kleine-Budde <kernel@pengutronix.de>
+ */
+
+#ifndef _CAN_LENGTH_H
+#define _CAN_LENGTH_H
+
+/*
+ * Size of a Classical CAN Standard Frame
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start-of-frame 1
+ * Identifier 11
+ * Remote transmission request (RTR) 1
+ * Identifier extension bit (IDE) 1
+ * Reserved bit (r0) 1
+ * Data length code (DLC) 4
+ * Data field 0...64
+ * CRC 15
+ * CRC delimiter 1
+ * ACK slot 1
+ * ACK delimiter 1
+ * End-of-frame (EOF) 7
+ * Inter frame spacing 3
+ *
+ * rounded up and ignoring bitstuffing
+ */
+#define CAN_FRAME_OVERHEAD_SFF DIV_ROUND_UP(47, 8)
+
+/*
+ * Size of a Classical CAN Extended Frame
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start-of-frame 1
+ * Identifier A 11
+ * Substitute remote request (SRR) 1
+ * Identifier extension bit (IDE) 1
+ * Identifier B 18
+ * Remote transmission request (RTR) 1
+ * Reserved bits (r1, r0) 2
+ * Data length code (DLC) 4
+ * Data field 0...64
+ * CRC 15
+ * CRC delimiter 1
+ * ACK slot 1
+ * ACK delimiter 1
+ * End-of-frame (EOF) 7
+ * Inter frame spacing 3
+ *
+ * rounded up and ignoring bitstuffing
+ */
+#define CAN_FRAME_OVERHEAD_EFF DIV_ROUND_UP(67, 8)
+
+/*
+ * Size of a CAN-FD Standard Frame
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start-of-frame 1
+ * Identifier 11
+ * Reserved bit (r1) 1
+ * Identifier extension bit (IDE) 1
+ * Flexible data rate format (FDF) 1
+ * Reserved bit (r0) 1
+ * Bit Rate Switch (BRS) 1
+ * Error Status Indicator (ESI) 1
+ * Data length code (DLC) 4
+ * Data field 0...512
+ * Stuff Bit Count (SBC) 0...16: 4 20...64:5
+ * CRC 0...16: 17 20...64:21
+ * CRC delimiter (CD) 1
+ * ACK slot (AS) 1
+ * ACK delimiter (AD) 1
+ * End-of-frame (EOF) 7
+ * Inter frame spacing 3
+ *
+ * assuming CRC21, rounded up and ignoring bitstuffing
+ */
+#define CANFD_FRAME_OVERHEAD_SFF DIV_ROUND_UP(61, 8)
+
+/*
+ * Size of a CAN-FD Extended Frame
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start-of-frame 1
+ * Identifier A 11
+ * Substitute remote request (SRR) 1
+ * Identifier extension bit (IDE) 1
+ * Identifier B 18
+ * Reserved bit (r1) 1
+ * Flexible data rate format (FDF) 1
+ * Reserved bit (r0) 1
+ * Bit Rate Switch (BRS) 1
+ * Error Status Indicator (ESI) 1
+ * Data length code (DLC) 4
+ * Data field 0...512
+ * Stuff Bit Count (SBC) 0...16: 4 20...64:5
+ * CRC 0...16: 17 20...64:21
+ * CRC delimiter (CD) 1
+ * ACK slot (AS) 1
+ * ACK delimiter (AD) 1
+ * End-of-frame (EOF) 7
+ * Inter frame spacing 3
+ *
+ * assuming CRC21, rounded up and ignoring bitstuffing
+ */
+#define CANFD_FRAME_OVERHEAD_EFF DIV_ROUND_UP(80, 8)
+
+/*
+ * Maximum size of a Classical CAN frame
+ * (rounded up and ignoring bitstuffing)
+ */
+#define CAN_FRAME_LEN_MAX (CAN_FRAME_OVERHEAD_EFF + CAN_MAX_DLEN)
+
+/*
+ * Maximum size of a CAN-FD frame
+ * (rounded up and ignoring bitstuffing)
+ */
+#define CANFD_FRAME_LEN_MAX (CANFD_FRAME_OVERHEAD_EFF + CANFD_MAX_DLEN)
+
+/*
+ * can_cc_dlc2len(value) - convert a given data length code (dlc) of a
+ * Classical CAN frame into a valid data length of max. 8 bytes.
+ *
+ * To be used in the CAN netdriver receive path to ensure conformance with
+ * ISO 11898-1 Chapter 8.4.2.3 (DLC field)
+ */
+#define can_cc_dlc2len(dlc) (min_t(u8, (dlc), CAN_MAX_DLEN))
+
+/* helper to get the data length code (DLC) for Classical CAN raw DLC access */
+static inline u8 can_get_cc_dlc(const struct can_frame *cf, const u32 ctrlmode)
+{
+ /* return len8_dlc as dlc value only if all conditions apply */
+ if ((ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC) &&
+ (cf->len == CAN_MAX_DLEN) &&
+ (cf->len8_dlc > CAN_MAX_DLEN && cf->len8_dlc <= CAN_MAX_RAW_DLC))
+ return cf->len8_dlc;
+
+ /* return the payload length as dlc value */
+ return cf->len;
+}
+
+/* helper to set len and len8_dlc value for Classical CAN raw DLC access */
+static inline void can_frame_set_cc_len(struct can_frame *cf, const u8 dlc,
+ const u32 ctrlmode)
+{
+ /* the caller already ensured that dlc is a value from 0 .. 15 */
+ if (ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC && dlc > CAN_MAX_DLEN)
+ cf->len8_dlc = dlc;
+
+ /* limit the payload length 'len' to CAN_MAX_DLEN */
+ cf->len = can_cc_dlc2len(dlc);
+}
+
+/* get data length from raw data length code (DLC) */
+u8 can_fd_dlc2len(u8 dlc);
+
+/* map the sanitized data length to an appropriate data length code */
+u8 can_fd_len2dlc(u8 len);
+
+/* calculate the CAN Frame length in bytes of a given skb */
+unsigned int can_skb_get_frame_len(const struct sk_buff *skb);
+
+/* map the data length to an appropriate data link layer length */
+static inline u8 canfd_sanitize_len(u8 len)
+{
+ return can_fd_dlc2len(can_fd_len2dlc(len));
+}
+
+#endif /* !_CAN_LENGTH_H */
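Worked out, the new overhead constants are byte counts of the worst-case (extended-ID) framing: DIV_ROUND_UP(67, 8) = 9 bytes for Classical CAN and DIV_ROUND_UP(80, 8) = 10 bytes for CAN FD, so the frame length maxima come to 9 + 8 = 17 and 10 + 64 = 74 bytes. The small self-contained check below redoes that arithmetic.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int cc_overhead    = DIV_ROUND_UP(67, 8);	/* 9  */
	unsigned int canfd_overhead = DIV_ROUND_UP(80, 8);	/* 10 */

	/* CAN_MAX_DLEN is 8 bytes of payload, CANFD_MAX_DLEN is 64 bytes */
	printf("CAN_FRAME_LEN_MAX   = %u\n", cc_overhead + 8);		/* 17 */
	printf("CANFD_FRAME_LEN_MAX = %u\n", canfd_overhead + 64);	/* 74 */
	return 0;
}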
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index f1b38088b765..40882df7105e 100644
--- a/include/linux/can/rx-offload.h
+++ b/include/linux/can/rx-offload.h
@@ -44,7 +44,8 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
struct sk_buff *skb, u32 timestamp);
unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
- unsigned int idx, u32 timestamp);
+ unsigned int idx, u32 timestamp,
+ unsigned int *frame_len_ptr);
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
struct sk_buff *skb);
void can_rx_offload_del(struct can_rx_offload *offload);
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index fc61cf4eff1c..685f34cfba20 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -16,6 +16,20 @@
#include <linux/can.h>
#include <net/sock.h>
+void can_flush_echo_skb(struct net_device *dev);
+int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ unsigned int idx, unsigned int frame_len);
+struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx,
+ u8 *len_ptr, unsigned int *frame_len_ptr);
+unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *frame_len_ptr);
+void can_free_echo_skb(struct net_device *dev, unsigned int idx);
+struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
+struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+ struct canfd_frame **cfd);
+struct sk_buff *alloc_can_err_skb(struct net_device *dev,
+ struct can_frame **cf);
+
/*
* The struct can_skb_priv is used to transport additional information along
* with the stored struct can(fd)_frame that can not be contained in existing
@@ -29,11 +43,13 @@
* struct can_skb_priv - private additional data inside CAN sk_buffs
* @ifindex: ifindex of the first interface the CAN frame appeared on
* @skbcnt: atomic counter to have an unique id together with skb pointer
+ * @frame_len: length of CAN frame in data link layer
* @cf: align to the following CAN frame at skb->data
*/
struct can_skb_priv {
int ifindex;
int skbcnt;
+ unsigned int frame_len;
struct can_frame cf[];
};
@@ -74,4 +90,68 @@ static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
return nskb;
}
+/* Check for outgoing skbs that have not been created by the CAN subsystem */
+static inline bool can_skb_headroom_valid(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */
+ if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv)))
+ return false;
+
+ /* af_packet does not apply CAN skb specific settings */
+ if (skb->ip_summed == CHECKSUM_NONE) {
+ /* init headroom */
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* perform proper loopback on capable devices */
+ if (dev->flags & IFF_ECHO)
+ skb->pkt_type = PACKET_LOOPBACK;
+ else
+ skb->pkt_type = PACKET_HOST;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ }
+
+ return true;
+}
+
+/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
+static inline bool can_dropped_invalid_skb(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
+ if (skb->protocol == htons(ETH_P_CAN)) {
+ if (unlikely(skb->len != CAN_MTU ||
+ cfd->len > CAN_MAX_DLEN))
+ goto inval_skb;
+ } else if (skb->protocol == htons(ETH_P_CANFD)) {
+ if (unlikely(skb->len != CANFD_MTU ||
+ cfd->len > CANFD_MAX_DLEN))
+ goto inval_skb;
+ } else
+ goto inval_skb;
+
+ if (!can_skb_headroom_valid(dev, skb))
+ goto inval_skb;
+
+ return false;
+
+inval_skb:
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return true;
+}
+
+static inline bool can_is_canfd_skb(const struct sk_buff *skb)
+{
+ /* the CAN specific type of skb is identified by its data length */
+ return skb->len == CANFD_MTU;
+}
+
#endif /* !_CAN_SKB_H */
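With frame_len now threaded through the echo-skb API, a driver can account data-link-layer bytes at TX completion. A hedged driver-side sketch of that pattern follows; example_xmit()/example_tx_done() and the omitted hardware handoff are invented for illustration and are not part of this diff.

/* Hedged sketch: store the DLL frame length with the echo skb on transmit,
 * retrieve it on TX-done to feed statistics (and, potentially, BQL).
 */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	can_put_echo_skb(skb, dev, 0, can_skb_get_frame_len(skb));
	/* ... hand the frame to the controller ... */
	return NETDEV_TX_OK;
}

static void example_tx_done(struct net_device *dev)
{
	unsigned int frame_len;

	dev->stats.tx_packets++;
	/* can_get_echo_skb() returns the payload length and reports the
	 * data-link-layer length via frame_len.
	 */
	dev->stats.tx_bytes += can_get_echo_skb(dev, 0, &frame_len);
}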
diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h
index 88cd72dfa4e0..b12b05f1c8b4 100644
--- a/include/linux/dsa/8021q.h
+++ b/include/linux/dsa/8021q.h
@@ -64,6 +64,10 @@ int dsa_8021q_rx_source_port(u16 vid);
u16 dsa_8021q_rx_subvlan(u16 vid);
+bool vid_is_dsa_8021q_rxvlan(u16 vid);
+
+bool vid_is_dsa_8021q_txvlan(u16 vid);
+
bool vid_is_dsa_8021q(u16 vid);
#else
@@ -123,6 +127,16 @@ u16 dsa_8021q_rx_subvlan(u16 vid)
return 0;
}
+bool vid_is_dsa_8021q_rxvlan(u16 vid)
+{
+ return false;
+}
+
+bool vid_is_dsa_8021q_txvlan(u16 vid)
+{
+ return false;
+}
+
bool vid_is_dsa_8021q(u16 vid)
{
return false;
diff --git a/include/linux/dsa/brcm.h b/include/linux/dsa/brcm.h
new file mode 100644
index 000000000000..47545a948784
--- /dev/null
+++ b/include/linux/dsa/brcm.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2014 Broadcom Corporation
+ */
+
+/* Included by drivers/net/ethernet/broadcom/bcmsysport.c and
+ * net/dsa/tag_brcm.c
+ */
+#ifndef _NET_DSA_BRCM_H
+#define _NET_DSA_BRCM_H
+
+/* Broadcom tag specific helpers to insert and extract queue/port number */
+#define BRCM_TAG_SET_PORT_QUEUE(p, q) ((p) << 8 | q)
+#define BRCM_TAG_GET_PORT(v) ((v) >> 8)
+#define BRCM_TAG_GET_QUEUE(v) ((v) & 0xff)
+
+#endif
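The three helpers are a straightforward pack/unpack of a port number in the high byte and a queue number in the low byte; the self-contained round-trip below illustrates the encoding (macros repeated locally so the snippet compiles on its own).

#include <stdio.h>

#define BRCM_TAG_SET_PORT_QUEUE(p, q)	((p) << 8 | q)
#define BRCM_TAG_GET_PORT(v)		((v) >> 8)
#define BRCM_TAG_GET_QUEUE(v)		((v) & 0xff)

int main(void)
{
	unsigned int v = BRCM_TAG_SET_PORT_QUEUE(5, 3);	/* port 5, queue 3 */

	printf("raw=%#x port=%u queue=%u\n", v,
	       BRCM_TAG_GET_PORT(v), BRCM_TAG_GET_QUEUE(v));	/* 0x503 5 3 */
	return 0;
}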
diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h
new file mode 100644
index 000000000000..4265f328681a
--- /dev/null
+++ b/include/linux/dsa/ocelot.h
@@ -0,0 +1,223 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright 2019-2021 NXP Semiconductors
+ */
+
+#ifndef _NET_DSA_TAG_OCELOT_H
+#define _NET_DSA_TAG_OCELOT_H
+
+#include <linux/packing.h>
+
+#define OCELOT_TAG_LEN 16
+#define OCELOT_SHORT_PREFIX_LEN 4
+#define OCELOT_LONG_PREFIX_LEN 16
+#define OCELOT_TOTAL_TAG_LEN (OCELOT_SHORT_PREFIX_LEN + OCELOT_TAG_LEN)
+
+/* The CPU injection header and the CPU extraction header can have 3 types of
+ * prefixes: long, short and no prefix. The format of the header itself is the
+ * same in all 3 cases.
+ *
+ * Extraction with long prefix:
+ *
+ * +-------------------+-------------------+------+------+------------+-------+
+ * | ff:ff:ff:ff:ff:ff | fe:ff:ff:ff:ff:ff | 8880 | 000a | extraction | frame |
+ * | | | | | header | |
+ * +-------------------+-------------------+------+------+------------+-------+
+ * 48 bits 48 bits 16 bits 16 bits 128 bits
+ *
+ * Extraction with short prefix:
+ *
+ * +------+------+------------+-------+
+ * | 8880 | 000a | extraction | frame |
+ * | | | header | |
+ * +------+------+------------+-------+
+ * 16 bits 16 bits 128 bits
+ *
+ * Extraction with no prefix:
+ *
+ * +------------+-------+
+ * | extraction | frame |
+ * | header | |
+ * +------------+-------+
+ * 128 bits
+ *
+ *
+ * Injection with long prefix:
+ *
+ * +-------------------+-------------------+------+------+------------+-------+
+ * | any dmac | any smac | 8880 | 000a | injection | frame |
+ * | | | | | header | |
+ * +-------------------+-------------------+------+------+------------+-------+
+ * 48 bits 48 bits 16 bits 16 bits 128 bits
+ *
+ * Injection with short prefix:
+ *
+ * +------+------+------------+-------+
+ * | 8880 | 000a | injection | frame |
+ * | | | header | |
+ * +------+------+------------+-------+
+ * 16 bits 16 bits 128 bits
+ *
+ * Injection with no prefix:
+ *
+ * +------------+-------+
+ * | injection | frame |
+ * | header | |
+ * +------------+-------+
+ * 128 bits
+ *
+ * The injection header looks like this (network byte order, bit 127
+ * is part of lowest address byte in memory, bit 0 is part of highest
+ * address byte):
+ *
+ * +------+------+------+------+------+------+------+------+
+ * 127:120 |BYPASS| MASQ | MASQ_PORT |REW_OP|REW_OP|
+ * +------+------+------+------+------+------+------+------+
+ * 119:112 | REW_OP |
+ * +------+------+------+------+------+------+------+------+
+ * 111:104 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 103: 96 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 95: 88 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 87: 80 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 79: 72 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 71: 64 | RSV | DEST |
+ * +------+------+------+------+------+------+------+------+
+ * 63: 56 | DEST |
+ * +------+------+------+------+------+------+------+------+
+ * 55: 48 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 47: 40 | RSV | SRC_PORT | RSV |TFRM_TIMER|
+ * +------+------+------+------+------+------+------+------+
+ * 39: 32 | TFRM_TIMER | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 31: 24 | RSV | DP | POP_CNT | CPUQ |
+ * +------+------+------+------+------+------+------+------+
+ * 23: 16 | CPUQ | QOS_CLASS |TAG_TYPE|
+ * +------+------+------+------+------+------+------+------+
+ * 15: 8 | PCP | DEI | VID |
+ * +------+------+------+------+------+------+------+------+
+ * 7: 0 | VID |
+ * +------+------+------+------+------+------+------+------+
+ *
+ * And the extraction header looks like this:
+ *
+ * +------+------+------+------+------+------+------+------+
+ * 127:120 | RSV | REW_OP |
+ * +------+------+------+------+------+------+------+------+
+ * 119:112 | REW_OP | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 111:104 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 103: 96 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 95: 88 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 87: 80 | REW_VAL | LLEN |
+ * +------+------+------+------+------+------+------+------+
+ * 79: 72 | LLEN | WLEN |
+ * +------+------+------+------+------+------+------+------+
+ * 71: 64 | WLEN | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 63: 56 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 55: 48 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 47: 40 | RSV | SRC_PORT | ACL_ID |
+ * +------+------+------+------+------+------+------+------+
+ * 39: 32 | ACL_ID | RSV | SFLOW_ID |
+ * +------+------+------+------+------+------+------+------+
+ * 31: 24 |ACL_HIT| DP | LRN_FLAGS | CPUQ |
+ * +------+------+------+------+------+------+------+------+
+ * 23: 16 | CPUQ | QOS_CLASS |TAG_TYPE|
+ * +------+------+------+------+------+------+------+------+
+ * 15: 8 | PCP | DEI | VID |
+ * +------+------+------+------+------+------+------+------+
+ * 7: 0 | VID |
+ * +------+------+------+------+------+------+------+------+
+ */
+
+static inline void ocelot_xfh_get_rew_val(void *extraction, u64 *rew_val)
+{
+ packing(extraction, rew_val, 116, 85, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_len(void *extraction, u64 *len)
+{
+ u64 llen, wlen;
+
+ packing(extraction, &llen, 84, 79, OCELOT_TAG_LEN, UNPACK, 0);
+ packing(extraction, &wlen, 78, 71, OCELOT_TAG_LEN, UNPACK, 0);
+
+ *len = 60 * wlen + llen - 80;
+}
+
+static inline void ocelot_xfh_get_src_port(void *extraction, u64 *src_port)
+{
+ packing(extraction, src_port, 46, 43, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_cpuq(void *extraction, u64 *cpuq)
+{
+ packing(extraction, cpuq, 28, 20, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_qos_class(void *extraction, u64 *qos_class)
+{
+ packing(extraction, qos_class, 19, 17, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_tag_type(void *extraction, u64 *tag_type)
+{
+ packing(extraction, tag_type, 16, 16, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_vlan_tci(void *extraction, u64 *vlan_tci)
+{
+ packing(extraction, vlan_tci, 15, 0, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_ifh_set_bypass(void *injection, u64 bypass)
+{
+ packing(injection, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_rew_op(void *injection, u64 rew_op)
+{
+ packing(injection, &rew_op, 125, 117, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_dest(void *injection, u64 dest)
+{
+ packing(injection, &dest, 67, 56, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_qos_class(void *injection, u64 qos_class)
+{
+ packing(injection, &qos_class, 19, 17, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void seville_ifh_set_dest(void *injection, u64 dest)
+{
+ packing(injection, &dest, 67, 57, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_src(void *injection, u64 src)
+{
+ packing(injection, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_tag_type(void *injection, u64 tag_type)
+{
+ packing(injection, &tag_type, 16, 16, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_vid(void *injection, u64 vid)
+{
+ packing(injection, &vid, 11, 0, OCELOT_TAG_LEN, PACK, 0);
+}
+
+#endif
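A tagger's receive path would call the unpack helpers on the 128-bit extraction header that precedes the frame. The fragment below is a hedged sketch: only the ocelot_xfh_get_*() helpers come from this header, while the way the header is located and copied out of the skb is assumed.

/* Hedged sketch: decode a few fields of a 16-byte extraction header that
 * has already been copied into 'xfh' (network byte order, as documented
 * above).
 */
static void example_parse_xfh(const u8 *xfh)
{
	u64 src_port, qos_class, vlan_tci, rew_val;

	ocelot_xfh_get_src_port((void *)xfh, &src_port);
	ocelot_xfh_get_qos_class((void *)xfh, &qos_class);
	ocelot_xfh_get_vlan_tci((void *)xfh, &vlan_tci);
	ocelot_xfh_get_rew_val((void *)xfh, &rew_val);	/* e.g. RX timestamp */

	pr_debug("cpu extraction: port %llu, qos %llu, tci 0x%llx\n",
		 src_port, qos_class, vlan_tci);
}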
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index e3da25b51ae4..ec4cd3921c67 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -128,6 +128,8 @@ struct ethtool_link_ksettings {
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
} link_modes;
+ u32 lanes;
+ enum ethtool_link_mode_bit_indices link_mode;
};
/**
@@ -265,6 +267,8 @@ struct ethtool_pause_stats {
/**
* struct ethtool_ops - optional netdev operations
+ * @cap_link_lanes_supported: indicates if the driver supports lanes
+ * parameter.
* @supported_coalesce_params: supported types of interrupt coalescing.
* @get_drvinfo: Report driver/device information. Should only set the
* @driver, @version, @fw_version and @bus_info fields. If not
@@ -420,6 +424,7 @@ struct ethtool_pause_stats {
* of the generic netdev features interface.
*/
struct ethtool_ops {
+ u32 cap_link_lanes_supported:1;
u32 supported_coalesce_params;
void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
int (*get_regs_len)(struct net_device *);
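Drivers opt in to the new lanes attribute by setting cap_link_lanes_supported and filling ethtool_link_ksettings::lanes. A hedged driver-side sketch follows; the example_* names and the specific speed/lane values are assumptions for illustration.

/* Hedged sketch of a driver exposing the new 'lanes' parameter. */
static int example_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *ks)
{
	ks->base.speed  = SPEED_100000;
	ks->base.duplex = DUPLEX_FULL;
	ks->lanes = 4;			/* e.g. 100G carried over 4 x 25G lanes */
	return 0;
}

static const struct ethtool_ops example_ethtool_ops = {
	.cap_link_lanes_supported	= 1,
	.get_link_ksettings		= example_get_link_ksettings,
};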
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 29c27656165b..3b00fc906ccd 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -22,6 +22,7 @@
#include <linux/vmalloc.h>
#include <linux/sockptr.h>
#include <crypto/sha1.h>
+#include <linux/u64_stats_sync.h>
#include <net/sch_generic.h>
@@ -259,15 +260,32 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.off = OFF, \
.imm = 0 })
-/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
-#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
+/*
+ * Atomic operations:
+ *
+ * BPF_ADD *(uint *) (dst_reg + off16) += src_reg
+ * BPF_AND *(uint *) (dst_reg + off16) &= src_reg
+ * BPF_OR *(uint *) (dst_reg + off16) |= src_reg
+ * BPF_XOR *(uint *) (dst_reg + off16) ^= src_reg
+ * BPF_ADD | BPF_FETCH src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
+ * BPF_AND | BPF_FETCH src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
+ * BPF_OR | BPF_FETCH src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
+ * BPF_XOR | BPF_FETCH src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
+ * BPF_XCHG src_reg = atomic_xchg(dst_reg + off16, src_reg)
+ * BPF_CMPXCHG r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
+ */
+
+#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF) \
((struct bpf_insn) { \
- .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
+ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = OFF, \
- .imm = 0 })
+ .imm = OP })
+
+/* Legacy alias */
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)
/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
@@ -522,6 +540,13 @@ struct bpf_binary_header {
u8 image[] __aligned(BPF_IMAGE_ALIGNMENT);
};
+struct bpf_prog_stats {
+ u64 cnt;
+ u64 nsecs;
+ u64 misses;
+ struct u64_stats_sync syncp;
+} __aligned(2 * sizeof(u64));
+
struct bpf_prog {
u16 pages; /* Number of allocated pages */
u16 jited:1, /* Is our filter JIT'ed? */
@@ -540,10 +565,12 @@ struct bpf_prog {
u32 len; /* Number of filter blocks */
u32 jited_len; /* Size of jited insns in bytes */
u8 tag[BPF_TAG_SIZE];
- struct bpf_prog_aux *aux; /* Auxiliary fields */
- struct sock_fprog_kern *orig_prog; /* Original BPF program */
+ struct bpf_prog_stats __percpu *stats;
+ int __percpu *active;
unsigned int (*bpf_func)(const void *ctx,
const struct bpf_insn *insn);
+ struct bpf_prog_aux *aux; /* Auxiliary fields */
+ struct sock_fprog_kern *orig_prog; /* Original BPF program */
/* Instructions for interpreter */
struct sock_filter insns[0];
struct bpf_insn insnsi[];
@@ -564,7 +591,7 @@ DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
struct bpf_prog_stats *__stats; \
u64 __start = sched_clock(); \
__ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
- __stats = this_cpu_ptr(prog->aux->stats); \
+ __stats = this_cpu_ptr(prog->stats); \
u64_stats_update_begin(&__stats->syncp); \
__stats->cnt++; \
__stats->nsecs += sched_clock() - __start; \
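/* Editor's sketch, not part of the patch: with the counters now hanging off
 * prog->stats (per CPU) rather than prog->aux, a reader can aggregate them
 * as below; the helper name is hypothetical.
 */
static void example_bpf_prog_sum_stats(const struct bpf_prog *prog,
				       u64 *cnt, u64 *nsecs, u64 *misses)
{
	int cpu;

	*cnt = *nsecs = *misses = 0;
	for_each_possible_cpu(cpu) {
		const struct bpf_prog_stats *st = per_cpu_ptr(prog->stats, cpu);
		unsigned int start;
		u64 c, n, m;

		do {
			start = u64_stats_fetch_begin(&st->syncp);
			c = st->cnt;
			n = st->nsecs;
			m = st->misses;
		} while (u64_stats_fetch_retry(&st->syncp, start));
		*cnt += c;
		*nsecs += n;
		*misses += m;
	}
}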
@@ -886,7 +913,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
#define __bpf_call_base_args \
((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
- __bpf_call_base)
+ (void *)__bpf_call_base)
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
void bpf_jit_compile(struct bpf_prog *prog);
@@ -1281,6 +1308,11 @@ struct bpf_sysctl_kern {
u64 tmp_reg;
};
+#define BPF_SOCKOPT_KERN_BUF_SIZE 32
+struct bpf_sockopt_buf {
+ u8 data[BPF_SOCKOPT_KERN_BUF_SIZE];
+};
+
struct bpf_sockopt_kern {
struct sock *sk;
u8 *optval;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 6e479e9c48ce..80544d5c08e7 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -583,8 +583,16 @@ extern void free_pages(unsigned long addr, unsigned int order);
struct page_frag_cache;
extern void __page_frag_cache_drain(struct page *page, unsigned int count);
-extern void *page_frag_alloc(struct page_frag_cache *nc,
- unsigned int fragsz, gfp_t gfp_mask);
+extern void *page_frag_alloc_align(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask,
+ unsigned int align_mask);
+
+static inline void *page_frag_alloc(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask)
+{
+ return page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
+}
+
extern void page_frag_free(void *addr);
#define __free_page(page) __free_pages((page), 0)
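/* Editor's sketch, not part of the patch: align_mask is ANDed into the
 * fragment offset, so callers pass the negation of a power-of-two alignment,
 * while ~0u (as used by the page_frag_alloc() wrapper above) keeps the old
 * unaligned behaviour.  The cache pointer and sizes are illustrative.
 */
static void *example_aligned_frag(struct page_frag_cache *nc)
{
	/* 256-byte fragment, aligned to 64 bytes */
	return page_frag_alloc_align(nc, 256, GFP_ATOMIC, -64);
}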
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index 1b3371ae8193..452d8978ffc7 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -16,9 +16,9 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info,
const struct in6_addr *force_saddr);
-#if IS_BUILTIN(CONFIG_IPV6)
void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
const struct in6_addr *force_saddr);
+#if IS_BUILTIN(CONFIG_IPV6)
static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
{
icmp6_send(skb, type, code, info, NULL);
diff --git a/include/linux/if_hsr.h b/include/linux/if_hsr.h
new file mode 100644
index 000000000000..38bbc537d4e4
--- /dev/null
+++ b/include/linux/if_hsr.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IF_HSR_H_
+#define _LINUX_IF_HSR_H_
+
+/* used to differentiate various protocols */
+enum hsr_version {
+ HSR_V0 = 0,
+ HSR_V1,
+ PRP_V1,
+};
+
+#if IS_ENABLED(CONFIG_HSR)
+extern bool is_hsr_master(struct net_device *dev);
+extern int hsr_get_version(struct net_device *dev, enum hsr_version *ver);
+#else
+static inline bool is_hsr_master(struct net_device *dev)
+{
+ return false;
+}
+static inline int hsr_get_version(struct net_device *dev,
+ enum hsr_version *ver)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_HSR */
+
+#endif /*_LINUX_IF_HSR_H_*/
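/* Editor's sketch, not part of the patch: a hypothetical switchdev driver
 * using the new helpers to check whether an upper device is an HSR/PRP
 * master before attempting the NETIF_F_HW_HSR_* offloads added elsewhere in
 * this series.
 */
static bool example_upper_is_prp(struct net_device *upper)
{
	enum hsr_version ver;

	if (!is_hsr_master(upper))
		return false;
	if (hsr_get_version(upper, &ver))
		return false;
	return ver == PRP_V1;
}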
diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h
index 54c02c84906a..c1c76a70a6ce 100644
--- a/include/linux/indirect_call_wrapper.h
+++ b/include/linux/indirect_call_wrapper.h
@@ -36,6 +36,7 @@
#define INDIRECT_CALLABLE_DECLARE(f) f
#define INDIRECT_CALLABLE_SCOPE
+#define EXPORT_INDIRECT_CALLABLE(f) EXPORT_SYMBOL(f)
#else
#define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__)
@@ -44,6 +45,7 @@
#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) f(__VA_ARGS__)
#define INDIRECT_CALLABLE_DECLARE(f)
#define INDIRECT_CALLABLE_SCOPE static
+#define EXPORT_INDIRECT_CALLABLE(f)
#endif
/*
@@ -60,4 +62,10 @@
#define INDIRECT_CALL_INET(f, f2, f1, ...) f(__VA_ARGS__)
#endif
+#if IS_ENABLED(CONFIG_INET)
+#define INDIRECT_CALL_INET_1(f, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
+#else
+#define INDIRECT_CALL_INET_1(f, f1, ...) f(__VA_ARGS__)
+#endif
+
#endif
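/* Editor's sketch, not part of the patch: INDIRECT_CALL_INET_1() lets a hot
 * path skip the retpoline when the single likely target is the INET-provided
 * handler; the example_* names below are illustrative only.
 */
INDIRECT_CALLABLE_DECLARE(int example_v6_rcv(struct sk_buff *skb));

static int example_deliver(int (*handler)(struct sk_buff *skb),
			   struct sk_buff *skb)
{
	return INDIRECT_CALL_INET_1(handler, example_v6_rcv, skb);
}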
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index dda61d150a13..9d1f29f0c512 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -31,6 +31,7 @@ struct ipv6_devconf {
__s32 max_desync_factor;
__s32 max_addresses;
__s32 accept_ra_defrtr;
+ __u32 ra_defrtr_metric;
__s32 accept_ra_min_hop_limit;
__s32 accept_ra_pinfo;
__s32 ignore_routes_with_linkdown;
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index dbd69b3d170b..ffb787d5ebde 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -49,7 +49,11 @@ struct mdio_device {
unsigned int reset_assert_delay;
unsigned int reset_deassert_delay;
};
-#define to_mdio_device(d) container_of(d, struct mdio_device, dev)
+
+static inline struct mdio_device *to_mdio_device(const struct device *dev)
+{
+ return container_of(dev, struct mdio_device, dev);
+}
/* struct mdio_driver_common: Common to all MDIO drivers */
struct mdio_driver_common {
@@ -57,8 +61,12 @@ struct mdio_driver_common {
int flags;
};
#define MDIO_DEVICE_FLAG_PHY 1
-#define to_mdio_common_driver(d) \
- container_of(d, struct mdio_driver_common, driver)
+
+static inline struct mdio_driver_common *
+to_mdio_common_driver(const struct device_driver *driver)
+{
+ return container_of(driver, struct mdio_driver_common, driver);
+}
/* struct mdio_driver: Generic MDIO driver */
struct mdio_driver {
@@ -73,8 +81,13 @@ struct mdio_driver {
/* Clears up any memory if needed */
void (*remove)(struct mdio_device *mdiodev);
};
-#define to_mdio_driver(d) \
- container_of(to_mdio_common_driver(d), struct mdio_driver, mdiodrv)
+
+static inline struct mdio_driver *
+to_mdio_driver(const struct device_driver *driver)
+{
+ return container_of(to_mdio_common_driver(driver), struct mdio_driver,
+ mdiodrv);
+}
/* device driver data */
static inline void mdiodev_set_drvdata(struct mdio_device *mdio, void *data)
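/* Editor's sketch, not part of the patch: the macro-to-inline conversion
 * above makes the container_of() casts type-checked; a bus callback still
 * converts a generic struct device the same way.  The function name is
 * hypothetical.
 */
static int example_mdio_probe(struct device *dev)
{
	struct mdio_device *mdiodev = to_mdio_device(dev);
	struct mdio_driver *drv = to_mdio_driver(dev->driver);

	return drv->probe ? drv->probe(mdiodev) : 0;
}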
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 562862ff819c..ece53a252217 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -599,6 +599,15 @@ void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason);
/**
 * mhi_get_free_desc_count - Get the free descriptor count of a transfer ring
 * Get the number of TDs available for queueing buffers
+ * @mhi_dev: Device associated with the channels
+ * @dir: Direction of the channel
+ */
+int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
+ enum dma_data_direction dir);
+
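/* Editor's sketch, not part of the patch: a hypothetical MHI client driver
 * using the new helper to apply back-pressure when the transfer ring runs
 * out of descriptors.
 */
static bool example_mhi_tx_ring_full(struct mhi_device *mhi_dev)
{
	return mhi_get_free_desc_count(mhi_dev, DMA_TO_DEVICE) == 0;
}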
+/**
* mhi_prepare_for_power_up - Do pre-initialization before power up.
* This is optional, call this before power up if
* the controller does not want bus framework to
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index f1de49d64a98..dc3d2508f5c6 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -359,6 +359,10 @@ enum mlx5_event {
MLX5_EVENT_TYPE_MAX = 0x100,
};
+enum mlx5_driver_event {
+ MLX5_DRIVER_EVENT_TYPE_TRAP = 0,
+};
+
enum {
MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
@@ -578,7 +582,10 @@ struct mlx5_init_seg {
__be32 internal_timer_l;
__be32 rsvd3[2];
__be32 health_counter;
- __be32 rsvd4[1019];
+ __be32 rsvd4[11];
+ __be32 real_time_h;
+ __be32 real_time_l;
+ __be32 rsvd5[1006];
__be64 ieee1588_clk;
__be32 ieee1588_clk_type;
__be32 clr_intx;
@@ -899,6 +906,11 @@ static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
return (u64)lo | ((u64)hi << 32);
}
+static inline u16 get_cqe_flow_tag(struct mlx5_cqe64 *cqe)
+{
+ return be32_to_cpu(cqe->sop_drop_qpn) & 0xFFF;
+}
+
#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE (9)
#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE (6)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 4672e12f1aa5..6ea8d67e3cb8 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -143,6 +143,7 @@ enum {
MLX5_REG_MPCNT = 0x9051,
MLX5_REG_MTPPS = 0x9053,
MLX5_REG_MTPPSE = 0x9054,
+ MLX5_REG_MTUTC = 0x9055,
MLX5_REG_MPEGC = 0x9056,
MLX5_REG_MCQS = 0x9060,
MLX5_REG_MCQI = 0x9061,
@@ -193,7 +194,8 @@ enum port_state_policy {
enum mlx5_coredev_type {
MLX5_COREDEV_PF,
- MLX5_COREDEV_VF
+ MLX5_COREDEV_VF,
+ MLX5_COREDEV_SF,
};
struct mlx5_field_desc {
@@ -507,6 +509,10 @@ struct mlx5_devcom;
struct mlx5_fw_reset;
struct mlx5_eq_table;
struct mlx5_irq_table;
+struct mlx5_vhca_state_notifier;
+struct mlx5_sf_dev_table;
+struct mlx5_sf_hw_table;
+struct mlx5_sf_table;
struct mlx5_rate_limit {
u32 rate;
@@ -564,6 +570,7 @@ struct mlx5_priv {
int host_pf_pages;
struct mlx5_core_health health;
+ struct list_head traps;
/* start: qp staff */
struct dentry *qp_debugfs;
@@ -582,7 +589,6 @@ struct mlx5_priv {
/* end: alloc staff */
struct dentry *dbg_root;
- struct list_head dev_list;
struct list_head ctx_list;
spinlock_t ctx_lock;
struct mlx5_adev **adev;
@@ -603,6 +609,15 @@ struct mlx5_priv {
struct mlx5_bfreg_data bfregs;
struct mlx5_uars_page *uar;
+#ifdef CONFIG_MLX5_SF
+ struct mlx5_vhca_state_notifier *vhca_state_notifier;
+ struct mlx5_sf_dev_table *sf_dev_table;
+ struct mlx5_core_dev *parent_mdev;
+#endif
+#ifdef CONFIG_MLX5_SF_MANAGER
+ struct mlx5_sf_hw_table *sf_hw_table;
+ struct mlx5_sf_table *sf_table;
+#endif
};
enum mlx5_device_state {
@@ -661,18 +676,22 @@ struct mlx5_pps {
u8 enabled;
};
-struct mlx5_clock {
- struct mlx5_nb pps_nb;
- seqlock_t lock;
+struct mlx5_timer {
struct cyclecounter cycles;
struct timecounter tc;
- struct hwtstamp_config hwtstamp_config;
u32 nominal_c_mult;
unsigned long overflow_period;
struct delayed_work overflow_work;
+};
+
+struct mlx5_clock {
+ struct mlx5_nb pps_nb;
+ seqlock_t lock;
+ struct hwtstamp_config hwtstamp_config;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
struct mlx5_pps pps_info;
+ struct mlx5_timer timer;
};
struct mlx5_dm;
@@ -1072,11 +1091,26 @@ enum {
MAX_MR_CACHE_ENTRIES
};
+/* Async-atomic event notifier used by mlx5 core to forward FW
+ * events received from the event queue to mlx5 consumers.
+ * Optimise event queue dispatching.
+ */
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
+
+/* Async-atomic event notifier used for forwarding
+ * events from the event queue to the mlx5 events dispatcher,
+ * eswitch, clock and others.
+ */
int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
+/* Blocking event notifier used to forward SW events on the slow path */
+int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event,
+ void *data);
+
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
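/* Editor's sketch, not part of the patch: a hypothetical consumer of the new
 * blocking notifier chain; the payload behind 'data' is event-specific and
 * not spelled out here.
 */
static int example_trap_event(struct notifier_block *nb,
			      unsigned long event, void *data)
{
	if (event != MLX5_DRIVER_EVENT_TYPE_TRAP)
		return NOTIFY_DONE;
	/* slow-path handling of the software trap event */
	return NOTIFY_OK;
}

static struct notifier_block example_trap_nb = {
	.notifier_call = example_trap_event,
};
/* paired with mlx5_blocking_notifier_register(mdev, &example_trap_nb) and
 * mlx5_blocking_notifier_unregister() on teardown
 */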
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index 29fd832950e0..994c2c8cb4fd 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -96,6 +96,35 @@ static inline u32 mlx5_eswitch_get_vport_metadata_mask(void)
u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
u16 vport_num);
+u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
+ u16 vport_num);
+
+/* Reg C1 usage:
+ * Reg C1 = < ESW_TUN_ID(12) | ESW_TUN_OPTS(12) | ESW_ZONE_ID(8) >
+ *
+ * Highest 12 bits of reg c1 is the encapsulation tunnel id, next 12 bits is
+ * encapsulation tunnel options, and the lowest 8 bits are used for zone id.
+ *
+ * Zone id is used to restore CT flow when packet misses on chain.
+ *
+ * Tunnel id and options are used together to restore the tunnel info metadata
+ * on miss and to support inner header rewrite by means of implicit chain 0
+ * flows.
+ */
+#define ESW_ZONE_ID_BITS 8
+#define ESW_TUN_OPTS_BITS 12
+#define ESW_TUN_ID_BITS 12
+#define ESW_TUN_OPTS_OFFSET ESW_ZONE_ID_BITS
+#define ESW_TUN_OFFSET ESW_TUN_OPTS_OFFSET
+#define ESW_ZONE_ID_MASK GENMASK(ESW_ZONE_ID_BITS - 1, 0)
+#define ESW_TUN_OPTS_MASK GENMASK(32 - ESW_TUN_ID_BITS - 1, ESW_TUN_OPTS_OFFSET)
+#define ESW_TUN_MASK GENMASK(31, ESW_TUN_OFFSET)
+#define ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT 0 /* 0 is not a valid tunnel id */
+#define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT 0xFFF /* 0xFFF is a reserved mapping */
+#define ESW_TUN_SLOW_TABLE_GOTO_VPORT ((ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT << ESW_TUN_OPTS_BITS) | \
+ ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT)
+#define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK
+
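/* Editor's sketch, not part of the patch: decoding a reg C1 value with the
 * masks defined above; 'reg_c1' and the helper name are illustrative only.
 */
static inline void example_decode_reg_c1(u32 reg_c1, u32 *tun_id,
					 u32 *tun_opts, u8 *zone_id)
{
	*zone_id = reg_c1 & ESW_ZONE_ID_MASK;
	*tun_opts = (reg_c1 & ESW_TUN_OPTS_MASK) >> ESW_TUN_OPTS_OFFSET;
	*tun_id = reg_c1 >> (ESW_TUN_OPTS_OFFSET + ESW_TUN_OPTS_BITS);
}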
u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 442c0160caab..6f0b866fb495 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -842,11 +842,16 @@ struct mlx5_ifc_qos_cap_bits {
u8 reserved_at_4[0x1];
u8 packet_pacing_burst_bound[0x1];
u8 packet_pacing_typical_size[0x1];
- u8 reserved_at_7[0x4];
+ u8 reserved_at_7[0x1];
+ u8 nic_sq_scheduling[0x1];
+ u8 nic_bw_share[0x1];
+ u8 nic_rate_limit[0x1];
u8 packet_pacing_uid[0x1];
u8 reserved_at_c[0x14];
- u8 reserved_at_20[0x20];
+ u8 reserved_at_20[0xb];
+ u8 log_max_qos_nic_queue_group[0x5];
+ u8 reserved_at_30[0x10];
u8 packet_pacing_max_rate[0x20];
@@ -932,11 +937,18 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 reserved_at_200[0x600];
};
+enum {
+ MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0,
+ MLX5_QP_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1,
+ MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2,
+};
+
struct mlx5_ifc_roce_cap_bits {
u8 roce_apm[0x1];
u8 reserved_at_1[0x3];
u8 sw_r_roce_src_udp_port[0x1];
- u8 reserved_at_5[0x1b];
+ u8 reserved_at_5[0x19];
+ u8 qp_ts_format[0x2];
u8 reserved_at_20[0x60];
@@ -1253,6 +1265,18 @@ enum {
MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
};
+enum {
+ MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0,
+ MLX5_SQ_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1,
+ MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2,
+};
+
+enum {
+ MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0,
+ MLX5_RQ_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1,
+ MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2,
+};
+
struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_0[0x1f];
u8 vhca_resource_manager[0x1];
@@ -1278,7 +1302,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_a0[0x3];
u8 ece_support[0x1];
- u8 reserved_at_a4[0x7];
+ u8 reserved_at_a4[0x5];
+ u8 reg_c_preserve[0x1];
+ u8 reserved_at_aa[0x1];
u8 log_max_srq[0x5];
u8 reserved_at_b0[0x1];
u8 uplink_follow[0x1];
@@ -1564,7 +1590,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 general_obj_types[0x40];
- u8 reserved_at_440[0x4];
+ u8 sq_ts_format[0x2];
+ u8 rq_ts_format[0x2];
u8 steering_format_version[0x4];
u8 create_qp_start_hint[0x18];
@@ -2868,6 +2895,12 @@ enum {
MLX5_QPC_CS_RES_UP_TO_64B = 0x2,
};
+enum {
+ MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0,
+ MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT = 0x1,
+ MLX5_QPC_TIMESTAMP_FORMAT_REAL_TIME = 0x2,
+};
+
struct mlx5_ifc_qpc_bits {
u8 state[0x4];
u8 lag_tx_port_affinity[0x4];
@@ -2896,7 +2929,9 @@ struct mlx5_ifc_qpc_bits {
u8 log_rq_stride[0x3];
u8 no_sq[0x1];
u8 log_sq_size[0x4];
- u8 reserved_at_55[0x6];
+ u8 reserved_at_55[0x3];
+ u8 ts_format[0x2];
+ u8 reserved_at_5a[0x1];
u8 rlky[0x1];
u8 ulp_stateless_offload_mode[0x4];
@@ -3312,6 +3347,12 @@ enum {
MLX5_SQC_STATE_ERR = 0x3,
};
+enum {
+ MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0,
+ MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT = 0x1,
+ MLX5_SQC_TIMESTAMP_FORMAT_REAL_TIME = 0x2,
+};
+
struct mlx5_ifc_sqc_bits {
u8 rlky[0x1];
u8 cd_master[0x1];
@@ -3323,7 +3364,9 @@ struct mlx5_ifc_sqc_bits {
u8 reg_umr[0x1];
u8 allow_swp[0x1];
u8 hairpin[0x1];
- u8 reserved_at_f[0x11];
+ u8 reserved_at_f[0xb];
+ u8 ts_format[0x2];
+ u8 reserved_at_1c[0x4];
u8 reserved_at_20[0x8];
u8 user_index[0x18];
@@ -3345,7 +3388,7 @@ struct mlx5_ifc_sqc_bits {
u8 reserved_at_e0[0x10];
u8 packet_pacing_rate_limit_index[0x10];
u8 tis_lst_sz[0x10];
- u8 reserved_at_110[0x10];
+ u8 qos_queue_group_id[0x10];
u8 reserved_at_120[0x40];
@@ -3360,6 +3403,7 @@ enum {
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT = 0x1,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2,
SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP = 0x4,
};
enum {
@@ -3414,6 +3458,12 @@ enum {
MLX5_RQC_STATE_ERR = 0x3,
};
+enum {
+ MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0,
+ MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT = 0x1,
+ MLX5_RQC_TIMESTAMP_FORMAT_REAL_TIME = 0x2,
+};
+
struct mlx5_ifc_rqc_bits {
u8 rlky[0x1];
u8 delay_drop_en[0x1];
@@ -3424,7 +3474,9 @@ struct mlx5_ifc_rqc_bits {
u8 reserved_at_c[0x1];
u8 flush_in_error_en[0x1];
u8 hairpin[0x1];
- u8 reserved_at_f[0x11];
+ u8 reserved_at_f[0xb];
+ u8 ts_format[0x2];
+ u8 reserved_at_1c[0x4];
u8 reserved_at_20[0x8];
u8 user_index[0x18];
@@ -4803,6 +4855,7 @@ struct mlx5_ifc_query_scheduling_element_out_bits {
enum {
SCHEDULING_HIERARCHY_E_SWITCH = 0x2,
+ SCHEDULING_HIERARCHY_NIC = 0x3,
};
struct mlx5_ifc_query_scheduling_element_in_bits {
@@ -5904,6 +5957,18 @@ struct mlx5_ifc_dealloc_modify_header_context_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_query_modify_header_context_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 modify_header_id[0x20];
+
+ u8 reserved_at_60[0xa0];
+};
+
struct mlx5_ifc_query_dct_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -9094,6 +9159,28 @@ struct mlx5_ifc_mpegc_reg_bits {
u8 reserved_at_60[0x100];
};
+enum {
+ MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE = 0x1,
+ MLX5_MTUTC_OPERATION_ADJUST_TIME = 0x2,
+ MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC = 0x3,
+};
+
+struct mlx5_ifc_mtutc_reg_bits {
+ u8 reserved_at_0[0x1c];
+ u8 operation[0x4];
+
+ u8 freq_adjustment[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ u8 utc_sec[0x20];
+
+ u8 reserved_at_a0[0x2];
+ u8 utc_nsec[0x1e];
+
+ u8 time_adjustment[0x20];
+};
+
struct mlx5_ifc_pcam_enhanced_features_bits {
u8 reserved_at_0[0x68];
u8 fec_50G_per_lane_in_pplm[0x1];
@@ -9152,7 +9239,9 @@ struct mlx5_ifc_pcam_reg_bits {
};
struct mlx5_ifc_mcam_enhanced_features_bits {
- u8 reserved_at_0[0x6e];
+ u8 reserved_at_0[0x6b];
+ u8 ptpcyc2realtime_modify[0x1];
+ u8 reserved_at_6c[0x2];
u8 pci_status_and_power[0x1];
u8 reserved_at_6f[0x5];
u8 mark_tx_action_cnp[0x1];
@@ -9175,7 +9264,8 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 regs_95_to_87[0x9];
u8 mpegc[0x1];
- u8 regs_85_to_68[0x12];
+ u8 mtutc[0x1];
+ u8 regs_84_to_68[0x11];
u8 tracer_registers[0x4];
u8 regs_63_to_32[0x20];
@@ -9908,6 +9998,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_mcda_reg_bits mcda_reg;
struct mlx5_ifc_mirc_reg_bits mirc_reg;
struct mlx5_ifc_mfrl_reg_bits mfrl_reg;
+ struct mlx5_ifc_mtutc_reg_bits mtutc_reg;
u8 reserved_at_0[0x60e0];
};
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ecdf8a8cd6ae..078633d43af9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1584,7 +1584,7 @@ struct address_space *page_mapping_file(struct page *page);
* ALLOC_NO_WATERMARKS and the low watermark was not
* met implying that the system is under some pressure.
*/
-static inline bool page_is_pfmemalloc(struct page *page)
+static inline bool page_is_pfmemalloc(const struct page *page)
{
/*
* Page index cannot be this large so this must be
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 934de56644e7..3de38d6a0aea 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -84,6 +84,12 @@ enum {
NETIF_F_GRO_FRAGLIST_BIT, /* Fraglist GRO */
NETIF_F_HW_MACSEC_BIT, /* Offload MACsec operations */
+ NETIF_F_GRO_UDP_FWD_BIT, /* Allow UDP GRO for forwarding */
+
+ NETIF_F_HW_HSR_TAG_INS_BIT, /* Offload HSR tag insertion */
+ NETIF_F_HW_HSR_TAG_RM_BIT, /* Offload HSR tag removal */
+ NETIF_F_HW_HSR_FWD_BIT, /* Offload HSR forwarding */
+ NETIF_F_HW_HSR_DUP_BIT, /* Offload HSR duplication */
/*
* Add your fresh new feature above and remember to update
@@ -157,6 +163,11 @@ enum {
#define NETIF_F_GRO_FRAGLIST __NETIF_F(GRO_FRAGLIST)
#define NETIF_F_GSO_FRAGLIST __NETIF_F(GSO_FRAGLIST)
#define NETIF_F_HW_MACSEC __NETIF_F(HW_MACSEC)
+#define NETIF_F_GRO_UDP_FWD __NETIF_F(GRO_UDP_FWD)
+#define NETIF_F_HW_HSR_TAG_INS __NETIF_F(HW_HSR_TAG_INS)
+#define NETIF_F_HW_HSR_TAG_RM __NETIF_F(HW_HSR_TAG_RM)
+#define NETIF_F_HW_HSR_FWD __NETIF_F(HW_HSR_FWD)
+#define NETIF_F_HW_HSR_DUP __NETIF_F(HW_HSR_DUP)
/* Finds the next feature with the highest number of the range of start till 0.
*/
@@ -234,7 +245,7 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
/* Changeable features with no special hardware requirements that defaults to off. */
-#define NETIF_F_SOFT_FEATURES_OFF NETIF_F_GRO_FRAGLIST
+#define NETIF_F_SOFT_FEATURES_OFF (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD)
#define NETIF_F_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
NETIF_F_HW_VLAN_CTAG_RX | \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5ff27c12ce68..ddf4cfc12615 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -347,6 +347,7 @@ struct napi_struct {
struct list_head dev_list;
struct hlist_node napi_hash_node;
unsigned int napi_id;
+ struct task_struct *thread;
};
enum {
@@ -358,6 +359,7 @@ enum {
NAPI_STATE_NO_BUSY_POLL, /* Do not add in napi_hash, no busy polling */
NAPI_STATE_IN_BUSY_POLL, /* sk_busy_loop() owns this NAPI */
NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing*/
+ NAPI_STATE_THREADED, /* The poll is performed inside its own thread*/
};
enum {
@@ -369,6 +371,7 @@ enum {
NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
NAPIF_STATE_PREFER_BUSY_POLL = BIT(NAPI_STATE_PREFER_BUSY_POLL),
+ NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED),
};
enum gro_result {
@@ -376,7 +379,6 @@ enum gro_result {
GRO_MERGED_FREE,
GRO_HELD,
GRO_NORMAL,
- GRO_DROP,
GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;
@@ -495,6 +497,8 @@ static inline bool napi_complete(struct napi_struct *n)
return napi_complete_done(n, 0);
}
+int dev_set_threaded(struct net_device *dev, bool threaded);
+
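/* Editor's sketch, not part of the patch: a hypothetical driver opting all of
 * its NAPI instances into threaded mode.  dev_set_threaded() flips the new
 * 'threaded' bit and moves each NAPI poll from softirq context into its own
 * kthread (the per-NAPI 'thread' pointer added above).
 */
static void example_enable_threaded_napi(struct net_device *dev)
{
	if (dev_set_threaded(dev, true))
		netdev_warn(dev, "could not enable threaded NAPI\n");
}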
/**
* napi_disable - prevent NAPI from scheduling
* @n: NAPI context
@@ -504,20 +508,7 @@ static inline bool napi_complete(struct napi_struct *n)
*/
void napi_disable(struct napi_struct *n);
-/**
- * napi_enable - enable NAPI scheduling
- * @n: NAPI context
- *
- * Resume NAPI from being scheduled on this context.
- * Must be paired with napi_disable.
- */
-static inline void napi_enable(struct napi_struct *n)
-{
- BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
- smp_mb__before_atomic();
- clear_bit(NAPI_STATE_SCHED, &n->state);
- clear_bit(NAPI_STATE_NPSVC, &n->state);
-}
+void napi_enable(struct napi_struct *n);
/**
* napi_synchronize - wait until NAPI is not running
@@ -859,6 +850,7 @@ enum tc_setup_type {
TC_SETUP_QDISC_ETS,
TC_SETUP_QDISC_TBF,
TC_SETUP_QDISC_FIFO,
+ TC_SETUP_QDISC_HTB,
};
/* These structures hold the attributes of bpf state that are being passed
@@ -1213,19 +1205,6 @@ struct netdev_net_notifier {
* struct netdev_phys_item_id *ppid)
* Called to get the parent ID of the physical port of this device.
*
- * void (*ndo_udp_tunnel_add)(struct net_device *dev,
- * struct udp_tunnel_info *ti);
- * Called by UDP tunnel to notify a driver about the UDP port and socket
- * address family that a UDP tunnel is listnening to. It is called only
- * when a new port starts listening. The operation is protected by the
- * RTNL.
- *
- * void (*ndo_udp_tunnel_del)(struct net_device *dev,
- * struct udp_tunnel_info *ti);
- * Called by UDP tunnel to notify the driver about a UDP port and socket
- * address family that the UDP tunnel is not listening to anymore. The
- * operation is protected by the RTNL.
- *
* void* (*ndo_dfwd_add_station)(struct net_device *pdev,
* struct net_device *dev)
* Called by upper layer devices to accelerate switching or other
@@ -1412,6 +1391,8 @@ struct net_device_ops {
struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev,
struct sk_buff *skb,
bool all_slaves);
+ struct net_device* (*ndo_sk_get_lower_dev)(struct net_device *dev,
+ struct sock *sk);
netdev_features_t (*ndo_fix_features)(struct net_device *dev,
netdev_features_t features);
int (*ndo_set_features)(struct net_device *dev,
@@ -1464,10 +1445,6 @@ struct net_device_ops {
struct netdev_phys_item_id *ppid);
int (*ndo_get_phys_port_name)(struct net_device *dev,
char *name, size_t len);
- void (*ndo_udp_tunnel_add)(struct net_device *dev,
- struct udp_tunnel_info *ti);
- void (*ndo_udp_tunnel_del)(struct net_device *dev,
- struct udp_tunnel_info *ti);
void* (*ndo_dfwd_add_station)(struct net_device *pdev,
struct net_device *dev);
void (*ndo_dfwd_del_station)(struct net_device *pdev,
@@ -1842,6 +1819,8 @@ enum netdev_priv_flags {
*
* @wol_enabled: Wake-on-LAN is enabled
*
+ * @threaded: napi threaded mode is enabled
+ *
* @net_notifier_list: List of per-net netdev notifier block
* that follow this device when it is moved
* to another network namespace.
@@ -1873,7 +1852,6 @@ struct net_device {
unsigned long mem_end;
unsigned long mem_start;
unsigned long base_addr;
- int irq;
/*
* Some hardware also needs these fields (state,dev_list,
@@ -1895,6 +1873,23 @@ struct net_device {
struct list_head lower;
} adj_list;
+ /* Read-mostly cache-line for fast-path access */
+ unsigned int flags;
+ unsigned int priv_flags;
+ const struct net_device_ops *netdev_ops;
+ int ifindex;
+ unsigned short gflags;
+ unsigned short hard_header_len;
+
+ /* Note : dev->mtu is often read without holding a lock.
+ * Writers usually hold RTNL.
+ * It is recommended to use READ_ONCE() to annotate the reads,
+ * and to use WRITE_ONCE() to annotate the writes.
+ */
+ unsigned int mtu;
+ unsigned short needed_headroom;
+ unsigned short needed_tailroom;
+
netdev_features_t features;
netdev_features_t hw_features;
netdev_features_t wanted_features;
@@ -1903,10 +1898,15 @@ struct net_device {
netdev_features_t mpls_features;
netdev_features_t gso_partial_features;
- int ifindex;
+ unsigned int min_mtu;
+ unsigned int max_mtu;
+ unsigned short type;
+ unsigned char min_header_len;
+ unsigned char name_assign_type;
+
int group;
- struct net_device_stats stats;
+ struct net_device_stats stats; /* not used by modern drivers */
atomic_long_t rx_dropped;
atomic_long_t tx_dropped;
@@ -1920,7 +1920,6 @@ struct net_device {
const struct iw_handler_def *wireless_handlers;
struct iw_public_data *wireless_data;
#endif
- const struct net_device_ops *netdev_ops;
const struct ethtool_ops *ethtool_ops;
#ifdef CONFIG_NET_L3_MASTER_DEV
const struct l3mdev_ops *l3mdev_ops;
@@ -1939,34 +1938,12 @@ struct net_device {
const struct header_ops *header_ops;
- unsigned int flags;
- unsigned int priv_flags;
-
- unsigned short gflags;
- unsigned short padded;
-
unsigned char operstate;
unsigned char link_mode;
unsigned char if_port;
unsigned char dma;
- /* Note : dev->mtu is often read without holding a lock.
- * Writers usually hold RTNL.
- * It is recommended to use READ_ONCE() to annotate the reads,
- * and to use WRITE_ONCE() to annotate the writes.
- */
- unsigned int mtu;
- unsigned int min_mtu;
- unsigned int max_mtu;
- unsigned short type;
- unsigned short hard_header_len;
- unsigned char min_header_len;
- unsigned char name_assign_type;
-
- unsigned short needed_headroom;
- unsigned short needed_tailroom;
-
/* Interface address info. */
unsigned char perm_addr[MAX_ADDR_LEN];
unsigned char addr_assign_type;
@@ -1977,7 +1954,10 @@ struct net_device {
unsigned short neigh_priv_len;
unsigned short dev_id;
unsigned short dev_port;
+ unsigned short padded;
+
spinlock_t addr_list_lock;
+ int irq;
struct netdev_hw_addr_list uc;
struct netdev_hw_addr_list mc;
@@ -2159,6 +2139,7 @@ struct net_device {
struct lock_class_key *qdisc_running_key;
bool proto_down;
unsigned wol_enabled:1;
+ unsigned threaded:1;
struct list_head net_notifier_list;
@@ -2633,6 +2614,7 @@ enum netdev_lag_hash {
NETDEV_LAG_HASH_L23,
NETDEV_LAG_HASH_E23,
NETDEV_LAG_HASH_E34,
+ NETDEV_LAG_HASH_VLAN_SRCMAC,
NETDEV_LAG_HASH_UNKNOWN,
};
@@ -2876,6 +2858,8 @@ int init_dummy_netdev(struct net_device *dev);
struct net_device *netdev_get_xmit_slave(struct net_device *dev,
struct sk_buff *skb,
bool all_slaves);
+struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
+ struct sock *sk);
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
@@ -3918,6 +3902,9 @@ int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
struct netlink_ext_ack *extack);
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
struct netlink_ext_ack *extack);
+int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
+ struct netlink_ext_ack *extack);
+int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
int dev_change_carrier(struct net_device *, bool new_carrier);
int dev_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid);
@@ -3944,14 +3931,42 @@ int xdp_umem_query(struct net_device *dev, u16 queue_id);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
+int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb);
bool is_skb_forwardable(const struct net_device *dev,
const struct sk_buff *skb);
+static __always_inline bool __is_skb_forwardable(const struct net_device *dev,
+ const struct sk_buff *skb,
+ const bool check_mtu)
+{
+ const u32 vlan_hdr_len = 4; /* VLAN_HLEN */
+ unsigned int len;
+
+ if (!(dev->flags & IFF_UP))
+ return false;
+
+ if (!check_mtu)
+ return true;
+
+ len = dev->mtu + dev->hard_header_len + vlan_hdr_len;
+ if (skb->len <= len)
+ return true;
+
+ /* if TSO is enabled, we don't care about the length as the packet
+ * could be forwarded without being segmented before
+ */
+ if (skb_is_gso(skb))
+ return true;
+
+ return false;
+}
+
static __always_inline int ____dev_forward_skb(struct net_device *dev,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ const bool check_mtu)
{
if (skb_orphan_frags(skb, GFP_ATOMIC) ||
- unlikely(!is_skb_forwardable(dev, skb))) {
+ unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) {
atomic_long_inc(&dev->rx_dropped);
kfree_skb(skb);
return NET_RX_DROP;
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 0101747de549..f0f3a8354c3c 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -463,8 +463,6 @@ extern struct nf_ct_hook __rcu *nf_ct_hook;
struct nlattr;
struct nfnl_ct_hook {
- struct nf_conn *(*get_ct)(const struct sk_buff *skb,
- enum ip_conntrack_info *ctinfo);
size_t (*build_size)(const struct nf_conn *ct);
int (*build)(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 9f118771e248..0bcf98098c5a 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -11,6 +11,8 @@
struct net;
+void do_trace_netlink_extack(const char *msg);
+
static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
{
return (struct nlmsghdr *)skb->data;
@@ -90,6 +92,8 @@ struct netlink_ext_ack {
static const char __msg[] = msg; \
struct netlink_ext_ack *__extack = (extack); \
\
+ do_trace_netlink_extack(__msg); \
+ \
if (__extack) \
__extack->_msg = __msg; \
} while (0)
@@ -110,6 +114,8 @@ struct netlink_ext_ack {
static const char __msg[] = msg; \
struct netlink_ext_ack *__extack = (extack); \
\
+ do_trace_netlink_extack(__msg); \
+ \
if (__extack) { \
__extack->_msg = __msg; \
__extack->bad_attr = (attr); \
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index cfe8c607a628..2b05e7f7c238 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -26,9 +26,6 @@ of_phy_connect(struct net_device *dev, struct device_node *phy_np,
struct phy_device *
of_phy_get_and_connect(struct net_device *dev, struct device_node *np,
void (*hndlr)(struct net_device *));
-struct phy_device *
-of_phy_attach(struct net_device *dev, struct device_node *phy_np,
- u32 flags, phy_interface_t iface);
struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
int of_phy_register_fixed_link(struct device_node *np);
@@ -100,13 +97,6 @@ of_phy_get_and_connect(struct net_device *dev, struct device_node *np,
return NULL;
}
-static inline struct phy_device *of_phy_attach(struct net_device *dev,
- struct device_node *phy_np,
- u32 flags, phy_interface_t iface)
-{
- return NULL;
-}
-
static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
{
return NULL;
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 9effb511acde..1a12e4436b5b 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -71,11 +71,11 @@ extern const int phy_10gbit_features_array[1];
/*
* Set phydev->irq to PHY_POLL if interrupts are not supported,
- * or not desired for this PHY. Set to PHY_IGNORE_INTERRUPT if
- * the attached driver handles the interrupt
+ * or not desired for this PHY. Set to PHY_MAC_INTERRUPT if
+ * the attached MAC driver handles the interrupt
*/
#define PHY_POLL -1
-#define PHY_IGNORE_INTERRUPT -2
+#define PHY_MAC_INTERRUPT -2
#define PHY_IS_INTERNAL 0x00000001
#define PHY_RST_AFTER_CLK_EN 0x00000002
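/* Editor's sketch, not part of the patch: the rename keeps the semantics -
 * the MAC driver, not the PHY core, services the PHY interrupt.  A
 * hypothetical MAC driver marks that exactly as before.
 */
static void example_mark_mac_handled_irq(struct phy_device *phydev)
{
	phydev->irq = PHY_MAC_INTERRUPT;	/* was PHY_IGNORE_INTERRUPT */
}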
@@ -104,8 +104,10 @@ extern const int phy_10gbit_features_array[1];
* @PHY_INTERFACE_MODE_MOCA: Multimedia over Coax
* @PHY_INTERFACE_MODE_QSGMII: Quad SGMII
* @PHY_INTERFACE_MODE_TRGMII: Turbo RGMII
+ * @PHY_INTERFACE_MODE_100BASEX: 100 BaseX
* @PHY_INTERFACE_MODE_1000BASEX: 1000 BaseX
* @PHY_INTERFACE_MODE_2500BASEX: 2500 BaseX
+ * @PHY_INTERFACE_MODE_5GBASER: 5G BaseR
* @PHY_INTERFACE_MODE_RXAUI: Reduced XAUI
* @PHY_INTERFACE_MODE_XAUI: 10 Gigabit Attachment Unit Interface
* @PHY_INTERFACE_MODE_10GBASER: 10G BaseR
@@ -135,8 +137,10 @@ typedef enum {
PHY_INTERFACE_MODE_MOCA,
PHY_INTERFACE_MODE_QSGMII,
PHY_INTERFACE_MODE_TRGMII,
+ PHY_INTERFACE_MODE_100BASEX,
PHY_INTERFACE_MODE_1000BASEX,
PHY_INTERFACE_MODE_2500BASEX,
+ PHY_INTERFACE_MODE_5GBASER,
PHY_INTERFACE_MODE_RXAUI,
PHY_INTERFACE_MODE_XAUI,
/* 10GBASE-R, XFI, SFI - single lane 10G Serdes */
@@ -207,6 +211,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "1000base-x";
case PHY_INTERFACE_MODE_2500BASEX:
return "2500base-x";
+ case PHY_INTERFACE_MODE_5GBASER:
+ return "5gbase-r";
case PHY_INTERFACE_MODE_RXAUI:
return "rxaui";
case PHY_INTERFACE_MODE_XAUI:
@@ -217,6 +223,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "usxgmii";
case PHY_INTERFACE_MODE_10GKR:
return "10gbase-kr";
+ case PHY_INTERFACE_MODE_100BASEX:
+ return "100base-x";
default:
return "unknown";
}
@@ -484,6 +492,7 @@ struct macsec_ops;
* @sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal.
* @loopback_enabled: Set true if this PHY has been loopbacked successfully.
* @downshifted_rate: Set true if link speed has been downshifted.
+ * @is_on_sfp_module: Set true if PHY is located on an SFP module.
* @state: State of the PHY for management purposes
* @dev_flags: Device-specific flags used by the PHY driver.
* @irq: IRQ number of the PHY's interrupt (-1 if none)
@@ -499,6 +508,7 @@ struct macsec_ops;
*
* @speed: Current link speed
* @duplex: Current duplex
+ * @port: Current port
* @pause: Current pause
* @asym_pause: Current asymmetric pause
* @supported: Combined MAC/PHY supported linkmodes
@@ -556,6 +566,7 @@ struct phy_device {
unsigned sysfs_links:1;
unsigned loopback_enabled:1;
unsigned downshifted_rate:1;
+ unsigned is_on_sfp_module:1;
unsigned autoneg:1;
/* The most recently read link state */
@@ -577,6 +588,7 @@ struct phy_device {
*/
int speed;
int duplex;
+ int port;
int pause;
int asym_pause;
u8 master_slave_get;
@@ -644,8 +656,11 @@ struct phy_device {
const struct macsec_ops *macsec_ops;
#endif
};
-#define to_phy_device(d) container_of(to_mdio_device(d), \
- struct phy_device, mdio)
+
+static inline struct phy_device *to_phy_device(const struct device *dev)
+{
+ return container_of(to_mdio_device(dev), struct phy_device, mdio);
+}
/**
* struct phy_tdr_config - Configuration of a TDR raw test
@@ -1193,11 +1208,11 @@ static inline int phy_clear_bits_mmd(struct phy_device *phydev, int devad,
* @phydev: the phy_device struct
*
* NOTE: must be kept in sync with addition/removal of PHY_POLL and
- * PHY_IGNORE_INTERRUPT
+ * PHY_MAC_INTERRUPT
*/
static inline bool phy_interrupt_is_valid(struct phy_device *phydev)
{
- return phydev->irq != PHY_POLL && phydev->irq != PHY_IGNORE_INTERRUPT;
+ return phydev->irq != PHY_POLL && phydev->irq != PHY_MAC_INTERRUPT;
}
/**
@@ -1284,6 +1299,15 @@ static inline bool phy_is_internal(struct phy_device *phydev)
}
/**
+ * phy_on_sfp - Convenience function for testing if a PHY is on an SFP module
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_on_sfp(struct phy_device *phydev)
+{
+ return phydev->is_on_sfp_module;
+}
+
+/**
* phy_interface_mode_is_rgmii - Convenience function for testing if a
* PHY interface mode is RGMII (all variants)
* @mode: the &phy_interface_t enum
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 4d58dc8943f0..e339b48de32d 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -470,7 +470,7 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain)
/**
* @brief qed_chain_reset - Resets the chain to its start state
*
- * @param p_chain pointer to a previously allocted chain
+ * @param p_chain pointer to a previously allocated chain
*/
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
diff --git a/include/linux/remoteproc/qcom_rproc.h b/include/linux/remoteproc/qcom_rproc.h
index 647051662174..82b211518136 100644
--- a/include/linux/remoteproc/qcom_rproc.h
+++ b/include/linux/remoteproc/qcom_rproc.h
@@ -3,8 +3,6 @@
struct notifier_block;
-#if IS_ENABLED(CONFIG_QCOM_RPROC_COMMON)
-
/**
* enum qcom_ssr_notify_type - Startup/Shutdown events related to a remoteproc
* processor.
@@ -26,6 +24,8 @@ struct qcom_ssr_notify_data {
bool crashed;
};
+#if IS_ENABLED(CONFIG_QCOM_RPROC_COMMON)
+
void *qcom_register_ssr_notifier(const char *name, struct notifier_block *nb);
int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5f60c9e907c9..6d0a33d1c0db 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -430,28 +430,32 @@ enum {
/* device driver is going to provide hardware time stamp */
SKBTX_IN_PROGRESS = 1 << 2,
- /* device driver supports TX zero-copy buffers */
- SKBTX_DEV_ZEROCOPY = 1 << 3,
-
/* generate wifi status information (where possible) */
SKBTX_WIFI_STATUS = 1 << 4,
- /* This indicates at least one fragment might be overwritten
- * (as in vmsplice(), sendfile() ...)
- * If we need to compute a TX checksum, we'll need to copy
- * all frags to avoid possible bad checksum
- */
- SKBTX_SHARED_FRAG = 1 << 5,
-
/* generate software time stamp when entering packet scheduling */
SKBTX_SCHED_TSTAMP = 1 << 6,
};
-#define SKBTX_ZEROCOPY_FRAG (SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG)
#define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
SKBTX_SCHED_TSTAMP)
#define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
+/* Definitions for flags in struct skb_shared_info */
+enum {
+ /* use zcopy routines */
+ SKBFL_ZEROCOPY_ENABLE = BIT(0),
+
+ /* This indicates at least one fragment might be overwritten
+ * (as in vmsplice(), sendfile() ...)
+ * If we need to compute a TX checksum, we'll need to copy
+ * all frags to avoid possible bad checksum
+ */
+ SKBFL_SHARED_FRAG = BIT(1),
+};
+
+#define SKBFL_ZEROCOPY_FRAG (SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG)
+
/*
* The callback notifies userspace to release buffers when skb DMA is done in
* lower device, the skb last reference should be 0 when calling this.
@@ -461,7 +465,8 @@ enum {
* The desc field is used to track userspace buffer index.
*/
struct ubuf_info {
- void (*callback)(struct ubuf_info *, bool zerocopy_success);
+ void (*callback)(struct sk_buff *, struct ubuf_info *,
+ bool zerocopy_success);
union {
struct {
unsigned long desc;
@@ -475,6 +480,7 @@ struct ubuf_info {
};
};
refcount_t refcnt;
+ u8 flags;
struct mmpin {
struct user_struct *user;
@@ -487,19 +493,14 @@ struct ubuf_info {
int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
void mm_unaccount_pinned_pages(struct mmpin *mmp);
-struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size);
-struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
- struct ubuf_info *uarg);
-
-static inline void sock_zerocopy_get(struct ubuf_info *uarg)
-{
- refcount_inc(&uarg->refcnt);
-}
+struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size);
+struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
+ struct ubuf_info *uarg);
-void sock_zerocopy_put(struct ubuf_info *uarg);
-void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
+void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
-void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);
+void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
+ bool success);
int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
@@ -510,7 +511,7 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
* the end of the header data, ie. at skb->end.
*/
struct skb_shared_info {
- __u8 __unused;
+ __u8 flags;
__u8 meta_len;
__u8 nr_frags;
__u8 tx_flags;
@@ -1086,6 +1087,8 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb_around(struct sk_buff *skb,
void *data, unsigned int frag_size);
+struct sk_buff *napi_build_skb(void *data, unsigned int frag_size);
+
/**
* alloc_skb - allocate a network buffer
* @size: size to allocate
@@ -1352,8 +1355,8 @@ void
skb_flow_dissect_ct(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container,
- u16 *ctinfo_map,
- size_t mapsize);
+ u16 *ctinfo_map, size_t mapsize,
+ bool post_ct);
void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
@@ -1438,11 +1441,22 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
{
- bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY;
+ bool is_zcopy = skb && skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE;
return is_zcopy ? skb_uarg(skb) : NULL;
}
+static inline void net_zcopy_get(struct ubuf_info *uarg)
+{
+ refcount_inc(&uarg->refcnt);
+}
+
+static inline void skb_zcopy_init(struct sk_buff *skb, struct ubuf_info *uarg)
+{
+ skb_shinfo(skb)->destructor_arg = uarg;
+ skb_shinfo(skb)->flags |= uarg->flags;
+}
+
static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
bool *have_ref)
{
@@ -1450,16 +1464,15 @@ static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
if (unlikely(have_ref && *have_ref))
*have_ref = false;
else
- sock_zerocopy_get(uarg);
- skb_shinfo(skb)->destructor_arg = uarg;
- skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
+ net_zcopy_get(uarg);
+ skb_zcopy_init(skb, uarg);
}
}
static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
{
skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
- skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
+ skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_FRAG;
}
static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
@@ -1472,33 +1485,32 @@ static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
}
-/* Release a reference on a zerocopy structure */
-static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
+static inline void net_zcopy_put(struct ubuf_info *uarg)
{
- struct ubuf_info *uarg = skb_zcopy(skb);
+ if (uarg)
+ uarg->callback(NULL, uarg, true);
+}
+static inline void net_zcopy_put_abort(struct ubuf_info *uarg, bool have_uref)
+{
if (uarg) {
- if (skb_zcopy_is_nouarg(skb)) {
- /* no notification callback */
- } else if (uarg->callback == sock_zerocopy_callback) {
- uarg->zerocopy = uarg->zerocopy && zerocopy;
- sock_zerocopy_put(uarg);
- } else {
- uarg->callback(uarg, zerocopy);
- }
-
- skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
+ if (uarg->callback == msg_zerocopy_callback)
+ msg_zerocopy_put_abort(uarg, have_uref);
+ else if (have_uref)
+ net_zcopy_put(uarg);
}
}
-/* Abort a zerocopy operation and revert zckey on error in send syscall */
-static inline void skb_zcopy_abort(struct sk_buff *skb)
+/* Release a reference on a zerocopy structure */
+static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success)
{
struct ubuf_info *uarg = skb_zcopy(skb);
if (uarg) {
- sock_zerocopy_put_abort(uarg, false);
- skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
+ if (!skb_zcopy_is_nouarg(skb))
+ uarg->callback(skb, uarg, zerocopy_success);
+
+ skb_shinfo(skb)->flags &= ~SKBFL_ZEROCOPY_FRAG;
}
}
@@ -2777,7 +2789,7 @@ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
if (likely(!skb_zcopy(skb)))
return 0;
if (!skb_zcopy_is_nouarg(skb) &&
- skb_uarg(skb)->callback == sock_zerocopy_callback)
+ skb_uarg(skb)->callback == msg_zerocopy_callback)
return 0;
return skb_copy_ubufs(skb, gfp_mask);
}
@@ -2808,7 +2820,26 @@ void skb_queue_purge(struct sk_buff_head *list);
unsigned int skb_rbtree_purge(struct rb_root *root);
-void *netdev_alloc_frag(unsigned int fragsz);
+void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
+
+/**
+ * netdev_alloc_frag - allocate a page fragment
+ * @fragsz: fragment size
+ *
+ * Allocates a frag from a page for receive buffer.
+ * Uses GFP_ATOMIC allocations.
+ */
+static inline void *netdev_alloc_frag(unsigned int fragsz)
+{
+ return __netdev_alloc_frag_align(fragsz, ~0u);
+}
+
+static inline void *netdev_alloc_frag_align(unsigned int fragsz,
+ unsigned int align)
+{
+ WARN_ON_ONCE(!is_power_of_2(align));
+ return __netdev_alloc_frag_align(fragsz, -align);
+}
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
gfp_t gfp_mask);
@@ -2867,7 +2898,20 @@ static inline void skb_free_frag(void *addr)
page_frag_free(addr);
}
-void *napi_alloc_frag(unsigned int fragsz);
+void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
+
+static inline void *napi_alloc_frag(unsigned int fragsz)
+{
+ return __napi_alloc_frag_align(fragsz, ~0u);
+}
+
+static inline void *napi_alloc_frag_align(unsigned int fragsz,
+ unsigned int align)
+{
+ WARN_ON_ONCE(!is_power_of_2(align));
+ return __napi_alloc_frag_align(fragsz, -align);
+}
+
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
unsigned int length, gfp_t gfp_mask);
static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
@@ -2877,7 +2921,7 @@ static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
}
void napi_consume_skb(struct sk_buff *skb, int budget);
-void __kfree_skb_flush(void);
+void napi_skb_free_stolen_head(struct sk_buff *skb);
void __kfree_skb_defer(struct sk_buff *skb);
/**
@@ -2929,12 +2973,28 @@ static inline struct page *dev_alloc_page(void)
}
/**
+ * dev_page_is_reusable - check whether a page can be reused for network Rx
+ * @page: the page to test
+ *
+ * A page shouldn't be considered for reusing/recycling if it was allocated
+ * under memory pressure or at a distant memory node.
+ *
+ * Returns false if this page should be returned to page allocator, true
+ * otherwise.
+ */
+static inline bool dev_page_is_reusable(const struct page *page)
+{
+ return likely(page_to_nid(page) == numa_mem_id() &&
+ !page_is_pfmemalloc(page));
+}
+
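/* Editor's sketch, not part of the patch: a hypothetical rx-page recycling
 * path using the new helper instead of open-coding the pfmemalloc and
 * NUMA-node checks.
 */
static void example_recycle_or_free(struct page *page,
				    struct page **cache_slot)
{
	if (dev_page_is_reusable(page)) {
		*cache_slot = page;	/* keep it for the next refill */
	} else {
		*cache_slot = NULL;
		put_page(page);
	}
}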
+/**
* skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
* @page: The page that was allocated from skb_alloc_page
* @skb: The skb that may need pfmemalloc set
*/
-static inline void skb_propagate_pfmemalloc(struct page *page,
- struct sk_buff *skb)
+static inline void skb_propagate_pfmemalloc(const struct page *page,
+ struct sk_buff *skb)
{
if (page_is_pfmemalloc(page))
skb->pfmemalloc = true;
@@ -3324,7 +3384,7 @@ static inline int skb_linearize(struct sk_buff *skb)
static inline bool skb_has_shared_frag(const struct sk_buff *skb)
{
return skb_is_nonlinear(skb) &&
- skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
+ skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG;
}
/**
@@ -3849,7 +3909,7 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
void skb_complete_tx_timestamp(struct sk_buff *skb,
struct skb_shared_hwtstamps *hwtstamps);
-void __skb_tstamp_tx(struct sk_buff *orig_skb,
+void __skb_tstamp_tx(struct sk_buff *orig_skb, const struct sk_buff *ack_skb,
struct skb_shared_hwtstamps *hwtstamps,
struct sock *sk, int tstype);
@@ -4611,6 +4671,11 @@ static inline void skb_reset_redirect(struct sk_buff *skb)
#endif
}
+static inline bool skb_csum_is_sctp(struct sk_buff *skb)
+{
+ return skb->csum_not_inet;
+}
+
static inline void skb_set_kcov_handle(struct sk_buff *skb,
const u64 kcov_handle)
{
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index fec0c5ac1c4f..8edbbf5f2f93 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -390,7 +390,6 @@ static inline struct sk_psock *sk_psock_get(struct sock *sk)
}
void sk_psock_stop(struct sock *sk, struct sk_psock *psock);
-void sk_psock_destroy(struct rcu_head *rcu);
void sk_psock_drop(struct sock *sk, struct sk_psock *psock);
static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
diff --git a/include/linux/soc/marvell/octeontx2/asm.h b/include/linux/soc/marvell/octeontx2/asm.h
index ae2279fe830a..28c04d918f0f 100644
--- a/include/linux/soc/marvell/octeontx2/asm.h
+++ b/include/linux/soc/marvell/octeontx2/asm.h
@@ -22,8 +22,16 @@
: [rs]"r" (ioaddr)); \
(result); \
})
+#define cn10k_lmt_flush(val, addr) \
+({ \
+ __asm__ volatile(".cpu generic+lse\n" \
+ "steor %x[rf],[%[rs]]" \
+ : [rf]"+r"(val) \
+ : [rs]"r"(addr)); \
+})
#else
#define otx2_lmt_flush(ioaddr) ({ 0; })
+#define cn10k_lmt_flush(val, addr) ({ addr = val; })
#endif
#endif /* __SOC_OTX2_ASM_H */
diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h
index e600baec6825..afd47217996b 100644
--- a/include/linux/soc/qcom/mdt_loader.h
+++ b/include/linux/soc/qcom/mdt_loader.h
@@ -11,6 +11,8 @@
struct device;
struct firmware;
+#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
+
ssize_t qcom_mdt_get_size(const struct firmware *fw);
int qcom_mdt_load(struct device *dev, const struct firmware *fw,
const char *fw_name, int pas_id, void *mem_region,
@@ -23,4 +25,37 @@ int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
phys_addr_t *reloc_base);
void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len);
+#else /* !IS_ENABLED(CONFIG_QCOM_MDT_LOADER) */
+
+static inline ssize_t qcom_mdt_get_size(const struct firmware *fw)
+{
+ return -ENODEV;
+}
+
+static inline int qcom_mdt_load(struct device *dev, const struct firmware *fw,
+ const char *fw_name, int pas_id,
+ void *mem_region, phys_addr_t mem_phys,
+ size_t mem_size, phys_addr_t *reloc_base)
+{
+ return -ENODEV;
+}
+
+static inline int qcom_mdt_load_no_init(struct device *dev,
+ const struct firmware *fw,
+ const char *fw_name, int pas_id,
+ void *mem_region, phys_addr_t mem_phys,
+ size_t mem_size,
+ phys_addr_t *reloc_base)
+{
+ return -ENODEV;
+}
+
+static inline void *qcom_mdt_read_metadata(const struct firmware *fw,
+ size_t *data_len)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+#endif /* !IS_ENABLED(CONFIG_QCOM_MDT_LOADER) */
+
#endif
diff --git a/include/linux/ssb/ssb_driver_gige.h b/include/linux/ssb/ssb_driver_gige.h
index 31593b34608e..15ba0df1ee0d 100644
--- a/include/linux/ssb/ssb_driver_gige.h
+++ b/include/linux/ssb/ssb_driver_gige.h
@@ -76,7 +76,7 @@ static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev)
if (dev)
return !!(dev->dev->bus->sprom.boardflags_lo &
SSB_GIGE_BFL_ROBOSWITCH);
- return 0;
+ return false;
}
/* Returns whether we can only do one DMA at once. */
@@ -86,7 +86,7 @@ static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev)
if (dev)
return ((dev->dev->bus->chip_id == 0x4785) &&
(dev->dev->bus->chip_rev < 2));
- return 0;
+ return false;
}
/* Returns whether we must flush posted writes. */
@@ -159,7 +159,7 @@ static inline void ssb_gige_exit(void)
static inline bool pdev_is_ssb_gige_core(struct pci_dev *pdev)
{
- return 0;
+ return false;
}
static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev)
{
@@ -167,19 +167,19 @@ static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev)
}
static inline bool ssb_gige_is_rgmii(struct pci_dev *pdev)
{
- return 0;
+ return false;
}
static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev)
{
- return 0;
+ return false;
}
static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev)
{
- return 0;
+ return false;
}
static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev)
{
- return 0;
+ return false;
}
static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
{
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 15ca6b4167cc..a302982de2d7 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -202,5 +202,6 @@ struct plat_stmmacenet_data {
bool vlan_fail_q_en;
u8 vlan_fail_q;
unsigned int eee_usecs_rate;
+ struct pci_dev *pdev;
};
#endif
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 2f87377e9af7..48d8a363319e 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -496,7 +496,8 @@ static inline u32 tcp_saved_syn_len(const struct saved_syn *saved_syn)
}
struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
- const struct sk_buff *orig_skb);
+ const struct sk_buff *orig_skb,
+ const struct sk_buff *ack_skb);
static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
{
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 0ce4377545f8..f7cb3ddce7fb 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -98,6 +98,8 @@ struct cdc_ncm_ctx {
struct hrtimer tx_timer;
struct tasklet_struct bh;
+ struct usbnet *dev;
+
const struct usb_cdc_ncm_desc *func_desc;
const struct usb_cdc_mbim_desc *mbim_desc;
const struct usb_cdc_mbim_extended_desc *mbim_extended_desc;