author     Linus Torvalds <torvalds@linux-foundation.org>  2022-10-04 13:38:03 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-10-04 13:38:03 -0700
commit     0326074ff4652329f2a1a9c8685104576bd8d131 (patch)
tree       9a7574c7ccb05bf4c7cb34fc5a65457bb8f495cb /net/sched
parent     522667b24f08009591c90e75bfe2ffb67f555498 (diff)
parent     681bf011b9b5989c6e9db6beb64494918aab9a43 (diff)
Merge tag 'net-next-6.1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from Jakub Kicinski:

 "Core:
   - Introduce and use a single page frag cache for allocating small skb heads, clawing back the 10-20% performance regression in UDP flood test from previous fixes.
   - Run packets which already went thru HW coalescing thru SW GRO. This significantly improves TCP segment coalescing and simplifies deployments as different workloads benefit from HW or SW GRO.
   - Shrink the size of the base zero-copy send structure.
   - Move TCP init under a new slow / sleepable version of DO_ONCE().

  BPF:
   - Add BPF-specific, any-context-safe memory allocator.
   - Add helpers/kfuncs for PKCS#7 signature verification from BPF programs.
   - Define a new map type and related helpers for user space -> kernel communication over a ring buffer (BPF_MAP_TYPE_USER_RINGBUF).
   - Allow targeting BPF iterators to loop through resources of one task/thread.
   - Add ability to call selected destructive functions. Expose crash_kexec() to allow BPF to trigger a kernel dump. Use CAP_SYS_BOOT check on the loading process to judge permissions.
   - Enable BPF to collect custom hierarchical cgroup stats efficiently by integrating with the rstat framework.
   - Support struct arguments for trampoline based programs. Only structs with size <= 16B and x86 are supported.
   - Invoke cgroup/connect{4,6} programs for unprivileged ICMP ping sockets (instead of just TCP and UDP sockets).
   - Add a helper for accessing CLOCK_TAI for time sensitive network related programs.
   - Support accessing network tunnel metadata's flags.
   - Make TCP SYN ACK RTO tunable by BPF programs with TCP Fast Open.
   - Add support for writing to Netfilter's nf_conn:mark.

  Protocols:
   - WiFi: more Extremely High Throughput (EHT) and Multi-Link Operation (MLO) work (802.11be, WiFi 7).
   - vsock: improve support for SO_RCVLOWAT.
   - SMC: support SO_REUSEPORT.
   - Netlink: define and document how to use netlink in a "modern" way. Support reporting missing attributes via extended ACK.
   - IPSec: support collect metadata mode for xfrm interfaces.
   - TCPv6: send consistent autoflowlabel in SYN_RECV state and RST packets.
   - TCP: introduce optional per-netns connection hash table to allow better isolation between namespaces (opt-in, at the cost of memory and cache pressure).
   - MPTCP: support TCP_FASTOPEN_CONNECT.
   - Add NEXT-C-SID support in Segment Routing (SRv6) End behavior.
   - Adjust IP_UNICAST_IF sockopt behavior for connected UDP sockets.
   - Open vSwitch:
      - Allow specifying ifindex of new interfaces.
      - Allow conntrack and metering in non-initial user namespace.
   - TLS: support the Korean ARIA-GCM crypto algorithm.
   - Remove DECnet support.

  Driver API:
   - Allow selecting the conduit interface used by each port in DSA switches, at runtime.
   - Ethernet Power Sourcing Equipment and Power Device support.
   - Add tc-taprio support for queueMaxSDU parameter, i.e. setting per traffic class max frame size for time-based packet schedules.
   - Support PHY rate matching - adapting between differing host-side and link-side speeds.
   - Introduce QUSGMII PHY mode and 1000BASE-KX interface mode.
   - Validate OF (device tree) nodes for DSA shared ports; make phylink-related properties mandatory on DSA and CPU ports. Enforcing more uniformity should allow transitioning to phylink.
   - Require that flash component name used during update matches one of the components for which version is reported by info_get().
   - Remove "weight" argument from driver-facing NAPI API as much as possible. It's one of those magic knobs which seemed like a good idea at the time but is too indirect to use in practice.
   - Support offload of TLS connections with 256 bit keys.

  New hardware / drivers:
   - Ethernet:
      - Microchip KSZ9896 6-port Gigabit Ethernet Switch
      - Renesas Ethernet AVB (EtherAVB-IF) Gen4 SoCs
      - Analog Devices ADIN1110 and ADIN2111 industrial single pair Ethernet (10BASE-T1L) MAC+PHY.
      - Rockchip RV1126 Gigabit Ethernet (a version of stmmac IP).
   - Ethernet SFPs / modules:
      - RollBall / Hilink / Turris 10G copper SFPs
      - HALNy GPON module
   - WiFi:
      - CYW43439 SDIO chipset (brcmfmac)
      - CYW89459 PCIe chipset (brcmfmac)
      - BCM4378 on Apple platforms (brcmfmac)

  Drivers:
   - CAN:
      - gs_usb: HW timestamp support
   - Ethernet PHYs:
      - lan8814: cable diagnostics
   - Ethernet NICs:
      - Intel (100G):
         - implement control of FCS/CRC stripping
         - port splitting via devlink
         - L2TPv3 filtering offload
      - nVidia/Mellanox:
         - tunnel offload for sub-functions
         - MACSec offload, w/ Extended packet number and replay window offload
         - significantly restructure, and optimize the AF_XDP support, align the behavior with other vendors
      - Huawei:
         - configuring DSCP map for traffic class selection
         - querying standard FEC statistics
         - querying SerDes lane number via ethtool
      - Marvell/Cavium:
         - egress priority flow control
         - MACSec offload
      - AMD/SolarFlare:
         - PTP over IPv6 and raw Ethernet
      - small / embedded:
         - ax88772: convert to phylink (to support SFP cages)
         - altera: tse: convert to phylink
         - ftgmac100: support fixed link
         - enetc: standard Ethtool counters
         - macb: ZynqMP SGMII dynamic configuration support
         - tsnep: support multi-queue and use page pool
         - lan743x: Rx IP & TCP checksum offload
         - igc: add xdp frags support to ndo_xdp_xmit
   - Ethernet high-speed switches:
      - Marvell (prestera):
         - support SPAN port features (traffic mirroring)
         - nexthop object offloading
      - Microchip (sparx5):
         - multicast forwarding offload
         - QoS queuing offload (tc-mqprio, tc-tbf, tc-ets)
   - Ethernet embedded switches:
      - Marvell (mv88e6xxx):
         - support RGMII cmode
      - NXP (felix):
         - standardized ethtool counters
      - Microchip (lan966x):
         - QoS queuing offload (tc-mqprio, tc-tbf, tc-cbs, tc-ets)
         - traffic policing and mirroring
         - link aggregation / bonding offload
         - QUSGMII PHY mode support
   - Qualcomm 802.11ax WiFi (ath11k):
      - cold boot calibration support on WCN6750
      - support to connect to a non-transmit MBSSID AP profile
      - enable remain-on-channel support on WCN6750
      - Wake-on-WLAN support for WCN6750
      - support to provide transmit power from firmware via nl80211
      - support to get power save duration for each client
      - spectral scan support for 160 MHz
   - MediaTek WiFi (mt76):
      - WiFi-to-Ethernet bridging offload for MT7986 chips
   - RealTek WiFi (rtw89):
      - P2P support"

* tag 'net-next-6.1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1864 commits)
  eth: pse: add missing static inlines
  once: rename _SLOW to _SLEEPABLE
  net: pse-pd: add regulator based PSE driver
  dt-bindings: net: pse-dt: add bindings for regulator based PoDL PSE controller
  ethtool: add interface to interact with Ethernet Power Equipment
  net: mdiobus: search for PSE nodes by parsing PHY nodes.
  net: mdiobus: fwnode_mdiobus_register_phy() rework error handling
  net: add framework to support Ethernet PSE and PDs devices
  dt-bindings: net: phy: add PoDL PSE property
  net: marvell: prestera: Propagate nh state from hw to kernel
  net: marvell: prestera: Add neighbour cache accounting
  net: marvell: prestera: add stub handler neighbour events
  net: marvell: prestera: Add heplers to interact with fib_notifier_info
  net: marvell: prestera: Add length macros for prestera_ip_addr
  net: marvell: prestera: add delayed wq and flush wq on deinit
  net: marvell: prestera: Add strict cleanup of fib arbiter
  net: marvell: prestera: Add cleanup of allocated fib_nodes
  net: marvell: prestera: Add router nexthops ABI
  eth: octeon: fix build after netif_napi_add() changes
  net/mlx5: E-Switch, Return EBUSY if can't get mode lock
  ...
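One item above worth a concrete illustration is the removal of the "weight" argument from the driver-facing NAPI API. A minimal driver-side sketch, assuming the netif_napi_add() / netif_napi_add_weighted() pair from this release; "my_priv", "my_poll" and "my_setup_napi" are hypothetical driver names:

/*
 * Sketch only: netif_napi_add() no longer takes a weight; drivers that
 * really need a non-default weight call netif_napi_add_weighted().
 */
#include <linux/netdevice.h>

struct my_priv {
	struct net_device *dev;
	struct napi_struct napi;
};

static int my_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	/* process up to "budget" packets here */
	if (done < budget)
		napi_complete_done(napi, done);
	return done;
}

static void my_setup_napi(struct my_priv *p)
{
	/* default weight, the common case after this series */
	netif_napi_add(p->dev, &p->napi, my_poll);

	/*
	 * explicit weight only where a driver genuinely needs it:
	 * netif_napi_add_weighted(p->dev, &p->napi, my_poll, 16);
	 */
}

The shortlog entry "eth: octeon: fix build after netif_napi_add() changes" above is fallout from exactly this signature change.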
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/act_api.c  |  33
-rw-r--r--  net/sched/act_bpf.c  |  30
-rw-r--r--  net/sched/act_connmark.c  |  28
-rw-r--r--  net/sched/act_csum.c  |  28
-rw-r--r--  net/sched/act_ct.c  |  37
-rw-r--r--  net/sched/act_ctinfo.c  |  28
-rw-r--r--  net/sched/act_gact.c  |  28
-rw-r--r--  net/sched/act_gate.c  |  28
-rw-r--r--  net/sched/act_ife.c  |  28
-rw-r--r--  net/sched/act_ipt.c  |  61
-rw-r--r--  net/sched/act_mirred.c  |  31
-rw-r--r--  net/sched/act_mpls.c  |  28
-rw-r--r--  net/sched/act_nat.c  |  28
-rw-r--r--  net/sched/act_pedit.c  |  28
-rw-r--r--  net/sched/act_police.c  |  28
-rw-r--r--  net/sched/act_sample.c  |  28
-rw-r--r--  net/sched/act_simple.c  |  28
-rw-r--r--  net/sched/act_skbedit.c  |  28
-rw-r--r--  net/sched/act_skbmod.c  |  28
-rw-r--r--  net/sched/act_tunnel_key.c  |  28
-rw-r--r--  net/sched/act_vlan.c  |  28
-rw-r--r--  net/sched/cls_api.c  |  13
-rw-r--r--  net/sched/cls_basic.c  |  16
-rw-r--r--  net/sched/cls_bpf.c  |  15
-rw-r--r--  net/sched/cls_flow.c  |  8
-rw-r--r--  net/sched/cls_flower.c  |  23
-rw-r--r--  net/sched/cls_fw.c  |  16
-rw-r--r--  net/sched/cls_matchall.c  |  12
-rw-r--r--  net/sched/cls_route.c  |  20
-rw-r--r--  net/sched/cls_rsvp.h  |  16
-rw-r--r--  net/sched/cls_tcindex.c  |  25
-rw-r--r--  net/sched/cls_u32.c  |  33
-rw-r--r--  net/sched/sch_api.c  |  43
-rw-r--r--  net/sched/sch_atm.c  |  7
-rw-r--r--  net/sched/sch_cake.c  |  12
-rw-r--r--  net/sched/sch_cbq.c  |  10
-rw-r--r--  net/sched/sch_cbs.c  |  8
-rw-r--r--  net/sched/sch_choke.c  |  4
-rw-r--r--  net/sched/sch_codel.c  |  3
-rw-r--r--  net/sched/sch_drr.c  |  11
-rw-r--r--  net/sched/sch_dsmark.c  |  16
-rw-r--r--  net/sched/sch_etf.c  |  6
-rw-r--r--  net/sched/sch_ets.c  |  16
-rw-r--r--  net/sched/sch_fq.c  |  3
-rw-r--r--  net/sched/sch_fq_codel.c  |  38
-rw-r--r--  net/sched/sch_fq_pie.c  |  6
-rw-r--r--  net/sched/sch_generic.c  |  1
-rw-r--r--  net/sched/sch_gred.c  |  13
-rw-r--r--  net/sched/sch_hfsc.c  |  13
-rw-r--r--  net/sched/sch_hhf.c  |  3
-rw-r--r--  net/sched/sch_htb.c  |  49
-rw-r--r--  net/sched/sch_mq.c  |  5
-rw-r--r--  net/sched/sch_mqprio.c  |  5
-rw-r--r--  net/sched/sch_multiq.c  |  10
-rw-r--r--  net/sched/sch_netem.c  |  11
-rw-r--r--  net/sched/sch_pie.c  |  3
-rw-r--r--  net/sched/sch_plug.c  |  3
-rw-r--r--  net/sched/sch_prio.c  |  13
-rw-r--r--  net/sched/sch_qfq.c  |  11
-rw-r--r--  net/sched/sch_red.c  |  13
-rw-r--r--  net/sched/sch_sfb.c  |  9
-rw-r--r--  net/sched/sch_sfq.c  |  8
-rw-r--r--  net/sched/sch_skbprio.c  |  12
-rw-r--r--  net/sched/sch_taprio.c  |  291
-rw-r--r--  net/sched/sch_tbf.c  |  9
-rw-r--r--  net/sched/sch_teql.c  |  3
66 files changed, 436 insertions, 1069 deletions
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 817065aa2833..9b31a10cc639 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -676,6 +676,31 @@ int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
}
EXPORT_SYMBOL(tcf_idr_search);
+static int __tcf_generic_walker(struct net *net, struct sk_buff *skb,
+ struct netlink_callback *cb, int type,
+ const struct tc_action_ops *ops,
+ struct netlink_ext_ack *extack)
+{
+ struct tc_action_net *tn = net_generic(net, ops->net_id);
+
+ if (unlikely(ops->walk))
+ return ops->walk(net, skb, cb, type, ops, extack);
+
+ return tcf_generic_walker(tn, skb, cb, type, ops, extack);
+}
+
+static int __tcf_idr_search(struct net *net,
+ const struct tc_action_ops *ops,
+ struct tc_action **a, u32 index)
+{
+ struct tc_action_net *tn = net_generic(net, ops->net_id);
+
+ if (unlikely(ops->lookup))
+ return ops->lookup(net, a, index);
+
+ return tcf_idr_search(tn, a, index);
+}
+
static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
{
struct tc_action *p;
@@ -926,7 +951,7 @@ int tcf_register_action(struct tc_action_ops *act,
struct tc_action_ops *a;
int ret;
- if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
+ if (!act->act || !act->dump || !act->init)
return -EINVAL;
/* We have to register pernet ops before making the action ops visible,
@@ -1638,7 +1663,7 @@ static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
goto err_out;
}
err = -ENOENT;
- if (ops->lookup(net, &a, index) == 0) {
+ if (__tcf_idr_search(net, ops, &a, index) == 0) {
NL_SET_ERR_MSG(extack, "TC action with specified index not found");
goto err_mod;
}
@@ -1703,7 +1728,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
goto out_module_put;
}
- err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops, extack);
+ err = __tcf_generic_walker(net, skb, &dcb, RTM_DELACTION, ops, extack);
if (err <= 0) {
nla_nest_cancel(skb, nest);
goto out_module_put;
@@ -2121,7 +2146,7 @@ static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
if (nest == NULL)
goto out_module_put;
- ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o, NULL);
+ ret = __tcf_generic_walker(net, skb, cb, RTM_GETACTION, a_o, NULL);
if (ret < 0)
goto out_module_put;
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index fea2d78b9ddc..b79eee44e24e 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -29,7 +29,6 @@ struct tcf_bpf_cfg {
bool is_ebpf;
};
-static unsigned int bpf_net_id;
static struct tc_action_ops act_bpf_ops;
static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act,
@@ -280,7 +279,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
struct tcf_proto *tp, u32 flags,
struct netlink_ext_ack *extack)
{
- struct tc_action_net *tn = net_generic(net, bpf_net_id);
+ struct tc_action_net *tn = net_generic(net, act_bpf_ops.net_id);
bool bind = flags & TCA_ACT_FLAGS_BIND;
struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
struct tcf_chain *goto_ch = NULL;
@@ -334,7 +333,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
is_ebpf = tb[TCA_ACT_BPF_FD];
- if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
+ if (is_bpf == is_ebpf) {
ret = -EINVAL;
goto put_chain;
}
@@ -390,23 +389,6 @@ static void tcf_bpf_cleanup(struct tc_action *act)
tcf_bpf_cfg_cleanup(&tmp);
}
-static int tcf_bpf_walker(struct net *net, struct sk_buff *skb,
- struct netlink_callback *cb, int type,
- const struct tc_action_ops *ops,
- struct netlink_ext_ack *extack)
-{
- struct tc_action_net *tn = net_generic(net, bpf_net_id);
-
- return tcf_generic_walker(tn, skb, cb, type, ops, extack);
-}
-
-static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, bpf_net_id);
-
- return tcf_idr_search(tn, a, index);
-}
-
static struct tc_action_ops act_bpf_ops __read_mostly = {
.kind = "bpf",
.id = TCA_ID_BPF,
@@ -415,27 +397,25 @@ static struct tc_action_ops act_bpf_ops __read_mostly = {
.dump = tcf_bpf_dump,
.cleanup = tcf_bpf_cleanup,
.init = tcf_bpf_init,
- .walk = tcf_bpf_walker,
- .lookup = tcf_bpf_search,
.size = sizeof(struct tcf_bpf),
};
static __net_init int bpf_init_net(struct net *net)
{
- struct tc_action_net *tn = net_generic(net, bpf_net_id);
+ struct tc_action_net *tn = net_generic(net, act_bpf_ops.net_id);
return tc_action_net_init(net, tn, &act_bpf_ops);
}
static void __net_exit bpf_exit_net(struct list_head *net_list)
{
- tc_action_net_exit(net_list, bpf_net_id);
+ tc_action_net_exit(net_list, act_bpf_ops.net_id);
}
static struct pernet_operations bpf_net_ops = {
.init = bpf_init_net,
.exit_batch = bpf_exit_net,
- .id = &bpf_net_id,
+ .id = &act_bpf_ops.net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 09e2aafc8943..66b143bb04ac 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -25,7 +25,6 @@
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
-static unsigned int connmark_net_id;
static struct tc_action_ops act_connmark_ops;
static int tcf_connmark_act(struct sk_buff *skb, const struct tc_action *a,
@@ -99,7 +98,7 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
struct tcf_proto *tp, u32 flags,
struct netlink_ext_ack *extack)
{
- struct tc_action_net *tn = net_generic(net, connmark_net_id);
+ struct tc_action_net *tn = net_generic(net, act_connmark_ops.net_id);
struct nlattr *tb[TCA_CONNMARK_MAX + 1];
bool bind = flags & TCA_ACT_FLAGS_BIND;
struct tcf_chain *goto_ch = NULL;
@@ -200,23 +199,6 @@ nla_put_failure:
return -1;
}
-static int tcf_connmark_walker(struct net *net, struct sk_buff *skb,
- struct netlink_callback *cb, int type,
- const struct tc_action_ops *ops,
- struct netlink_ext_ack *extack)
-{
- struct tc_action_net *tn = net_generic(net, connmark_net_id);
-
- return tcf_generic_walker(tn, skb, cb, type, ops, extack);
-}
-
-static int tcf_connmark_search(struct net *net, struct tc_action **a, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, connmark_net_id);
-
- return tcf_idr_search(tn, a, index);
-}
-
static struct tc_action_ops act_connmark_ops = {
.kind = "connmark",
.id = TCA_ID_CONNMARK,
@@ -224,27 +206,25 @@ static struct tc_action_ops act_connmark_ops = {
.act = tcf_connmark_act,
.dump = tcf_connmark_dump,
.init = tcf_connmark_init,
- .walk = tcf_connmark_walker,
- .lookup = tcf_connmark_search,
.size = sizeof(struct tcf_connmark_info),
};
static __net_init int connmark_init_net(struct net *net)
{
- struct tc_action_net *tn = net_generic(net, connmark_net_id);
+ struct tc_action_net *tn = net_generic(net, act_connmark_ops.net_id);
return tc_action_net_init(net, tn, &act_connmark_ops);
}
static void __net_exit connmark_exit_net(struct list_head *net_list)
{
- tc_action_net_exit(net_list, connmark_net_id);
+ tc_action_net_exit(net_list, act_connmark_ops.net_id);
}
static struct pernet_operations connmark_net_ops = {
.init = connmark_init_net,
.exit_batch = connmark_exit_net,
- .id = &connmark_net_id,
+ .id = &act_connmark_ops.net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 22847ee009ef..1366adf9b909 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -37,7 +37,6 @@ static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};
-static unsigned int csum_net_id;
static struct tc_action_ops act_csum_ops;
static int tcf_csum_init(struct net *net, struct nlattr *nla,
@@ -45,7 +44,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
struct tcf_proto *tp,
u32 flags, struct netlink_ext_ack *extack)
{
- struct tc_action_net *tn = net_generic(net, csum_net_id);
+ struct tc_action_net *tn = net_generic(net, act_csum_ops.net_id);
bool bind = flags & TCA_ACT_FLAGS_BIND;
struct tcf_csum_params *params_new;
struct nlattr *tb[TCA_CSUM_MAX + 1];
@@ -673,23 +672,6 @@ static void tcf_csum_cleanup(struct tc_action *a)
kfree_rcu(params, rcu);
}
-static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
- struct netlink_callback *cb, int type,
- const struct tc_action_ops *ops,
- struct netlink_ext_ack *extack)
-{
- struct tc_action_net *tn = net_generic(net, csum_net_id);
-
- return tcf_generic_walker(tn, skb, cb, type, ops, extack);
-}
-
-static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, csum_net_id);
-
- return tcf_idr_search(tn, a, index);
-}
-
static size_t tcf_csum_get_fill_size(const struct tc_action *act)
{
return nla_total_size(sizeof(struct tc_csum));
@@ -722,8 +704,6 @@ static struct tc_action_ops act_csum_ops = {
.dump = tcf_csum_dump,
.init = tcf_csum_init,
.cleanup = tcf_csum_cleanup,
- .walk = tcf_csum_walker,
- .lookup = tcf_csum_search,
.get_fill_size = tcf_csum_get_fill_size,
.offload_act_setup = tcf_csum_offload_act_setup,
.size = sizeof(struct tcf_csum),
@@ -731,20 +711,20 @@ static struct tc_action_ops act_csum_ops = {
static __net_init int csum_init_net(struct net *net)
{
- struct tc_action_net *tn = net_generic(net, csum_net_id);
+ struct tc_action_net *tn = net_generic(net, act_csum_ops.net_id);
return tc_action_net_init(net, tn, &act_csum_ops);
}
static void __net_exit csum_exit_net(struct list_head *net_list)
{
- tc_action_net_exit(net_list, csum_net_id);
+ tc_action_net_exit(net_list, act_csum_ops.net_id);
}
static struct pernet_operations csum_net_ops = {
.init = csum_init_net,
.exit_batch = csum_exit_net,
- .id = &csum_net_id,
+ .id = &act_csum_ops.net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 5950974ae8f6..b38d91d6b249 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -649,7 +649,6 @@ static void tcf_ct_flow_tables_uninit(void)
}
static struct tc_action_ops act_ct_ops;
-static unsigned int ct_net_id;
struct tc_ct_action_net {
struct tc_action_net tn; /* Must be first */
@@ -697,7 +696,6 @@ drop_ct:
static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
{
unsigned int len;
- int err;
switch (family) {
case NFPROTO_IPV4:
@@ -711,9 +709,7 @@ static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
len = skb->len;
}
- err = pskb_trim_rcsum(skb, len);
-
- return err;
+ return pskb_trim_rcsum(skb, len);
}
static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
@@ -1255,7 +1251,7 @@ static int tcf_ct_fill_params(struct net *net,
struct nlattr **tb,
struct netlink_ext_ack *extack)
{
- struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
+ struct tc_ct_action_net *tn = net_generic(net, act_ct_ops.net_id);
struct nf_conntrack_zone zone;
struct nf_conn *tmpl;
int err;
@@ -1330,7 +1326,7 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
struct tcf_proto *tp, u32 flags,
struct netlink_ext_ack *extack)
{
- struct tc_action_net *tn = net_generic(net, ct_net_id);
+ struct tc_action_net *tn = net_generic(net, act_ct_ops.net_id);
bool bind = flags & TCA_ACT_FLAGS_BIND;
struct tcf_ct_params *params = NULL;
struct nlattr *tb[TCA_CT_MAX + 1];
@@ -1561,23 +1557,6 @@ nla_put_failure:
return -1;
}
-static int tcf_ct_walker(struct net *net, struct sk_buff *skb,
- struct netlink_callback *cb, int type,
- const struct tc_action_ops *ops,
- struct netlink_ext_ack *extack)
-{
- struct tc_action_net *tn = net_generic(net, ct_net_id);
-
- return tcf_generic_walker(tn, skb, cb, type, ops, extack);
-}
-
-static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, ct_net_id);
-
- return tcf_idr_search(tn, a, index);
-}
-
static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
u64 drops, u64 lastuse, bool hw)
{
@@ -1616,8 +1595,6 @@ static struct tc_action_ops act_ct_ops = {
.dump = tcf_ct_dump,
.init = tcf_ct_init,
.cleanup = tcf_ct_cleanup,
- .walk = tcf_ct_walker,
- .lookup = tcf_ct_search,
.stats_update = tcf_stats_update,
.offload_act_setup = tcf_ct_offload_act_setup,
.size = sizeof(struct tcf_ct),
@@ -1626,7 +1603,7 @@ static struct tc_action_ops act_ct_ops = {
static __net_init int ct_init_net(struct net *net)
{
unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
- struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
+ struct tc_ct_action_net *tn = net_generic(net, act_ct_ops.net_id);
if (nf_connlabels_get(net, n_bits - 1)) {
tn->labels = false;
@@ -1644,20 +1621,20 @@ static void __net_exit ct_exit_net(struct list_head *net_list)
rtnl_lock();
list_for_each_entry(net, net_list, exit_list) {
- struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
+ struct tc_ct_action_net *tn = net_generic(net, act_ct_ops.net_id);
if (tn->labels)
nf_connlabels_put(net);
}
rtnl_unlock();
- tc_action_net_exit(net_list, ct_net_id);
+ tc_action_net_exit(net_list, act_ct_ops.net_id);
}
static struct pernet_operations ct_net_ops = {
.init = ct_init_net,
.exit_batch = ct_exit_net,
- .id = &ct_net_id,
+ .id = &act_ct_ops.net_id,
.size = sizeof(struct tc_ct_action_net),
};
diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
index 0281e45987a4..d4102f0a9abd 100644
--- a/net/sched/act_ctinfo.c
+++ b/net/sched/act_ctinfo.c
@@ -25,7 +25,6 @@
#include <net/netfilter/nf_conntrack_zones.h>
static struct tc_action_ops act_ctinfo_ops;
-static unsigned int ctinfo_net_id;
static void tcf_ctinfo_dscp_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
struct tcf_ctinfo_params *cp,
@@ -157,7 +156,7 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
struct tcf_proto *tp, u32 flags,
struct netlink_ext_ack *extack)
{
- struct tc_action_net *tn = net_generic(net, ctinfo_net_id);
+ struct tc_action_net *tn = net_generic(net, act_ctinfo_ops.net_id);
bool bind = flags & TCA_ACT_FLAGS_BIND;
u32 dscpmask = 0, dscpstatemask, index;
struct nlattr *tb[TCA_CTINFO_MAX + 1];
@@ -342,23 +341,6 @@ nla_put_failure:
return -1;
}
-static int tcf_ctinfo_walker(struct net *net, struct sk_buff *skb,
- struct netlink_callback *cb, int type,
- const struct tc_action_ops *ops,
- struct netlink_ext_ack *extack)
-{
- struct tc_action_net *tn = net_generic(net, ctinfo_net_id);
-
- return tcf_generic_walker(tn, skb, cb, type, ops, extack);
-}
-
-static int tcf_ctinfo_search(struct net *net, struct tc_action **a, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, ctinfo_net_id);
-
- return tcf_idr_search(tn, a, index);
-}
-
static void tcf_ctinfo_cleanup(struct tc_action *a)
{
struct tcf_ctinfo *ci = to_ctinfo(a);
@@ -377,27 +359,25 @@ static struct tc_action_ops act_ctinfo_ops = {
.dump = tcf_ctinfo_dump,
.init = tcf_ctinfo_init,
.cleanup= tcf_ctinfo_cleanup,
- .walk = tcf_ctinfo_walker,
- .lookup = tcf_ctinfo_search,
.size = sizeof(struct tcf_ctinfo),
};
static __net_init int ctinfo_init_net(struct net *net)
{
- struct tc_action_net *tn = net_generic(net, ctinfo_net_id);
+ struct tc_action_net *tn = net_generic(net, act_ctinfo_ops.net_id);
return tc_action_net_init(net, tn, &act_ctinfo_ops);
}
static void __net_exit ctinfo_exit_net(struct list_head *net_list)
{
- tc_action_net_exit(net_list, ctinfo_net_id);
+ tc_action_net_exit(net_list, act_ctinfo_ops.net_id);
}
static struct pernet_operations ctinfo_net_ops = {
.init = ctinfo_init_net,
.exit_batch = ctinfo_exit_net,
- .id = &ctinfo_net_id,
+ .id = &act_ctinfo_ops.net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index ac29d1065232..abe1bcc5c797 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -19,7 +19,6 @@
#include <linux/tc_act/tc_gact.h>
#include <net/tc_act/tc_gact.h>
-static unsigned int gact_net_id;
static struct tc_action_ops act_gact_ops;
#ifdef CONFIG_GACT_PROB
@@ -55,7 +54,7 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
struct tcf_proto *tp, u32 flags,
struct netlink_ext_ack *extack)
{
- struct tc_action_net *tn = net_generic(net, gact_net_id);
+ struct tc_action_net *tn = net_generic(net, act_gact_ops.net_id);
bool bind = flags & TCA_ACT_FLAGS_BIND;
struct nlattr *tb[TCA_GACT_MAX + 1];
struct tcf_chain *goto_ch = NULL;
@@ -222,23 +221,6 @@ nla_put_failure:
return -1;
}
-static int tcf_gact_walker(struct net *net, struct sk_buff *skb,
- struct netlink_callback *cb, int type,
- const struct tc_action_ops *ops,
- struct netlink_ext_ack *extack)
-{
- struct tc_action_net *tn = net_generic(net, gact_net_id);
-
- return tcf_generic_walker(tn, skb, cb, type, ops, extack);
-}
-
-static int tcf_gact_search(struct net *net, struct tc_action **a, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, gact_net_id);
-
- return tcf_idr_search(tn, a, index);
-}
-
static size_t tcf_gact_get_fill_size(const struct tc_action *act)
{
size_t sz = nla_total_size(sizeof(struct tc_gact)); /* TCA_GACT_PARMS */
@@ -308,8 +290,6 @@ static struct tc_action_ops act_gact_ops = {
.stats_update = tcf_gact_stats_update,
.dump = tcf_gact_dump,
.init = tcf_gact_init,
- .walk = tcf_gact_walker,
- .lookup = tcf_gact_search,
.get_fill_size = tcf_gact_get_fill_size,
.offload_act_setup = tcf_gact_offload_act_setup,
.size = sizeof(struct tcf_gact),
@@ -317,20 +297,20 @@ static struct tc_action_ops act_gact_ops = {
static __net_init int gact_init_net(struct net *net)
{
- struct tc_action_net *tn = net_generic(net, gact_net_id);
+ struct tc_action_net *tn = net_generic(net, act_gact_ops.net_id);
return tc_action_net_init(net, tn, &act_gact_ops);
}
static void __net_exit gact_exit_net(struct list_head *net_list)
{
- tc_action_net_exit(net_list, gact_net_id);
+ tc_action_net_exit(net_list, act_gact_ops.net_id);
}
static struct pernet_operations gact_net_ops = {
.init = gact_init_net,
.exit_batch = gact_exit_net,
- .id = &gact_net_id,
+ .id = &act_gact_ops.net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c
index fd5155274733..3049878e7315 100644
--- a/net/sched/act_gate.c
+++ b/net/sched/act_gate.c
@@ -15,7 +15,6 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gate.h>
-static unsigned int gate_net_id;
static struct tc_action_ops act_gate_ops;
static ktime_t gate_get_time(struct tcf_gate *gact)
@@ -298,7 +297,7 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
struct tcf_proto *tp, u32 flags,
struct netlink_ext_ack *extack)
{
- struct tc_action_net *tn = net_generic(net, gate_net_id);
+ struct tc_action_net *tn = net_generic(net, act_gate_ops.net_id);
enum tk_offsets tk_offset = TK_OFFS_TAI;
bool bind = flags & TCA_ACT_FLAGS_BIND;
struct nlattr *tb[TCA_GATE_MAX + 1];
@@ -565,16 +564,6 @@ nla_put_failure:
return -1;
}
-static int tcf_gate_walker(struct net *net, struct sk_buff *skb,
- struct netlink_callback *cb, int type,
- const struct tc_action_ops *ops,
- struct netlink_ext_ack *extack)
-{
- struct tc_action_net *tn = net_generic(net, gate_net_id);
-
- return tcf_generic_walker(tn, skb, cb, type, ops, extack);
-}
-
static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u64 packets,
u64 drops, u64 lastuse, bool hw)
{
@@ -585,13 +574,6 @@ static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u64 packets,
tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}
-static int tcf_gate_search(struct net *net, struct tc_action **a, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, gate_net_id);
-
- return tcf_idr_search(tn, a, index);
-}
-
static size_t tcf_gate_get_fill_size(const struct tc_action *act)
{
return nla_total_size(sizeof(struct tc_gate));
@@ -654,30 +636,28 @@ static struct tc_action_ops act_gate_ops = {
.dump = tcf_gate_dump,
.init = tcf_gate_init,
.cleanup = tcf_gate_cleanup,
- .walk = tcf_gate_walker,
.stats_update = tcf_gate_stats_update,
.get_fill_size = tcf_gate_get_fill_size,
- .lookup = tcf_gate_search,
.offload_act_setup = tcf_gate_offload_act_setup,
.size = sizeof(struct tcf_gate),
};
static __net_init int gate_init_net(struct net *net)
{
- struct tc_action_net *tn = net_generic(net, gate_net_id);
+ struct tc_action_net *tn = net_generic(net, act_gate_ops.net_id);
return tc_action_net_init(net, tn, &act_gate_ops);
}
static void __net_exit gate_exit_net(struct list_head *net_list)
{
- tc_action_net_exit(net_list, gate_net_id);
+ tc_action_net_exit(net_list, act_gate_ops.net_id);
}
static struct pernet_operations gate_net_ops = {
.init = gate_init_net,
.exit_batch = gate_exit_net,
- .id = &gate_net_id,
+ .id = &act_gate_ops.net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 41ba55e60b1b..41d63b33461d 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -30,7 +30,6 @@
#include <linux/etherdevice.h>
#include <net/ife.h>
-static unsigned int ife_net_id;
static int max_metacnt = IFE_META_MAX + 1;
static struct tc_action_ops act_ife_ops;
@@ -482,7 +481,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
struct tcf_proto *tp, u32 flags,
struct netlink_ext_ack *extack)
{
- struct tc_action_net *tn = net_generic(net, ife_net_id);
+ struct tc_action_net *tn = net_generic(net, act_ife_ops.net_id);
bool bind = flags & TCA_ACT_FLAGS_BIND;
struct nlattr *tb[TCA_IFE_MAX + 1];
struct nlattr *tb2[IFE_META_MAX + 1];
@@ -878,23 +877,6 @@ static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a,
return tcf_ife_decode(skb, a, res);
}
-static int tcf_ife_walker(struct net *net, struct sk_buff *skb,
- struct netlink_callback *cb, int type,
- const struct tc_action_ops *ops,
- struct netlink_ext_ack *extack)
-{
- struct tc_action_net *tn = net_generic(net, ife_net_id);
-
- return tcf_generic_walker(tn, skb, cb, type, ops, extack);
-}
-
-static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, ife_net_id);
-
- return tcf_idr_search(tn, a, index);
-}
-
static struct tc_action_ops act_ife_ops = {
.kind = "ife",
.id = TCA_ID_IFE,
@@ -903,27 +885,25 @@ static struct tc_action_ops act_ife_ops = {
.dump = tcf_ife_dump,
.cleanup = tcf_ife_cleanup,
.init = tcf_ife_init,
- .walk = tcf_ife_walker,
- .lookup = tcf_ife_search,
.size = sizeof(struct tcf_ife_info),
};
static __net_init int ife_init_net(struct net *net)
{
- struct tc_action_net *tn = net_generic(net, ife_net_id);
+ struct tc_action_net *tn = net_generic(net, act_ife_ops.net_id);
return tc_action_net_init(net, tn, &act_ife_ops);
}
static void __net_exit ife_exit_net(struct list_head *net_list)
{
- tc_action_net_exit(net_list, ife_net_id);
+ tc_action_net_exit(net_list, act_ife_ops.net_id);
}
static struct pernet_operations ife_net_ops = {
.init = ife_init_net,
.exit_batch = ife_exit_net,
- .id = &ife_net_id,
+ .id = &act_ife_ops.net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 2f3d507c24a1..1625e1037416 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -24,10 +24,7 @@
#include <linux/netfilter_ipv4/ip_tables.h>
-static unsigned int ipt_net_id;
static struct tc_action_ops act_ipt_ops;
-
-static unsigned int xt_net_id;
static struct tc_action_ops act_xt_ops;
static int ipt_init_target(struct net *net, struct xt_entry_target *t,
@@ -206,8 +203,8 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla,
struct tcf_proto *tp,
u32 flags, struct netlink_ext_ack *extack)
{
- return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops,
- tp, flags);
+ return __tcf_ipt_init(net, act_ipt_ops.net_id, nla, est,
+ a, &act_ipt_ops, tp, flags);
}
static int tcf_xt_init(struct net *net, struct nlattr *nla,
@@ -215,8 +212,8 @@ static int tcf_xt_init(struct net *net, struct nlattr *nla,
struct tcf_proto *tp,
u32 flags, struct netlink_ext_ack *extack)
{
- return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops,
- tp, flags);
+ return __tcf_ipt_init(net, act_xt_ops.net_id, nla, est,
+ a, &act_xt_ops, tp, flags);
}
static int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a,
@@ -316,23 +313,6 @@ nla_put_failure:
return -1;
}
-static int tcf_ipt_walker(struct net *net, struct sk_buff *skb,
- struct netlink_callback *cb, int type,
- const struct tc_action_ops *ops,
- struct netlink_ext_ack *extack)
-{
- struct tc_action_net *tn = net_generic(net, ipt_net_id);
-
- return tcf_generic_walker(tn, skb, cb, type, ops, extack);
-}
-
-static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, ipt_net_id);
-
- return tcf_idr_search(tn, a, index);
-}
-
static struct tc_action_ops act_ipt_ops = {
.kind = "ipt",
.id = TCA_ID_IPT,
@@ -341,47 +321,28 @@ static struct tc_action_ops act_ipt_ops = {
.dump = tcf_ipt_dump,
.cleanup = tcf_ipt_release,
.init = tcf_ipt_init,
- .walk = tcf_ipt_walker,
- .lookup = tcf_ipt_search,
.size = sizeof(struct tcf_ipt),
};
static __net_init int ipt_init_net(struct net *net)
{
- struct tc_action_net *tn = net_generic(net, ipt_net_id);
+ struct tc_action_net *tn = net_generic(net, act_ipt_ops.net_id);
return tc_action_net_init(net, tn, &act_ipt_ops);
}
static void __net_exit ipt_exit_net(struct list_head *net_list)
{
- tc_action_net_exit(net_list, ipt_net_id);
+ tc_action_net_exit(net_list, act_ipt_ops.net_id);
}
static struct pernet_operations ipt_net_ops = {
.init = ipt_init_net,
.exit_batch = ipt_exit_net,
- .id = &ipt_net_id,
+ .id = &act_ipt_ops.net_id,
.size = sizeof(struct tc_action_net),
};
-static int tcf_xt_walker(struct net *net, struct sk_buff *skb,
- struct netlink_callback *cb, int type,
- const struct tc_action_ops *ops,
- struct netlink_ext_ack *extack)
-{
- struct tc_action_net *tn = net_generic(net, xt_net_id);
-
- return tcf_generic_walker(tn, skb, cb, type, ops, extack);
-}
-
-static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, xt_net_id);
-
- return tcf_idr_search(tn, a, index);
-}
-
static struct tc_action_ops act_xt_ops = {
.kind = "xt",
.id = TCA_ID_XT,
@@ -390,27 +351,25 @@ static struct tc_action_ops act_xt_ops = {
.dump = tcf_ipt_dump,
.cleanup = tcf_ipt_release,
.init = tcf_xt_init,
- .walk = tcf_xt_walker,
- .lookup = tcf_xt_search,
.size = sizeof(struct tcf_ipt),
};
static __net_init int xt_init_net(struct net *net)
{
- struct tc_action_net *tn = net_generic(net, xt_net_id);
+ struct tc_action_net *tn = net_generic(net, act_xt_ops.net_id);
return tc_action_net_init(net, tn, &act_xt_ops);
}
static void __net_exit xt_exit_net(struct list_head *net_list)
{
- tc_action_net_exit(net_list, xt_net_id);
+ tc_action_net_exit(net_list, act_xt_ops.net_id);
}
static struct pernet_operations xt_net_ops = {
.init = xt_init_net,
.exit_batch = xt_exit_net,
- .id = &xt_net_id,
+ .id = &act_xt_ops.net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index a1d70cf86843..b8ad6ae282c0 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -86,7 +86,6 @@ static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
[TCA_MIRRED_PARMS] = { .len = sizeof(struct tc_mirred) },
};
-static unsigned int mirred_net_id;
static struct tc_action_ops act_mirred_ops;
static int tcf_mirred_init(struct net *net, struct nlattr *nla,
@@ -94,7 +93,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
struct tcf_proto *tp,
u32 flags, struct netlink_ext_ack *extack)
{
- struct tc_action_net *tn = net_generic(net, mirred_net_id);
+ struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id);
bool bind = flags & TCA_ACT_FLAGS_BIND;
struct nlattr *tb[TCA_MIRRED_MAX + 1];
struct tcf_chain *goto_ch = NULL;
@@ -306,8 +305,7 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
/* let's the caller reinsert the packet, if possible */
if (use_reinsert) {
- res->ingress = want_ingress;
- err = tcf_mirred_forward(res->ingress, skb);
+ err = tcf_mirred_forward(want_ingress, skb);
if (err)
tcf_action_inc_overlimit_qstats(&m->common);
__this_cpu_dec(mirred_rec_level);
@@ -373,23 +371,6 @@ nla_put_failure:
return -1;
}
-static int tcf_mirred_walker(struct net *net, struct sk_buff *skb,
- struct netlink_callback *cb, int type,
- const struct tc_action_ops *ops,
- struct netlink_ext_ack *extack)
-{
- struct tc_action_net *tn = net_generic(net, mirred_net_id);
-
- return tcf_generic_walker(tn, skb, cb, type, ops, extack);
-}
-
-static int tcf_mirred_search(struct net *net, struct tc_action **a, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, mirred_net_id);
-
- return tcf_idr_search(tn, a, index);
-}
-
static int mirred_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
@@ -510,8 +491,6 @@ static struct tc_action_ops act_mirred_ops = {
.dump = tcf_mirred_dump,
.cleanup = tcf_mirred_release,
.init = tcf_mirred_init,
- .walk = tcf_mirred_walker,
- .lookup = tcf_mirred_search,
.get_fill_size = tcf_mirred_get_fill_size,
.offload_act_setup = tcf_mirred_offload_act_setup,
.size = sizeof(struct tcf_mirred),
@@ -520,20 +499,20 @@ static struct tc_action_ops act_mirred_ops = {
static __net_init int mirred_init_net(struct net *net)
{
- struct tc_action_net *tn = net_generic(net, mirred_net_id);
+ struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id);
return tc_action_net_init(net, tn, &act_mirred_ops);
}
static void __net_exit mirred_exit_net(struct list_head *net_list)
{
- tc_action_net_exit(net_list, mirred_net_id);
+ tc_action_net_exit(net_list, act_mirred_ops.net_id);
}
static struct pernet_operations mirred_net_ops = {
.init = mirred_init_net,
.exit_batch = mirred_exit_net,
- .id = &mirred_net_id,
+ .id = &act_mirred_ops.net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index adabeccb63e1..8ad25cc8ccd5 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -15,7 +15,6 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mpls.h>
-static unsigned int mpls_net_id;
static struct tc_action_ops act_mpls_ops;
#define ACT_MPLS_TTL_DEFAULT 255
@@ -155,7 +154,7 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
struct tcf_proto *tp, u32 flags,
struct netlink_ext_ack *extack)
{
- struct tc_action_net *tn = net_generic(net, mpls_net_id);
+ struct tc_action_net *tn = net_generic(net, act_mpls_ops.net_id);
bool bind = flags & TCA_ACT_FLAGS_BIND;
struct nlattr *tb[TCA_MPLS_MAX + 1];
struct tcf_chain *goto_ch = NULL;
@@ -367,23 +366,6 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int tcf_mpls_walker(struct net *net, struct sk_buff *skb,
- struct netlink_callback *cb, int type,
- const struct tc_action_ops *ops,
- struct netlink_ext_ack *extack)
-{
- struct tc_action_net *tn = net_generic(net, mpls_net_id);
-
- return tcf_generic_walker(tn, skb, cb, type, ops, extack);
-}
-
-static int tcf_mpls_search(struct net *net, struct tc_action **a, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, mpls_net_id);
-
- return tcf_idr_search(tn, a, index);
-}
-
static int tcf_mpls_offload_act_setup(struct tc_action *act, void *entry_data,
u32 *index_inc, bool bind,
struct netlink_ext_ack *extack)
@@ -451,28 +433,26 @@ static struct tc_action_ops act_mpls_ops = {
.dump = tcf_mpls_dump,
.init = tcf_mpls_init,
.cleanup = tcf_mpls_cleanup,
- .walk = tcf_mpls_walker,
- .lookup = tcf_mpls_search,
.offload_act_setup = tcf_mpls_offload_act_setup,
.size = sizeof(struct tcf_mpls),
};
static __net_init int mpls_init_net(struct net *net)
{
- struct tc_action_net *tn = net_generic(net, mpls_net_id);
+ struct tc_action_net *tn = net_generic(net, act_mpls_ops.net_id);
return tc_action_net_init(net, tn, &act_mpls_ops);
}
static void __net_exit mpls_exit_net(struct list_head *net_list)
{
- tc_action_net_exit(net_list, mpls_net_id);
+ tc_action_net_exit(net_list, act_mpls_ops.net_id);
}
static struct pernet_operations mpls_net_ops = {
.init = mpls_init_net,
.exit_batch = mpls_exit_net,
- .id = &mpls_net_id,
+ .id = &act_mpls_ops.net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 2a39b3729e84..9265145f1040 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -26,7 +26,6 @@
#include <net/udp.h>
-static unsigned int nat_net_id;
static struct tc_action_ops act_nat_ops;
static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
@@ -37,7 +36,7 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
struct tc_action **a, struct tcf_proto *tp,
u32 flags, struct netlink_ext_ack *extack)
{
- struct tc_action_net *tn = net_generic(net, nat_net_id);
+ struct tc_action_net *tn = net_generic(net, act_nat_ops.net_id);
bool bind = flags & TCA_ACT_FLAGS_BIND;
struct nlattr *tb[TCA_NAT_MAX + 1];
struct tcf_chain *goto_ch = NULL;
@@ -289,23 +288,6 @@ nla_put_failure:
return -1;
}
-static int tcf_nat_walker(struct net *net, struct sk_buff *skb,
- struct netlink_callback *cb, int type,
- const struct tc_action_ops *ops,
- struct netlink_ext_ack *extack)
-{
- struct tc_action_net *tn = net_generic(net, nat_net_id);
-
- return tcf_generic_walker(tn, skb, cb, type, ops, extack);
-}
-
-static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, nat_net_id);
-
- return tcf_idr_search(tn, a, index);
-}
-
static struct tc_action_ops act_nat_ops = {
.kind = "nat",
.id = TCA_ID_NAT,
@@ -313,27 +295,25 @@ static struct tc_action_ops act_nat_ops = {
.act = tcf_nat_act,
.dump = tcf_nat_dump,
.init = tcf_nat_init,
- .walk = tcf_nat_walker,
- .lookup = tcf_nat_search,
.size = sizeof(struct tcf_nat),
};
static __net_init int nat_init_net(struct net *net)
{
- struct tc_action_net *tn = net_generic(net, nat_net_id);
+ struct tc_action_net *tn = net_generic(net, act_nat_ops.net_id);
return tc_action_net_init(net, tn, &act_nat_ops);
}
static void __net_exit nat_exit_net(struct list_head *net_list)
{
- tc_action_net_exit(net_list, nat_net_id);
+ tc_action_net_exit(net_list, act_nat_ops.net_id);
}
static struct pernet_operations nat_net_ops = {
.init = nat_init_net,
.exit_batch = nat_exit_net,
- .id = &nat_net_id,
+ .id = &act_nat_ops.net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 823ee643371c..94ed5857ce67 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -21,7 +21,6 @@
#include <uapi/linux/tc_act/tc_pedit.h>
#include <net/pkt_cls.h>
-static unsigned int pedit_net_id;
static struct tc_action_ops act_pedit_ops;
static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = {
@@ -139,7 +138,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
struct tcf_proto *tp, u32 flags,
struct netlink_ext_ack *extack)
{
- struct tc_action_net *tn = net_generic(net, pedit_net_id);
+ struct tc_action_net *tn = net_generic(net, act_pedit_ops.net_id);
bool bind = flags & TCA_ACT_FLAGS_BIND;
struct nlattr *tb[TCA_PEDIT_MAX + 1];
struct tcf_chain *goto_ch = NULL;
@@ -492,23 +491,6 @@ nla_put_failure:
return -1;
}
-static int tcf_pedit_walker(struct net *net, struct sk_buff *skb,
- struct netlink_callback *cb, int type,
- const struct tc_action_ops *ops,
- struct netlink_ext_ack *extack)
-{
- struct tc_action_net *tn = net_generic(net, pedit_net_id);
-
- return tcf_generic_walker(tn, skb, cb, type, ops, extack);
-}
-
-static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, pedit_net_id);
-
- return tcf_idr_search(tn, a, index);
-}
-
static int tcf_pedit_offload_act_setup(struct tc_action *act, void *entry_data,
u32 *index_inc, bool bind,
struct netlink_ext_ack *extack)
@@ -553,28 +535,26 @@ static struct tc_action_ops act_pedit_ops = {
.dump = tcf_pedit_dump,
.cleanup = tcf_pedit_cleanup,
.init = tcf_pedit_init,
- .walk = tcf_pedit_walker,
- .lookup = tcf_pedit_search,
.offload_act_setup = tcf_pedit_offload_act_setup,
.size = sizeof(struct tcf_pedit),
};
static __net_init int pedit_init_net(struct net *net)
{
- struct tc_action_net *tn = net_generic(net, pedit_net_id);
+ struct tc_action_net *tn = net_generic(net, act_pedit_ops.net_id);
return tc_action_net_init(net, tn, &act_pedit_ops);
}
static void __net_exit pedit_exit_net(struct list_head *net_list)
{
- tc_action_net_exit(net_list, pedit_net_id);
+ tc_action_net_exit(net_list, act_pedit_ops.net_id);
}
static struct pernet_operations pedit_net_ops = {
.init = pedit_init_net,
.exit_batch = pedit_exit_net,
- .id = &pedit_net_id,
+ .id = &act_pedit_ops.net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index b759628a47c2..0adb26e366a7 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -22,19 +22,8 @@
/* Each policer is serialized by its individual spinlock */
-static unsigned int police_net_id;
static struct tc_action_ops act_police_ops;
-static int tcf_police_walker(struct net *net, struct sk_buff *skb,
- struct netlink_callback *cb, int type,
- const struct tc_action_ops *ops,
- struct netlink_ext_ack *extack)
-{
- struct tc_action_net *tn = net_generic(net, police_net_id);
-
- return tcf_generic_walker(tn, skb, cb, type, ops, extack);
-}
-
static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
[TCA_POLICE_RATE] = { .len = TC_RTAB_SIZE },
[TCA_POLICE_PEAKRATE] = { .len = TC_RTAB_SIZE },
@@ -58,7 +47,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
struct tc_police *parm;
struct tcf_police *police;
struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
- struct tc_action_net *tn = net_generic(net, police_net_id);
+ struct tc_action_net *tn = net_generic(net, act_police_ops.net_id);
struct tcf_police_params *new;
bool exists = false;
u32 index;
@@ -412,13 +401,6 @@ nla_put_failure:
return -1;
}
-static int tcf_police_search(struct net *net, struct tc_action **a, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, police_net_id);
-
- return tcf_idr_search(tn, a, index);
-}
-
static int tcf_police_act_to_flow_act(int tc_act, u32 *extval,
struct netlink_ext_ack *extack)
{
@@ -513,8 +495,6 @@ static struct tc_action_ops act_police_ops = {
.act = tcf_police_act,
.dump = tcf_police_dump,
.init = tcf_police_init,
- .walk = tcf_police_walker,
- .lookup = tcf_police_search,
.cleanup = tcf_police_cleanup,
.offload_act_setup = tcf_police_offload_act_setup,
.size = sizeof(struct tcf_police),
@@ -522,20 +502,20 @@ static struct tc_action_ops act_police_ops = {
static __net_init int police_init_net(struct net *net)
{
- struct tc_action_net *tn = net_generic(net, police_net_id);
+ struct tc_action_net *tn = net_generic(net, act_police_ops.net_id);
return tc_action_net_init(net, tn, &act_police_ops);
}
static void __net_exit police_exit_net(struct list_head *net_list)
{
- tc_action_net_exit(net_list, police_net_id);
+ tc_action_net_exit(net_list, act_police_ops.net_id);
}
static struct pernet_operations police_net_ops = {
.init = police_init_net,
.exit_batch = police_exit_net,
- .id = &police_net_id,
+ .id = &act_police_ops.net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 2f7f5e44d28c..5ba36f70e3a1 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -23,7 +23,6 @@
#include <linux/if_arp.h>
-static unsigned int sample_net_id;
static struct tc_action_ops act_sample_ops;
static const struct nla_policy sample_policy[TCA_SAMPLE_MAX + 1] = {
@@ -38,7 +37,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
struct tcf_proto *tp,
u32 flags, struct netlink_ext_ack *extack)
{
- struct tc_action_net *tn = net_generic(net, sample_net_id);
+ struct tc_action_net *tn = net_generic(net, act_sample_ops.net_id);
bool bind = flags & TCA_ACT_FLAGS_BIND;
struct nlattr *tb[TCA_SAMPLE_MAX + 1];
struct psample_group *psample_group;
@@ -241,23 +240,6 @@ nla_put_failure:
return -1;
}
-static int tcf_sample_walker(struct net *net, struct sk_buff *skb,
- struct netlink_callback *cb, int type,
- const struct tc_action_ops *ops,
- struct netlink_ext_ack *extack)
-{
- struct tc_action_net *tn = net_generic(net, sample_net_id);
-
- return tcf_generic_walker(tn, skb, cb, type, ops, extack);
-}
-
-static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, sample_net_id);
-
- return tcf_idr_search(tn, a, index);
-}
-
static void tcf_psample_group_put(void *priv)
{
struct psample_group *group = priv;
@@ -321,8 +303,6 @@ static struct tc_action_ops act_sample_ops = {
.dump = tcf_sample_dump,
.init = tcf_sample_init,
.cleanup = tcf_sample_cleanup,
- .walk = tcf_sample_walker,
- .lookup = tcf_sample_search,
.get_psample_group = tcf_sample_get_group,
.offload_act_setup = tcf_sample_offload_act_setup,
.size = sizeof(struct tcf_sample),
@@ -330,20 +310,20 @@ static struct tc_action_ops act_sample_ops = {
static __net_init int sample_init_net(struct net *net)
{
- struct tc_action_net *tn = net_generic(net, sample_net_id);
+ struct tc_action_net *tn = net_generic(net, act_sample_ops.net_id);
return tc_action_net_init(net, tn, &act_sample_ops);
}
static void __net_exit sample_exit_net(struct list_head *net_list)
{
- tc_action_net_exit(net_list, sample_net_id);
+ tc_action_net_exit(net_list, act_sample_ops.net_id);
}
static struct pernet_operations sample_net_ops = {
.init = sample_init_net,
.exit_batch = sample_exit_net,
- .id = &sample_net_id,
+ .id = &act_sample_ops.net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 8c1d60bde93e..18d376135461 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -18,7 +18,6 @@
#include <linux/tc_act/tc_defact.h>
#include <net/tc_act/tc_defact.h>
-static unsigned int simp_net_id;
static struct tc_action_ops act_simp_ops;
#define SIMP_MAX_DATA 32
@@ -89,7 +88,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
struct tcf_proto *tp, u32 flags,
struct netlink_ext_ack *extack)
{
- struct tc_action_net *tn = net_generic(net, simp_net_id);
+ struct tc_action_net *tn = net_generic(net, act_simp_ops.net_id);
bool bind = flags & TCA_ACT_FLAGS_BIND;
struct nlattr *tb[TCA_DEF_MAX + 1];
struct tcf_chain *goto_ch = NULL;
@@ -198,23 +197,6 @@ nla_put_failure:
return -1;
}
-static int tcf_simp_walker(struct net *net, struct sk_buff *skb,
- struct netlink_callback *cb, int type,
- const struct tc_action_ops *ops,
- struct netlink_ext_ack *extack)
-{
- struct tc_action_net *tn = net_generic(net, simp_net_id);
-
- return tcf_generic_walker(tn, skb, cb, type, ops, extack);
-}
-
-static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, simp_net_id);
-
- return tcf_idr_search(tn, a, index);
-}
-
static struct tc_action_ops act_simp_ops = {
.kind = "simple",
.id = TCA_ID_SIMP,
@@ -223,27 +205,25 @@ static struct tc_action_ops act_simp_ops = {
.dump = tcf_simp_dump,
.cleanup = tcf_simp_release,
.init = tcf_simp_init,
- .walk = tcf_simp_walker,
- .lookup = tcf_simp_search,
.size = sizeof(struct tcf_defact),
};
static __net_init int simp_init_net(struct net *net)
{
- struct tc_action_net *tn = net_generic(net, simp_net_id);
+ struct tc_action_net *tn = net_generic(net, act_simp_ops.net_id);
return tc_action_net_init(net, tn, &act_simp_ops);
}
static void __net_exit simp_exit_net(struct list_head *net_list)
{
- tc_action_net_exit(net_list, simp_net_id);
+ tc_action_net_exit(net_list, act_simp_ops.net_id);
}
static struct pernet_operations simp_net_ops = {
.init = simp_init_net,
.exit_batch = simp_exit_net,
- .id = &simp_net_id,
+ .id = &act_simp_ops.net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index e3bd11dfe1ca..7f598784fd30 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -20,7 +20,6 @@
#include <linux/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_skbedit.h>
-static unsigned int skbedit_net_id;
static struct tc_action_ops act_skbedit_ops;
static u16 tcf_skbedit_hash(struct tcf_skbedit_params *params,
@@ -118,7 +117,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
struct tcf_proto *tp, u32 act_flags,
struct netlink_ext_ack *extack)
{
- struct tc_action_net *tn = net_generic(net, skbedit_net_id);
+ struct tc_action_net *tn = net_generic(net, act_skbedit_ops.net_id);
bool bind = act_flags & TCA_ACT_FLAGS_BIND;
struct tcf_skbedit_params *params_new;
struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
@@ -347,23 +346,6 @@ static void tcf_skbedit_cleanup(struct tc_action *a)
kfree_rcu(params, rcu);
}
-static int tcf_skbedit_walker(struct net *net, struct sk_buff *skb,
- struct netlink_callback *cb, int type,
- const struct tc_action_ops *ops,
- struct netlink_ext_ack *extack)
-{
- struct tc_action_net *tn = net_generic(net, skbedit_net_id);
-
- return tcf_generic_walker(tn, skb, cb, type, ops, extack);
-}
-
-static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, skbedit_net_id);
-
- return tcf_idr_search(tn, a, index);
-}
-
static size_t tcf_skbedit_get_fill_size(const struct tc_action *act)
{
return nla_total_size(sizeof(struct tc_skbedit))
@@ -428,29 +410,27 @@ static struct tc_action_ops act_skbedit_ops = {
.dump = tcf_skbedit_dump,
.init = tcf_skbedit_init,
.cleanup = tcf_skbedit_cleanup,
- .walk = tcf_skbedit_walker,
.get_fill_size = tcf_skbedit_get_fill_size,
- .lookup = tcf_skbedit_search,
.offload_act_setup = tcf_skbedit_offload_act_setup,
.size = sizeof(struct tcf_skbedit),
};
static __net_init int skbedit_init_net(struct net *net)
{
- struct tc_action_net *tn = net_generic(net, skbedit_net_id);
+ struct tc_action_net *tn = net_generic(net, act_skbedit_ops.net_id);
return tc_action_net_init(net, tn, &act_skbedit_ops);
}
static void __net_exit skbedit_exit_net(struct list_head *net_list)
{
- tc_action_net_exit(net_list, skbedit_net_id);
+ tc_action_net_exit(net_list, act_skbedit_ops.net_id);
}
static struct pernet_operations skbedit_net_ops = {
.init = skbedit_init_net,
.exit_batch = skbedit_exit_net,
- .id = &skbedit_net_id,
+ .id = &act_skbedit_ops.net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 2083612d8780..d98758a63934 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -19,7 +19,6 @@
#include <linux/tc_act/tc_skbmod.h>
#include <net/tc_act/tc_skbmod.h>
-static unsigned int skbmod_net_id;
static struct tc_action_ops act_skbmod_ops;
static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a,
@@ -103,7 +102,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
struct tcf_proto *tp, u32 flags,
struct netlink_ext_ack *extack)
{
- struct tc_action_net *tn = net_generic(net, skbmod_net_id);
+ struct tc_action_net *tn = net_generic(net, act_skbmod_ops.net_id);
bool ovr = flags & TCA_ACT_FLAGS_REPLACE;
bool bind = flags & TCA_ACT_FLAGS_BIND;
struct nlattr *tb[TCA_SKBMOD_MAX + 1];
@@ -276,23 +275,6 @@ nla_put_failure:
return -1;
}
-static int tcf_skbmod_walker(struct net *net, struct sk_buff *skb,
- struct netlink_callback *cb, int type,
- const struct tc_action_ops *ops,
- struct netlink_ext_ack *extack)
-{
- struct tc_action_net *tn = net_generic(net, skbmod_net_id);
-
- return tcf_generic_walker(tn, skb, cb, type, ops, extack);
-}
-
-static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, skbmod_net_id);
-
- return tcf_idr_search(tn, a, index);
-}
-
static struct tc_action_ops act_skbmod_ops = {
.kind = "skbmod",
.id = TCA_ACT_SKBMOD,
@@ -301,27 +283,25 @@ static struct tc_action_ops act_skbmod_ops = {
.dump = tcf_skbmod_dump,
.init = tcf_skbmod_init,
.cleanup = tcf_skbmod_cleanup,
- .walk = tcf_skbmod_walker,
- .lookup = tcf_skbmod_search,
.size = sizeof(struct tcf_skbmod),
};
static __net_init int skbmod_init_net(struct net *net)
{
- struct tc_action_net *tn = net_generic(net, skbmod_net_id);
+ struct tc_action_net *tn = net_generic(net, act_skbmod_ops.net_id);
return tc_action_net_init(net, tn, &act_skbmod_ops);
}
static void __net_exit skbmod_exit_net(struct list_head *net_list)
{
- tc_action_net_exit(net_list, skbmod_net_id);
+ tc_action_net_exit(net_list, act_skbmod_ops.net_id);
}
static struct pernet_operations skbmod_net_ops = {
.init = skbmod_init_net,
.exit_batch = skbmod_exit_net,
- .id = &skbmod_net_id,
+ .id = &act_skbmod_ops.net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 856dc23cef8c..2691a3d8e451 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -20,7 +20,6 @@
#include <linux/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_tunnel_key.h>
-static unsigned int tunnel_key_net_id;
static struct tc_action_ops act_tunnel_key_ops;
static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
@@ -358,7 +357,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
struct tcf_proto *tp, u32 act_flags,
struct netlink_ext_ack *extack)
{
- struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
+ struct tc_action_net *tn = net_generic(net, act_tunnel_key_ops.net_id);
bool bind = act_flags & TCA_ACT_FLAGS_BIND;
struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
struct tcf_tunnel_key_params *params_new;
@@ -770,23 +769,6 @@ nla_put_failure:
return -1;
}
-static int tunnel_key_walker(struct net *net, struct sk_buff *skb,
- struct netlink_callback *cb, int type,
- const struct tc_action_ops *ops,
- struct netlink_ext_ack *extack)
-{
- struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
-
- return tcf_generic_walker(tn, skb, cb, type, ops, extack);
-}
-
-static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
-
- return tcf_idr_search(tn, a, index);
-}
-
static void tcf_tunnel_encap_put_tunnel(void *priv)
{
struct ip_tunnel_info *tunnel = priv;
@@ -850,28 +832,26 @@ static struct tc_action_ops act_tunnel_key_ops = {
.dump = tunnel_key_dump,
.init = tunnel_key_init,
.cleanup = tunnel_key_release,
- .walk = tunnel_key_walker,
- .lookup = tunnel_key_search,
.offload_act_setup = tcf_tunnel_key_offload_act_setup,
.size = sizeof(struct tcf_tunnel_key),
};
static __net_init int tunnel_key_init_net(struct net *net)
{
- struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
+ struct tc_action_net *tn = net_generic(net, act_tunnel_key_ops.net_id);
return tc_action_net_init(net, tn, &act_tunnel_key_ops);
}
static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
{
- tc_action_net_exit(net_list, tunnel_key_net_id);
+ tc_action_net_exit(net_list, act_tunnel_key_ops.net_id);
}
static struct pernet_operations tunnel_key_net_ops = {
.init = tunnel_key_init_net,
.exit_batch = tunnel_key_exit_net,
- .id = &tunnel_key_net_id,
+ .id = &act_tunnel_key_ops.net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 68b5e772386a..7b24e898a3e6 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -16,7 +16,6 @@
#include <linux/tc_act/tc_vlan.h>
#include <net/tc_act/tc_vlan.h>
-static unsigned int vlan_net_id;
static struct tc_action_ops act_vlan_ops;
static int tcf_vlan_act(struct sk_buff *skb, const struct tc_action *a,
@@ -117,7 +116,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
struct tcf_proto *tp, u32 flags,
struct netlink_ext_ack *extack)
{
- struct tc_action_net *tn = net_generic(net, vlan_net_id);
+ struct tc_action_net *tn = net_generic(net, act_vlan_ops.net_id);
bool bind = flags & TCA_ACT_FLAGS_BIND;
struct nlattr *tb[TCA_VLAN_MAX + 1];
struct tcf_chain *goto_ch = NULL;
@@ -333,16 +332,6 @@ nla_put_failure:
return -1;
}
-static int tcf_vlan_walker(struct net *net, struct sk_buff *skb,
- struct netlink_callback *cb, int type,
- const struct tc_action_ops *ops,
- struct netlink_ext_ack *extack)
-{
- struct tc_action_net *tn = net_generic(net, vlan_net_id);
-
- return tcf_generic_walker(tn, skb, cb, type, ops, extack);
-}
-
static void tcf_vlan_stats_update(struct tc_action *a, u64 bytes, u64 packets,
u64 drops, u64 lastuse, bool hw)
{
@@ -353,13 +342,6 @@ static void tcf_vlan_stats_update(struct tc_action *a, u64 bytes, u64 packets,
tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}
-static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, vlan_net_id);
-
- return tcf_idr_search(tn, a, index);
-}
-
static size_t tcf_vlan_get_fill_size(const struct tc_action *act)
{
return nla_total_size(sizeof(struct tc_vlan))
@@ -438,30 +420,28 @@ static struct tc_action_ops act_vlan_ops = {
.dump = tcf_vlan_dump,
.init = tcf_vlan_init,
.cleanup = tcf_vlan_cleanup,
- .walk = tcf_vlan_walker,
.stats_update = tcf_vlan_stats_update,
.get_fill_size = tcf_vlan_get_fill_size,
- .lookup = tcf_vlan_search,
.offload_act_setup = tcf_vlan_offload_act_setup,
.size = sizeof(struct tcf_vlan),
};
static __net_init int vlan_init_net(struct net *net)
{
- struct tc_action_net *tn = net_generic(net, vlan_net_id);
+ struct tc_action_net *tn = net_generic(net, act_vlan_ops.net_id);
return tc_action_net_init(net, tn, &act_vlan_ops);
}
static void __net_exit vlan_exit_net(struct list_head *net_list)
{
- tc_action_net_exit(net_list, vlan_net_id);
+ tc_action_net_exit(net_list, act_vlan_ops.net_id);
}
static struct pernet_operations vlan_net_ops = {
.init = vlan_init_net,
.exit_batch = vlan_exit_net,
- .id = &vlan_net_id,
+ .id = &act_vlan_ops.net_id,
.size = sizeof(struct tc_action_net),
};
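[Editor's note] The act_simple, act_skbedit, act_skbmod, act_tunnel_key and act_vlan hunks above all repeat one mechanical conversion: the file-local *_net_id variable goes away, the pernet id is read from the module's own struct tc_action_ops (whose net_id member is filled in when the pernet operations are registered), and the per-action .walk/.lookup callbacks are dropped because the action core can now reach the per-netns table generically through ops->net_id. A minimal sketch of the resulting shape for a hypothetical action module ("foo" is a stand-in name, not part of this diff; unrelated ops fields are elided):

#include <net/act_api.h>
#include <net/netns/generic.h>

static struct tc_action_ops act_foo_ops;	/* .kind, .act, .init, ... elided */

static __net_init int foo_init_net(struct net *net)
{
	/* The pernet id now lives in the ops structure itself. */
	struct tc_action_net *tn = net_generic(net, act_foo_ops.net_id);

	return tc_action_net_init(net, tn, &act_foo_ops);
}

static void __net_exit foo_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_foo_ops.net_id);
}

static struct pernet_operations foo_net_ops = {
	.init		= foo_init_net,
	.exit_batch	= foo_exit_net,
	.id		= &act_foo_ops.net_id,	/* assigned during pernet registration */
	.size		= sizeof(struct tc_action_net),
};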
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 51d175f3fbcb..50566db45949 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1977,9 +1977,6 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
bool rtnl_held = false;
u32 flags;
- if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
- return -EPERM;
-
replay:
tp_created = 0;
@@ -2209,9 +2206,6 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
int err;
bool rtnl_held = false;
- if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
- return -EPERM;
-
err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
rtm_tca_policy, extack);
if (err < 0)
@@ -2827,10 +2821,6 @@ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
unsigned long cl;
int err;
- if (n->nlmsg_type != RTM_GETCHAIN &&
- !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
- return -EPERM;
-
replay:
q = NULL;
err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
@@ -3640,9 +3630,6 @@ int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
if (err)
return err;
- if (!block_index)
- return 0;
-
qe->info.binder_type = binder_type;
qe->info.chain_head_change = tcf_chain_head_change_dflt;
qe->info.chain_head_change_priv = &qe->filter_chain;
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 8158fc9ee1ab..d229ce99e554 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -251,15 +251,8 @@ static void basic_walk(struct tcf_proto *tp, struct tcf_walker *arg,
struct basic_filter *f;
list_for_each_entry(f, &head->flist, link) {
- if (arg->count < arg->skip)
- goto skip;
-
- if (arg->fn(tp, f, arg) < 0) {
- arg->stop = 1;
+ if (!tc_cls_stats_dump(tp, arg, f))
break;
- }
-skip:
- arg->count++;
}
}
@@ -268,12 +261,7 @@ static void basic_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
{
struct basic_filter *f = fh;
- if (f && f->res.classid == classid) {
- if (cl)
- __tcf_bind_filter(q, &f->res, base);
- else
- __tcf_unbind_filter(q, &f->res);
- }
+ tc_cls_bind_class(classid, cl, q, &f->res, base);
}
static int basic_dump(struct net *net, struct tcf_proto *tp, void *fh,
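[Editor's note] The classifier hunks that follow (cls_bpf, cls_flow, cls_flower, cls_fw, cls_matchall, cls_route, cls_rsvp, cls_tcindex, cls_u32) repeat the cls_basic pattern above: the open-coded skip/count/stop bookkeeping in ->walk() becomes tc_cls_stats_dump(), and the open-coded bind/unbind logic in ->bind_class() becomes tc_cls_bind_class(). A plausible shape of these helpers, reconstructed from the call sites in this diff rather than quoted from include/net/pkt_cls.h:

static inline bool tc_cls_stats_dump(struct tcf_proto *tp,
				     struct tcf_walker *arg,
				     void *filter)
{
	/* Honour the dump cursor: skip, stop on callback error, count. */
	if (arg->count >= arg->skip && arg->fn(tp, filter, arg) < 0) {
		arg->stop = 1;
		return false;
	}

	arg->count++;
	return true;
}

static inline void tc_cls_bind_class(u32 classid, unsigned long cl,
				     void *q, struct tcf_result *res,
				     unsigned long base)
{
	if (res->classid != classid)
		return;

	if (cl)
		__tcf_bind_filter(q, res, base);
	else
		__tcf_unbind_filter(q, res);
}

Note that the NULL check on fh that the old open-coded bind_class implementations carried is hoisted into the caller: see the "if (n && tp->ops->bind_class)" hunk in tcf_node_bind() in sch_api.c further down.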
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index c85b85a192bf..bc317b3eac12 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -635,12 +635,7 @@ static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl,
{
struct cls_bpf_prog *prog = fh;
- if (prog && prog->res.classid == classid) {
- if (cl)
- __tcf_bind_filter(q, &prog->res, base);
- else
- __tcf_unbind_filter(q, &prog->res);
- }
+ tc_cls_bind_class(classid, cl, q, &prog->res, base);
}
static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg,
@@ -650,14 +645,8 @@ static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg,
struct cls_bpf_prog *prog;
list_for_each_entry(prog, &head->plist, link) {
- if (arg->count < arg->skip)
- goto skip;
- if (arg->fn(tp, prog, arg) < 0) {
- arg->stop = 1;
+ if (!tc_cls_stats_dump(tp, arg, prog))
break;
- }
-skip:
- arg->count++;
}
}
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 972303aa8edd..014cd3de7b5d 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -683,14 +683,8 @@ static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg,
struct flow_filter *f;
list_for_each_entry(f, &head->filters, list) {
- if (arg->count < arg->skip)
- goto skip;
- if (arg->fn(tp, f, arg) < 0) {
- arg->stop = 1;
+ if (!tc_cls_stats_dump(tp, arg, f))
break;
- }
-skip:
- arg->count++;
}
}
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 041d63ff809a..25bc57ee6ea1 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -69,6 +69,7 @@ struct fl_flow_key {
struct flow_dissector_key_hash hash;
struct flow_dissector_key_num_of_vlans num_of_vlans;
struct flow_dissector_key_pppoe pppoe;
+ struct flow_dissector_key_l2tpv3 l2tpv3;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
struct fl_flow_mask_range {
@@ -712,6 +713,7 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
[TCA_FLOWER_KEY_NUM_OF_VLANS] = { .type = NLA_U8 },
[TCA_FLOWER_KEY_PPPOE_SID] = { .type = NLA_U16 },
[TCA_FLOWER_KEY_PPP_PROTO] = { .type = NLA_U16 },
+ [TCA_FLOWER_KEY_L2TPV3_SID] = { .type = NLA_U32 },
};
@@ -1790,6 +1792,11 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
sizeof(key->arp.tha));
+ } else if (key->basic.ip_proto == IPPROTO_L2TP) {
+ fl_set_key_val(tb, &key->l2tpv3.session_id,
+ TCA_FLOWER_KEY_L2TPV3_SID,
+ &mask->l2tpv3.session_id, TCA_FLOWER_UNSPEC,
+ sizeof(key->l2tpv3.session_id));
}
if (key->basic.ip_proto == IPPROTO_TCP ||
@@ -1970,6 +1977,8 @@ static void fl_init_dissector(struct flow_dissector *dissector,
FLOW_DISSECTOR_KEY_NUM_OF_VLANS, num_of_vlans);
FL_KEY_SET_IF_MASKED(mask, keys, cnt,
FLOW_DISSECTOR_KEY_PPPOE, pppoe);
+ FL_KEY_SET_IF_MASKED(mask, keys, cnt,
+ FLOW_DISSECTOR_KEY_L2TPV3, l2tpv3);
skb_flow_dissector_init(dissector, keys, cnt);
}
@@ -3196,6 +3205,13 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net,
mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
sizeof(key->arp.tha))))
goto nla_put_failure;
+ else if (key->basic.ip_proto == IPPROTO_L2TP &&
+ fl_dump_key_val(skb, &key->l2tpv3.session_id,
+ TCA_FLOWER_KEY_L2TPV3_SID,
+ &mask->l2tpv3.session_id,
+ TCA_FLOWER_UNSPEC,
+ sizeof(key->l2tpv3.session_id)))
+ goto nla_put_failure;
if ((key->basic.ip_proto == IPPROTO_TCP ||
key->basic.ip_proto == IPPROTO_UDP ||
@@ -3389,12 +3405,7 @@ static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
{
struct cls_fl_filter *f = fh;
- if (f && f->res.classid == classid) {
- if (cl)
- __tcf_bind_filter(q, &f->res, base);
- else
- __tcf_unbind_filter(q, &f->res);
- }
+ tc_cls_bind_class(classid, cl, q, &f->res, base);
}
static bool fl_delete_empty(struct tcf_proto *tp)
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 8654b0ce997c..a32351da968c 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -358,15 +358,8 @@ static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg,
for (f = rtnl_dereference(head->ht[h]); f;
f = rtnl_dereference(f->next)) {
- if (arg->count < arg->skip) {
- arg->count++;
- continue;
- }
- if (arg->fn(tp, f, arg) < 0) {
- arg->stop = 1;
+ if (!tc_cls_stats_dump(tp, arg, f))
return;
- }
- arg->count++;
}
}
}
@@ -423,12 +416,7 @@ static void fw_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
{
struct fw_filter *f = fh;
- if (f && f->res.classid == classid) {
- if (cl)
- __tcf_bind_filter(q, &f->res, base);
- else
- __tcf_unbind_filter(q, &f->res);
- }
+ tc_cls_bind_class(classid, cl, q, &f->res, base);
}
static struct tcf_proto_ops cls_fw_ops __read_mostly = {
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 06cf22adbab7..39a5d9c170de 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -313,10 +313,7 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
tc_cleanup_offload_action(&cls_mall.rule->action);
kfree(cls_mall.rule);
- if (err)
- return err;
-
- return 0;
+ return err;
}
static void mall_stats_hw_filter(struct tcf_proto *tp,
@@ -397,12 +394,7 @@ static void mall_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
{
struct cls_mall_head *head = fh;
- if (head && head->res.classid == classid) {
- if (cl)
- __tcf_bind_filter(q, &head->res, base);
- else
- __tcf_unbind_filter(q, &head->res);
- }
+ tc_cls_bind_class(classid, cl, q, &head->res, base);
}
static struct tcf_proto_ops cls_mall_ops __read_mostly = {
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 48712bc51bda..9e43b929d4ca 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -488,7 +488,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
}
if (opt == NULL)
- return handle ? -EINVAL : 0;
+ return -EINVAL;
err = nla_parse_nested_deprecated(tb, TCA_ROUTE4_MAX, opt,
route4_policy, NULL);
@@ -496,7 +496,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
return err;
fold = *arg;
- if (fold && handle && fold->handle != handle)
+ if (fold && fold->handle != handle)
return -EINVAL;
err = -ENOBUFS;
@@ -587,15 +587,8 @@ static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg,
for (f = rtnl_dereference(b->ht[h1]);
f;
f = rtnl_dereference(f->next)) {
- if (arg->count < arg->skip) {
- arg->count++;
- continue;
- }
- if (arg->fn(tp, f, arg) < 0) {
- arg->stop = 1;
+ if (!tc_cls_stats_dump(tp, arg, f))
return;
- }
- arg->count++;
}
}
}
@@ -656,12 +649,7 @@ static void route4_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
{
struct route4_filter *f = fh;
- if (f && f->res.classid == classid) {
- if (cl)
- __tcf_bind_filter(q, &f->res, base);
- else
- __tcf_unbind_filter(q, &f->res);
- }
+ tc_cls_bind_class(classid, cl, q, &f->res, base);
}
static struct tcf_proto_ops cls_route4_ops __read_mostly = {
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 5cd9d6b143c4..b00a7dbd0587 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -671,15 +671,8 @@ static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg,
for (f = rtnl_dereference(s->ht[h1]); f;
f = rtnl_dereference(f->next)) {
- if (arg->count < arg->skip) {
- arg->count++;
- continue;
- }
- if (arg->fn(tp, f, arg) < 0) {
- arg->stop = 1;
+ if (!tc_cls_stats_dump(tp, arg, f))
return;
- }
- arg->count++;
}
}
}
@@ -740,12 +733,7 @@ static void rsvp_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
{
struct rsvp_filter *f = fh;
- if (f && f->res.classid == classid) {
- if (cl)
- __tcf_bind_filter(q, &f->res, base);
- else
- __tcf_unbind_filter(q, &f->res);
- }
+ tc_cls_bind_class(classid, cl, q, &f->res, base);
}
static struct tcf_proto_ops RSVP_OPS __read_mostly = {
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 742c7d49a958..1c9eeb98d826 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -566,13 +566,8 @@ static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker,
for (i = 0; i < p->hash; i++) {
if (!p->perfect[i].res.class)
continue;
- if (walker->count >= walker->skip) {
- if (walker->fn(tp, p->perfect + i, walker) < 0) {
- walker->stop = 1;
- return;
- }
- }
- walker->count++;
+ if (!tc_cls_stats_dump(tp, walker, p->perfect + i))
+ return;
}
}
if (!p->h)
@@ -580,13 +575,8 @@ static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker,
for (i = 0; i < p->hash; i++) {
for (f = rtnl_dereference(p->h[i]); f; f = next) {
next = rtnl_dereference(f->next);
- if (walker->count >= walker->skip) {
- if (walker->fn(tp, &f->result, walker) < 0) {
- walker->stop = 1;
- return;
- }
- }
- walker->count++;
+ if (!tc_cls_stats_dump(tp, walker, &f->result))
+ return;
}
}
}
@@ -701,12 +691,7 @@ static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl,
{
struct tcindex_filter_result *r = fh;
- if (r && r->res.classid == classid) {
- if (cl)
- __tcf_bind_filter(q, &r->res, base);
- else
- __tcf_unbind_filter(q, &r->res);
- }
+ tc_cls_bind_class(classid, cl, q, &r->res, base);
}
static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 4d27300c287c..34d25f7a0687 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -1040,7 +1040,11 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
}
#endif
- memcpy(&n->sel, s, sel_size);
+ unsafe_memcpy(&n->sel, s, sel_size,
+ /* A composite flex-array structure destination,
+ * which was correctly sized with struct_size(),
+ * bounds-checked against nla_len(), and allocated
+ * above. */);
RCU_INIT_POINTER(n->ht_up, ht);
n->handle = handle;
n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
@@ -1125,26 +1129,16 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
ht = rtnl_dereference(ht->next)) {
if (ht->prio != tp->prio)
continue;
- if (arg->count >= arg->skip) {
- if (arg->fn(tp, ht, arg) < 0) {
- arg->stop = 1;
- return;
- }
- }
- arg->count++;
+
+ if (!tc_cls_stats_dump(tp, arg, ht))
+ return;
+
for (h = 0; h <= ht->divisor; h++) {
for (n = rtnl_dereference(ht->ht[h]);
n;
n = rtnl_dereference(n->next)) {
- if (arg->count < arg->skip) {
- arg->count++;
- continue;
- }
- if (arg->fn(tp, n, arg) < 0) {
- arg->stop = 1;
+ if (!tc_cls_stats_dump(tp, arg, n))
return;
- }
- arg->count++;
}
}
}
@@ -1256,12 +1250,7 @@ static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
{
struct tc_u_knode *n = fh;
- if (n && n->res.classid == classid) {
- if (cl)
- __tcf_bind_filter(q, &n->res, base);
- else
- __tcf_unbind_filter(q, &n->res);
- }
+ tc_cls_bind_class(classid, cl, q, &n->res, base);
}
static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index bf87b50837a8..c98af0ada706 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -171,7 +171,7 @@ out_einval:
}
EXPORT_SYMBOL(register_qdisc);
-int unregister_qdisc(struct Qdisc_ops *qops)
+void unregister_qdisc(struct Qdisc_ops *qops)
{
struct Qdisc_ops *q, **qp;
int err = -ENOENT;
@@ -186,7 +186,8 @@ int unregister_qdisc(struct Qdisc_ops *qops)
err = 0;
}
write_unlock(&qdisc_mod_lock);
- return err;
+
+ WARN(err, "unregister qdisc(%s) failed\n", qops->id);
}
EXPORT_SYMBOL(unregister_qdisc);
@@ -194,7 +195,7 @@ EXPORT_SYMBOL(unregister_qdisc);
void qdisc_get_default(char *name, size_t len)
{
read_lock(&qdisc_mod_lock);
- strlcpy(name, default_qdisc_ops->id, len);
+ strscpy(name, default_qdisc_ops->id, len);
read_unlock(&qdisc_mod_lock);
}
@@ -867,6 +868,23 @@ void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
}
EXPORT_SYMBOL(qdisc_offload_graft_helper);
+void qdisc_offload_query_caps(struct net_device *dev,
+ enum tc_setup_type type,
+ void *caps, size_t caps_len)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+ struct tc_query_caps_base base = {
+ .type = type,
+ .caps = caps,
+ };
+
+ memset(caps, 0, caps_len);
+
+ if (ops->ndo_setup_tc)
+ ops->ndo_setup_tc(dev, TC_QUERY_CAPS, &base);
+}
+EXPORT_SYMBOL(qdisc_offload_query_caps);
+
static void qdisc_offload_graft_root(struct net_device *dev,
struct Qdisc *new, struct Qdisc *old,
struct netlink_ext_ack *extack)
@@ -1163,7 +1181,7 @@ static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
static struct Qdisc *qdisc_create(struct net_device *dev,
struct netdev_queue *dev_queue,
- struct Qdisc *p, u32 parent, u32 handle,
+ u32 parent, u32 handle,
struct nlattr **tca, int *errp,
struct netlink_ext_ack *extack)
{
@@ -1424,10 +1442,6 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
struct Qdisc *p = NULL;
int err;
- if ((n->nlmsg_type != RTM_GETQDISC) &&
- !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
- return -EPERM;
-
err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
rtm_tca_policy, extack);
if (err < 0)
@@ -1508,9 +1522,6 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
struct Qdisc *q, *p;
int err;
- if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
- return -EPERM;
-
replay:
/* Reinit, just in case something touches this. */
err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
@@ -1640,7 +1651,7 @@ create_n_graft:
}
if (clid == TC_H_INGRESS) {
if (dev_ingress_queue(dev)) {
- q = qdisc_create(dev, dev_ingress_queue(dev), p,
+ q = qdisc_create(dev, dev_ingress_queue(dev),
tcm->tcm_parent, tcm->tcm_parent,
tca, &err, extack);
} else {
@@ -1657,7 +1668,7 @@ create_n_graft:
else
dev_queue = netdev_get_tx_queue(dev, 0);
- q = qdisc_create(dev, dev_queue, p,
+ q = qdisc_create(dev, dev_queue,
tcm->tcm_parent, tcm->tcm_handle,
tca, &err, extack);
}
@@ -1904,7 +1915,7 @@ static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
struct tcf_bind_args *a = (void *)arg;
- if (tp->ops->bind_class) {
+ if (n && tp->ops->bind_class) {
struct Qdisc *q = tcf_block_q(tp->chain->block);
sch_tree_lock(q);
@@ -1992,10 +2003,6 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
u32 qid;
int err;
- if ((n->nlmsg_type != RTM_GETTCLASS) &&
- !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
- return -EPERM;
-
err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
rtm_tca_policy, extack);
if (err < 0)
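[Editor's note] The new qdisc_offload_query_caps() above gives qdiscs a uniform way to ask the driver about offload capabilities before committing a configuration; taprio uses it later in this diff to probe supports_queue_max_sdu. A hedged sketch of the driver side, assuming the structure layouts visible at the call sites here ("foo_setup_tc" is an illustrative ndo_setup_tc implementation, not a driver in this diff):

#include <linux/netdevice.h>
#include <net/pkt_sched.h>

static int foo_setup_tc(struct net_device *dev, enum tc_setup_type type,
			void *type_data)
{
	switch (type) {
	case TC_QUERY_CAPS: {
		struct tc_query_caps_base *base = type_data;

		switch (base->type) {
		case TC_SETUP_QDISC_TAPRIO: {
			struct tc_taprio_caps *caps = base->caps;

			/* Tell taprio that per-TC queueMaxSDU is handled in HW. */
			caps->supports_queue_max_sdu = true;
			return 0;
		}
		default:
			return -EOPNOTSUPP;
		}
	}
	/* ... other TC_SETUP_* cases elided ... */
	default:
		return -EOPNOTSUPP;
	}
}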
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 4c8e994cf0a5..f52255fea652 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -354,12 +354,8 @@ static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
if (walker->stop)
return;
list_for_each_entry(flow, &p->flows, list) {
- if (walker->count >= walker->skip &&
- walker->fn(sch, (unsigned long)flow, walker) < 0) {
- walker->stop = 1;
+ if (!tc_qdisc_stats_dump(sch, (unsigned long)flow, walker))
break;
- }
- walker->count++;
}
}
@@ -577,7 +573,6 @@ static void atm_tc_reset(struct Qdisc *sch)
pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
list_for_each_entry(flow, &p->flows, list)
qdisc_reset(flow->q);
- sch->q.qlen = 0;
}
static void atm_tc_destroy(struct Qdisc *sch)
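[Editor's note] From sch_atm onwards, the qdisc ->walk() implementations get the same treatment as the classifiers: the skip/count/stop boilerplate is replaced by tc_qdisc_stats_dump() (walkers that skip empty slots, such as cake, fq_codel, sfq and dsmark, still bump arg->count themselves, as the hunks show). Likewise, the ->reset() paths stop zeroing sch->q.qlen and sch->qstats.backlog by hand, since the core qdisc_reset() clears both after calling ->reset(). A plausible shape of the walker helper, reconstructed from the converted call sites rather than quoted from the headers:

static inline bool tc_qdisc_stats_dump(struct Qdisc *sch, unsigned long cl,
				       struct qdisc_walker *arg)
{
	/* Same cursor handling as the classifier-side helper. */
	if (arg->count >= arg->skip && arg->fn(sch, cl, arg) < 0) {
		arg->stop = 1;
		return false;
	}

	arg->count++;
	return true;
}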
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index a43a58a73d09..55c6879d2c7e 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -2569,9 +2569,6 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
struct nlattr *tb[TCA_CAKE_MAX + 1];
int err;
- if (!opt)
- return -EINVAL;
-
err = nla_parse_nested_deprecated(tb, TCA_CAKE_MAX, opt, cake_policy,
extack);
if (err < 0)
@@ -3064,16 +3061,13 @@ static void cake_walk(struct Qdisc *sch, struct qdisc_walker *arg)
struct cake_tin_data *b = &q->tins[q->tin_order[i]];
for (j = 0; j < CAKE_QUEUES; j++) {
- if (list_empty(&b->flows[j].flowchain) ||
- arg->count < arg->skip) {
+ if (list_empty(&b->flows[j].flowchain)) {
arg->count++;
continue;
}
- if (arg->fn(sch, i * CAKE_QUEUES + j + 1, arg) < 0) {
- arg->stop = 1;
+ if (!tc_qdisc_stats_dump(sch, i * CAKE_QUEUES + j + 1,
+ arg))
break;
- }
- arg->count++;
}
}
}
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 91a0dc463c48..6568e17c4c63 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -975,7 +975,6 @@ cbq_reset(struct Qdisc *sch)
cl->cpriority = cl->priority;
}
}
- sch->q.qlen = 0;
}
@@ -1677,15 +1676,8 @@ static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
for (h = 0; h < q->clhash.hashsize; h++) {
hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
- if (arg->count < arg->skip) {
- arg->count++;
- continue;
- }
- if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
- arg->stop = 1;
+ if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
return;
- }
- arg->count++;
}
}
}
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
index 459cc240eda9..cac870eb7897 100644
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -520,13 +520,7 @@ static unsigned long cbs_find(struct Qdisc *sch, u32 classid)
static void cbs_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
if (!walker->stop) {
- if (walker->count >= walker->skip) {
- if (walker->fn(sch, 1, walker) < 0) {
- walker->stop = 1;
- return;
- }
- }
- walker->count++;
+ tc_qdisc_stats_dump(sch, 1, walker);
}
}
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 2adbd945bf15..3ac3e5c80b6f 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -60,7 +60,6 @@ struct choke_sched_data {
u32 forced_drop; /* Forced drops, qavg > max_thresh */
u32 forced_mark; /* Forced marks, qavg > max_thresh */
u32 pdrop; /* Drops due to queue limits */
- u32 other; /* Drops due to drop() calls */
u32 matched; /* Drops to flow match */
} stats;
@@ -315,8 +314,6 @@ static void choke_reset(struct Qdisc *sch)
rtnl_qdisc_drop(skb, sch);
}
- sch->q.qlen = 0;
- sch->qstats.backlog = 0;
if (q->tab)
memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
q->head = q->tail = 0;
@@ -466,7 +463,6 @@ static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
.early = q->stats.prob_drop + q->stats.forced_drop,
.marked = q->stats.prob_mark + q->stats.forced_mark,
.pdrop = q->stats.pdrop,
- .other = q->stats.other,
.matched = q->stats.matched,
};
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 30169b3adbbb..d7a4874543de 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -138,9 +138,6 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt,
unsigned int qlen, dropped = 0;
int err;
- if (!opt)
- return -EINVAL;
-
err = nla_parse_nested_deprecated(tb, TCA_CODEL_MAX, opt,
codel_policy, NULL);
if (err < 0)
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 18e4f7a0b291..e35a4e90f4e6 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -284,15 +284,8 @@ static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
- if (arg->count < arg->skip) {
- arg->count++;
- continue;
- }
- if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
- arg->stop = 1;
+ if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
return;
- }
- arg->count++;
}
}
}
@@ -441,8 +434,6 @@ static void drr_reset_qdisc(struct Qdisc *sch)
qdisc_reset(cl->qdisc);
}
}
- sch->qstats.backlog = 0;
- sch->q.qlen = 0;
}
static void drr_destroy_qdisc(struct Qdisc *sch)
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 4c100d105269..401ffaf87d62 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -176,16 +176,12 @@ static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
return;
for (i = 0; i < p->indices; i++) {
- if (p->mv[i].mask == 0xff && !p->mv[i].value)
- goto ignore;
- if (walker->count >= walker->skip) {
- if (walker->fn(sch, i + 1, walker) < 0) {
- walker->stop = 1;
- break;
- }
+ if (p->mv[i].mask == 0xff && !p->mv[i].value) {
+ walker->count++;
+ continue;
}
-ignore:
- walker->count++;
+ if (!tc_qdisc_stats_dump(sch, i + 1, walker))
+ break;
}
}
@@ -409,8 +405,6 @@ static void dsmark_reset(struct Qdisc *sch)
pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
if (p->q)
qdisc_reset(p->q);
- sch->qstats.backlog = 0;
- sch->q.qlen = 0;
}
static void dsmark_destroy(struct Qdisc *sch)
diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c
index c48f91075b5c..61d1f0e32cf3 100644
--- a/net/sched/sch_etf.c
+++ b/net/sched/sch_etf.c
@@ -323,9 +323,6 @@ static int etf_enable_offload(struct net_device *dev, struct etf_sched_data *q,
struct tc_etf_qopt_offload etf = { };
int err;
- if (q->offload)
- return 0;
-
if (!ops->ndo_setup_tc) {
NL_SET_ERR_MSG(extack, "Specified device does not support ETF offload");
return -EOPNOTSUPP;
@@ -445,9 +442,6 @@ static void etf_reset(struct Qdisc *sch)
timesortedlist_clear(sch);
__qdisc_reset_queue(&sch->q);
- sch->qstats.backlog = 0;
- sch->q.qlen = 0;
-
q->last = 0;
}
diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
index d73393493553..b10efeaf0629 100644
--- a/net/sched/sch_ets.c
+++ b/net/sched/sch_ets.c
@@ -341,15 +341,8 @@ static void ets_qdisc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
return;
for (i = 0; i < q->nbands; i++) {
- if (arg->count < arg->skip) {
- arg->count++;
- continue;
- }
- if (arg->fn(sch, i + 1, arg) < 0) {
- arg->stop = 1;
+ if (!tc_qdisc_stats_dump(sch, i + 1, arg))
break;
- }
- arg->count++;
}
}
@@ -594,11 +587,6 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
unsigned int i;
int err;
- if (!opt) {
- NL_SET_ERR_MSG(extack, "ETS options are required for this operation");
- return -EINVAL;
- }
-
err = nla_parse_nested(tb, TCA_ETS_MAX, opt, ets_policy, extack);
if (err < 0)
return err;
@@ -727,8 +715,6 @@ static void ets_qdisc_reset(struct Qdisc *sch)
}
for (band = 0; band < q->nbands; band++)
qdisc_reset(q->classes[band].qdisc);
- sch->qstats.backlog = 0;
- sch->q.qlen = 0;
}
static void ets_qdisc_destroy(struct Qdisc *sch)
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 2fb76fc0cc31..48d14fb90ba0 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -808,9 +808,6 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
unsigned drop_len = 0;
u32 fq_log;
- if (!opt)
- return -EINVAL;
-
err = nla_parse_nested_deprecated(tb, TCA_FQ_MAX, opt, fq_policy,
NULL);
if (err < 0)
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 839e1235db05..99d318b60568 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -347,8 +347,6 @@ static void fq_codel_reset(struct Qdisc *sch)
codel_vars_init(&flow->cvars);
}
memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
- sch->q.qlen = 0;
- sch->qstats.backlog = 0;
q->memory_usage = 0;
}
@@ -374,9 +372,6 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
u32 quantum = 0;
int err;
- if (!opt)
- return -EINVAL;
-
err = nla_parse_nested_deprecated(tb, TCA_FQ_CODEL_MAX, opt,
fq_codel_policy, NULL);
if (err < 0)
@@ -483,26 +478,24 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
if (opt) {
err = fq_codel_change(sch, opt, extack);
if (err)
- goto init_failure;
+ return err;
}
err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
if (err)
- goto init_failure;
+ return err;
if (!q->flows) {
q->flows = kvcalloc(q->flows_cnt,
sizeof(struct fq_codel_flow),
GFP_KERNEL);
- if (!q->flows) {
- err = -ENOMEM;
- goto init_failure;
- }
+ if (!q->flows)
+ return -ENOMEM;
+
q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
- if (!q->backlogs) {
- err = -ENOMEM;
- goto alloc_failure;
- }
+ if (!q->backlogs)
+ return -ENOMEM;
+
for (i = 0; i < q->flows_cnt; i++) {
struct fq_codel_flow *flow = q->flows + i;
@@ -515,13 +508,6 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
else
sch->flags &= ~TCQ_F_CAN_BYPASS;
return 0;
-
-alloc_failure:
- kvfree(q->flows);
- q->flows = NULL;
-init_failure:
- q->flows_cnt = 0;
- return err;
}
static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -687,16 +673,12 @@ static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
return;
for (i = 0; i < q->flows_cnt; i++) {
- if (list_empty(&q->flows[i].flowchain) ||
- arg->count < arg->skip) {
+ if (list_empty(&q->flows[i].flowchain)) {
arg->count++;
continue;
}
- if (arg->fn(sch, i + 1, arg) < 0) {
- arg->stop = 1;
+ if (!tc_qdisc_stats_dump(sch, i + 1, arg))
break;
- }
- arg->count++;
}
}
diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
index d6aba6edd16e..6980796d435d 100644
--- a/net/sched/sch_fq_pie.c
+++ b/net/sched/sch_fq_pie.c
@@ -283,9 +283,6 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
unsigned int num_dropped = 0;
int err;
- if (!opt)
- return -EINVAL;
-
err = nla_parse_nested(tb, TCA_FQ_PIE_MAX, opt, fq_pie_policy, extack);
if (err < 0)
return err;
@@ -521,9 +518,6 @@ static void fq_pie_reset(struct Qdisc *sch)
INIT_LIST_HEAD(&flow->flowchain);
pie_vars_init(&flow->vars);
}
-
- sch->q.qlen = 0;
- sch->qstats.backlog = 0;
}
static void fq_pie_destroy(struct Qdisc *sch)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 7a8ea03f673d..a9aadc4e6858 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -941,7 +941,6 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
goto errout;
__skb_queue_head_init(&sch->gso_skb);
__skb_queue_head_init(&sch->skb_bad_txq);
- qdisc_skb_head_init(&sch->q);
gnet_stats_basic_sync_init(&sch->bstats);
spin_lock_init(&sch->q.lock);
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 1073c76d05c4..a661b062cca8 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -648,9 +648,6 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt,
u32 max_P;
struct gred_sched_data *prealloc;
- if (opt == NULL)
- return -EINVAL;
-
err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
extack);
if (err < 0)
@@ -829,7 +826,6 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
opt.Wlog = q->parms.Wlog;
opt.Plog = q->parms.Plog;
opt.Scell_log = q->parms.Scell_log;
- opt.other = q->stats.other;
opt.early = q->stats.prob_drop;
opt.forced = q->stats.forced_drop;
opt.pdrop = q->stats.pdrop;
@@ -895,8 +891,6 @@ append_opt:
goto nla_put_failure;
if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
goto nla_put_failure;
- if (nla_put_u32(skb, TCA_GRED_VQ_STAT_OTHER, q->stats.other))
- goto nla_put_failure;
nla_nest_end(skb, vq);
}
@@ -914,10 +908,9 @@ static void gred_destroy(struct Qdisc *sch)
struct gred_sched *table = qdisc_priv(sch);
int i;
- for (i = 0; i < table->DPs; i++) {
- if (table->tab[i])
- gred_destroy_vq(table->tab[i]);
- }
+ for (i = 0; i < table->DPs; i++)
+ gred_destroy_vq(table->tab[i]);
+
gred_offload(sch, TC_GRED_DESTROY);
kfree(table->opt);
}
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index d3979a6000e7..70b0c5873d32 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1349,15 +1349,8 @@ hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(cl, &q->clhash.hash[i],
cl_common.hnode) {
- if (arg->count < arg->skip) {
- arg->count++;
- continue;
- }
- if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
- arg->stop = 1;
+ if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
return;
- }
- arg->count++;
}
}
}
@@ -1430,7 +1423,7 @@ hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt,
struct hfsc_sched *q = qdisc_priv(sch);
struct tc_hfsc_qopt *qopt;
- if (opt == NULL || nla_len(opt) < sizeof(*qopt))
+ if (nla_len(opt) < sizeof(*qopt))
return -EINVAL;
qopt = nla_data(opt);
@@ -1484,8 +1477,6 @@ hfsc_reset_qdisc(struct Qdisc *sch)
}
q->eligible = RB_ROOT;
qdisc_watchdog_cancel(&q->watchdog);
- sch->qstats.backlog = 0;
- sch->q.qlen = 0;
}
static void
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 420ede875322..d26cd436cbe3 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -516,9 +516,6 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
u32 new_quantum = q->quantum;
u32 new_hhf_non_hh_weight = q->hhf_non_hh_weight;
- if (!opt)
- return -EINVAL;
-
err = nla_parse_nested_deprecated(tb, TCA_HHF_MAX, opt, hhf_policy,
NULL);
if (err < 0)
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 23a9d6242429..e5b4bbf3ce3d 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1008,8 +1008,6 @@ static void htb_reset(struct Qdisc *sch)
}
qdisc_watchdog_cancel(&q->watchdog);
__qdisc_reset_queue(&q->direct_queue);
- sch->q.qlen = 0;
- sch->qstats.backlog = 0;
memset(q->hlevel, 0, sizeof(q->hlevel));
memset(q->row_mask, 0, sizeof(q->row_mask));
}
@@ -1104,9 +1102,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
err = qdisc_class_hash_init(&q->clhash);
if (err < 0)
- goto err_free_direct_qdiscs;
-
- qdisc_skb_head_init(&q->direct_queue);
+ return err;
if (tb[TCA_HTB_DIRECT_QLEN])
q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
@@ -1127,8 +1123,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
TC_H_MAKE(sch->handle, 0), extack);
if (!qdisc) {
- err = -ENOMEM;
- goto err_free_qdiscs;
+ return -ENOMEM;
}
htb_set_lockdep_class_child(qdisc);
@@ -1146,7 +1141,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
};
err = htb_offload(dev, &offload_opt);
if (err)
- goto err_free_qdiscs;
+ return err;
/* Defer this assignment, so that htb_destroy skips offload-related
* parts (especially calling ndo_setup_tc) on errors.
@@ -1154,22 +1149,6 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
q->offload = true;
return 0;
-
-err_free_qdiscs:
- for (ntx = 0; ntx < q->num_direct_qdiscs && q->direct_qdiscs[ntx];
- ntx++)
- qdisc_put(q->direct_qdiscs[ntx]);
-
- qdisc_class_hash_destroy(&q->clhash);
- /* Prevent use-after-free and double-free when htb_destroy gets called.
- */
- q->clhash.hash = NULL;
- q->clhash.hashsize = 0;
-
-err_free_direct_qdiscs:
- kfree(q->direct_qdiscs);
- q->direct_qdiscs = NULL;
- return err;
}
static void htb_attach_offload(struct Qdisc *sch)
@@ -1692,13 +1671,12 @@ static void htb_destroy(struct Qdisc *sch)
qdisc_class_hash_destroy(&q->clhash);
__qdisc_reset_queue(&q->direct_queue);
- if (!q->offload)
- return;
-
- offload_opt = (struct tc_htb_qopt_offload) {
- .command = TC_HTB_DESTROY,
- };
- htb_offload(dev, &offload_opt);
+ if (q->offload) {
+ offload_opt = (struct tc_htb_qopt_offload) {
+ .command = TC_HTB_DESTROY,
+ };
+ htb_offload(dev, &offload_opt);
+ }
if (!q->direct_qdiscs)
return;
@@ -2141,15 +2119,8 @@ static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
- if (arg->count < arg->skip) {
- arg->count++;
- continue;
- }
- if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
- arg->stop = 1;
+ if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
return;
- }
- arg->count++;
}
}
}
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 83d2e54bf303..d0bc660d7401 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -247,11 +247,8 @@ static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
arg->count = arg->skip;
for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
- if (arg->fn(sch, ntx + 1, arg) < 0) {
- arg->stop = 1;
+ if (!tc_qdisc_stats_dump(sch, ntx + 1, arg))
break;
- }
- arg->count++;
}
}
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index b29f3453c6ea..4c68abaa289b 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -558,11 +558,8 @@ static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
/* Walk hierarchy with a virtual class per tc */
arg->count = arg->skip;
for (ntx = arg->skip; ntx < netdev_get_num_tc(dev); ntx++) {
- if (arg->fn(sch, ntx + TC_H_MIN_PRIORITY, arg) < 0) {
- arg->stop = 1;
+ if (!tc_qdisc_stats_dump(sch, ntx + TC_H_MIN_PRIORITY, arg))
return;
- }
- arg->count++;
}
/* Pad the values and skip over unused traffic classes */
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index cd8ab90c4765..75c9c860182b 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -152,7 +152,6 @@ multiq_reset(struct Qdisc *sch)
for (band = 0; band < q->bands; band++)
qdisc_reset(q->queues[band]);
- sch->q.qlen = 0;
q->curband = 0;
}
@@ -354,15 +353,8 @@ static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
return;
for (band = 0; band < q->bands; band++) {
- if (arg->count < arg->skip) {
- arg->count++;
- continue;
- }
- if (arg->fn(sch, band + 1, arg) < 0) {
- arg->stop = 1;
+ if (!tc_qdisc_stats_dump(sch, band + 1, arg))
break;
- }
- arg->count++;
}
}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 5449ed114e40..18f4273a835b 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -961,9 +961,6 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
int old_loss_model = CLG_RANDOM;
int ret;
- if (opt == NULL)
- return -EINVAL;
-
qopt = nla_data(opt);
ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
if (ret < 0)
@@ -1254,12 +1251,8 @@ static unsigned long netem_find(struct Qdisc *sch, u32 classid)
static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
if (!walker->stop) {
- if (walker->count >= walker->skip)
- if (walker->fn(sch, 1, walker) < 0) {
- walker->stop = 1;
- return;
- }
- walker->count++;
+ if (!tc_qdisc_stats_dump(sch, 1, walker))
+ return;
}
}
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index 5a457ff61acd..974038ba6c7b 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -143,9 +143,6 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt,
unsigned int qlen, dropped = 0;
int err;
- if (!opt)
- return -EINVAL;
-
err = nla_parse_nested_deprecated(tb, TCA_PIE_MAX, opt, pie_policy,
NULL);
if (err < 0)
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index cbc2ebca4548..ea8c4a7174bb 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -161,9 +161,6 @@ static int plug_change(struct Qdisc *sch, struct nlattr *opt,
struct plug_sched_data *q = qdisc_priv(sch);
struct tc_plug_qopt *msg;
- if (opt == NULL)
- return -EINVAL;
-
msg = nla_data(opt);
if (nla_len(opt) < sizeof(*msg))
return -EINVAL;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 3b8d7197c06b..fdc5ef52c3ee 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -135,8 +135,6 @@ prio_reset(struct Qdisc *sch)
for (prio = 0; prio < q->bands; prio++)
qdisc_reset(q->queues[prio]);
- sch->qstats.backlog = 0;
- sch->q.qlen = 0;
}
static int prio_offload(struct Qdisc *sch, struct tc_prio_qopt *qopt)
@@ -187,7 +185,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
return -EINVAL;
qopt = nla_data(opt);
- if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
+ if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < TCQ_MIN_PRIO_BANDS)
return -EINVAL;
for (i = 0; i <= TC_PRIO_MAX; i++) {
@@ -378,15 +376,8 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
return;
for (prio = 0; prio < q->bands; prio++) {
- if (arg->count < arg->skip) {
- arg->count++;
- continue;
- }
- if (arg->fn(sch, prio + 1, arg) < 0) {
- arg->stop = 1;
+ if (!tc_qdisc_stats_dump(sch, prio + 1, arg))
break;
- }
- arg->count++;
}
}
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index d4ce58c90f9f..cf5ebe43b3b4 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -659,15 +659,8 @@ static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
- if (arg->count < arg->skip) {
- arg->count++;
- continue;
- }
- if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
- arg->stop = 1;
+ if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
return;
- }
- arg->count++;
}
}
}
@@ -1458,8 +1451,6 @@ static void qfq_reset_qdisc(struct Qdisc *sch)
qdisc_reset(cl->qdisc);
}
}
- sch->qstats.backlog = 0;
- sch->q.qlen = 0;
}
static void qfq_destroy_qdisc(struct Qdisc *sch)
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 40adf1f07a82..a5a401f93c1a 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -176,8 +176,6 @@ static void red_reset(struct Qdisc *sch)
struct red_sched_data *q = qdisc_priv(sch);
qdisc_reset(q->qdisc);
- sch->qstats.backlog = 0;
- sch->q.qlen = 0;
red_restart(&q->vars);
}
@@ -370,9 +368,6 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
struct nlattr *tb[TCA_RED_MAX + 1];
int err;
- if (!opt)
- return -EINVAL;
-
err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
extack);
if (err < 0)
@@ -463,7 +458,6 @@ static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
}
st.early = q->stats.prob_drop + q->stats.forced_drop;
st.pdrop = q->stats.pdrop;
- st.other = q->stats.other;
st.marked = q->stats.prob_mark + q->stats.forced_mark;
return gnet_stats_copy_app(d, &st, sizeof(st));
@@ -522,12 +516,7 @@ static unsigned long red_find(struct Qdisc *sch, u32 classid)
static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
if (!walker->stop) {
- if (walker->count >= walker->skip)
- if (walker->fn(sch, 1, walker) < 0) {
- walker->stop = 1;
- return;
- }
- walker->count++;
+ tc_qdisc_stats_dump(sch, 1, walker);
}
}
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 2829455211f8..e2389fa3cff8 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -456,8 +456,6 @@ static void sfb_reset(struct Qdisc *sch)
struct sfb_sched_data *q = qdisc_priv(sch);
qdisc_reset(q->qdisc);
- sch->qstats.backlog = 0;
- sch->q.qlen = 0;
q->slot = 0;
q->double_buffering = false;
sfb_zero_all_buckets(q);
@@ -661,12 +659,7 @@ static int sfb_delete(struct Qdisc *sch, unsigned long cl,
static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
if (!walker->stop) {
- if (walker->count >= walker->skip)
- if (walker->fn(sch, 1, walker) < 0) {
- walker->stop = 1;
- return;
- }
- walker->count++;
+ tc_qdisc_stats_dump(sch, 1, walker);
}
}
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index f8e569f79f13..abd436307d6a 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -888,16 +888,12 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
return;
for (i = 0; i < q->divisor; i++) {
- if (q->ht[i] == SFQ_EMPTY_SLOT ||
- arg->count < arg->skip) {
+ if (q->ht[i] == SFQ_EMPTY_SLOT) {
arg->count++;
continue;
}
- if (arg->fn(sch, i + 1, arg) < 0) {
- arg->stop = 1;
+ if (!tc_qdisc_stats_dump(sch, i + 1, arg))
break;
- }
- arg->count++;
}
}
diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c
index 7a5e4c454715..5df2dacb7b1a 100644
--- a/net/sched/sch_skbprio.c
+++ b/net/sched/sch_skbprio.c
@@ -213,9 +213,6 @@ static void skbprio_reset(struct Qdisc *sch)
struct skbprio_sched_data *q = qdisc_priv(sch);
int prio;
- sch->qstats.backlog = 0;
- sch->q.qlen = 0;
-
for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++)
__skb_queue_purge(&q->qdiscs[prio]);
@@ -268,15 +265,8 @@ static void skbprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
return;
for (i = 0; i < SKBPRIO_MAX_PRIORITY; i++) {
- if (arg->count < arg->skip) {
- arg->count++;
- continue;
- }
- if (arg->fn(sch, i + 1, arg) < 0) {
- arg->stop = 1;
+ if (!tc_qdisc_stats_dump(sch, i + 1, arg))
break;
- }
- arg->count++;
}
}
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 86675a79da1e..435d866fcfa0 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -27,7 +27,6 @@
#include <net/tcp.h>
static LIST_HEAD(taprio_list);
-static DEFINE_SPINLOCK(taprio_list_lock);
#define TAPRIO_ALL_GATES_OPEN -1
@@ -79,8 +78,8 @@ struct taprio_sched {
struct sched_gate_list __rcu *admin_sched;
struct hrtimer advance_timer;
struct list_head taprio_list;
- struct sk_buff *(*dequeue)(struct Qdisc *sch);
- struct sk_buff *(*peek)(struct Qdisc *sch);
+ u32 max_frm_len[TC_MAX_QUEUE]; /* for the fast path */
+ u32 max_sdu[TC_MAX_QUEUE]; /* for dump and offloading */
u32 txtime_delay;
};
@@ -418,6 +417,9 @@ static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
struct Qdisc *child, struct sk_buff **to_free)
{
struct taprio_sched *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+ int prio = skb->priority;
+ u8 tc;
/* sk_flags are only safe to use on full sockets. */
if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) {
@@ -429,12 +431,20 @@ static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
return qdisc_drop(skb, sch, to_free);
}
+ /* Devices with full offload are expected to honor this in hardware */
+ tc = netdev_get_prio_tc_map(dev, prio);
+ if (skb->len > q->max_frm_len[tc])
+ return qdisc_drop(skb, sch, to_free);
+
qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
return qdisc_enqueue(skb, child, to_free);
}
+/* Will not be called in the full offload case, since the TX queues are
+ * attached to the Qdisc created using qdisc_create_dflt()
+ */
static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
@@ -442,11 +452,6 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct Qdisc *child;
int queue;
- if (unlikely(FULL_OFFLOAD_IS_ENABLED(q->flags))) {
- WARN_ONCE(1, "Trying to enqueue skb into the root of a taprio qdisc configured with full offload\n");
- return qdisc_drop(skb, sch, to_free);
- }
-
queue = skb_get_queue_mapping(skb);
child = q->qdiscs[queue];
@@ -455,10 +460,10 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
/* Large packets might not be transmitted when the transmission duration
* exceeds any configured interval. Therefore, segment the skb into
- * smaller chunks. Skip it for the full offload case, as the driver
- * and/or the hardware is expected to handle this.
+ * smaller chunks. Drivers with full offload are expected to handle
+ * this in hardware.
*/
- if (skb_is_gso(skb) && !FULL_OFFLOAD_IS_ENABLED(q->flags)) {
+ if (skb_is_gso(skb)) {
unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb);
netdev_features_t features = netif_skb_features(skb);
struct sk_buff *segs, *nskb;
@@ -492,7 +497,10 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return taprio_enqueue_one(skb, sch, child, to_free);
}
-static struct sk_buff *taprio_peek_soft(struct Qdisc *sch)
+/* Will not be called in the full offload case, since the TX queues are
+ * attached to the Qdisc created using qdisc_create_dflt()
+ */
+static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
struct taprio_sched *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
@@ -536,20 +544,6 @@ static struct sk_buff *taprio_peek_soft(struct Qdisc *sch)
return NULL;
}
-static struct sk_buff *taprio_peek_offload(struct Qdisc *sch)
-{
- WARN_ONCE(1, "Trying to peek into the root of a taprio qdisc configured with full offload\n");
-
- return NULL;
-}
-
-static struct sk_buff *taprio_peek(struct Qdisc *sch)
-{
- struct taprio_sched *q = qdisc_priv(sch);
-
- return q->peek(sch);
-}
-
static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
{
atomic_set(&entry->budget,
@@ -557,7 +551,10 @@ static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
atomic64_read(&q->picos_per_byte)));
}
-static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
+/* Will not be called in the full offload case, since the TX queues are
+ * attached to the Qdisc created using qdisc_create_dflt()
+ */
+static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
struct taprio_sched *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
@@ -645,20 +642,6 @@ done:
return skb;
}
-static struct sk_buff *taprio_dequeue_offload(struct Qdisc *sch)
-{
- WARN_ONCE(1, "Trying to dequeue from the root of a taprio qdisc configured with full offload\n");
-
- return NULL;
-}
-
-static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
-{
- struct taprio_sched *q = qdisc_priv(sch);
-
- return q->dequeue(sch);
-}
-
static bool should_restart_cycle(const struct sched_gate_list *oper,
const struct sched_entry *entry)
{
@@ -781,6 +764,11 @@ static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
[TCA_TAPRIO_SCHED_ENTRY_INTERVAL] = { .type = NLA_U32 },
};
+static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = {
+ [TCA_TAPRIO_TC_ENTRY_INDEX] = { .type = NLA_U32 },
+ [TCA_TAPRIO_TC_ENTRY_MAX_SDU] = { .type = NLA_U32 },
+};
+
static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
[TCA_TAPRIO_ATTR_PRIOMAP] = {
.len = sizeof(struct tc_mqprio_qopt)
@@ -793,6 +781,7 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
[TCA_TAPRIO_ATTR_FLAGS] = { .type = NLA_U32 },
[TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 },
+ [TCA_TAPRIO_ATTR_TC_ENTRY] = { .type = NLA_NESTED },
};
static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
@@ -1099,27 +1088,20 @@ static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct net_device *qdev;
struct taprio_sched *q;
- bool found = false;
ASSERT_RTNL();
if (event != NETDEV_UP && event != NETDEV_CHANGE)
return NOTIFY_DONE;
- spin_lock(&taprio_list_lock);
list_for_each_entry(q, &taprio_list, taprio_list) {
- qdev = qdisc_dev(q->root);
- if (qdev == dev) {
- found = true;
- break;
- }
- }
- spin_unlock(&taprio_list_lock);
+ if (dev != qdisc_dev(q->root))
+ continue;
- if (found)
taprio_set_picos_per_byte(dev, q);
+ break;
+ }
return NOTIFY_DONE;
}
@@ -1194,16 +1176,10 @@ static void taprio_offload_config_changed(struct taprio_sched *q)
{
struct sched_gate_list *oper, *admin;
- spin_lock(&q->current_entry_lock);
-
- oper = rcu_dereference_protected(q->oper_sched,
- lockdep_is_held(&q->current_entry_lock));
- admin = rcu_dereference_protected(q->admin_sched,
- lockdep_is_held(&q->current_entry_lock));
+ oper = rtnl_dereference(q->oper_sched);
+ admin = rtnl_dereference(q->admin_sched);
switch_schedules(q, &admin, &oper);
-
- spin_unlock(&q->current_entry_lock);
}
static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
@@ -1256,7 +1232,8 @@ static int taprio_enable_offload(struct net_device *dev,
{
const struct net_device_ops *ops = dev->netdev_ops;
struct tc_taprio_qopt_offload *offload;
- int err = 0;
+ struct tc_taprio_caps caps;
+ int tc, err = 0;
if (!ops->ndo_setup_tc) {
NL_SET_ERR_MSG(extack,
@@ -1264,6 +1241,19 @@ static int taprio_enable_offload(struct net_device *dev,
return -EOPNOTSUPP;
}
+ qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO,
+ &caps, sizeof(caps));
+
+ if (!caps.supports_queue_max_sdu) {
+ for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
+ if (q->max_sdu[tc]) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device does not handle queueMaxSDU");
+ return -EOPNOTSUPP;
+ }
+ }
+ }
+
offload = taprio_offload_alloc(sched->num_entries);
if (!offload) {
NL_SET_ERR_MSG(extack,
@@ -1273,6 +1263,9 @@ static int taprio_enable_offload(struct net_device *dev,
offload->enable = 1;
taprio_sched_to_offload(dev, sched, offload);
+ for (tc = 0; tc < TC_MAX_QUEUE; tc++)
+ offload->max_sdu[tc] = q->max_sdu[tc];
+
err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
if (err < 0) {
NL_SET_ERR_MSG(extack,
@@ -1407,6 +1400,89 @@ out:
return err;
}
+static int taprio_parse_tc_entry(struct Qdisc *sch,
+ struct nlattr *opt,
+ u32 max_sdu[TC_QOPT_MAX_QUEUE],
+ unsigned long *seen_tcs,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[TCA_TAPRIO_TC_ENTRY_MAX + 1] = { };
+ struct net_device *dev = qdisc_dev(sch);
+ u32 val = 0;
+ int err, tc;
+
+ err = nla_parse_nested(tb, TCA_TAPRIO_TC_ENTRY_MAX, opt,
+ taprio_tc_policy, extack);
+ if (err < 0)
+ return err;
+
+ if (!tb[TCA_TAPRIO_TC_ENTRY_INDEX]) {
+ NL_SET_ERR_MSG_MOD(extack, "TC entry index missing");
+ return -EINVAL;
+ }
+
+ tc = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_INDEX]);
+ if (tc >= TC_QOPT_MAX_QUEUE) {
+ NL_SET_ERR_MSG_MOD(extack, "TC entry index out of range");
+ return -ERANGE;
+ }
+
+ if (*seen_tcs & BIT(tc)) {
+ NL_SET_ERR_MSG_MOD(extack, "Duplicate TC entry");
+ return -EINVAL;
+ }
+
+ *seen_tcs |= BIT(tc);
+
+ if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU])
+ val = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]);
+
+ if (val > dev->max_mtu) {
+ NL_SET_ERR_MSG_MOD(extack, "TC max SDU exceeds device max MTU");
+ return -ERANGE;
+ }
+
+ max_sdu[tc] = val;
+
+ return 0;
+}
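
[Editor's note] taprio_parse_tc_entry() above enforces three rules per TCA_TAPRIO_ATTR_TC_ENTRY nest: the index must be present and below TC_QOPT_MAX_QUEUE, each traffic class may appear only once (tracked in the seen_tcs bitmap), and the requested max SDU may not exceed the device's max_mtu; a value of 0 means "no limit". A stand-alone sketch of the same validation, with illustrative names only (not the kernel's):

        #include <stdio.h>

        #define MAX_TC 16                       /* stand-in for TC_QOPT_MAX_QUEUE */

        static int parse_tc_entry(int tc, unsigned int max_sdu,
                                  unsigned int dev_max_mtu,
                                  unsigned long *seen_tcs,
                                  unsigned int out_max_sdu[MAX_TC])
        {
                if (tc < 0 || tc >= MAX_TC)
                        return -1;              /* index out of range */

                if (*seen_tcs & (1UL << tc))
                        return -1;              /* duplicate TC entry */
                *seen_tcs |= 1UL << tc;

                if (max_sdu > dev_max_mtu)
                        return -1;              /* exceeds device max MTU */

                out_max_sdu[tc] = max_sdu;      /* 0 means "no limit" */
                return 0;
        }

        int main(void)
        {
                unsigned int max_sdu[MAX_TC] = { 0 };
                unsigned long seen = 0;

                printf("%d\n", parse_tc_entry(0, 1500, 9000, &seen, max_sdu)); /* 0 */
                printf("%d\n", parse_tc_entry(0, 1400, 9000, &seen, max_sdu)); /* -1, duplicate */
                return 0;
        }
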
+
+static int taprio_parse_tc_entries(struct Qdisc *sch,
+ struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ struct taprio_sched *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+ u32 max_sdu[TC_QOPT_MAX_QUEUE];
+ unsigned long seen_tcs = 0;
+ struct nlattr *n;
+ int tc, rem;
+ int err = 0;
+
+ for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
+ max_sdu[tc] = q->max_sdu[tc];
+
+ nla_for_each_nested(n, opt, rem) {
+ if (nla_type(n) != TCA_TAPRIO_ATTR_TC_ENTRY)
+ continue;
+
+ err = taprio_parse_tc_entry(sch, n, max_sdu, &seen_tcs, extack);
+ if (err)
+ goto out;
+ }
+
+ for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
+ q->max_sdu[tc] = max_sdu[tc];
+ if (max_sdu[tc])
+ q->max_frm_len[tc] = max_sdu[tc] + dev->hard_header_len;
+ else
+ q->max_frm_len[tc] = U32_MAX; /* never oversized */
+ }
+
+out:
+ return err;
+}
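
[Editor's note] taprio_parse_tc_entries() then folds the accepted values into the qdisc: it stores the user-visible max_sdu and precomputes max_frm_len as the SDU plus the device's hard_header_len, mapping 0 to U32_MAX, presumably so the enqueue path can always do a single length comparison. A sketch of that precomputation, with UINT32_MAX standing in for the kernel's U32_MAX:

        #include <inttypes.h>
        #include <stdint.h>
        #include <stdio.h>

        #define MAX_TC 16                       /* stand-in for TC_QOPT_MAX_QUEUE */

        static void compute_frame_limits(const uint32_t max_sdu[MAX_TC],
                                         uint32_t hard_header_len,
                                         uint32_t max_frm_len[MAX_TC])
        {
                for (int tc = 0; tc < MAX_TC; tc++) {
                        if (max_sdu[tc])
                                max_frm_len[tc] = max_sdu[tc] + hard_header_len;
                        else
                                max_frm_len[tc] = UINT32_MAX;   /* no per-TC limit */
                }
        }

        int main(void)
        {
                uint32_t max_sdu[MAX_TC] = { [0] = 1500 };
                uint32_t max_frm_len[MAX_TC];

                compute_frame_limits(max_sdu, 14 /* Ethernet header */, max_frm_len);
                printf("tc0 limit %" PRIu32 ", tc1 limit %" PRIu32 "\n",
                       max_frm_len[0], max_frm_len[1]);
                return 0;
        }
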
+
static int taprio_mqprio_cmp(const struct net_device *dev,
const struct tc_mqprio_qopt *mqprio)
{
@@ -1485,6 +1561,10 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
if (err < 0)
return err;
+ err = taprio_parse_tc_entries(sch, opt, extack);
+ if (err)
+ return err;
+
new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
if (!new_admin) {
NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
@@ -1492,10 +1572,8 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
}
INIT_LIST_HEAD(&new_admin->entries);
- rcu_read_lock();
- oper = rcu_dereference(q->oper_sched);
- admin = rcu_dereference(q->admin_sched);
- rcu_read_unlock();
+ oper = rtnl_dereference(q->oper_sched);
+ admin = rtnl_dereference(q->admin_sched);
/* no changes - no new mqprio settings */
if (!taprio_mqprio_cmp(dev, mqprio))
@@ -1565,17 +1643,6 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
q->advance_timer.function = advance_sched;
}
- if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
- q->dequeue = taprio_dequeue_offload;
- q->peek = taprio_peek_offload;
- } else {
- /* Be sure to always keep the function pointers
- * in a consistent state.
- */
- q->dequeue = taprio_dequeue_soft;
- q->peek = taprio_peek_soft;
- }
-
err = taprio_get_start_time(sch, new_admin, &start);
if (err < 0) {
NL_SET_ERR_MSG(extack, "Internal error: failed get start time");
@@ -1638,19 +1705,16 @@ static void taprio_reset(struct Qdisc *sch)
if (q->qdiscs[i])
qdisc_reset(q->qdiscs[i]);
}
- sch->qstats.backlog = 0;
- sch->q.qlen = 0;
}
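
[Editor's note] taprio_reset() no longer zeroes sch->q.qlen and sch->qstats.backlog: the generic qdisc_reset() in the core already clears those counters after calling the per-qdisc ->reset(), so the duplication here (and in the tbf and teql hunks further down) can go. A generic sketch of that split between shared and type-specific reset work, with made-up names:

        #include <stdio.h>

        struct base { unsigned int qlen, backlog; };
        struct child { struct base base; unsigned int tokens; };

        static void base_reset(struct base *b)
        {
                /* shared counters are cleared exactly once, here */
                b->qlen = 0;
                b->backlog = 0;
        }

        static void child_reset(struct child *c)
        {
                c->tokens = 0;          /* only child-specific state */
        }

        static void reset(struct child *c)
        {
                child_reset(c);         /* like ops->reset() */
                base_reset(&c->base);   /* like the core qdisc_reset() */
        }

        int main(void)
        {
                struct child c = { { 10, 1000 }, 5 };

                reset(&c);
                printf("%u %u %u\n", c.base.qlen, c.base.backlog, c.tokens);
                return 0;
        }
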
static void taprio_destroy(struct Qdisc *sch)
{
struct taprio_sched *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
+ struct sched_gate_list *oper, *admin;
unsigned int i;
- spin_lock(&taprio_list_lock);
list_del(&q->taprio_list);
- spin_unlock(&taprio_list_lock);
/* Note that taprio_reset() might not be called if an error
* happens in qdisc_create(), after taprio_init() has been called.
@@ -1669,11 +1733,14 @@ static void taprio_destroy(struct Qdisc *sch)
netdev_reset_tc(dev);
- if (q->oper_sched)
- call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb);
+ oper = rtnl_dereference(q->oper_sched);
+ admin = rtnl_dereference(q->admin_sched);
+
+ if (oper)
+ call_rcu(&oper->rcu, taprio_free_sched_cb);
- if (q->admin_sched)
- call_rcu(&q->admin_sched->rcu, taprio_free_sched_cb);
+ if (admin)
+ call_rcu(&admin->rcu, taprio_free_sched_cb);
}
static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
@@ -1688,9 +1755,6 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
q->advance_timer.function = advance_sched;
- q->dequeue = taprio_dequeue_soft;
- q->peek = taprio_peek_soft;
-
q->root = sch;
/* We only support static clockids. Use an invalid value as default
@@ -1699,15 +1763,17 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
q->clockid = -1;
q->flags = TAPRIO_FLAGS_INVALID;
- spin_lock(&taprio_list_lock);
list_add(&q->taprio_list, &taprio_list);
- spin_unlock(&taprio_list_lock);
- if (sch->parent != TC_H_ROOT)
+ if (sch->parent != TC_H_ROOT) {
+ NL_SET_ERR_MSG_MOD(extack, "Can only be attached as root qdisc");
return -EOPNOTSUPP;
+ }
- if (!netif_is_multiqueue(dev))
+ if (!netif_is_multiqueue(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Multi-queue device is required");
return -EOPNOTSUPP;
+ }
/* pre-allocate qdisc, attachment can't fail */
q->qdiscs = kcalloc(dev->num_tx_queues,
@@ -1879,6 +1945,33 @@ error_nest:
return -1;
}
+static int taprio_dump_tc_entries(struct taprio_sched *q, struct sk_buff *skb)
+{
+ struct nlattr *n;
+ int tc;
+
+ for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
+ n = nla_nest_start(skb, TCA_TAPRIO_ATTR_TC_ENTRY);
+ if (!n)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_INDEX, tc))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_MAX_SDU,
+ q->max_sdu[tc]))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, n);
+ }
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, n);
+ return -EMSGSIZE;
+}
+
static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct taprio_sched *q = qdisc_priv(sch);
@@ -1888,9 +1981,8 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
struct nlattr *nest, *sched_nest;
unsigned int i;
- rcu_read_lock();
- oper = rcu_dereference(q->oper_sched);
- admin = rcu_dereference(q->admin_sched);
+ oper = rtnl_dereference(q->oper_sched);
+ admin = rtnl_dereference(q->admin_sched);
opt.num_tc = netdev_get_num_tc(dev);
memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
@@ -1918,6 +2010,9 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
goto options_error;
+ if (taprio_dump_tc_entries(q, skb))
+ goto options_error;
+
if (oper && dump_schedule(skb, oper))
goto options_error;
@@ -1934,8 +2029,6 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
nla_nest_end(skb, sched_nest);
done:
- rcu_read_unlock();
-
return nla_nest_end(skb, nest);
admin_error:
@@ -1945,7 +2038,6 @@ options_error:
nla_nest_cancel(skb, nest);
start_error:
- rcu_read_unlock();
return -ENOSPC;
}
@@ -2006,11 +2098,8 @@ static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
arg->count = arg->skip;
for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
- if (arg->fn(sch, ntx + 1, arg) < 0) {
- arg->stop = 1;
+ if (!tc_qdisc_stats_dump(sch, ntx + 1, arg))
break;
- }
- arg->count++;
}
}
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 36079fdde2cb..277ad11f4d61 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -330,8 +330,6 @@ static void tbf_reset(struct Qdisc *sch)
struct tbf_sched_data *q = qdisc_priv(sch);
qdisc_reset(q->qdisc);
- sch->qstats.backlog = 0;
- sch->q.qlen = 0;
q->t_c = ktime_get_ns();
q->tokens = q->buffer;
q->ptokens = q->mtu;
@@ -582,12 +580,7 @@ static unsigned long tbf_find(struct Qdisc *sch, u32 classid)
static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
if (!walker->stop) {
- if (walker->count >= walker->skip)
- if (walker->fn(sch, 1, walker) < 0) {
- walker->stop = 1;
- return;
- }
- walker->count++;
+ tc_qdisc_stats_dump(sch, 1, walker);
}
}
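
[Editor's note] Both taprio_walk() above and tbf_walk() here now hand the skip/count/stop bookkeeping to the new tc_qdisc_stats_dump() helper instead of open-coding it. A stand-alone sketch of roughly what such a helper factors out (local types, not the kernel's qdisc_walker):

        #include <stdbool.h>
        #include <stdio.h>

        struct walker {
                unsigned long skip, count;
                bool stop;
                int (*fn)(unsigned long classid, struct walker *w);
        };

        /* returns false once the walk should stop */
        static bool walk_one(unsigned long classid, struct walker *w)
        {
                if (w->count >= w->skip) {
                        if (w->fn(classid, w) < 0) {
                                w->stop = true;
                                return false;
                        }
                }
                w->count++;
                return true;
        }

        static int print_class(unsigned long classid, struct walker *w)
        {
                (void)w;
                printf("class %lu\n", classid);
                return 0;
        }

        int main(void)
        {
                struct walker w = { .skip = 1, .fn = print_class };

                for (unsigned long ntx = 0; ntx < 4; ntx++)
                        if (!walk_one(ntx + 1, &w))
                                break;
                return 0;
        }
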
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 6af6b95bdb67..16f9238aa51d 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -124,7 +124,6 @@ teql_reset(struct Qdisc *sch)
struct teql_sched_data *dat = qdisc_priv(sch);
skb_queue_purge(&dat->q);
- sch->q.qlen = 0;
}
static void
@@ -492,7 +491,7 @@ static int __init teql_init(void)
master = netdev_priv(dev);
- strlcpy(master->qops.id, dev->name, IFNAMSIZ);
+ strscpy(master->qops.id, dev->name, IFNAMSIZ);
err = register_qdisc(&master->qops);
if (err) {