Diffstat (limited to 'include/net/sch_generic.h'):
 include/net/sch_generic.h | 174 +++++++++++++++++++++++++++++++++----------
 1 file changed, 142 insertions(+), 32 deletions(-)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 4d736427a4cb..a2b38b3deeca 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -12,6 +12,7 @@
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
+#include <linux/mutex.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
@@ -24,6 +25,9 @@ struct bpf_flow_keys;
typedef int tc_setup_cb_t(enum tc_setup_type type,
void *type_data, void *cb_priv);
+typedef int tc_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
+ enum tc_setup_type type, void *type_data);
+
struct qdisc_rate_table {
struct tc_ratespec rate;
u32 data[256];
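For context, a driver registers a callback of the new tc_indr_block_bind_cb_t type to receive block offload requests for devices it does not itself own. A minimal sketch, with foo_priv and foo_setup_block as hypothetical stand-ins:

static int foo_indr_setup_tc_cb(struct net_device *dev, void *cb_priv,
				enum tc_setup_type type, void *type_data)
{
	struct foo_priv *priv = cb_priv;	/* hypothetical driver state */

	switch (type) {
	case TC_SETUP_BLOCK:
		/* foo_setup_block() is a hypothetical helper */
		return foo_setup_block(dev, priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}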
@@ -48,7 +52,10 @@ struct qdisc_size_table {
struct qdisc_skb_head {
struct sk_buff *head;
struct sk_buff *tail;
- __u32 qlen;
+ union {
+ u32 qlen;
+ atomic_t atomic_qlen;
+ };
spinlock_t lock;
};
@@ -175,6 +182,7 @@ static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
}
struct Qdisc_class_ops {
+ unsigned int flags;
/* Child qdisc manipulation */
struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);
int (*graft)(struct Qdisc *, unsigned long cl,
@@ -206,6 +214,13 @@ struct Qdisc_class_ops {
struct gnet_dump *);
};
+/* Qdisc_class_ops flag values */
+
+/* Implements API that doesn't require rtnl lock */
+enum qdisc_class_ops_flags {
+ QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
+};
+
struct Qdisc_ops {
struct Qdisc_ops *next;
const struct Qdisc_class_ops *cl_ops;
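A classful qdisc whose class operations are safe to run without the rtnl lock would advertise that through the new flags field; a minimal sketch (the "foo" ops and the elided callbacks are hypothetical):

static const struct Qdisc_class_ops foo_class_ops = {
	.flags		= QDISC_CLASS_OPS_DOIT_UNLOCKED,
	/* ... graft, leaf, find, walk and the other callbacks ... */
};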
@@ -269,19 +284,21 @@ struct tcf_proto_ops {
const struct tcf_proto *,
struct tcf_result *);
int (*init)(struct tcf_proto*);
- void (*destroy)(struct tcf_proto *tp,
+ void (*destroy)(struct tcf_proto *tp, bool rtnl_held,
struct netlink_ext_ack *extack);
void* (*get)(struct tcf_proto*, u32 handle);
+ void (*put)(struct tcf_proto *tp, void *f);
int (*change)(struct net *net, struct sk_buff *,
struct tcf_proto*, unsigned long,
u32 handle, struct nlattr **,
- void **, bool,
+ void **, bool, bool,
struct netlink_ext_ack *);
int (*delete)(struct tcf_proto *tp, void *arg,
- bool *last,
+ bool *last, bool rtnl_held,
struct netlink_ext_ack *);
- void (*walk)(struct tcf_proto*, struct tcf_walker *arg);
+ void (*walk)(struct tcf_proto *tp,
+ struct tcf_walker *arg, bool rtnl_held);
int (*reoffload)(struct tcf_proto *tp, bool add,
tc_setup_cb_t *cb, void *cb_priv,
struct netlink_ext_ack *extack);
@@ -294,12 +311,18 @@ struct tcf_proto_ops {
/* rtnetlink specific */
int (*dump)(struct net*, struct tcf_proto*, void *,
- struct sk_buff *skb, struct tcmsg*);
+ struct sk_buff *skb, struct tcmsg*,
+ bool);
int (*tmplt_dump)(struct sk_buff *skb,
struct net *net,
void *tmplt_priv);
struct module *owner;
+ int flags;
+};
+
+enum tcf_proto_ops_flags {
+ TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
};
struct tcf_proto {
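A classifier converted for unlocked operation both honours the new rtnl_held arguments and declares itself through the new flags field. A rough sketch, with all "foo" names hypothetical:

static void foo_destroy(struct tcf_proto *tp, bool rtnl_held,
			struct netlink_ext_ack *extack)
{
	/* May run without rtnl; private state is protected by tp->lock. */
}

static struct tcf_proto_ops cls_foo_ops __read_mostly = {
	.kind		= "foo",
	.destroy	= foo_destroy,
	/* ... remaining callbacks ... */
	.owner		= THIS_MODULE,
	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
};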
@@ -318,6 +341,12 @@ struct tcf_proto {
void *data;
const struct tcf_proto_ops *ops;
struct tcf_chain *chain;
+ /* Lock protects tcf_proto shared state and can be used by unlocked
+ * classifiers to protect their private data.
+ */
+ spinlock_t lock;
+ bool deleting;
+ refcount_t refcnt;
struct rcu_head rcu;
};
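The new spinlock gives rtnl-unlocked classifiers a way to serialize writers of their private state. A sketch, assuming a hypothetical foo classifier that keeps its filter list in tp->data:

struct foo_head {			/* hypothetical classifier state */
	struct list_head filters;
};

static void foo_filter_insert(struct tcf_proto *tp, struct list_head *node)
{
	struct foo_head *head = tp->data;

	spin_lock(&tp->lock);
	list_add_tail(node, &head->filters);
	spin_unlock(&tp->lock);
}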
@@ -337,6 +366,8 @@ struct qdisc_skb_cb {
typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
struct tcf_chain {
+ /* Protects filter_chain. */
+ struct mutex filter_chain_lock;
struct tcf_proto __rcu *filter_chain;
struct list_head list;
struct tcf_block *block;
@@ -344,11 +375,17 @@ struct tcf_chain {
unsigned int refcnt;
unsigned int action_refcnt;
bool explicitly_created;
+ bool flushing;
const struct tcf_proto_ops *tmplt_ops;
void *tmplt_priv;
+ struct rcu_head rcu;
};
struct tcf_block {
+ /* Lock protects tcf_block and lifetime-management data of chains
+ * attached to the block (refcnt, action_refcnt, explicitly_created).
+ */
+ struct mutex lock;
struct list_head chain_list;
u32 index; /* block index for shared blocks */
refcount_t refcnt;
@@ -366,6 +403,34 @@ struct tcf_block {
struct rcu_head rcu;
};
+#ifdef CONFIG_PROVE_LOCKING
+static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
+{
+ return lockdep_is_held(&chain->filter_chain_lock);
+}
+
+static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
+{
+ return lockdep_is_held(&tp->lock);
+}
+#else
+static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
+{
+ return true;
+}
+
+static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
+{
+ return true;
+}
+#endif /* #ifdef CONFIG_PROVE_LOCKING */
+
+#define tcf_chain_dereference(p, chain) \
+ rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))
+
+#define tcf_proto_dereference(p, tp) \
+ rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))
+
static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
{
if (*flags & TCA_CLS_FLAGS_IN_HW)
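With the helpers above, a writer holding the new chain mutex can dereference filter_chain without rtnl; a minimal sketch:

static unsigned int foo_count_filters(struct tcf_chain *chain)
{
	struct tcf_proto *tp;
	unsigned int n = 0;

	mutex_lock(&chain->filter_chain_lock);
	for (tp = tcf_chain_dereference(chain->filter_chain, chain);
	     tp; tp = tcf_chain_dereference(tp->next, chain))
		n++;
	mutex_unlock(&chain->filter_chain_lock);
	return n;
}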
@@ -405,27 +470,19 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
-static inline int qdisc_qlen_cpu(const struct Qdisc *q)
-{
- return this_cpu_ptr(q->cpu_qstats)->qlen;
-}
-
static inline int qdisc_qlen(const struct Qdisc *q)
{
return q->q.qlen;
}
-static inline int qdisc_qlen_sum(const struct Qdisc *q)
+static inline u32 qdisc_qlen_sum(const struct Qdisc *q)
{
- __u32 qlen = q->qstats.qlen;
- int i;
+ u32 qlen = q->qstats.qlen;
- if (q->flags & TCQ_F_NOLOCK) {
- for_each_possible_cpu(i)
- qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
- } else {
+ if (q->flags & TCQ_F_NOLOCK)
+ qlen += atomic_read(&q->q.atomic_qlen);
+ else
qlen += q->q.qlen;
- }
return qlen;
}
@@ -577,8 +634,31 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
-void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
- unsigned int len);
+void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
+#ifdef CONFIG_NET_SCHED
+int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
+ void *type_data);
+void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
+ struct Qdisc *new, struct Qdisc *old,
+ enum tc_setup_type type, void *type_data,
+ struct netlink_ext_ack *extack);
+#else
+static inline int
+qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
+ void *type_data)
+{
+ q->flags &= ~TCQ_F_OFFLOADED;
+ return 0;
+}
+
+static inline void
+qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
+ struct Qdisc *new, struct Qdisc *old,
+ enum tc_setup_type type, void *type_data,
+ struct netlink_ext_ack *extack)
+{
+}
+#endif
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops,
struct netlink_ext_ack *extack);
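As a usage sketch, an offload-capable qdisc would call the dump helper from its dump path so that TCQ_F_OFFLOADED tracks what the driver reports; the RED setup type and opt layout below stand in for whatever the qdisc actually offloads:

static int foo_dump_offload(struct Qdisc *sch,
			    struct tc_red_qopt_offload *opt)
{
	/* Refreshes TCQ_F_OFFLOADED from the driver's answer; a driver
	 * that does not support the request leaves the flag cleared.
	 */
	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_RED, opt);
}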
@@ -798,14 +878,14 @@ static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}
-static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
+static inline void qdisc_qstats_atomic_qlen_inc(struct Qdisc *sch)
{
- this_cpu_inc(sch->cpu_qstats->qlen);
+ atomic_inc(&sch->q.atomic_qlen);
}
-static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
+static inline void qdisc_qstats_atomic_qlen_dec(struct Qdisc *sch)
{
- this_cpu_dec(sch->cpu_qstats->qlen);
+ atomic_dec(&sch->q.atomic_qlen);
}
static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
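These replace the per-cpu qlen counters for lockless qdiscs: the enqueue side of a TCQ_F_NOLOCK qdisc bumps the shared atomic counter alongside the per-cpu backlog. Roughly (the ring handling is elided and "foo" is hypothetical):

static int foo_nolock_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			      struct sk_buff **to_free)
{
	/* ... place skb on the qdisc's lockless ring ... */
	qdisc_qstats_cpu_backlog_inc(sch, skb);
	qdisc_qstats_atomic_qlen_inc(sch);
	return NET_XMIT_SUCCESS;
}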
@@ -843,6 +923,41 @@ static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
sch->qstats.overlimits++;
}
+static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
+{
+ __u32 qlen = qdisc_qlen_sum(sch);
+
+ return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
+}
+
+static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
+ __u32 *backlog)
+{
+ struct gnet_stats_queue qstats = { 0 };
+ __u32 len = qdisc_qlen_sum(sch);
+
+ __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
+ *qlen = qstats.qlen;
+ *backlog = qstats.backlog;
+}
+
+static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
+{
+ __u32 qlen, backlog;
+
+ qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+ qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
+static inline void qdisc_purge_queue(struct Qdisc *sch)
+{
+ __u32 qlen, backlog;
+
+ qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+ qdisc_reset(sch);
+ qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
qh->head = NULL;
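Of the two helpers above, qdisc_tree_flush_backlog() only propagates the counters up the qdisc tree, while qdisc_purge_queue() also resets the queue itself. A class-delete path would typically want the latter, sketched here with a hypothetical foo_class:

struct foo_class {			/* hypothetical class state */
	struct Qdisc *qdisc;
};

static void foo_class_drain(struct Qdisc *sch, struct foo_class *cl)
{
	sch_tree_lock(sch);
	qdisc_purge_queue(cl->qdisc);
	sch_tree_unlock(sch);
}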
@@ -1026,13 +1141,8 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
sch_tree_lock(sch);
old = *pold;
*pold = new;
- if (old != NULL) {
- unsigned int qlen = old->q.qlen;
- unsigned int backlog = old->qstats.backlog;
-
- qdisc_reset(old);
- qdisc_tree_reduce_backlog(old, qlen, backlog);
- }
+	if (old != NULL)
+		qdisc_purge_queue(old);
sch_tree_unlock(sch);
return old;