author	Toke Høiland-Jørgensen <toke@toke.dk>	2018-07-06 17:37:19 +0200
committer	David S. Miller <davem@davemloft.net>	2018-07-10 20:06:34 -0700
commit	0c850344d3882886f842bf0b50a9ff23001adb7e (patch)
tree	e01420f0959511764b36e4fb3fe72e55ab6507f5 /net
parent	a729b7f0bd5bf4919306556aed614438f5174537 (diff)
sch_cake: Conditionally split GSO segments
At lower bandwidths, the transmission time of a single GSO segment can add
an unacceptable amount of latency due to HOL blocking. Furthermore, with a
software shaper, any tuning mechanism employed by the kernel to control the
maximum size of GSO segments is thrown off by the artificial limit on
bandwidth.

For this reason, we split GSO segments into their individual packets iff
the shaper is active and configured to a bandwidth <= 1 Gbps.

Signed-off-by: Toke Høiland-Jørgensen <toke@toke.dk>
Signed-off-by: David S. Miller <davem@davemloft.net>
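To put rough numbers on the HOL-blocking claim (the rates below are
illustrative, not taken from the patch): a maximally-sized GSO super-packet
is 64 KiB, and its serialization delay grows linearly as the shaper rate
drops. A minimal standalone sketch:

#include <stdio.h>

int main(void)
{
	/* Serialization delay of one 64 KiB GSO super-packet at a few
	 * example shaper rates.
	 */
	const double gso_bytes = 65536.0;
	const double rates_bps[] = { 1e9, 100e6, 10e6, 1e6 };
	const char *labels[] = { "1 Gbps", "100 Mbps", "10 Mbps", "1 Mbps" };

	for (int i = 0; i < 4; i++)
		printf("%-8s -> %7.2f ms of HOL blocking\n", labels[i],
		       gso_bytes * 8.0 / rates_bps[i] * 1000.0);
	return 0;
}

At 1 Gbps the super-packet occupies the link for about 0.5 ms; at 10 Mbps
it is over 50 ms, an order of magnitude beyond CAKE's default 5 ms CoDel
target. That is the rationale for enabling splitting at or below 1 Gbps.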
Diffstat (limited to 'net')
-rw-r--r--	net/sched/sch_cake.c	99
1 file changed, 73 insertions(+), 26 deletions(-)
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 199670e1eb94..30695691e9ff 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -80,6 +80,7 @@
 #define CAKE_QUEUES (1024)
 #define CAKE_FLOW_MASK 63
 #define CAKE_FLOW_NAT_FLAG 64
+#define CAKE_SPLIT_GSO_THRESHOLD (125000000) /* 1Gbps */
 
 /* struct cobalt_params - contains codel and blue parameters
  * @interval:	codel initial drop rate
@@ -1650,36 +1651,73 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	if (unlikely(len > b->max_skblen))
 		b->max_skblen = len;
 
-	cobalt_set_enqueue_time(skb, now);
-	get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
-	flow_queue_add(flow, skb);
-
-	if (q->ack_filter)
-		ack = cake_ack_filter(q, flow);
+	if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
+		struct sk_buff *segs, *nskb;
+		netdev_features_t features = netif_skb_features(skb);
+		unsigned int slen = 0;
+
+		segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+		if (IS_ERR_OR_NULL(segs))
+			return qdisc_drop(skb, sch, to_free);
+
+		while (segs) {
+			nskb = segs->next;
+			segs->next = NULL;
+			qdisc_skb_cb(segs)->pkt_len = segs->len;
+			cobalt_set_enqueue_time(segs, now);
+			get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
+									  segs);
+			flow_queue_add(flow, segs);
+
+			sch->q.qlen++;
+			slen += segs->len;
+			q->buffer_used += segs->truesize;
+			b->packets++;
+			segs = nskb;
+		}
 
-	if (ack) {
-		b->ack_drops++;
-		sch->qstats.drops++;
-		b->bytes += qdisc_pkt_len(ack);
-		len -= qdisc_pkt_len(ack);
-		q->buffer_used += skb->truesize - ack->truesize;
-		if (q->rate_flags & CAKE_FLAG_INGRESS)
-			cake_advance_shaper(q, b, ack, now, true);
+		/* stats */
+		b->bytes += slen;
+		b->backlogs[idx] += slen;
+		b->tin_backlog += slen;
+		sch->qstats.backlog += slen;
+		q->avg_window_bytes += slen;
 
-		qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
-		consume_skb(ack);
+		qdisc_tree_reduce_backlog(sch, 1, len);
+		consume_skb(skb);
 	} else {
-		sch->q.qlen++;
-		q->buffer_used += skb->truesize;
-	}
+		/* not splitting */
+		cobalt_set_enqueue_time(skb, now);
+		get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
+		flow_queue_add(flow, skb);
+
+		if (q->ack_filter)
+			ack = cake_ack_filter(q, flow);
+
+		if (ack) {
+			b->ack_drops++;
+			sch->qstats.drops++;
+			b->bytes += qdisc_pkt_len(ack);
+			len -= qdisc_pkt_len(ack);
+			q->buffer_used += skb->truesize - ack->truesize;
+			if (q->rate_flags & CAKE_FLAG_INGRESS)
+				cake_advance_shaper(q, b, ack, now, true);
+
+			qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
+			consume_skb(ack);
+		} else {
+			sch->q.qlen++;
+			q->buffer_used += skb->truesize;
+		}
 
-	/* stats */
-	b->packets++;
-	b->bytes += len;
-	b->backlogs[idx] += len;
-	b->tin_backlog += len;
-	sch->qstats.backlog += len;
-	q->avg_window_bytes += len;
+		/* stats */
+		b->packets++;
+		b->bytes += len;
+		b->backlogs[idx] += len;
+		b->tin_backlog += len;
+		sch->qstats.backlog += len;
+		q->avg_window_bytes += len;
+	}
 
 	if (q->overflow_timeout)
 		cake_heapify_up(q, b->overflow_idx[idx]);
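The heart of the hunk above is the while (segs) loop: skb_gso_segment()
turns the super-packet into a list of ordinary packets, each of which gets
its own enqueue timestamp and adjusted length, while slen accumulates the
total bytes so the tin statistics can be updated once after the loop. The
userspace sketch below models only that accounting; the mss and hdr values
are hypothetical examples, not values from the patch:

#include <stdio.h>

int main(void)
{
	const unsigned int gso_len = 65536; /* super-packet payload */
	const unsigned int mss = 1448;      /* illustrative TCP MSS */
	const unsigned int hdr = 66;        /* illustrative per-segment headers */
	unsigned int slen = 0, packets = 0;

	for (unsigned int off = 0; off < gso_len; off += mss) {
		unsigned int seg = gso_len - off < mss ? gso_len - off : mss;

		slen += seg + hdr; /* counterpart of slen += segs->len */
		packets++;         /* counterpart of b->packets++ */
	}
	printf("%u segments, %u backlog bytes (vs ~%u unsplit)\n",
	       packets, slen, gso_len + hdr);
	return 0;
}

Because each segment regains its own headers, the split segments account
for more backlog bytes than the original skb did, which is why the loop
recomputes per-segment lengths from segs->len and cake_overhead() rather
than reusing len.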
@@ -2531,6 +2569,11 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
 	if (tb[TCA_CAKE_MEMORY])
 		q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]);
 
+	if (q->rate_bps && q->rate_bps <= CAKE_SPLIT_GSO_THRESHOLD)
+		q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
+	else
+		q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO;
+
 	if (q->tins) {
 		sch_tree_lock(sch);
 		cake_reconfigure(sch);
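Note that in cake_change() the flag is derived rather than user-set:
splitting engages iff a shaper rate is configured (rate_bps != 0) and that
rate is at or below CAKE_SPLIT_GSO_THRESHOLD. Rates here are in bytes per
second, as the 1Gbps comment on the threshold implies. A standalone sketch
of the decision for a few illustrative rates:

#include <stdio.h>

#define CAKE_SPLIT_GSO_THRESHOLD (125000000ULL) /* bytes/s == 1 Gbps */

int main(void)
{
	/* 0 means no shaper configured; the rest are example rates */
	const unsigned long long rates[] = { 0, 1250000, 12500000,
					     125000000, 1250000000 };

	for (int i = 0; i < 5; i++) {
		int split = rates[i] && rates[i] <= CAKE_SPLIT_GSO_THRESHOLD;

		printf("rate %10llu B/s (%6.0f Mbit) -> split_gso=%d\n",
		       rates[i], rates[i] * 8 / 1e6, split);
	}
	return 0;
}

So, for example, configuring something like
tc qdisc replace dev eth0 root cake bandwidth 100Mbit lands well below the
threshold and turns splitting on, while running CAKE unshaped leaves GSO
super-packets intact.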
@@ -2686,6 +2729,10 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
 	if (nla_put_u32(skb, TCA_CAKE_MPU, q->rate_mpu))
 		goto nla_put_failure;
 
+	if (nla_put_u32(skb, TCA_CAKE_SPLIT_GSO,
+			!!(q->rate_flags & CAKE_FLAG_SPLIT_GSO)))
+		goto nla_put_failure;
+
 	return nla_nest_end(skb, opts);
 
 nla_put_failure: