| author | Stephen Hemminger <shemminger@osdl.org> | 2005-05-03 16:24:32 -0700 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2005-05-03 16:24:32 -0700 |
| commit | 771018e76aaa6474be20a53c20458bcae8b00485 (patch) | |
| tree | acbd1186524815205337dd57322a4926cadc3554 /net/sched | |
| parent | 8cbe1d46d69f9e2c49f284fe0e9aee3387bd2c71 (diff) | |
[PKT_SCHED]: netem: make qdisc friendly to outer disciplines
Netem currently dumps packets into the queue when the timer expires. This
patch makes it work by self-clocking (more like TBF): packets are moved out
of the delay queue from the enqueue/dequeue paths, and the timer merely
unthrottles the qdisc. It also fixes a bug when a 0 delay is requested
(only doing loss or duplication).
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
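The self-clocking idea is easier to see outside the kernel. The following is a minimal user-space C sketch, not kernel code: the fixed-size queue and the `enqueue`/`dequeue` names are illustrative stand-ins. Instead of a timer dumping packets into the inner qdisc, the dequeue path itself checks whether the head packet is due, and reports the residual delay when it is not:

```c
/* Minimal user-space sketch of the self-clocking dequeue pattern
 * this patch adopts (illustrative only; not the kernel code).
 * A packet is held with an absolute time_to_send; dequeue peeks
 * the head and either releases it or reports "throttled" along
 * with how long the caller should wait before retrying.
 */
#include <stdio.h>

struct pkt {
	long time_to_send;	/* absolute due time, arbitrary units */
	int id;
};

/* Fixed-size FIFO standing in for the private delayed queue. */
#define QLEN 8
static struct pkt queue[QLEN];
static int head, count;

static int enqueue(struct pkt p)
{
	if (count == QLEN)
		return -1;			/* over limit: drop */
	queue[(head + count++) % QLEN] = p;	/* tail insert keeps order */
	return 0;
}

/* Self-clocking dequeue: returns a packet id if one is due at `now`,
 * else -1 and sets *wait to the remaining delay (the caller would
 * arm a timer for that long, as netem_run() does with mod_timer). */
static int dequeue(long now, long *wait)
{
	struct pkt *p;

	if (count == 0)
		return -1;
	p = &queue[head];
	if (p->time_to_send > now) {
		*wait = p->time_to_send - now;	/* still throttled */
		return -1;
	}
	head = (head + 1) % QLEN;
	count--;
	return p->id;
}

int main(void)
{
	long now, wait;

	enqueue((struct pkt){ .time_to_send = 3, .id = 1 });
	enqueue((struct pkt){ .time_to_send = 5, .id = 2 });

	for (now = 0; now <= 6; now++) {
		int id = dequeue(now, &wait);
		if (id >= 0)
			printf("t=%ld: send packet %d\n", now, id);
		else if (count)
			printf("t=%ld: throttled, retry in %ld\n", now, wait);
	}
	return 0;
}
```

This is the shape netem_run() takes in the patch: peek at the head of the delay queue, arm the timer for the remaining delay if the packet is not yet due, otherwise move it to the inner qdisc.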
Diffstat (limited to 'net/sched')
-rw-r--r-- | net/sched/sch_netem.c | 113 |
1 file changed, 67 insertions(+), 46 deletions(-)
```diff
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 31c29deb139d..864b8d353ffa 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -138,38 +138,78 @@ static long tabledist(unsigned long mu, long sigma,
 }
 
 /* Put skb in the private delayed queue. */
-static int delay_skb(struct Qdisc *sch, struct sk_buff *skb)
+static int netem_delay(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
-	struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
 	psched_tdiff_t td;
 	psched_time_t now;
 
 	PSCHED_GET_TIME(now);
 	td = tabledist(q->latency, q->jitter, &q->delay_cor, q->delay_dist);
-	PSCHED_TADD2(now, td, cb->time_to_send);
 
 	/* Always queue at tail to keep packets in order */
 	if (likely(q->delayed.qlen < q->limit)) {
+		struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
+
+		PSCHED_TADD2(now, td, cb->time_to_send);
+
+		pr_debug("netem_delay: skb=%p now=%llu tosend=%llu\n", skb,
+			 now, cb->time_to_send);
+
 		__skb_queue_tail(&q->delayed, skb);
-		if (!timer_pending(&q->timer)) {
-			q->timer.expires = jiffies + PSCHED_US2JIFFIE(td);
-			add_timer(&q->timer);
-		}
 		return NET_XMIT_SUCCESS;
 	}
 
+	pr_debug("netem_delay: queue over limit %d\n", q->limit);
+
 	sch->qstats.overlimits++;
 	kfree_skb(skb);
 	return NET_XMIT_DROP;
 }
 
+/*
+ * Move a packet that is ready to send from the delay holding
+ * list to the underlying qdisc.
+ */
+static int netem_run(struct Qdisc *sch)
+{
+	struct netem_sched_data *q = qdisc_priv(sch);
+	struct sk_buff *skb;
+	psched_time_t now;
+
+	PSCHED_GET_TIME(now);
+
+	skb = skb_peek(&q->delayed);
+	if (skb) {
+		const struct netem_skb_cb *cb
+			= (const struct netem_skb_cb *)skb->cb;
+		long delay
+			= PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
+		pr_debug("netem_run: skb=%p delay=%ld\n", skb, delay);
+
+		/* if more time remaining? */
+		if (delay > 0) {
+			mod_timer(&q->timer, jiffies + delay);
+			return 1;
+		}
+
+		__skb_unlink(skb, &q->delayed);
+
+		if (q->qdisc->enqueue(skb, q->qdisc)) {
+			sch->q.qlen--;
+			sch->qstats.drops++;
+		}
+	}
+
+	return 0;
+}
+
 static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb2;
 	int ret;
 
-	pr_debug("netem_enqueue skb=%p @%lu\n", skb, jiffies);
+	pr_debug("netem_enqueue skb=%p\n", skb);
 
 	/* Random packet drop 0 => none, ~0 => all */
 	if (q->loss && q->loss >= get_crandom(&q->loss_cor)) {
@@ -184,7 +224,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	    && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
 		pr_debug("netem_enqueue: dup %p\n", skb2);
 
-		if (delay_skb(sch, skb2)) {
+		if (netem_delay(sch, skb2)) {
 			sch->q.qlen++;
 			sch->bstats.bytes += skb2->len;
 			sch->bstats.packets++;
@@ -202,7 +242,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		ret = q->qdisc->enqueue(skb, q->qdisc);
 	} else {
 		q->counter = 0;
-		ret = delay_skb(sch, skb);
+		ret = netem_delay(sch, skb);
+		netem_run(sch);
 	}
 
 	if (likely(ret == NET_XMIT_SUCCESS)) {
@@ -241,56 +282,35 @@ static unsigned int netem_drop(struct Qdisc* sch)
 	return len;
 }
 
-/* Dequeue packet.
- * Move all packets that are ready to send from the delay holding
- * list to the underlying qdisc, then just call dequeue
- */
 static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
+	int pending;
+
+	pending = netem_run(sch);
 
 	skb = q->qdisc->dequeue(q->qdisc);
-	if (skb)
+	if (skb) {
+		pr_debug("netem_dequeue: return skb=%p\n", skb);
 		sch->q.qlen--;
+		sch->flags &= ~TCQ_F_THROTTLED;
+	}
+	else if (pending) {
+		pr_debug("netem_dequeue: throttling\n");
+		sch->flags |= TCQ_F_THROTTLED;
+	}
+
 	return skb;
 }
 
 static void netem_watchdog(unsigned long arg)
 {
 	struct Qdisc *sch = (struct Qdisc *)arg;
-	struct netem_sched_data *q = qdisc_priv(sch);
-	struct net_device *dev = sch->dev;
-	struct sk_buff *skb;
-	psched_time_t now;
 
-	pr_debug("netem_watchdog: fired @%lu\n", jiffies);
-
-	spin_lock_bh(&dev->queue_lock);
-	PSCHED_GET_TIME(now);
-
-	while ((skb = skb_peek(&q->delayed)) != NULL) {
-		const struct netem_skb_cb *cb
-			= (const struct netem_skb_cb *)skb->cb;
-		long delay
-			= PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
-		pr_debug("netem_watchdog: skb %p@%lu %ld\n",
-			 skb, jiffies, delay);
-
-		/* if more time remaining? */
-		if (delay > 0) {
-			mod_timer(&q->timer, jiffies + delay);
-			break;
-		}
-		__skb_unlink(skb, &q->delayed);
-
-		if (q->qdisc->enqueue(skb, q->qdisc)) {
-			sch->q.qlen--;
-			sch->qstats.drops++;
-		}
-	}
-	qdisc_run(dev);
-	spin_unlock_bh(&dev->queue_lock);
+	pr_debug("netem_watchdog qlen=%d\n", sch->q.qlen);
+	sch->flags &= ~TCQ_F_THROTTLED;
+	netif_schedule(sch->dev);
 }
 
 static void netem_reset(struct Qdisc *sch)
@@ -301,6 +321,7 @@ static void netem_reset(struct Qdisc *sch)
 {
 	skb_queue_purge(&q->delayed);
 	sch->q.qlen = 0;
+	sch->flags &= ~TCQ_F_THROTTLED;
 	del_timer_sync(&q->timer);
 }
```
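The other half of the change is the throttling handshake visible in netem_dequeue() and the slimmed-down netem_watchdog(). Below is a compact user-space model of that control flow; `throttled` and `kick_device()` are illustrative stand-ins for TCQ_F_THROTTLED and netif_schedule(), and the single hard-coded packet is an assumption made to keep the sketch short:

```c
/* Illustrative user-space model of the throttling handshake the
 * patch introduces (not kernel API): dequeue refuses to spin when
 * the head packet is not yet due, and the watchdog merely clears
 * the flag and re-kicks the transmit path instead of moving
 * packets itself.
 */
#include <stdio.h>
#include <stdbool.h>

static bool throttled;
static long head_due = 4;	/* due time of the single queued packet */

static void kick_device(long now);

/* Timer callback: like the new netem_watchdog(), it does no queue
 * work of its own -- it just lifts the throttle and reschedules. */
static void watchdog(long now)
{
	printf("t=%ld: watchdog fires, clearing throttle\n", now);
	throttled = false;
	kick_device(now);
}

/* Transmit path: like netem_dequeue(), either sends the head
 * packet or sets the throttle; the armed timer is modelled here
 * by the caller invoking watchdog() at the right time. */
static void kick_device(long now)
{
	if (throttled)
		return;			/* device layer skips throttled qdiscs */
	if (now < head_due) {
		printf("t=%ld: packet due at t=%ld, throttling\n",
		       now, head_due);
		throttled = true;	/* timer armed for head_due */
		return;
	}
	printf("t=%ld: packet sent\n", now);
}

int main(void)
{
	kick_device(0);		/* too early: throttles, arms timer */
	kick_device(2);		/* ignored while throttled */
	watchdog(head_due);	/* timer expiry clears flag, re-kicks */
	return 0;
}
```

The design point mirrors TBF: the timer no longer moves packets itself, it only clears the throttle and reschedules the device, so outer disciplines always see packets leave through the normal dequeue path.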