author	Eric Dumazet <edumazet@google.com>	2012-12-28 06:06:37 +0000
committer	David S. Miller <davem@davemloft.net>	2012-12-28 15:25:19 -0800
commit	b2111724a639ec31a19fdca62ea3a0a222d59d11 (patch)
tree	0d707599721ae209b176feab8ce41b7e63191e78 /net
parent	210ab6656fa8c49d7238c13f85ed551ebab94fb0 (diff)
net: use per task frag allocator in skb_append_datato_frags
Use the new per task frag allocator in skb_append_datato_frags()
to reduce the number of frags and page allocator overhead.
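
For readers unfamiliar with the per task frag allocator, the idea can be sketched in plain userspace C: keep one buffer per task, hand out sub-page chunks by bumping an offset, and only go back to the allocator when the buffer is exhausted. This is an illustrative sketch only, not part of the commit; the names frag_pool, task_frag_alloc and FRAG_POOL_SIZE are hypothetical, and the kernel equivalents are current->task_frag and sk_page_frag_refill().

	/* Userspace sketch of a per-task page-fragment allocator.
	 * Hypothetical names; not kernel code.
	 */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define FRAG_POOL_SIZE (32 * 1024)	/* roughly the kernel's 32KB task frag */

	struct frag_pool {
		char *page;	/* current backing buffer */
		size_t offset;	/* first free byte in the buffer */
		size_t size;	/* total size of the buffer */
	};

	static __thread struct frag_pool task_frag;	/* one pool per task/thread */

	/* Refill only when the pool is exhausted, so most allocations are a
	 * pointer bump instead of a trip to the page allocator.
	 */
	static int frag_pool_refill(struct frag_pool *pool)
	{
		if (pool->page && pool->offset < pool->size)
			return 1;	/* still room in the current buffer */

		/* The kernel keeps the old page alive through per-fragment
		 * get_page() references; this sketch simply drops it.
		 */
		pool->page = malloc(FRAG_POOL_SIZE);
		if (!pool->page)
			return 0;
		pool->offset = 0;
		pool->size = FRAG_POOL_SIZE;
		return 1;
	}

	/* Hand out a fragment of at most 'len' bytes from the per-task pool. */
	static void *task_frag_alloc(size_t len, size_t *got)
	{
		void *p;

		if (!frag_pool_refill(&task_frag))
			return NULL;

		*got = len < task_frag.size - task_frag.offset ?
		       len : task_frag.size - task_frag.offset;
		p = task_frag.page + task_frag.offset;
		task_frag.offset += *got;
		return p;
	}

	int main(void)
	{
		size_t got;
		char *chunk = task_frag_alloc(1500, &got);

		if (chunk) {
			memset(chunk, 0, got);
			printf("got %zu bytes, pool offset now %zu\n",
			       got, task_frag.offset);
		}
		return 0;
	}

Compared with allocating a fresh page for every chunk, consecutive small copies from the same task mostly share one buffer, which is exactly the overhead the patch removes from skb_append_datato_frags().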
Tested:
ifconfig lo mtu 16436
perf record netperf -t UDP_STREAM ; perf report
before:
Throughput: 32928 Mbit/s
51.79% netperf [kernel.kallsyms] [k] copy_user_generic_string
5.98% netperf [kernel.kallsyms] [k] __alloc_pages_nodemask
5.58% netperf [kernel.kallsyms] [k] get_page_from_freelist
5.01% netperf [kernel.kallsyms] [k] __rmqueue
3.74% netperf [kernel.kallsyms] [k] skb_append_datato_frags
1.87% netperf [kernel.kallsyms] [k] prep_new_page
1.42% netperf [kernel.kallsyms] [k] next_zones_zonelist
1.28% netperf [kernel.kallsyms] [k] __inc_zone_state
1.26% netperf [kernel.kallsyms] [k] alloc_pages_current
0.78% netperf [kernel.kallsyms] [k] sock_alloc_send_pskb
0.74% netperf [kernel.kallsyms] [k] udp_sendmsg
0.72% netperf [kernel.kallsyms] [k] zone_watermark_ok
0.68% netperf [kernel.kallsyms] [k] __cpuset_node_allowed_softwall
0.67% netperf [kernel.kallsyms] [k] fib_table_lookup
0.60% netperf [kernel.kallsyms] [k] memcpy_fromiovecend
0.55% netperf [kernel.kallsyms] [k] __udp4_lib_lookup
after:
Throughput: 47185 Mbit/s
61.74% netperf [kernel.kallsyms] [k] copy_user_generic_string
2.07% netperf [kernel.kallsyms] [k] prep_new_page
1.98% netperf [kernel.kallsyms] [k] skb_append_datato_frags
1.02% netperf [kernel.kallsyms] [k] sock_alloc_send_pskb
0.97% netperf [kernel.kallsyms] [k] enqueue_task_fair
0.97% netperf [kernel.kallsyms] [k] udp_sendmsg
0.91% netperf [kernel.kallsyms] [k] __ip_route_output_key
0.88% netperf [kernel.kallsyms] [k] __netif_receive_skb
0.87% netperf [kernel.kallsyms] [k] fib_table_lookup
0.85% netperf [kernel.kallsyms] [k] resched_task
0.78% netperf [kernel.kallsyms] [k] __udp4_lib_lookup
0.77% netperf [kernel.kallsyms] [k] _raw_spin_lock_irqsave
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--	net/core/skbuff.c	43
1 file changed, 16 insertions(+), 27 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3ab989b0de42..ec8737ec59b5 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2686,48 +2686,37 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
 					int len, int odd, struct sk_buff *skb),
 			void *from, int length)
 {
-	int frg_cnt = 0;
-	skb_frag_t *frag = NULL;
-	struct page *page = NULL;
-	int copy, left;
+	int frg_cnt = skb_shinfo(skb)->nr_frags;
+	int copy;
 	int offset = 0;
 	int ret;
+	struct page_frag *pfrag = &current->task_frag;
 
 	do {
 		/* Return error if we don't have space for new frag */
-		frg_cnt = skb_shinfo(skb)->nr_frags;
 		if (frg_cnt >= MAX_SKB_FRAGS)
-			return -EFAULT;
-
-		/* allocate a new page for next frag */
-		page = alloc_pages(sk->sk_allocation, 0);
+			return -EMSGSIZE;
 
-		/* If alloc_page fails just return failure and caller will
-		 * free previous allocated pages by doing kfree_skb()
-		 */
-		if (page == NULL)
+		if (!sk_page_frag_refill(sk, pfrag))
 			return -ENOMEM;
 
-		/* initialize the next frag */
-		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
-		skb->truesize += PAGE_SIZE;
-		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
-
-		/* get the new initialized frag */
-		frg_cnt = skb_shinfo(skb)->nr_frags;
-		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
-
 		/* copy the user data to page */
-		left = PAGE_SIZE - frag->page_offset;
-		copy = (length > left)? left : length;
+		copy = min_t(int, length, pfrag->size - pfrag->offset);
 
-		ret = getfrag(from, skb_frag_address(frag) + skb_frag_size(frag),
-			    offset, copy, 0, skb);
+		ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
+			      offset, copy, 0, skb);
 		if (ret < 0)
 			return -EFAULT;
 
 		/* copy was successful so update the size parameters */
-		skb_frag_size_add(frag, copy);
+		skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
+				   copy);
+		frg_cnt++;
+		pfrag->offset += copy;
+		get_page(pfrag->page);
+
+		skb->truesize += copy;
+		atomic_add(copy, &sk->sk_wmem_alloc);
 		skb->len += copy;
 		skb->data_len += copy;
 		offset += copy;
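
Not part of the commit, but useful context for the bottom of the hunk: after this change several skb fragments can point into the same task page at different offsets, so each fragment takes its own page reference (the get_page() call) and truesize/sk_wmem_alloc are charged only the bytes actually copied rather than a full PAGE_SIZE. A minimal userspace sketch of that reference-counted sharing, with hypothetical names (shared_page, frag, page_get, page_put):

	/* Userspace sketch of several fragments sharing one refcounted page,
	 * loosely mirroring what the patched loop does with get_page().
	 * All names here are hypothetical, not kernel APIs.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	struct shared_page {
		char *data;
		int refcount;
	};

	struct frag {
		struct shared_page *page;
		size_t offset;
		size_t len;
	};

	static void page_get(struct shared_page *p)
	{
		p->refcount++;
	}

	static void page_put(struct shared_page *p)
	{
		if (--p->refcount == 0) {
			free(p->data);
			free(p);
		}
	}

	int main(void)
	{
		struct shared_page *p = malloc(sizeof(*p));
		struct frag frags[3];
		size_t offset = 0;
		int i;

		p->data = malloc(4096);
		p->refcount = 1;	/* reference held by the "allocator" */

		/* Carve three fragments out of the same page; each takes its
		 * own reference, like get_page() in the patch.
		 */
		for (i = 0; i < 3; i++) {
			frags[i].page = p;
			frags[i].offset = offset;
			frags[i].len = 1000;
			page_get(p);
			offset += 1000;
		}

		/* Dropping the fragments one by one frees the page only when
		 * the last reference goes away.
		 */
		for (i = 0; i < 3; i++)
			page_put(frags[i].page);
		page_put(p);	/* drop the allocator's own reference */

		printf("page freed after last reference\n");
		return 0;
	}

Because the backing page lives until its last reference is dropped, one page can be reused across many fragments without any use-after-free, which is what makes the per-task pool safe to share.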