author     David S. Miller <davem@davemloft.net>  2021-10-26 14:45:12 +0100
committer  David S. Miller <davem@davemloft.net>  2021-10-26 14:45:12 +0100
commit     3247e3ffafd91b231d5def0cc62f92117671d01f
tree       fea73bdcb45c78a1bc4090467092a119ded086b4
parent     d18785e213866935b4c3dc0c33c3e18801ce0ce8
parent     c4322884ed2132beee95a16234035ad7cc991f09
Merge branch 'tcp_stream_alloc_skb'
Eric Dumazet says:
====================
tcp: tcp_stream_alloc_skb() changes
sk_stream_alloc_skb() is only used by TCP.
Rename it to tcp_stream_alloc_skb() and apply two small
optimizations: allocate using the compile-time constant MAX_TCP_HEADER
instead of sk->sk_prot->max_header, and drop the ALIGN(size, 4) of the
requested size.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
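
A minimal userspace sketch of why the constant swap helps (the struct
layouts and the 320-byte value here are illustrative assumptions, not the
kernel's definitions): the old code chases sk->sk_prot->max_header through
two dependent pointer loads at runtime, while MAX_TCP_HEADER is a
preprocessor constant that folds into an immediate add at compile time.

#include <stdio.h>

#define MAX_TCP_HEADER 320	/* illustrative; the real value is config-dependent */

struct proto { int max_header; };
struct sock  { struct proto *sk_prot; };

static int alloc_size_old(const struct sock *sk, int size)
{
	/* old path: load sk->sk_prot, then ->max_header, then add */
	return size + sk->sk_prot->max_header;
}

static int alloc_size_new(int size)
{
	/* new path: the compiler folds the constant into the add */
	return size + MAX_TCP_HEADER;
}

int main(void)
{
	struct proto tcp = { .max_header = MAX_TCP_HEADER };
	struct sock sk = { .sk_prot = &tcp };

	printf("old=%d new=%d\n", alloc_size_old(&sk, 100), alloc_size_new(100));
	return 0;
}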
 include/net/sock.h    |  3 ---
 include/net/tcp.h     |  2 ++
 net/ipv4/tcp.c        | 19 ++++++++-----------
 net/ipv4/tcp_output.c | 10 +++++-----
 4 files changed, 15 insertions(+), 19 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index b76be30674ef..ff4e62aa62e5 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2422,9 +2422,6 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
 	WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
 }
 
-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
-				    bool force_schedule);
-
 /**
  * sk_page_frag - return an appropriate page_frag
  * @sk: socket
diff --git a/include/net/tcp.h b/include/net/tcp.h
index d62467a0094f..701587af6852 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -337,6 +337,8 @@ void tcp_twsk_destructor(struct sock *sk);
 ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
 			struct pipe_inode_info *pipe, size_t len,
 			unsigned int flags);
+struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+				     bool force_schedule);
 
 void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
 static inline void tcp_dec_quickack_mode(struct sock *sk,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 56ff7c746f88..d0b848ff5c0f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -856,18 +856,15 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 }
 EXPORT_SYMBOL(tcp_splice_read);
 
-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
-				    bool force_schedule)
+struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+				     bool force_schedule)
 {
 	struct sk_buff *skb;
 
-	/* The TCP header must be at least 32-bit aligned.  */
-	size = ALIGN(size, 4);
-
 	if (unlikely(tcp_under_memory_pressure(sk)))
 		sk_mem_reclaim_partial(sk);
 
-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+	skb = alloc_skb_fclone(size + MAX_TCP_HEADER, gfp);
 	if (likely(skb)) {
 		bool mem_scheduled;
 
@@ -878,7 +875,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
 			mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
 		}
 		if (likely(mem_scheduled)) {
-			skb_reserve(skb, sk->sk_prot->max_header);
+			skb_reserve(skb, MAX_TCP_HEADER);
 			/*
 			 * Make sure that we have exactly size bytes
 			 * available to the caller, no more, no less.
@@ -960,8 +957,8 @@ new_segment:
 		if (!sk_stream_memory_free(sk))
 			return NULL;
 
-		skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
-					  tcp_rtx_and_write_queues_empty(sk));
+		skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation,
+					   tcp_rtx_and_write_queues_empty(sk));
 		if (!skb)
 			return NULL;
 
@@ -1289,8 +1286,8 @@ new_segment:
 				goto restart;
 			}
 			first_skb = tcp_rtx_and_write_queues_empty(sk);
-			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
-						  first_skb);
+			skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation,
+						   first_skb);
 			if (!skb)
 				goto wait_for_space;
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3a01e5593a17..c0c55a8be8f7 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1564,7 +1564,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
 		return -ENOMEM;
 
 	/* Get a new skb... force flag on.  */
-	buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
+	buff = tcp_stream_alloc_skb(sk, nsize, gfp, true);
 	if (!buff)
 		return -ENOMEM; /* We'll just try again later. */
 	skb_copy_decrypted(buff, skb);
@@ -2121,7 +2121,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 		return tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
 				    skb, len, mss_now, gfp);
 
-	buff = sk_stream_alloc_skb(sk, 0, gfp, true);
+	buff = tcp_stream_alloc_skb(sk, 0, gfp, true);
 	if (unlikely(!buff))
 		return -ENOMEM;
 	skb_copy_decrypted(buff, skb);
@@ -2388,7 +2388,7 @@ static int tcp_mtu_probe(struct sock *sk)
 		return -1;
 
 	/* We're allowed to probe.  Build it now. */
-	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
+	nskb = tcp_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
 	if (!nskb)
 		return -1;
 	sk_wmem_queued_add(sk, nskb->truesize);
@@ -3754,7 +3754,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
 	/* limit to order-0 allocations */
 	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
 
-	syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false);
+	syn_data = tcp_stream_alloc_skb(sk, space, sk->sk_allocation, false);
 	if (!syn_data)
 		goto fallback;
 	syn_data->ip_summed = CHECKSUM_PARTIAL;
@@ -3835,7 +3835,7 @@ int tcp_connect(struct sock *sk)
 		return 0;
 	}
 
-	buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
+	buff = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
 	if (unlikely(!buff))
 		return -ENOBUFS;
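
A note on the force_schedule argument threaded through the call sites above:
callers pass true when the write and rtx queues are empty (tcp_connect(),
the first skb of a write) or when splitting an skb whose memory is already
charged ("force flag on" in tcp_fragment(), tso_fragment()), so the socket
can always make forward progress under memory pressure; opportunistic paths
(tcp_mtu_probe(), tcp_send_syn_data()) pass false and tolerate failure. The
hunk context above shows only the non-forced arm (sk_wmem_schedule()); the
toy userspace model below sketches that decision under this reading, with
the forced arm's behavior an assumption rather than the kernel's code.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the kernel's accounting helpers; bodies are
 * illustrative assumptions, not the real implementations. */
static bool under_pressure = true;

static bool wmem_schedule(int truesize)
{
	return !under_pressure;	/* normal path: may fail under pressure */
}

static void forced_mem_schedule(int truesize)
{
	/* forced path: charge unconditionally, never fails */
}

static bool schedule_skb_mem(int truesize, bool force_schedule)
{
	if (force_schedule) {
		forced_mem_schedule(truesize);
		return true;	/* empty-queue callers must make progress */
	}
	return wmem_schedule(truesize);
}

int main(void)
{
	printf("forced: %d, normal: %d\n",
	       schedule_skb_mem(512, true), schedule_skb_mem(512, false));
	return 0;
}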