author     Linus Torvalds <torvalds@linux-foundation.org>  2013-09-07 14:27:46 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-09-07 14:27:46 -0700
commit     0ffb01d9def22f1954e99529b7e4ded497b2e88b
tree       e18b4dd941bc0e2e34078b7b64469f5675046734
parent     7b4022fa17991801e29f09c6794bbf4d1a0d6b6d
parent     4e4f1fc226816905c937f9b29dabe351075dfe0f
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
"A quick set of fixes, some to deal with fallout from yesterday's
net-next merge.
1) Fix compilation of bnx2x driver with CONFIG_BNX2X_SRIOV disabled,
from Dmitry Kravkov.
2) Fix a bnx2x regression caused by one of Dave Jones's mistaken
braces changes, from Eilon Greenstein.
3) Add some protective filtering in the netlink tap code, from Daniel
Borkmann.
4) Fix TCP congestion window growth regression after timeouts, from
Yuchung Cheng (a sketch of the repaired check follows this message).
5) Correctly adjust TCP's rcv_ssthresh for out of order packets, from
Eric Dumazet"
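To make fix 4 concrete, here is a minimal, self-contained sketch of the
repaired decision. The real code is tcp_may_raise_cwnd() in the
tcp_input.c hunk below; the FLAG_* values here are illustrative
stand-ins for the kernel's private definitions, and the standalone
function signature is invented for the sketch:

#include <stdbool.h>

/* Illustrative stand-ins for the kernel's private FLAG_* bits in
 * net/ipv4/tcp_input.c; the exact values are not the point here. */
#define FLAG_DATA_ACKED       0x04   /* this ACK acknowledged new data  */
#define FLAG_FORWARD_PROGRESS 0x34   /* snd_una advanced or data SACKed */

/* The fixed predicate: the buggy version additionally required the
 * CA_Open congestion state, so a sender not yet back in Open after a
 * timeout never grew its congestion window even on in-order ACKs. */
static bool may_raise_cwnd(int reordering, int sysctl_tcp_reordering,
			   int flag)
{
	if (reordering > sysctl_tcp_reordering)
		return flag & FLAG_FORWARD_PROGRESS;

	return flag & FLAG_DATA_ACKED;
}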
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
tcp: properly increase rcv_ssthresh for ofo packets
net: add documentation for BQL helpers
mlx5: remove unused MLX5_DEBUG param in Kconfig
bnx2x: Restore a call to config_init
bnx2x: fix broken compilation with CONFIG_BNX2X_SRIOV is not set
tcp: fix no cwnd growth after timeout
net: netlink: filter particular protocols from analyzers
Diffstat (limited to 'net')
-rw-r--r--  net/ipv4/tcp_input.c      | 11
-rw-r--r--  net/netlink/af_netlink.c  | 30
2 files changed, 36 insertions, 5 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1969e16d936d..25a89eaa669d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3162,16 +3162,14 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 {
 	/* If reordering is high then always grow cwnd whenever data is
 	 * delivered regardless of its ordering. Otherwise stay conservative
-	 * and only grow cwnd on in-order delivery in Open state, and retain
-	 * cwnd in Disordered state (RFC5681). A stretched ACK with
+	 * and only grow cwnd on in-order delivery (RFC5681). A stretched ACK w/
 	 * new SACK or ECE mark may first advance cwnd here and later reduce
 	 * cwnd in tcp_fastretrans_alert() based on more states.
 	 */
 	if (tcp_sk(sk)->reordering > sysctl_tcp_reordering)
 		return flag & FLAG_FORWARD_PROGRESS;
 
-	return inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
-	       flag & FLAG_DATA_ACKED;
+	return flag & FLAG_DATA_ACKED;
 }
 
 /* Check that window update is acceptable.
@@ -4141,6 +4139,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 		if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
 			__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
 		} else {
+			tcp_grow_window(sk, skb);
 			kfree_skb_partial(skb, fragstolen);
 			skb = NULL;
 		}
@@ -4216,8 +4215,10 @@ add_sack:
 	if (tcp_is_sack(tp))
 		tcp_sack_new_ofo_skb(sk, seq, end_seq);
 end:
-	if (skb)
+	if (skb) {
+		tcp_grow_window(sk, skb);
 		skb_set_owner_r(skb, sk);
+	}
 }
 
 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen,
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a17dda1bbee0..8df7f64c6db3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -168,16 +168,43 @@ int netlink_remove_tap(struct netlink_tap *nt)
 }
 EXPORT_SYMBOL_GPL(netlink_remove_tap);
 
+static bool netlink_filter_tap(const struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+	bool pass = false;
+
+	/* We take the more conservative approach and
+	 * whitelist socket protocols that may pass.
+	 */
+	switch (sk->sk_protocol) {
+	case NETLINK_ROUTE:
+	case NETLINK_USERSOCK:
+	case NETLINK_SOCK_DIAG:
+	case NETLINK_NFLOG:
+	case NETLINK_XFRM:
+	case NETLINK_FIB_LOOKUP:
+	case NETLINK_NETFILTER:
+	case NETLINK_GENERIC:
+		pass = true;
+		break;
+	}
+
+	return pass;
+}
+
 static int __netlink_deliver_tap_skb(struct sk_buff *skb,
 				     struct net_device *dev)
 {
 	struct sk_buff *nskb;
+	struct sock *sk = skb->sk;
 	int ret = -ENOMEM;
 
 	dev_hold(dev);
 	nskb = skb_clone(skb, GFP_ATOMIC);
 	if (nskb) {
 		nskb->dev = dev;
+		nskb->protocol = htons((u16) sk->sk_protocol);
+
 		ret = dev_queue_xmit(nskb);
 		if (unlikely(ret > 0))
 			ret = net_xmit_errno(ret);
@@ -192,6 +219,9 @@ static void __netlink_deliver_tap(struct sk_buff *skb)
 	int ret;
 	struct netlink_tap *tmp;
 
+	if (!netlink_filter_tap(skb))
+		return;
+
 	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
 		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
 		if (unlikely(ret))
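For context on the tap filtering above: netlink_filter_tap() is a
default-deny whitelist, so only the listed protocols are still cloned
to registered taps (an nlmon device, for instance), and everything else
never reaches __netlink_deliver_tap_skb(). A small userspace sketch,
only to illustrate which side of the filter two common protocols fall
on; the filtering itself happens entirely in the kernel:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	/* NETLINK_ROUTE is on the whitelist, so its traffic is still
	 * mirrored to any registered tap device. */
	int rt = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	/* NETLINK_AUDIT is not whitelisted: __netlink_deliver_tap()
	 * now returns early, so analyzers never see its messages. */
	int audit = socket(AF_NETLINK, SOCK_RAW, NETLINK_AUDIT);

	printf("route fd=%d, audit fd=%d\n", rt, audit);
	return (rt >= 0 && audit >= 0) ? 0 : 1;
}

Per the comment in netlink_filter_tap(), the whitelist is the
conservative choice: a netlink protocol added later stays hidden from
analyzers until it is deliberately opted in.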