Diffstat (limited to 'net')
37 files changed, 279 insertions, 161 deletions
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 1f1de715197c..e2aa7be3a847 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -154,7 +154,8 @@ int br_handle_frame_finish(struct sk_buff *skb)
 	dst = NULL;
 	if (is_broadcast_ether_addr(dest)) {
-		if (p->flags & BR_PROXYARP &&
+		if (IS_ENABLED(CONFIG_INET) &&
+		    p->flags & BR_PROXYARP &&
 		    skb->protocol == htons(ETH_P_ARP))
 			br_do_proxy_arp(skb, br, vid);
diff --git a/net/core/dev.c b/net/core/dev.c
index 683d493aa1bf..171420e75b03 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -7072,10 +7072,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 		oldsd->output_queue = NULL;
 		oldsd->output_queue_tailp = &oldsd->output_queue;
 	}
-	/* Append NAPI poll list from offline CPU. */
-	if (!list_empty(&oldsd->poll_list)) {
-		list_splice_init(&oldsd->poll_list, &sd->poll_list);
-		raise_softirq_irqoff(NET_RX_SOFTIRQ);
+	/* Append NAPI poll list from offline CPU, with one exception :
+	 * process_backlog() must be called by cpu owning percpu backlog.
+	 * We properly handle process_queue & input_pkt_queue later.
+	 */
+	while (!list_empty(&oldsd->poll_list)) {
+		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
+							    struct napi_struct,
+							    poll_list);
+
+		list_del_init(&napi->poll_list);
+		if (napi->poll == process_backlog)
+			napi->state = 0;
+		else
+			____napi_schedule(sd, napi);
 	}
 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
@@ -7086,7 +7096,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 		netif_rx_internal(skb);
 		input_queue_head_incr(oldsd);
 	}
-	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
+	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
 		netif_rx_internal(skb);
 		input_queue_head_incr(oldsd);
 	}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 8e38f17288d3..8d614c93f86a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2043,6 +2043,12 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
 			case NDTPA_BASE_REACHABLE_TIME:
 				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
 					      nla_get_msecs(tbp[i]));
+				/* update reachable_time as well, otherwise, the change will
+				 * only be effective after the next time neigh_periodic_work
+				 * decides to recompute it (can be multiple minutes)
+				 */
+				p->reachable_time =
+					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
 				break;
 			case NDTPA_GC_STALETIME:
 				NEIGH_VAR_SET(p, GC_STALETIME,
@@ -2921,6 +2927,31 @@ static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
 	return ret;
 }
+static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
+					  void __user *buffer,
+					  size_t *lenp, loff_t *ppos)
+{
+	struct neigh_parms *p = ctl->extra2;
+	int ret;
+
+	if (strcmp(ctl->procname, "base_reachable_time") == 0)
+		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
+	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
+		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
+	else
+		ret = -1;
+
+	if (write && ret == 0) {
+		/* update reachable_time as well, otherwise, the change will
+		 * only be effective after the next time neigh_periodic_work
+		 * decides to recompute it
+		 */
+		p->reachable_time =
+			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
+	}
+	return ret;
+}
+
 #define NEIGH_PARMS_DATA_OFFSET(index)	\
 	(&((struct neigh_parms *) 0)->data[index])
@@ -3047,6 +3078,19 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
 		/* ReachableTime (in milliseconds) */
 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
+	} else {
+		/* Those handlers will update p->reachable_time after
+		 * base_reachable_time(_ms) is set to ensure the new timer starts being
+		 * applied after the next neighbour update instead of waiting for
+		 * neigh_periodic_work to update its value (can be multiple minutes)
+		 * So any handler that replaces them should do this as well
+		 */
+		/* ReachableTime */
+		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
+			neigh_proc_base_reachable_time;
+		/* ReachableTime (in milliseconds) */
+		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
+			neigh_proc_base_reachable_time;
 	}
 	/* Don't export sysctls to unprivileged users */
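The neighbour.c hunks above keep a randomized reachable_time that is derived from base_reachable_time, and the new handlers recompute it immediately after a successful write instead of waiting for neigh_periodic_work. A minimal userspace-style sketch of that pattern follows; the names and the program itself are hypothetical illustrations, not kernel code.

/* Illustrative model only: a cached value derived from a base setting is
 * refreshed as soon as the base changes, mirroring the idea behind
 * neigh_proc_base_reachable_time(). All names here are made up.
 */
#include <stdio.h>
#include <stdlib.h>

struct neigh_params_model {
	long base_reachable_ms;
	long reachable_ms;	/* randomized, derived from the base */
};

/* roughly base/2 .. 3*base/2, similar in spirit to neigh_rand_reach_time() */
static long rand_reach_time(long base)
{
	return base / 2 + rand() % (base ? base : 1);
}

static void set_base_reachable(struct neigh_params_model *p, long base_ms)
{
	p->base_reachable_ms = base_ms;
	/* refresh the derived value right away instead of waiting for a
	 * periodic worker to notice the change */
	p->reachable_ms = rand_reach_time(base_ms);
}

int main(void)
{
	struct neigh_params_model p = { 0, 0 };

	set_base_reachable(&p, 30000);
	printf("base=%ldms reachable=%ldms\n",
	       p.base_reachable_ms, p.reachable_ms);
	return 0;
}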
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 515569ffde8a..589aafd01fc5 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -46,6 +46,7 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds)
 	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d:%.2x",
 		 ds->index, ds->pd->sw_addr);
 	ds->slave_mii_bus->parent = ds->master_dev;
+	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
 }
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 3a83ce5efa80..787b3c294ce6 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -129,7 +129,8 @@ int ip_forward(struct sk_buff *skb)
 	 *	We now generate an ICMP HOST REDIRECT giving the route
 	 *	we calculated.
 	 */
-	if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb))
+	if (IPCB(skb)->flags & IPSKB_DOREDIRECT && !opt->srr &&
+	    !skb_sec_path(skb))
 		ip_rt_send_redirect(skb);
 	skb->priority = rt_tos2priority(iph->tos);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 8a89c738b7a3..6b85adb05003 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -461,17 +461,13 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 	memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
 	sin = &errhdr.offender;
-	sin->sin_family = AF_UNSPEC;
+	memset(sin, 0, sizeof(*sin));
 	if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
 	    ipv4_pktinfo_prepare_errqueue(sk, skb, serr->ee.ee_origin)) {
-		struct inet_sock *inet = inet_sk(sk);
-
 		sin->sin_family = AF_INET;
 		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
-		sin->sin_port = 0;
-		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
-		if (inet->cmsg_flags)
+		if (inet_sk(sk)->cmsg_flags)
 			ip_cmsg_recv(msg, skb);
 	}
diff --git a/net/ipv4/netfilter/nft_redir_ipv4.c b/net/ipv4/netfilter/nft_redir_ipv4.c
index ff2d23d8c87a..6ecfce63201a 100644
--- a/net/ipv4/netfilter/nft_redir_ipv4.c
+++ b/net/ipv4/netfilter/nft_redir_ipv4.c
@@ -27,10 +27,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr,
 	memset(&mr, 0, sizeof(mr));
 	if (priv->sreg_proto_min) {
-		mr.range[0].min.all = (__force __be16)
-					data[priv->sreg_proto_min].data[0];
-		mr.range[0].max.all = (__force __be16)
-					data[priv->sreg_proto_max].data[0];
+		mr.range[0].min.all =
+			*(__be16 *)&data[priv->sreg_proto_min].data[0];
+		mr.range[0].max.all =
+			*(__be16 *)&data[priv->sreg_proto_max].data[0];
 		mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
 	}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index c0d82f78d364..2a3720fb5a5f 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -966,8 +966,11 @@ bool ping_rcv(struct sk_buff *skb)
 	sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
 	if (sk != NULL) {
+		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+
 		pr_debug("rcv on socket %p\n", sk);
-		ping_queue_rcv_skb(sk, skb_get(skb));
+		if (skb2)
+			ping_queue_rcv_skb(sk, skb2);
 		sock_put(sk);
 		return true;
 	}
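The nft_redir_ipv4 hunk above (and the matching nft_redir_ipv6 and nft_nat hunks further down) stops casting the 32-bit register word to __be16 and instead reads its first two bytes, presumably so the port is taken from the start of the register regardless of host endianness. The standalone sketch below, in plain C rather than kernel types, shows why the two forms only agree on little-endian machines; everything in it is illustrative.

/* Illustration: casting a 32-bit word to a 16-bit type keeps the numerically
 * low bytes, while reading its first two bytes of storage keeps the bytes as
 * they sit in memory. The results differ on big-endian hosts.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* port 22 in network byte order, stored at the start of a 32-bit word */
	const unsigned char bytes[4] = { 0x00, 0x16, 0x00, 0x00 };
	uint32_t reg;
	uint16_t by_cast, by_deref;

	memcpy(&reg, bytes, sizeof(reg));

	by_cast = (uint16_t)reg;		  /* low-order 16 bits of the value */
	memcpy(&by_deref, &reg, sizeof(by_deref)); /* first two bytes in memory */

	printf("cast=0x%04x deref=0x%04x\n", by_cast, by_deref);
	return 0;
}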
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6a2155b02602..d58dd0ec3e53 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1554,11 +1554,10 @@ static int __mkroute_input(struct sk_buff *skb,
 	do_cache = res->fi && !itag;
 	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
+	    skb->protocol == htons(ETH_P_IP) &&
 	    (IN_DEV_SHARED_MEDIA(out_dev) ||
-	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) {
-		flags |= RTCF_DOREDIRECT;
-		do_cache = false;
-	}
+	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
+		IPCB(skb)->flags |= IPSKB_DOREDIRECT;
 	if (skb->protocol != htons(ETH_P_IP)) {
 		/* Not IP (i.e. ARP). Do not create route, if it is
@@ -2303,6 +2302,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
 	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
 	if (rt->rt_flags & RTCF_NOTIFY)
 		r->rtm_flags |= RTM_F_NOTIFY;
+	if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
+		r->rtm_flags |= RTCF_DOREDIRECT;
 	if (nla_put_be32(skb, RTA_DST, dst))
 		goto nla_put_failure;
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 7927db0a9279..4a000f1dd757 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -99,11 +99,13 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlin
 	s_slot = cb->args[0];
 	num = s_num = cb->args[1];
-	for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) {
+	for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
 		struct sock *sk;
 		struct hlist_nulls_node *node;
 		struct udp_hslot *hslot = &table->hash[slot];
+		num = 0;
+
 		if (hlist_nulls_empty(&hslot->head))
 			continue;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 100c589a2a6c..49f5e73db122 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -393,11 +393,10 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 	memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
 	sin = &errhdr.offender;
-	sin->sin6_family = AF_UNSPEC;
+	memset(sin, 0, sizeof(*sin));
+
 	if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
 		sin->sin6_family = AF_INET6;
-		sin->sin6_flowinfo = 0;
-		sin->sin6_port = 0;
 		if (np->rxopt.all) {
 			if (serr->ee.ee_origin != SO_EE_ORIGIN_ICMP &&
 			    serr->ee.ee_origin != SO_EE_ORIGIN_ICMP6)
@@ -412,12 +411,9 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 				ipv6_iface_scope_id(&sin->sin6_addr,
 						    IP6CB(skb)->iif);
 		} else {
-			struct inet_sock *inet = inet_sk(sk);
-
 			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
 					       &sin->sin6_addr);
-			sin->sin6_scope_id = 0;
-			if (inet->cmsg_flags)
+			if (inet_sk(sk)->cmsg_flags)
 				ip_cmsg_recv(msg, skb);
 		}
 	}
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index b2d1838897c9..f1c6d5e98322 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -659,6 +659,29 @@ static int fib6_commit_metrics(struct dst_entry *dst,
 	return 0;
 }
+static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn,
+			  struct net *net)
+{
+	if (atomic_read(&rt->rt6i_ref) != 1) {
+		/* This route is used as dummy address holder in some split
+		 * nodes. It is not leaked, but it still holds other resources,
+		 * which must be released in time. So, scan ascendant nodes
+		 * and replace dummy references to this route with references
+		 * to still alive ones.
+		 */
+		while (fn) {
+			if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
+				fn->leaf = fib6_find_prefix(net, fn);
+				atomic_inc(&fn->leaf->rt6i_ref);
+				rt6_release(rt);
+			}
+			fn = fn->parent;
+		}
+		/* No more references are possible at this point. */
+		BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
+	}
+}
+
 /*
  *	Insert routing information in a node.
  */
@@ -807,11 +830,12 @@ add:
 		rt->dst.rt6_next = iter->dst.rt6_next;
 		atomic_inc(&rt->rt6i_ref);
 		inet6_rt_notify(RTM_NEWROUTE, rt, info);
-		rt6_release(iter);
 		if (!(fn->fn_flags & RTN_RTINFO)) {
 			info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
 			fn->fn_flags |= RTN_RTINFO;
 		}
+		fib6_purge_rt(iter, fn, info->nl_net);
+		rt6_release(iter);
 	}
 	return 0;
@@ -1322,24 +1346,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
 		fn = fib6_repair_tree(net, fn);
 	}
-	if (atomic_read(&rt->rt6i_ref) != 1) {
-		/* This route is used as dummy address holder in some split
-		 * nodes. It is not leaked, but it still holds other resources,
-		 * which must be released in time. So, scan ascendant nodes
-		 * and replace dummy references to this route with references
-		 * to still alive ones.
-		 */
-		while (fn) {
-			if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
-				fn->leaf = fib6_find_prefix(net, fn);
-				atomic_inc(&fn->leaf->rt6i_ref);
-				rt6_release(rt);
-			}
-			fn = fn->parent;
-		}
-		/* No more references are possible at this point. */
-		BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
-	}
+	fib6_purge_rt(rt, fn, net);
 	inet6_rt_notify(RTM_DELROUTE, rt, info);
 	rt6_release(rt);
diff --git a/net/ipv6/netfilter/nft_redir_ipv6.c b/net/ipv6/netfilter/nft_redir_ipv6.c
index 2433a6bfb191..11820b6b3613 100644
--- a/net/ipv6/netfilter/nft_redir_ipv6.c
+++ b/net/ipv6/netfilter/nft_redir_ipv6.c
@@ -27,10 +27,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr,
 	memset(&range, 0, sizeof(range));
 	if (priv->sreg_proto_min) {
-		range.min_proto.all = (__force __be16)
-					data[priv->sreg_proto_min].data[0];
-		range.max_proto.all = (__force __be16)
-					data[priv->sreg_proto_max].data[0];
+		range.min_proto.all =
+			*(__be16 *)&data[priv->sreg_proto_min].data[0];
+		range.max_proto.all =
+			*(__be16 *)&data[priv->sreg_proto_max].data[0];
 		range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
 	}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c91083156edb..495965358d22 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1160,12 +1160,9 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
 		struct net *net = dev_net(dst->dev);
 		rt6->rt6i_flags |= RTF_MODIFIED;
-		if (mtu < IPV6_MIN_MTU) {
-			u32 features = dst_metric(dst, RTAX_FEATURES);
+		if (mtu < IPV6_MIN_MTU)
 			mtu = IPV6_MIN_MTU;
-			features |= RTAX_FEATURE_ALLFRAG;
-			dst_metric_set(dst, RTAX_FEATURES, features);
-		}
+
 		dst_metric_set(dst, RTAX_MTU, mtu);
 		rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
 	}
@@ -1245,12 +1242,16 @@ restart:
 		rt = net->ipv6.ip6_null_entry;
 	else if (rt->dst.error) {
 		rt = net->ipv6.ip6_null_entry;
-	} else if (rt == net->ipv6.ip6_null_entry) {
+		goto out;
+	}
+
+	if (rt == net->ipv6.ip6_null_entry) {
 		fn = fib6_backtrack(fn, &fl6->saddr);
 		if (fn)
 			goto restart;
 	}
+out:
 	dst_hold(&rt->dst);
 	read_unlock_bh(&table->tb6_lock);
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 5f983644373a..48bf5a06847b 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -130,12 +130,18 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
 {
 	struct flowi6 *fl6 = &fl->u.ip6;
 	int onlyproto = 0;
-	u16 offset = skb_network_header_len(skb);
 	const struct ipv6hdr *hdr = ipv6_hdr(skb);
+	u16 offset = sizeof(*hdr);
 	struct ipv6_opt_hdr *exthdr;
 	const unsigned char *nh = skb_network_header(skb);
-	u8 nexthdr = nh[IP6CB(skb)->nhoff];
+	u16 nhoff = IP6CB(skb)->nhoff;
 	int oif = 0;
+	u8 nexthdr;
+
+	if (!nhoff)
+		nhoff = offsetof(struct ipv6hdr, nexthdr);
+
+	nexthdr = nh[nhoff];
 	if (skb_dst(skb))
 		oif = skb_dst(skb)->dev->ifindex;
diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c
index 612a5ddaf93b..799bafc2af39 100644
--- a/net/llc/sysctl_net_llc.c
+++ b/net/llc/sysctl_net_llc.c
@@ -18,28 +18,28 @@ static struct ctl_table llc2_timeout_table[] = {
 	{
 		.procname	= "ack",
 		.data		= &sysctl_llc2_ack_timeout,
-		.maxlen		= sizeof(long),
+		.maxlen		= sizeof(sysctl_llc2_ack_timeout),
 		.mode		= 0644,
 		.proc_handler   = proc_dointvec_jiffies,
 	},
 	{
 		.procname	= "busy",
 		.data		= &sysctl_llc2_busy_timeout,
-		.maxlen		= sizeof(long),
+		.maxlen		= sizeof(sysctl_llc2_busy_timeout),
 		.mode		= 0644,
 		.proc_handler   = proc_dointvec_jiffies,
 	},
 	{
 		.procname	= "p",
 		.data		= &sysctl_llc2_p_timeout,
-		.maxlen		= sizeof(long),
+		.maxlen		= sizeof(sysctl_llc2_p_timeout),
 		.mode		= 0644,
 		.proc_handler   = proc_dointvec_jiffies,
 	},
 	{
 		.procname	= "rej",
 		.data		= &sysctl_llc2_rej_timeout,
-		.maxlen		= sizeof(long),
+		.maxlen		= sizeof(sysctl_llc2_rej_timeout),
 		.mode		= 0644,
 		.proc_handler   = proc_dointvec_jiffies,
 	},
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 2c36c4765f47..837a406a9dd6 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1643,7 +1643,7 @@ __ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-	bool ret;
+	bool ret = false;
 	int ac;
 	if (local->hw.queues < IEEE80211_NUM_ACS)
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 4c5192e0d66c..4a95fe3cffbc 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -86,20 +86,6 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 		}
 	}
-	/* tear down aggregation sessions and remove STAs */
-	mutex_lock(&local->sta_mtx);
-	list_for_each_entry(sta, &local->sta_list, list) {
-		if (sta->uploaded) {
-			enum ieee80211_sta_state state;
-
-			state = sta->sta_state;
-			for (; state > IEEE80211_STA_NOTEXIST; state--)
-				WARN_ON(drv_sta_state(local, sta->sdata, sta,
-						      state, state - 1));
-		}
-	}
-	mutex_unlock(&local->sta_mtx);
-
 	/* remove all interfaces that were created in the driver */
 	list_for_each_entry(sdata, &local->interfaces, list) {
 		if (!ieee80211_sdata_running(sdata))
@@ -111,6 +97,21 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 		case NL80211_IFTYPE_STATION:
 			ieee80211_mgd_quiesce(sdata);
 			break;
+		case NL80211_IFTYPE_WDS:
+			/* tear down aggregation sessions and remove STAs */
+			mutex_lock(&local->sta_mtx);
+			sta = sdata->u.wds.sta;
+			if (sta && sta->uploaded) {
+				enum ieee80211_sta_state state;
+
+				state = sta->sta_state;
+				for (; state > IEEE80211_STA_NOTEXIST; state--)
+					WARN_ON(drv_sta_state(local, sta->sdata,
+							      sta, state,
+							      state - 1));
+			}
+			mutex_unlock(&local->sta_mtx);
+			break;
 		default:
 			break;
 		}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 683b10f46505..d69ca513848e 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -272,7 +272,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
 	else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
 		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
 	else if (rate)
-		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
+		channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
 	else
 		channel_flags |= IEEE80211_CHAN_2GHZ;
 	put_unaligned_le16(channel_flags, pos);
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 1d5341f3761d..5d3daae98bf0 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -183,6 +183,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
 	struct nf_conn *ct;
 	struct net *net;
+	*diff = 0;
+
 #ifdef CONFIG_IP_VS_IPV6
 	/* This application helper doesn't work with IPv6 yet,
 	 * so turn this into a no-op for IPv6 packets
@@ -191,8 +193,6 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
 		return 1;
 #endif
-	*diff = 0;
-
 	/* Only useful for established sessions */
 	if (cp->state != IP_VS_TCP_S_ESTABLISHED)
 		return 1;
@@ -322,6 +322,9 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
 	struct ip_vs_conn *n_cp;
 	struct net *net;
+	/* no diff required for incoming packets */
+	*diff = 0;
+
#ifdef CONFIG_IP_VS_IPV6
 	/* This application helper doesn't work with IPv6 yet,
 	 * so turn this into a no-op for IPv6 packets
@@ -330,9 +333,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
 		return 1;
 #endif
-	/* no diff required for incoming packets */
-	*diff = 0;
-
 	/* Only useful for established sessions */
 	if (cp->state != IP_VS_TCP_S_ESTABLISHED)
 		return 1;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index a11674806707..46d1b26a468e 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -611,16 +611,15 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	 */
 	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
 	pr_debug("Confirming conntrack %p\n", ct);
-	/* We have to check the DYING flag inside the lock to prevent
-	   a race against nf_ct_get_next_corpse() possibly called from
-	   user context, else we insert an already 'dead' hash, blocking
-	   further use of that particular connection -JM */
+	/* We have to check the DYING flag after unlink to prevent
+	 * a race against nf_ct_get_next_corpse() possibly called from
+	 * user context, else we insert an already 'dead' hash, blocking
+	 * further use of that particular connection -JM.
+	 */
+	nf_ct_del_from_dying_or_unconfirmed_list(ct);
-	if (unlikely(nf_ct_is_dying(ct))) {
-		nf_conntrack_double_unlock(hash, reply_hash);
-		local_bh_enable();
-		return NF_ACCEPT;
-	}
+	if (unlikely(nf_ct_is_dying(ct)))
+		goto out;
 	/* See if there's one in the list already, including reverse:
 	   NAT could have grabbed it without realizing, since we're
@@ -636,8 +635,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
 		goto out;
-	nf_ct_del_from_dying_or_unconfirmed_list(ct);
-
 	/* Timer relative to confirmation time, not original
 	   setting time, otherwise we'd get timer wrap in
 	   weird delay cases. */
@@ -673,6 +670,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	return NF_ACCEPT;
 out:
+	nf_ct_add_to_dying_list(ct);
 	nf_conntrack_double_unlock(hash, reply_hash);
 	NF_CT_STAT_INC(net, insert_failed);
 	local_bh_enable();
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 129a8daa4abf..3b3ddb4fb9ee 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -713,16 +713,12 @@ static int nft_flush_table(struct nft_ctx *ctx)
 	struct nft_chain *chain, *nc;
 	struct nft_set *set, *ns;
-	list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
+	list_for_each_entry(chain, &ctx->table->chains, list) {
 		ctx->chain = chain;
 		err = nft_delrule_by_chain(ctx);
 		if (err < 0)
 			goto out;
-
-		err = nft_delchain(ctx);
-		if (err < 0)
-			goto out;
 	}
 	list_for_each_entry_safe(set, ns, &ctx->table->sets, list) {
@@ -735,6 +731,14 @@ static int nft_flush_table(struct nft_ctx *ctx)
 			goto out;
 	}
+	list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
+		ctx->chain = chain;
+
+		err = nft_delchain(ctx);
+		if (err < 0)
+			goto out;
+	}
+
 	err = nft_deltable(ctx);
out:
 	return err;
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index cde4a6702fa3..c421d94c4652 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -321,7 +321,8 @@ replay:
 		nlh = nlmsg_hdr(skb);
 		err = 0;
-		if (nlh->nlmsg_len < NLMSG_HDRLEN) {
+		if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) ||
+		    skb->len < nlh->nlmsg_len) {
 			err = -EINVAL;
 			goto ack;
 		}
@@ -469,7 +470,7 @@ static int nfnetlink_bind(struct net *net, int group)
 	int type;
 	if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
-		return -EINVAL;
+		return 0;
 	type = nfnl_group2type[group];
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index afe2b0b45ec4..aff54fb1c8a0 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr,
 	}
 	if (priv->sreg_proto_min) {
-		range.min_proto.all = (__force __be16)
-					data[priv->sreg_proto_min].data[0];
-		range.max_proto.all = (__force __be16)
-					data[priv->sreg_proto_max].data[0];
+		range.min_proto.all =
+			*(__be16 *)&data[priv->sreg_proto_min].data[0];
+		range.max_proto.all =
+			*(__be16 *)&data[priv->sreg_proto_max].data[0];
 		range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
 	}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 84ea76ca3f1f..02fdde28dada 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -61,6 +61,7 @@
 #include <linux/rhashtable.h>
 #include <asm/cacheflush.h>
 #include <linux/hash.h>
+#include <linux/genetlink.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -1095,6 +1096,8 @@ static void netlink_remove(struct sock *sk)
 		__sk_del_bind_node(sk);
 		netlink_update_listeners(sk);
 	}
+	if (sk->sk_protocol == NETLINK_GENERIC)
+		atomic_inc(&genl_sk_destructing_cnt);
 	netlink_table_ungrab();
 }
@@ -1211,6 +1214,20 @@ static int netlink_release(struct socket *sock)
 	 * will be purged.
 	 */
+	/* must not acquire netlink_table_lock in any way again before unbind
+	 * and notifying genetlink is done as otherwise it might deadlock
+	 */
+	if (nlk->netlink_unbind) {
+		int i;
+
+		for (i = 0; i < nlk->ngroups; i++)
+			if (test_bit(i, nlk->groups))
+				nlk->netlink_unbind(sock_net(sk), i + 1);
+	}
+	if (sk->sk_protocol == NETLINK_GENERIC &&
+	    atomic_dec_return(&genl_sk_destructing_cnt) == 0)
+		wake_up(&genl_sk_destructing_waitq);
+
 	sock->sk = NULL;
 	wake_up_interruptible_all(&nlk->wait);
@@ -1246,13 +1263,6 @@ static int netlink_release(struct socket *sock)
 		netlink_table_ungrab();
 	}
-	if (nlk->netlink_unbind) {
-		int i;
-
-		for (i = 0; i < nlk->ngroups; i++)
-			if (test_bit(i, nlk->groups))
-				nlk->netlink_unbind(sock_net(sk), i + 1);
-	}
 	kfree(nlk->groups);
 	nlk->groups = NULL;
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index f123a88496f8..f1c31b39aa3e 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -2,6 +2,7 @@
 #define _AF_NETLINK_H
 #include <linux/rhashtable.h>
+#include <linux/atomic.h>
 #include <net/sock.h>
 #define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 2e11061ef885..ee57459fc258 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -23,6 +23,9 @@
 static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
 static DECLARE_RWSEM(cb_lock);
+atomic_t genl_sk_destructing_cnt = ATOMIC_INIT(0);
+DECLARE_WAIT_QUEUE_HEAD(genl_sk_destructing_waitq);
+
 void genl_lock(void)
 {
 	mutex_lock(&genl_mutex);
@@ -435,15 +438,18 @@ int genl_unregister_family(struct genl_family *family)
 	genl_lock_all();
-	genl_unregister_mc_groups(family);
-
 	list_for_each_entry(rc, genl_family_chain(family->id), family_list) {
 		if (family->id != rc->id || strcmp(rc->name, family->name))
 			continue;
+		genl_unregister_mc_groups(family);
+
 		list_del(&rc->family_list);
 		family->n_ops = 0;
-		genl_unlock_all();
+		up_write(&cb_lock);
+		wait_event(genl_sk_destructing_waitq,
+			   atomic_read(&genl_sk_destructing_cnt) == 0);
+		genl_unlock();
 		kfree(family->attrbuf);
 		genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
@@ -985,7 +991,7 @@ static struct genl_multicast_group genl_ctrl_groups[] = {
 static int genl_bind(struct net *net, int group)
 {
-	int i, err = 0;
+	int i, err = -ENOENT;
 	down_read(&cb_lock);
 	for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
@@ -1014,7 +1020,6 @@ static int genl_bind(struct net *net, int group)
 static void genl_unbind(struct net *net, int group)
 {
 	int i;
-	bool found = false;
 	down_read(&cb_lock);
 	for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
@@ -1027,14 +1032,11 @@ static void genl_unbind(struct net *net, int group)
 				if (f->mcast_unbind)
 					f->mcast_unbind(net, fam_grp);
-				found = true;
 				break;
 			}
 		}
 	}
 	up_read(&cb_lock);
-
-	WARN_ON(!found);
 }
 static int __net_init genl_pernet_init(struct net *net)
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 4e9a5f035cbc..b07349e82d78 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -524,7 +524,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
 	struct vport *input_vport;
 	int len;
 	int err;
-	bool log = !a[OVS_FLOW_ATTR_PROBE];
+	bool log = !a[OVS_PACKET_ATTR_PROBE];
 	err = -EINVAL;
 	if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
@@ -610,6 +610,7 @@ static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
 	[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
 	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
 	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
+	[OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
 };
 static const struct genl_ops dp_packet_genl_ops[] = {
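The openvswitch hunk above replaces OVS_FLOW_ATTR_PROBE with OVS_PACKET_ATTR_PROBE when indexing the packet command's attribute array, and declares the attribute in packet_policy. A standalone sketch of the general hazard follows; the enums and values are hypothetical, chosen only to show that a constant from the wrong enum compiles silently but reads the wrong slot.

/* Illustration with made-up enums: indexing one command's attribute table
 * with a constant from another command's enum is accepted by the compiler
 * but looks at the wrong entry.
 */
#include <stdio.h>

enum flow_attr   { FLOW_ATTR_UNSPEC, FLOW_ATTR_KEY, FLOW_ATTR_PROBE };          /* PROBE == 2 */
enum packet_attr { PACKET_ATTR_UNSPEC, PACKET_ATTR_PACKET, PACKET_ATTR_KEY,
		   PACKET_ATTR_PROBE };                                          /* PROBE == 3 */

int main(void)
{
	const char *packet_attrs[4] = { 0 };

	packet_attrs[PACKET_ATTR_PROBE] = "probe flag";	/* parsed attribute */

	/* wrong enum: slot 2 instead of 3, so the flag appears to be absent */
	printf("via FLOW_ATTR_PROBE:   %s\n",
	       packet_attrs[FLOW_ATTR_PROBE] ? "present" : "missing");
	printf("via PACKET_ATTR_PROBE: %s\n",
	       packet_attrs[PACKET_ATTR_PROBE] ? "present" : "missing");
	return 0;
}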
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 6880f34a529a..9cfe2e1dd8b5 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2517,7 +2517,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 	err = -EINVAL;
 	if (sock->type == SOCK_DGRAM) {
 		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
-		if (unlikely(offset) < 0)
+		if (unlikely(offset < 0))
 			goto out_free;
 	} else {
 		if (ll_header_truncated(dev, len))
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 84c8219c3e1c..f59adf8a4cd7 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -180,6 +180,11 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 	}
 	bpf_size = bpf_len * sizeof(*bpf_ops);
+	if (bpf_size != nla_len(tb[TCA_BPF_OPS])) {
+		ret = -EINVAL;
+		goto errout;
+	}
+
 	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
 	if (bpf_ops == NULL) {
 		ret = -ENOMEM;
@@ -215,15 +220,21 @@ static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
 				   struct cls_bpf_head *head)
 {
 	unsigned int i = 0x80000000;
+	u32 handle;
 	do {
 		if (++head->hgen == 0x7FFFFFFF)
 			head->hgen = 1;
 	} while (--i > 0 && cls_bpf_get(tp, head->hgen));
-	if (i == 0)
+
+	if (unlikely(i == 0)) {
 		pr_err("Insufficient number of handles\n");
+		handle = 0;
+	} else {
+		handle = head->hgen;
+	}
-	return i;
+	return handle;
 }
 static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index f791edd64d6c..26d06dbcc1c8 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1182,7 +1182,6 @@ void sctp_assoc_update(struct sctp_association *asoc,
 	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
 	new->peer.peer_hmacs = NULL;
-	sctp_auth_key_put(asoc->asoc_shared_key);
 	sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
 }
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 2625eccb77d5..aafe94bf292e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1603,7 +1603,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
 	sctp_assoc_t associd = 0;
 	sctp_cmsgs_t cmsgs = { NULL };
 	sctp_scope_t scope;
-	bool fill_sinfo_ttl = false;
+	bool fill_sinfo_ttl = false, wait_connect = false;
 	struct sctp_datamsg *datamsg;
 	int msg_flags = msg->msg_flags;
 	__u16 sinfo_flags = 0;
@@ -1943,6 +1943,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
 		if (err < 0)
 			goto out_free;
+		wait_connect = true;
 		pr_debug("%s: we associated primitively\n", __func__);
 	}
@@ -1980,6 +1981,11 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
 	sctp_datamsg_put(datamsg);
 	err = msg_len;
+	if (unlikely(wait_connect)) {
+		timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT);
+		sctp_wait_for_connect(asoc, &timeo);
+	}
+
 	/* If we are already past ASSOCIATE, the lower
 	 * layers are responsible for association cleanup.
 	 */
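A few hunks up, the af_packet.c change turns `unlikely(offset) < 0` into `unlikely(offset < 0)`. Because unlikely() is built on __builtin_expect(!!(x), 0), it reduces its argument to 0 or 1 before the comparison runs, so the old form could never detect a negative return. The tiny standalone demonstration below assumes the usual kernel-style macro definition and needs GCC or Clang for __builtin_expect.

/* Demonstrates why "unlikely(offset) < 0" is always false: unlikely()
 * collapses its argument to 0 or 1 before the "< 0" comparison happens.
 */
#include <stdio.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

int main(void)
{
	int offset = -7;	/* e.g. an error return */

	if (unlikely(offset) < 0)	/* !!(-7) == 1, and 1 < 0 is false */
		printf("broken form: caught the error\n");
	else
		printf("broken form: error slipped through\n");

	if (unlikely(offset < 0))	/* correct: the comparison is the argument */
		printf("fixed form: caught the error\n");

	return 0;
}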
diff --git a/net/socket.c b/net/socket.c
index a2c33a4dc7ba..418795caa897 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -869,9 +869,6 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
 static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
 					 struct sock_iocb *siocb)
 {
-	if (!is_sync_kiocb(iocb))
-		BUG();
-
 	siocb->kiocb = iocb;
 	iocb->private = siocb;
 	return siocb;
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 96ceefeb9daf..a9e174fc0f91 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -220,10 +220,11 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
 	struct sk_buff *skb;
 	skb_queue_walk(&bcl->outqueue, skb) {
-		if (more(buf_seqno(skb), after))
+		if (more(buf_seqno(skb), after)) {
+			tipc_link_retransmit(bcl, skb, mod(to - after));
 			break;
+		}
 	}
-	tipc_link_retransmit(bcl, skb, mod(to - after));
 }
 /**
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 7ca4b5133123..8887c6e5fca8 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2854,6 +2854,9 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
 	if (!rdev->ops->get_key)
 		return -EOPNOTSUPP;
+	if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
+		return -ENOENT;
+
 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (!msg)
 		return -ENOMEM;
@@ -2873,10 +2876,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
 	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
 		goto nla_put_failure;
-	if (pairwise && mac_addr &&
-	    !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
-		return -ENOENT;
-
 	err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie,
 			   get_key_callback);
@@ -3047,7 +3046,7 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
 	wdev_lock(dev->ieee80211_ptr);
 	err = nl80211_key_allowed(dev->ieee80211_ptr);
-	if (key.type == NL80211_KEYTYPE_PAIRWISE && mac_addr &&
+	if (key.type == NL80211_KEYTYPE_GROUP && mac_addr &&
 	    !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
 		err = -ENOENT;
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 7b8309840d4e..d39d1cbc86b1 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1530,45 +1530,40 @@ static void reg_call_notifier(struct wiphy *wiphy,
 static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
 {
-	struct ieee80211_channel *ch;
 	struct cfg80211_chan_def chandef;
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
-	bool ret = true;
+	enum nl80211_iftype iftype;
 	wdev_lock(wdev);
+	iftype = wdev->iftype;
+	/* make sure the interface is active */
 	if (!wdev->netdev || !netif_running(wdev->netdev))
-		goto out;
+		goto wdev_inactive_unlock;
-	switch (wdev->iftype) {
+	switch (iftype) {
 	case NL80211_IFTYPE_AP:
 	case NL80211_IFTYPE_P2P_GO:
 		if (!wdev->beacon_interval)
-			goto out;
-
-		ret = cfg80211_reg_can_beacon(wiphy,
-					      &wdev->chandef, wdev->iftype);
+			goto wdev_inactive_unlock;
+		chandef = wdev->chandef;
 		break;
 	case NL80211_IFTYPE_ADHOC:
 		if (!wdev->ssid_len)
-			goto out;
-
-		ret = cfg80211_reg_can_beacon(wiphy,
-					      &wdev->chandef, wdev->iftype);
+			goto wdev_inactive_unlock;
+		chandef = wdev->chandef;
 		break;
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_P2P_CLIENT:
 		if (!wdev->current_bss ||
 		    !wdev->current_bss->pub.channel)
-			goto out;
+			goto wdev_inactive_unlock;
-		ch = wdev->current_bss->pub.channel;
-		if (rdev->ops->get_channel &&
-		    !rdev_get_channel(rdev, wdev, &chandef))
-			ret = cfg80211_chandef_usable(wiphy, &chandef,
-						      IEEE80211_CHAN_DISABLED);
-		else
-			ret = !(ch->flags & IEEE80211_CHAN_DISABLED);
+		if (!rdev->ops->get_channel ||
+		    rdev_get_channel(rdev, wdev, &chandef))
+			cfg80211_chandef_create(&chandef,
+						wdev->current_bss->pub.channel,
+						NL80211_CHAN_NO_HT);
 		break;
 	case NL80211_IFTYPE_MONITOR:
 	case NL80211_IFTYPE_AP_VLAN:
@@ -1581,9 +1576,26 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
 		break;
 	}
-out:
 	wdev_unlock(wdev);
-	return ret;
+
+	switch (iftype) {
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_P2P_GO:
+	case NL80211_IFTYPE_ADHOC:
+		return cfg80211_reg_can_beacon(wiphy, &chandef, iftype);
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_P2P_CLIENT:
+		return cfg80211_chandef_usable(wiphy, &chandef,
+					       IEEE80211_CHAN_DISABLED);
+	default:
+		break;
+	}
+
+	return true;
+
+wdev_inactive_unlock:
+	wdev_unlock(wdev);
+	return true;
 }
 static void reg_leave_invalid_chans(struct wiphy *wiphy)
diff --git a/net/wireless/util.c b/net/wireless/util.c
index d0ac795445b7..5488c3662f7d 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -308,6 +308,12 @@ unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc)
 		goto out;
 	}
+	if (ieee80211_is_mgmt(fc)) {
+		if (ieee80211_has_order(fc))
+			hdrlen += IEEE80211_HT_CTL_LEN;
+		goto out;
+	}
+
 	if (ieee80211_is_ctl(fc)) {
 		/*
 		 * ACK and CTS are 10 bytes, all others 16.  To see how