Diffstat (limited to 'net')
-rw-r--r--  net/bridge/br_device.c              |   2
-rw-r--r--  net/bridge/br_input.c               |   2
-rw-r--r--  net/bridge/br_mdb.c                 |  14
-rw-r--r--  net/bridge/br_multicast.c           | 261
-rw-r--r--  net/bridge/br_private.h             |  57
-rw-r--r--  net/core/flow_dissector.c           |  11
-rw-r--r--  net/core/scm.c                      |   2
-rw-r--r--  net/ipv4/ip_output.c                |   8
-rw-r--r--  net/ipv4/ipip.c                     |   5
-rw-r--r--  net/ipv4/raw.c                      |   3
-rw-r--r--  net/ipv4/tcp_input.c                |   9
-rw-r--r--  net/ipv4/tcp_output.c               |   4
-rw-r--r--  net/ipv4/xfrm4_output.c             |  16
-rw-r--r--  net/ipv4/xfrm4_state.c              |   1
-rw-r--r--  net/ipv6/ip6_gre.c                  |   5
-rw-r--r--  net/ipv6/ip6_output.c               |   3
-rw-r--r--  net/ipv6/ip6_tunnel.c               |   6
-rw-r--r--  net/ipv6/raw.c                      |   1
-rw-r--r--  net/ipv6/sit.c                      |  11
-rw-r--r--  net/ipv6/xfrm6_output.c             |  21
-rw-r--r--  net/ipv6/xfrm6_state.c              |   1
-rw-r--r--  net/mac80211/ibss.c                 |  34
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c  |   3
-rw-r--r--  net/netlink/genetlink.c             |  67
-rw-r--r--  net/tipc/socket.c                   |   4
-rw-r--r--  net/xfrm/xfrm_output.c              |  21
-rw-r--r--  net/xfrm/xfrm_policy.c              |   9
-rw-r--r--  net/xfrm/xfrm_state.c               |   7
28 files changed, 423 insertions, 165 deletions
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 69363bd37f64..89659d4ed1f9 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -71,7 +71,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
mdst = br_mdb_get(br, skb, vid);
if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
- br_multicast_querier_exists(br))
+ br_multicast_querier_exists(br, eth_hdr(skb)))
br_multicast_deliver(mdst, skb);
else
br_flood_deliver(br, skb, false);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 8c561c0aa636..a2fd37ec35f7 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -102,7 +102,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
} else if (is_multicast_ether_addr(dest)) {
mdst = br_mdb_get(br, skb, vid);
if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
- br_multicast_querier_exists(br)) {
+ br_multicast_querier_exists(br, eth_hdr(skb))) {
if ((mdst && mdst->mglist) ||
br_multicast_is_router(br))
skb2 = skb;
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 0daae3ec2355..6319c4333c39 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -414,16 +414,20 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
if (!netif_running(br->dev) || br->multicast_disabled)
return -EINVAL;
- if (timer_pending(&br->multicast_querier_timer))
- return -EBUSY;
-
ip.proto = entry->addr.proto;
- if (ip.proto == htons(ETH_P_IP))
+ if (ip.proto == htons(ETH_P_IP)) {
+ if (timer_pending(&br->ip4_querier.timer))
+ return -EBUSY;
+
ip.u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
- else
+ } else {
+ if (timer_pending(&br->ip6_querier.timer))
+ return -EBUSY;
+
ip.u.ip6 = entry->addr.u.ip6;
#endif
+ }
spin_lock_bh(&br->multicast_lock);
mdb = mlock_dereference(br->mdb, br);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 08e576ada0b2..bbcb43582496 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -33,7 +33,8 @@
#include "br_private.h"
-static void br_multicast_start_querier(struct net_bridge *br);
+static void br_multicast_start_querier(struct net_bridge *br,
+ struct bridge_mcast_query *query);
unsigned int br_mdb_rehash_seq;
static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
@@ -755,20 +756,35 @@ static void br_multicast_local_router_expired(unsigned long data)
{
}
-static void br_multicast_querier_expired(unsigned long data)
+static void br_multicast_querier_expired(struct net_bridge *br,
+ struct bridge_mcast_query *query)
{
- struct net_bridge *br = (void *)data;
-
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) || br->multicast_disabled)
goto out;
- br_multicast_start_querier(br);
+ br_multicast_start_querier(br, query);
out:
spin_unlock(&br->multicast_lock);
}
+static void br_ip4_multicast_querier_expired(unsigned long data)
+{
+ struct net_bridge *br = (void *)data;
+
+ br_multicast_querier_expired(br, &br->ip4_query);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_querier_expired(unsigned long data)
+{
+ struct net_bridge *br = (void *)data;
+
+ br_multicast_querier_expired(br, &br->ip6_query);
+}
+#endif
+
static void __br_multicast_send_query(struct net_bridge *br,
struct net_bridge_port *port,
struct br_ip *ip)
@@ -789,37 +805,45 @@ static void __br_multicast_send_query(struct net_bridge *br,
}
static void br_multicast_send_query(struct net_bridge *br,
- struct net_bridge_port *port, u32 sent)
+ struct net_bridge_port *port,
+ struct bridge_mcast_query *query)
{
unsigned long time;
struct br_ip br_group;
+ struct bridge_mcast_querier *querier = NULL;
if (!netif_running(br->dev) || br->multicast_disabled ||
- !br->multicast_querier ||
- timer_pending(&br->multicast_querier_timer))
+ !br->multicast_querier)
return;
memset(&br_group.u, 0, sizeof(br_group.u));
- br_group.proto = htons(ETH_P_IP);
- __br_multicast_send_query(br, port, &br_group);
-
+ if (port ? (query == &port->ip4_query) :
+ (query == &br->ip4_query)) {
+ querier = &br->ip4_querier;
+ br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
- br_group.proto = htons(ETH_P_IPV6);
- __br_multicast_send_query(br, port, &br_group);
+ } else {
+ querier = &br->ip6_querier;
+ br_group.proto = htons(ETH_P_IPV6);
#endif
+ }
+
+ if (!querier || timer_pending(&querier->timer))
+ return;
+
+ __br_multicast_send_query(br, port, &br_group);
time = jiffies;
- time += sent < br->multicast_startup_query_count ?
+ time += query->startup_sent < br->multicast_startup_query_count ?
br->multicast_startup_query_interval :
br->multicast_query_interval;
- mod_timer(port ? &port->multicast_query_timer :
- &br->multicast_query_timer, time);
+ mod_timer(&query->timer, time);
}
-static void br_multicast_port_query_expired(unsigned long data)
+static void br_multicast_port_query_expired(struct net_bridge_port *port,
+ struct bridge_mcast_query *query)
{
- struct net_bridge_port *port = (void *)data;
struct net_bridge *br = port->br;
spin_lock(&br->multicast_lock);
@@ -827,25 +851,43 @@ static void br_multicast_port_query_expired(unsigned long data)
port->state == BR_STATE_BLOCKING)
goto out;
- if (port->multicast_startup_queries_sent <
- br->multicast_startup_query_count)
- port->multicast_startup_queries_sent++;
+ if (query->startup_sent < br->multicast_startup_query_count)
+ query->startup_sent++;
- br_multicast_send_query(port->br, port,
- port->multicast_startup_queries_sent);
+ br_multicast_send_query(port->br, port, query);
out:
spin_unlock(&br->multicast_lock);
}
+static void br_ip4_multicast_port_query_expired(unsigned long data)
+{
+ struct net_bridge_port *port = (void *)data;
+
+ br_multicast_port_query_expired(port, &port->ip4_query);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_port_query_expired(unsigned long data)
+{
+ struct net_bridge_port *port = (void *)data;
+
+ br_multicast_port_query_expired(port, &port->ip6_query);
+}
+#endif
+
void br_multicast_add_port(struct net_bridge_port *port)
{
port->multicast_router = 1;
setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
(unsigned long)port);
- setup_timer(&port->multicast_query_timer,
- br_multicast_port_query_expired, (unsigned long)port);
+ setup_timer(&port->ip4_query.timer, br_ip4_multicast_port_query_expired,
+ (unsigned long)port);
+#if IS_ENABLED(CONFIG_IPV6)
+ setup_timer(&port->ip6_query.timer, br_ip6_multicast_port_query_expired,
+ (unsigned long)port);
+#endif
}
void br_multicast_del_port(struct net_bridge_port *port)
@@ -853,13 +895,13 @@ void br_multicast_del_port(struct net_bridge_port *port)
del_timer_sync(&port->multicast_router_timer);
}
-static void __br_multicast_enable_port(struct net_bridge_port *port)
+static void br_multicast_enable(struct bridge_mcast_query *query)
{
- port->multicast_startup_queries_sent = 0;
+ query->startup_sent = 0;
- if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 ||
- del_timer(&port->multicast_query_timer))
- mod_timer(&port->multicast_query_timer, jiffies);
+ if (try_to_del_timer_sync(&query->timer) >= 0 ||
+ del_timer(&query->timer))
+ mod_timer(&query->timer, jiffies);
}
void br_multicast_enable_port(struct net_bridge_port *port)
@@ -870,7 +912,10 @@ void br_multicast_enable_port(struct net_bridge_port *port)
if (br->multicast_disabled || !netif_running(br->dev))
goto out;
- __br_multicast_enable_port(port);
+ br_multicast_enable(&port->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+ br_multicast_enable(&port->ip6_query);
+#endif
out:
spin_unlock(&br->multicast_lock);
@@ -889,7 +934,10 @@ void br_multicast_disable_port(struct net_bridge_port *port)
if (!hlist_unhashed(&port->rlist))
hlist_del_init_rcu(&port->rlist);
del_timer(&port->multicast_router_timer);
- del_timer(&port->multicast_query_timer);
+ del_timer(&port->ip4_query.timer);
+#if IS_ENABLED(CONFIG_IPV6)
+ del_timer(&port->ip6_query.timer);
+#endif
spin_unlock(&br->multicast_lock);
}
@@ -1014,14 +1062,15 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
}
#endif
-static void br_multicast_update_querier_timer(struct net_bridge *br,
- unsigned long max_delay)
+static void
+br_multicast_update_querier_timer(struct net_bridge *br,
+ struct bridge_mcast_querier *querier,
+ unsigned long max_delay)
{
- if (!timer_pending(&br->multicast_querier_timer))
- br->multicast_querier_delay_time = jiffies + max_delay;
+ if (!timer_pending(&querier->timer))
+ querier->delay_time = jiffies + max_delay;
- mod_timer(&br->multicast_querier_timer,
- jiffies + br->multicast_querier_interval);
+ mod_timer(&querier->timer, jiffies + br->multicast_querier_interval);
}
/*
@@ -1074,12 +1123,13 @@ timer:
static void br_multicast_query_received(struct net_bridge *br,
struct net_bridge_port *port,
+ struct bridge_mcast_querier *querier,
int saddr,
unsigned long max_delay)
{
if (saddr)
- br_multicast_update_querier_timer(br, max_delay);
- else if (timer_pending(&br->multicast_querier_timer))
+ br_multicast_update_querier_timer(br, querier, max_delay);
+ else if (timer_pending(&querier->timer))
return;
br_multicast_mark_router(br, port);
@@ -1129,7 +1179,8 @@ static int br_ip4_multicast_query(struct net_bridge *br,
IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
}
- br_multicast_query_received(br, port, !!iph->saddr, max_delay);
+ br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr,
+ max_delay);
if (!group)
goto out;
@@ -1203,11 +1254,12 @@ static int br_ip6_multicast_query(struct net_bridge *br,
mld2q = (struct mld2_query *)icmp6_hdr(skb);
if (!mld2q->mld2q_nsrcs)
group = &mld2q->mld2q_mca;
- max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1;
+
+ max_delay = max(msecs_to_jiffies(MLDV2_MRC(ntohs(mld2q->mld2q_mrc))), 1UL);
}
- br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr),
- max_delay);
+ br_multicast_query_received(br, port, &br->ip6_querier,
+ !ipv6_addr_any(&ip6h->saddr), max_delay);
if (!group)
goto out;
@@ -1244,7 +1296,9 @@ out:
static void br_multicast_leave_group(struct net_bridge *br,
struct net_bridge_port *port,
- struct br_ip *group)
+ struct br_ip *group,
+ struct bridge_mcast_querier *querier,
+ struct bridge_mcast_query *query)
{
struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
@@ -1255,7 +1309,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) ||
(port && port->state == BR_STATE_DISABLED) ||
- timer_pending(&br->multicast_querier_timer))
+ timer_pending(&querier->timer))
goto out;
mdb = mlock_dereference(br->mdb, br);
@@ -1263,14 +1317,13 @@ static void br_multicast_leave_group(struct net_bridge *br,
if (!mp)
goto out;
- if (br->multicast_querier &&
- !timer_pending(&br->multicast_querier_timer)) {
+ if (br->multicast_querier) {
__br_multicast_send_query(br, port, &mp->addr);
time = jiffies + br->multicast_last_member_count *
br->multicast_last_member_interval;
- mod_timer(port ? &port->multicast_query_timer :
- &br->multicast_query_timer, time);
+
+ mod_timer(&query->timer, time);
for (p = mlock_dereference(mp->ports, br);
p != NULL;
@@ -1323,7 +1376,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
mod_timer(&mp->timer, time);
}
}
-
out:
spin_unlock(&br->multicast_lock);
}
@@ -1334,6 +1386,8 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
__u16 vid)
{
struct br_ip br_group;
+ struct bridge_mcast_query *query = port ? &port->ip4_query :
+ &br->ip4_query;
if (ipv4_is_local_multicast(group))
return;
@@ -1342,7 +1396,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
br_group.proto = htons(ETH_P_IP);
br_group.vid = vid;
- br_multicast_leave_group(br, port, &br_group);
+ br_multicast_leave_group(br, port, &br_group, &br->ip4_querier, query);
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -1352,6 +1406,9 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
__u16 vid)
{
struct br_ip br_group;
+ struct bridge_mcast_query *query = port ? &port->ip6_query :
+ &br->ip6_query;
+
if (!ipv6_is_transient_multicast(group))
return;
@@ -1360,7 +1417,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
br_group.proto = htons(ETH_P_IPV6);
br_group.vid = vid;
- br_multicast_leave_group(br, port, &br_group);
+ br_multicast_leave_group(br, port, &br_group, &br->ip6_querier, query);
}
#endif
@@ -1622,19 +1679,32 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
return 0;
}
-static void br_multicast_query_expired(unsigned long data)
+static void br_multicast_query_expired(struct net_bridge *br,
+ struct bridge_mcast_query *query)
+{
+ spin_lock(&br->multicast_lock);
+ if (query->startup_sent < br->multicast_startup_query_count)
+ query->startup_sent++;
+
+ br_multicast_send_query(br, NULL, query);
+ spin_unlock(&br->multicast_lock);
+}
+
+static void br_ip4_multicast_query_expired(unsigned long data)
{
struct net_bridge *br = (void *)data;
- spin_lock(&br->multicast_lock);
- if (br->multicast_startup_queries_sent <
- br->multicast_startup_query_count)
- br->multicast_startup_queries_sent++;
+ br_multicast_query_expired(br, &br->ip4_query);
+}
- br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent);
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_query_expired(unsigned long data)
+{
+ struct net_bridge *br = (void *)data;
- spin_unlock(&br->multicast_lock);
+ br_multicast_query_expired(br, &br->ip6_query);
}
+#endif
void br_multicast_init(struct net_bridge *br)
{
@@ -1654,25 +1724,43 @@ void br_multicast_init(struct net_bridge *br)
br->multicast_querier_interval = 255 * HZ;
br->multicast_membership_interval = 260 * HZ;
- br->multicast_querier_delay_time = 0;
+ br->ip4_querier.delay_time = 0;
+#if IS_ENABLED(CONFIG_IPV6)
+ br->ip6_querier.delay_time = 0;
+#endif
spin_lock_init(&br->multicast_lock);
setup_timer(&br->multicast_router_timer,
br_multicast_local_router_expired, 0);
- setup_timer(&br->multicast_querier_timer,
- br_multicast_querier_expired, (unsigned long)br);
- setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
+ setup_timer(&br->ip4_querier.timer, br_ip4_multicast_querier_expired,
+ (unsigned long)br);
+ setup_timer(&br->ip4_query.timer, br_ip4_multicast_query_expired,
(unsigned long)br);
+#if IS_ENABLED(CONFIG_IPV6)
+ setup_timer(&br->ip6_querier.timer, br_ip6_multicast_querier_expired,
+ (unsigned long)br);
+ setup_timer(&br->ip6_query.timer, br_ip6_multicast_query_expired,
+ (unsigned long)br);
+#endif
}
-void br_multicast_open(struct net_bridge *br)
+static void __br_multicast_open(struct net_bridge *br,
+ struct bridge_mcast_query *query)
{
- br->multicast_startup_queries_sent = 0;
+ query->startup_sent = 0;
if (br->multicast_disabled)
return;
- mod_timer(&br->multicast_query_timer, jiffies);
+ mod_timer(&query->timer, jiffies);
+}
+
+void br_multicast_open(struct net_bridge *br)
+{
+ __br_multicast_open(br, &br->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+ __br_multicast_open(br, &br->ip6_query);
+#endif
}
void br_multicast_stop(struct net_bridge *br)
@@ -1684,8 +1772,12 @@ void br_multicast_stop(struct net_bridge *br)
int i;
del_timer_sync(&br->multicast_router_timer);
- del_timer_sync(&br->multicast_querier_timer);
- del_timer_sync(&br->multicast_query_timer);
+ del_timer_sync(&br->ip4_querier.timer);
+ del_timer_sync(&br->ip4_query.timer);
+#if IS_ENABLED(CONFIG_IPV6)
+ del_timer_sync(&br->ip6_querier.timer);
+ del_timer_sync(&br->ip6_query.timer);
+#endif
spin_lock_bh(&br->multicast_lock);
mdb = mlock_dereference(br->mdb, br);
@@ -1788,18 +1880,24 @@ unlock:
return err;
}
-static void br_multicast_start_querier(struct net_bridge *br)
+static void br_multicast_start_querier(struct net_bridge *br,
+ struct bridge_mcast_query *query)
{
struct net_bridge_port *port;
- br_multicast_open(br);
+ __br_multicast_open(br, query);
list_for_each_entry(port, &br->port_list, list) {
if (port->state == BR_STATE_DISABLED ||
port->state == BR_STATE_BLOCKING)
continue;
- __br_multicast_enable_port(port);
+ if (query == &br->ip4_query)
+ br_multicast_enable(&port->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ br_multicast_enable(&port->ip6_query);
+#endif
}
}
@@ -1834,7 +1932,10 @@ rollback:
goto rollback;
}
- br_multicast_start_querier(br);
+ br_multicast_start_querier(br, &br->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+ br_multicast_start_querier(br, &br->ip6_query);
+#endif
unlock:
spin_unlock_bh(&br->multicast_lock);
@@ -1857,10 +1958,18 @@ int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
goto unlock;
max_delay = br->multicast_query_response_interval;
- if (!timer_pending(&br->multicast_querier_timer))
- br->multicast_querier_delay_time = jiffies + max_delay;
- br_multicast_start_querier(br);
+ if (!timer_pending(&br->ip4_querier.timer))
+ br->ip4_querier.delay_time = jiffies + max_delay;
+
+ br_multicast_start_querier(br, &br->ip4_query);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (!timer_pending(&br->ip6_querier.timer))
+ br->ip6_querier.delay_time = jiffies + max_delay;
+
+ br_multicast_start_querier(br, &br->ip6_query);
+#endif
unlock:
spin_unlock_bh(&br->multicast_lock);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 2f7da41851bf..263ba9034468 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -66,6 +66,20 @@ struct br_ip
__u16 vid;
};
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+/* our own querier */
+struct bridge_mcast_query {
+ struct timer_list timer;
+ u32 startup_sent;
+};
+
+/* other querier */
+struct bridge_mcast_querier {
+ struct timer_list timer;
+ unsigned long delay_time;
+};
+#endif
+
struct net_port_vlans {
u16 port_idx;
u16 pvid;
@@ -162,10 +176,12 @@ struct net_bridge_port
#define BR_FLOOD 0x00000040
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
- u32 multicast_startup_queries_sent;
+ struct bridge_mcast_query ip4_query;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct bridge_mcast_query ip6_query;
+#endif /* IS_ENABLED(CONFIG_IPV6) */
unsigned char multicast_router;
struct timer_list multicast_router_timer;
- struct timer_list multicast_query_timer;
struct hlist_head mglist;
struct hlist_node rlist;
#endif
@@ -258,7 +274,6 @@ struct net_bridge
u32 hash_max;
u32 multicast_last_member_count;
- u32 multicast_startup_queries_sent;
u32 multicast_startup_query_count;
unsigned long multicast_last_member_interval;
@@ -267,15 +282,18 @@ struct net_bridge
unsigned long multicast_query_interval;
unsigned long multicast_query_response_interval;
unsigned long multicast_startup_query_interval;
- unsigned long multicast_querier_delay_time;
spinlock_t multicast_lock;
struct net_bridge_mdb_htable __rcu *mdb;
struct hlist_head router_list;
struct timer_list multicast_router_timer;
- struct timer_list multicast_querier_timer;
- struct timer_list multicast_query_timer;
+ struct bridge_mcast_querier ip4_querier;
+ struct bridge_mcast_query ip4_query;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct bridge_mcast_querier ip6_querier;
+ struct bridge_mcast_query ip6_query;
+#endif /* IS_ENABLED(CONFIG_IPV6) */
#endif
struct timer_list hello_timer;
@@ -503,11 +521,27 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
timer_pending(&br->multicast_router_timer));
}
-static inline bool br_multicast_querier_exists(struct net_bridge *br)
+static inline bool
+__br_multicast_querier_exists(struct net_bridge *br,
+ struct bridge_mcast_querier *querier)
+{
+ return time_is_before_jiffies(querier->delay_time) &&
+ (br->multicast_querier || timer_pending(&querier->timer));
+}
+
+static inline bool br_multicast_querier_exists(struct net_bridge *br,
+ struct ethhdr *eth)
{
- return time_is_before_jiffies(br->multicast_querier_delay_time) &&
- (br->multicast_querier ||
- timer_pending(&br->multicast_querier_timer));
+ switch (eth->h_proto) {
+ case (htons(ETH_P_IP)):
+ return __br_multicast_querier_exists(br, &br->ip4_querier);
+#if IS_ENABLED(CONFIG_IPV6)
+ case (htons(ETH_P_IPV6)):
+ return __br_multicast_querier_exists(br, &br->ip6_querier);
+#endif
+ default:
+ return false;
+ }
}
#else
static inline int br_multicast_rcv(struct net_bridge *br,
@@ -565,7 +599,8 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
{
return 0;
}
-static inline bool br_multicast_querier_exists(struct net_bridge *br)
+static inline bool br_multicast_querier_exists(struct net_bridge *br,
+ struct ethhdr *eth)
{
return false;
}
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index b84a1b155bc1..d12e3a9a5356 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -346,14 +346,9 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
if (new_index < 0)
new_index = skb_tx_hash(dev, skb);
- if (queue_index != new_index && sk) {
- struct dst_entry *dst =
- rcu_dereference_check(sk->sk_dst_cache, 1);
-
- if (dst && skb_dst(skb) == dst)
- sk_tx_queue_set(sk, queue_index);
-
- }
+ if (queue_index != new_index && sk &&
+ rcu_access_pointer(sk->sk_dst_cache))
+ sk_tx_queue_set(sk, queue_index);
queue_index = new_index;
}
diff --git a/net/core/scm.c b/net/core/scm.c
index 03795d0147f2..b4da80b1cc07 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -54,7 +54,7 @@ static __inline__ int scm_check_creds(struct ucred *creds)
return -EINVAL;
if ((creds->pid == task_tgid_vnr(current) ||
- ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) &&
+ ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) &&
((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 4bcabf3ab4ca..9ee17e3d11c3 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -211,14 +211,6 @@ static inline int ip_finish_output2(struct sk_buff *skb)
return -EINVAL;
}
-static inline int ip_skb_dst_mtu(struct sk_buff *skb)
-{
- struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
-
- return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
- skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
-}
-
static int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 51fc2a1dcdd3..b3ac3c3f6219 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -190,15 +190,14 @@ static int ipip_rcv(struct sk_buff *skb)
struct ip_tunnel *tunnel;
const struct iphdr *iph;
- if (iptunnel_pull_header(skb, 0, tpi.proto))
- goto drop;
-
iph = ip_hdr(skb);
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
iph->saddr, iph->daddr, 0);
if (tunnel) {
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto drop;
+ if (iptunnel_pull_header(skb, 0, tpi.proto))
+ goto drop;
return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
}
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index dd44e0ab600c..61e60d67adca 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -571,7 +571,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE,
inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
- inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP,
+ inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP |
+ (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
daddr, saddr, 0, 0);
if (!inet->hdrincl) {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 28af45abe062..3ca2139a130b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3535,7 +3535,10 @@ static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr
++ptr;
tp->rx_opt.rcv_tsval = ntohl(*ptr);
++ptr;
- tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
+ if (*ptr)
+ tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
+ else
+ tp->rx_opt.rcv_tsecr = 0;
return true;
}
return false;
@@ -3560,7 +3563,7 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb,
}
tcp_parse_options(skb, &tp->rx_opt, 1, NULL);
- if (tp->rx_opt.saw_tstamp)
+ if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
tp->rx_opt.rcv_tsecr -= tp->tsoffset;
return true;
@@ -5316,7 +5319,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
int saved_clamp = tp->rx_opt.mss_clamp;
tcp_parse_options(skb, &tp->rx_opt, 0, &foc);
- if (tp->rx_opt.saw_tstamp)
+ if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
tp->rx_opt.rcv_tsecr -= tp->tsoffset;
if (th->ack) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 92fde8d1aa82..170737a9d56d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2670,7 +2670,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
int tcp_header_size;
int mss;
- skb = alloc_skb(MAX_TCP_HEADER + 15, sk_gfp_atomic(sk, GFP_ATOMIC));
+ skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
if (unlikely(!skb)) {
dst_release(dst);
return NULL;
@@ -2814,6 +2814,8 @@ void tcp_connect_init(struct sock *sk)
if (likely(!tp->repair))
tp->rcv_nxt = 0;
+ else
+ tp->rcv_tstamp = tcp_time_stamp;
tp->rcv_wup = tp->rcv_nxt;
tp->copied_seq = tp->rcv_nxt;
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 327a617d594c..baa0f63731fd 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -21,7 +21,6 @@
static int xfrm4_tunnel_check_size(struct sk_buff *skb)
{
int mtu, ret = 0;
- struct dst_entry *dst;
if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
goto out;
@@ -29,12 +28,10 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df)
goto out;
- dst = skb_dst(skb);
- mtu = dst_mtu(dst);
+ mtu = dst_mtu(skb_dst(skb));
if (skb->len > mtu) {
if (skb->sk)
- ip_local_error(skb->sk, EMSGSIZE, ip_hdr(skb)->daddr,
- inet_sk(skb->sk)->inet_dport, mtu);
+ xfrm_local_error(skb, mtu);
else
icmp_send(skb, ICMP_DEST_UNREACH,
ICMP_FRAG_NEEDED, htonl(mtu));
@@ -99,3 +96,12 @@ int xfrm4_output(struct sk_buff *skb)
x->outer_mode->afinfo->output_finish,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
+
+void xfrm4_local_error(struct sk_buff *skb, u32 mtu)
+{
+ struct iphdr *hdr;
+
+ hdr = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ ip_local_error(skb->sk, EMSGSIZE, hdr->daddr,
+ inet_sk(skb->sk)->inet_dport, mtu);
+}
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 9258e751baba..0b2a0641526a 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -83,6 +83,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = {
.extract_input = xfrm4_extract_input,
.extract_output = xfrm4_extract_output,
.transport_finish = xfrm4_transport_finish,
+ .local_error = xfrm4_local_error,
};
void __init xfrm4_state_init(void)
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index ecd60733e5e2..90747f1973fe 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -724,6 +724,11 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
}
+ if (likely(!skb->encapsulation)) {
+ skb_reset_inner_headers(skb);
+ skb->encapsulation = 1;
+ }
+
skb_push(skb, gre_hlen);
skb_reset_network_header(skb);
skb_set_transport_header(skb, sizeof(*ipv6h));
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 6e3ddf806ec2..e7ceb6c871d1 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -238,6 +238,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
hdr->saddr = fl6->saddr;
hdr->daddr = *first_hop;
+ skb->protocol = htons(ETH_P_IPV6);
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
@@ -1057,6 +1058,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
/* initialize protocol header pointer */
skb->transport_header = skb->network_header + fragheaderlen;
+ skb->protocol = htons(ETH_P_IPV6);
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
}
@@ -1359,6 +1361,7 @@ alloc_new_skb:
/*
* Fill in the control structures
*/
+ skb->protocol = htons(ETH_P_IPV6);
skb->ip_summed = CHECKSUM_NONE;
skb->csum = 0;
/* reserve for fragmentation and ipsec header */
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 1e55866cead7..46ba243605a3 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1027,6 +1027,12 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
init_tel_txopt(&opt, encap_limit);
ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
}
+
+ if (likely(!skb->encapsulation)) {
+ skb_reset_inner_headers(skb);
+ skb->encapsulation = 1;
+ }
+
skb_push(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
ipv6h = ipv6_hdr(skb);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index c45f7a5c36e9..cdaed47ba932 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -628,6 +628,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
goto error;
skb_reserve(skb, hlen);
+ skb->protocol = htons(ETH_P_IPV6);
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
skb_dst_set(skb, &rt->dst);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index a3437a4cd07e..21b25dd8466b 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -645,11 +645,7 @@ static int ipip_rcv(struct sk_buff *skb)
const struct iphdr *iph;
struct ip_tunnel *tunnel;
- if (iptunnel_pull_header(skb, 0, tpi.proto))
- goto drop;
-
iph = ip_hdr(skb);
-
tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
iph->saddr, iph->daddr);
if (tunnel != NULL) {
@@ -659,6 +655,8 @@ static int ipip_rcv(struct sk_buff *skb)
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto drop;
+ if (iptunnel_pull_header(skb, 0, tpi.proto))
+ goto drop;
return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
}
@@ -888,6 +886,11 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
ttl = iph6->hop_limit;
tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
+ if (likely(!skb->encapsulation)) {
+ skb_reset_inner_headers(skb);
+ skb->encapsulation = 1;
+ }
+
err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, fl4.daddr,
IPPROTO_IPV6, tos, ttl, df);
iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 8755a3079d0f..6cd625e37706 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -34,8 +34,10 @@ static int xfrm6_local_dontfrag(struct sk_buff *skb)
struct sock *sk = skb->sk;
if (sk) {
- proto = sk->sk_protocol;
+ if (sk->sk_family != AF_INET6)
+ return 0;
+ proto = sk->sk_protocol;
if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
return inet6_sk(sk)->dontfrag;
}
@@ -54,13 +56,15 @@ static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)
ipv6_local_rxpmtu(sk, &fl6, mtu);
}
-static void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
+void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
{
struct flowi6 fl6;
+ const struct ipv6hdr *hdr;
struct sock *sk = skb->sk;
+ hdr = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
fl6.fl6_dport = inet_sk(sk)->inet_dport;
- fl6.daddr = ipv6_hdr(skb)->daddr;
+ fl6.daddr = hdr->daddr;
ipv6_local_error(sk, EMSGSIZE, &fl6, mtu);
}
@@ -80,7 +84,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
if (xfrm6_local_dontfrag(skb))
xfrm6_local_rxpmtu(skb, mtu);
else if (skb->sk)
- xfrm6_local_error(skb, mtu);
+ xfrm_local_error(skb, mtu);
else
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
ret = -EMSGSIZE;
@@ -136,13 +140,18 @@ static int __xfrm6_output(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct xfrm_state *x = dst->xfrm;
- int mtu = ip6_skb_dst_mtu(skb);
+ int mtu;
+
+ if (skb->protocol == htons(ETH_P_IPV6))
+ mtu = ip6_skb_dst_mtu(skb);
+ else
+ mtu = dst_mtu(skb_dst(skb));
if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
xfrm6_local_rxpmtu(skb, mtu);
return -EMSGSIZE;
} else if (!skb->local_df && skb->len > mtu && skb->sk) {
- xfrm6_local_error(skb, mtu);
+ xfrm_local_error(skb, mtu);
return -EMSGSIZE;
}
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index d8c70b8efc24..3fc970135fc6 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -183,6 +183,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = {
.extract_input = xfrm6_extract_input,
.extract_output = xfrm6_extract_output,
.transport_finish = xfrm6_transport_finish,
+ .local_error = xfrm6_local_error,
};
int __init xfrm6_state_init(void)
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index ea7b9c2c7e66..2d45643c964e 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -36,7 +36,7 @@
static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
const u8 *bssid, const int beacon_int,
- struct ieee80211_channel *chan,
+ struct cfg80211_chan_def *req_chandef,
const u32 basic_rates,
const u16 capability, u64 tsf,
bool creator)
@@ -51,6 +51,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
u32 bss_change;
u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
struct cfg80211_chan_def chandef;
+ struct ieee80211_channel *chan;
struct beacon_data *presp;
int frame_len;
@@ -81,7 +82,9 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
- chandef = ifibss->chandef;
+ /* make a copy of the chandef, it could be modified below. */
+ chandef = *req_chandef;
+ chan = chandef.chan;
if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) {
chandef.width = NL80211_CHAN_WIDTH_20;
chandef.center_freq1 = chan->center_freq;
@@ -259,10 +262,12 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
struct cfg80211_bss *cbss =
container_of((void *)bss, struct cfg80211_bss, priv);
struct ieee80211_supported_band *sband;
+ struct cfg80211_chan_def chandef;
u32 basic_rates;
int i, j;
u16 beacon_int = cbss->beacon_interval;
const struct cfg80211_bss_ies *ies;
+ enum nl80211_channel_type chan_type;
u64 tsf;
sdata_assert_lock(sdata);
@@ -270,6 +275,26 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
if (beacon_int < 10)
beacon_int = 10;
+ switch (sdata->u.ibss.chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ case NL80211_CHAN_WIDTH_40:
+ chan_type = cfg80211_get_chandef_type(&sdata->u.ibss.chandef);
+ cfg80211_chandef_create(&chandef, cbss->channel, chan_type);
+ break;
+ case NL80211_CHAN_WIDTH_5:
+ case NL80211_CHAN_WIDTH_10:
+ cfg80211_chandef_create(&chandef, cbss->channel,
+ NL80211_CHAN_WIDTH_20_NOHT);
+ chandef.width = sdata->u.ibss.chandef.width;
+ break;
+ default:
+ /* fall back to 20 MHz for unsupported modes */
+ cfg80211_chandef_create(&chandef, cbss->channel,
+ NL80211_CHAN_WIDTH_20_NOHT);
+ break;
+ }
+
sband = sdata->local->hw.wiphy->bands[cbss->channel->band];
basic_rates = 0;
@@ -294,7 +319,7 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
__ieee80211_sta_join_ibss(sdata, cbss->bssid,
beacon_int,
- cbss->channel,
+ &chandef,
basic_rates,
cbss->capability,
tsf, false);
@@ -736,7 +761,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
sdata->drop_unencrypted = 0;
__ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int,
- ifibss->chandef.chan, ifibss->basic_rates,
+ &ifibss->chandef, ifibss->basic_rates,
capability, 0, true);
}
@@ -1138,6 +1163,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
BSS_CHANGED_IBSS);
+ ieee80211_vif_release_channel(sdata);
synchronize_rcu();
kfree(presp);
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index f5aed963b22e..f3bbea1eb9e7 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -828,6 +828,9 @@ minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
if (sband->band != IEEE80211_BAND_2GHZ)
return;
+ if (!(mp->hw->flags & IEEE80211_HW_SUPPORTS_HT_CCK_RATES))
+ return;
+
mi->cck_supported = 0;
mi->cck_supported_short = 0;
for (i = 0; i < 4; i++) {
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 512718adb0d5..0c741cec4d0d 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -364,7 +364,7 @@ int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops)
EXPORT_SYMBOL(genl_unregister_ops);
/**
- * genl_register_family - register a generic netlink family
+ * __genl_register_family - register a generic netlink family
* @family: generic netlink family
*
* Registers the specified family after validating it first. Only one
@@ -374,7 +374,7 @@ EXPORT_SYMBOL(genl_unregister_ops);
*
* Return 0 on success or a negative error code.
*/
-int genl_register_family(struct genl_family *family)
+int __genl_register_family(struct genl_family *family)
{
int err = -EINVAL;
@@ -430,10 +430,10 @@ errout_locked:
errout:
return err;
}
-EXPORT_SYMBOL(genl_register_family);
+EXPORT_SYMBOL(__genl_register_family);
/**
- * genl_register_family_with_ops - register a generic netlink family
+ * __genl_register_family_with_ops - register a generic netlink family
* @family: generic netlink family
* @ops: operations to be registered
* @n_ops: number of elements to register
@@ -457,12 +457,12 @@ EXPORT_SYMBOL(genl_register_family);
*
* Return 0 on success or a negative error code.
*/
-int genl_register_family_with_ops(struct genl_family *family,
+int __genl_register_family_with_ops(struct genl_family *family,
struct genl_ops *ops, size_t n_ops)
{
int err, i;
- err = genl_register_family(family);
+ err = __genl_register_family(family);
if (err)
return err;
@@ -476,7 +476,7 @@ err_out:
genl_unregister_family(family);
return err;
}
-EXPORT_SYMBOL(genl_register_family_with_ops);
+EXPORT_SYMBOL(__genl_register_family_with_ops);
/**
* genl_unregister_family - unregister generic netlink family
@@ -544,6 +544,30 @@ void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
}
EXPORT_SYMBOL(genlmsg_put);
+static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct genl_ops *ops = cb->data;
+ int rc;
+
+ genl_lock();
+ rc = ops->dumpit(skb, cb);
+ genl_unlock();
+ return rc;
+}
+
+static int genl_lock_done(struct netlink_callback *cb)
+{
+ struct genl_ops *ops = cb->data;
+ int rc = 0;
+
+ if (ops->done) {
+ genl_lock();
+ rc = ops->done(cb);
+ genl_unlock();
+ }
+ return rc;
+}
+
static int genl_family_rcv_msg(struct genl_family *family,
struct sk_buff *skb,
struct nlmsghdr *nlh)
@@ -572,15 +596,34 @@ static int genl_family_rcv_msg(struct genl_family *family,
return -EPERM;
if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
- struct netlink_dump_control c = {
- .dump = ops->dumpit,
- .done = ops->done,
- };
+ int rc;
if (ops->dumpit == NULL)
return -EOPNOTSUPP;
- return netlink_dump_start(net->genl_sock, skb, nlh, &c);
+ if (!family->parallel_ops) {
+ struct netlink_dump_control c = {
+ .module = family->module,
+ .data = ops,
+ .dump = genl_lock_dumpit,
+ .done = genl_lock_done,
+ };
+
+ genl_unlock();
+ rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
+ genl_lock();
+
+ } else {
+ struct netlink_dump_control c = {
+ .module = family->module,
+ .dump = ops->dumpit,
+ .done = ops->done,
+ };
+
+ rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
+ }
+
+ return rc;
}
if (ops->doit == NULL)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index ce8249c76827..6cc7ddd2fb7c 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1257,7 +1257,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
/* Accept only ACK or NACK message */
if (unlikely(msg_errcode(msg))) {
sock->state = SS_DISCONNECTING;
- sk->sk_err = -ECONNREFUSED;
+ sk->sk_err = ECONNREFUSED;
retval = TIPC_OK;
break;
}
@@ -1268,7 +1268,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
res = auto_connect(sock, msg);
if (res) {
sock->state = SS_DISCONNECTING;
- sk->sk_err = res;
+ sk->sk_err = -res;
retval = TIPC_OK;
break;
}
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index eb4a84288648..3bb2cdc13b46 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -214,5 +214,26 @@ int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
return inner_mode->afinfo->extract_output(x, skb);
}
+void xfrm_local_error(struct sk_buff *skb, int mtu)
+{
+ unsigned int proto;
+ struct xfrm_state_afinfo *afinfo;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ proto = AF_INET;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ proto = AF_INET6;
+ else
+ return;
+
+ afinfo = xfrm_state_get_afinfo(proto);
+ if (!afinfo)
+ return;
+
+ afinfo->local_error(skb, mtu);
+ xfrm_state_put_afinfo(afinfo);
+}
+
EXPORT_SYMBOL_GPL(xfrm_output);
EXPORT_SYMBOL_GPL(xfrm_inner_extract_output);
+EXPORT_SYMBOL_GPL(xfrm_local_error);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index e52cab3591dd..f77c371ea72b 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -320,10 +320,8 @@ static void xfrm_queue_purge(struct sk_buff_head *list)
{
struct sk_buff *skb;
- while ((skb = skb_dequeue(list)) != NULL) {
- dev_put(skb->dev);
+ while ((skb = skb_dequeue(list)) != NULL)
kfree_skb(skb);
- }
}
/* Rule must be locked. Release descendant resources, announce
@@ -1758,7 +1756,6 @@ static void xfrm_policy_queue_process(unsigned long arg)
struct sk_buff *skb;
struct sock *sk;
struct dst_entry *dst;
- struct net_device *dev;
struct xfrm_policy *pol = (struct xfrm_policy *)arg;
struct xfrm_policy_queue *pq = &pol->polq;
struct flowi fl;
@@ -1805,7 +1802,6 @@ static void xfrm_policy_queue_process(unsigned long arg)
dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
&fl, skb->sk, 0);
if (IS_ERR(dst)) {
- dev_put(skb->dev);
kfree_skb(skb);
continue;
}
@@ -1814,9 +1810,7 @@ static void xfrm_policy_queue_process(unsigned long arg)
skb_dst_drop(skb);
skb_dst_set(skb, dst);
- dev = skb->dev;
err = dst_output(skb);
- dev_put(dev);
}
return;
@@ -1839,7 +1833,6 @@ static int xdst_queue_output(struct sk_buff *skb)
}
skb_dst_force(skb);
- dev_hold(skb->dev);
spin_lock_bh(&pq->hold_queue.lock);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 78f66fa92449..54c0acd29468 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -39,9 +39,6 @@ static DEFINE_SPINLOCK(xfrm_state_lock);
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
-static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
-static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
-
static inline unsigned int xfrm_dst_hash(struct net *net,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
@@ -1860,7 +1857,7 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
-static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
+struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
struct xfrm_state_afinfo *afinfo;
if (unlikely(family >= NPROTO))
@@ -1872,7 +1869,7 @@ static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
return afinfo;
}
-static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
+void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
rcu_read_unlock();
}