author     Patrick McHardy <kaber@trash.net>        2011-01-19 23:51:37 +0100
committer  Patrick McHardy <kaber@trash.net>        2011-01-19 23:51:37 +0100
commit     14f0290ba44de6ed435fea24bba26e7868421c66 (patch)
tree       449d32e4848007e3edbcab14fa8e09fdc66608ed /net
parent     f5c88f56b35599ab9ff2d3398e0153e4cd4a4c82 (diff)
parent     a5db219f4cf9f67995eabd53b81a1232c82f5852 (diff)
Merge branch 'master' of /repos/git/net-next-2.6
Diffstat (limited to 'net')
88 files changed, 839 insertions, 720 deletions
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 798beac7f100..1e308f210928 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -178,27 +178,24 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
             break;
         case 's':{
                 char **sptr = va_arg(ap, char **);
-                int16_t len;
-                int size;
+                uint16_t len;
 
                 errcode = p9pdu_readf(pdu, proto_version, "w", &len);
                 if (errcode)
                     break;
 
-                size = max_t(int16_t, len, 0);
-
-                *sptr = kmalloc(size + 1, GFP_KERNEL);
+                *sptr = kmalloc(len + 1, GFP_KERNEL);
                 if (*sptr == NULL) {
                     errcode = -EFAULT;
                     break;
                 }
-                if (pdu_read(pdu, *sptr, size)) {
+                if (pdu_read(pdu, *sptr, len)) {
                     errcode = -EFAULT;
                     kfree(*sptr);
                     *sptr = NULL;
                 } else
-                    (*sptr)[size] = 0;
+                    (*sptr)[len] = 0;
             }
             break;
         case 'Q':{
@@ -234,14 +231,14 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
             }
             break;
         case 'D':{
-                int32_t *count = va_arg(ap, int32_t *);
+                uint32_t *count = va_arg(ap, uint32_t *);
                 void **data = va_arg(ap, void **);
 
                 errcode = p9pdu_readf(pdu, proto_version, "d", count);
                 if (!errcode) {
                     *count =
-                        min_t(int32_t, *count,
+                        min_t(uint32_t, *count,
                               pdu->size - pdu->offset);
                     *data = &pdu->sdata[pdu->offset];
                 }
@@ -404,9 +401,10 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
             break;
         case 's':{
                 const char *sptr = va_arg(ap, const char *);
-                int16_t len = 0;
+                uint16_t len = 0;
                 if (sptr)
-                    len = min_t(int16_t, strlen(sptr), USHRT_MAX);
+                    len = min_t(uint16_t, strlen(sptr),
+                                USHRT_MAX);
 
                 errcode = p9pdu_writef(pdu, proto_version, "w", len);
@@ -438,7 +436,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
                     stbuf->n_gid, stbuf->n_muid);
             }
             break;
         case 'D':{
-                int32_t count = va_arg(ap, int32_t);
+                uint32_t count = va_arg(ap, uint32_t);
                 const void *data = va_arg(ap, const void *);
 
                 errcode = p9pdu_writef(pdu, proto_version, "d",
diff --git a/net/Kconfig b/net/Kconfig
index ad0aafe903f8..72840626284b 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -253,7 +253,9 @@ config NET_TCPPROBE
       what was just said, you don't need it: say N.
 
       Documentation on how to use TCP connection probing can be found
-      at http://linux-net.osdl.org/index.php/TcpProbe
+      at:
+
+        http://www.linuxfoundation.org/collaborate/workgroups/networking/tcpprobe
 
       To compile this code as a module, choose M here: the
       module will be called tcp_probe.
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index bb86d2932394..6da5daeebab7 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1392,7 +1392,7 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
     ax25_cb *ax25;
     int err = 0;
 
-    memset(fsa, 0, sizeof(fsa));
+    memset(fsa, 0, sizeof(*fsa));
     lock_sock(sk);
     ax25 = ax25_sk(sk);
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index d4d9926c2201..65106fb61b8f 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -151,9 +151,9 @@ int debug_log(struct bat_priv *bat_priv, char *fmt, ...);
     }                       \
     while (0)
 #else /* !CONFIG_BATMAN_ADV_DEBUG */
-static inline void bat_dbg(char type __attribute__((unused)),
-               struct bat_priv *bat_priv __attribute__((unused)),
-               char *fmt __attribute__((unused)), ...)
+static inline void bat_dbg(char type __always_unused,
+               struct bat_priv *bat_priv __always_unused,
+               char *fmt __always_unused, ...)
 {
 }
 #endif
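Editor's note on the 9p hunks above: the wire format carries string and data lengths as unsigned 16/32-bit values, but the old code stored them in int16_t/int32_t. A minimal standalone sketch (hypothetical values, not kernel code) of why the signed type was wrong: any length >= 0x8000 turns negative, and the old max_t(int16_t, len, 0) clamp then silently truncated the read to zero bytes.

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t wire_len = 0xFFFF;             /* length as carried in the 9P message */
    int16_t as_signed = (int16_t)wire_len;  /* the old int16_t view: -1 */

    /* The old code clamped negative values to 0, silently truncating. */
    int size = as_signed > 0 ? as_signed : 0;

    printf("signed view: %d -> allocated %d bytes\n", as_signed, size);
    printf("unsigned view: %u bytes, as intended\n", wire_len);
    return 0;
}
```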
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index b49fdf70a6d5..2284e8129cb2 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -63,7 +63,7 @@ struct batman_packet {
     uint8_t num_hna;
     uint8_t gw_flags;  /* flags related to gateway class */
     uint8_t align;
-} __attribute__((packed));
+} __packed;
 
 #define BAT_PACKET_LEN sizeof(struct batman_packet)
 
@@ -76,7 +76,7 @@ struct icmp_packet {
     uint8_t orig[6];
     uint16_t seqno;
     uint8_t uid;
-} __attribute__((packed));
+} __packed;
 
 #define BAT_RR_LEN 16
 
@@ -93,14 +93,14 @@ struct icmp_packet_rr {
     uint8_t uid;
     uint8_t rr_cur;
     uint8_t rr[BAT_RR_LEN][ETH_ALEN];
-} __attribute__((packed));
+} __packed;
 
 struct unicast_packet {
     uint8_t packet_type;
     uint8_t version;  /* batman version field */
     uint8_t dest[6];
     uint8_t ttl;
-} __attribute__((packed));
+} __packed;
 
 struct unicast_frag_packet {
     uint8_t packet_type;
@@ -110,7 +110,7 @@ struct unicast_frag_packet {
     uint8_t flags;
     uint8_t orig[6];
     uint16_t seqno;
-} __attribute__((packed));
+} __packed;
 
 struct bcast_packet {
     uint8_t packet_type;
@@ -118,7 +118,7 @@ struct bcast_packet {
     uint8_t orig[6];
     uint8_t ttl;
     uint32_t seqno;
-} __attribute__((packed));
+} __packed;
 
 struct vis_packet {
     uint8_t packet_type;
@@ -131,6 +131,6 @@ struct vis_packet {
      * neighbors */
     uint8_t target_orig[6]; /* who should receive this packet */
     uint8_t sender_orig[6]; /* who sent or rebroadcasted this packet */
-} __attribute__((packed));
+} __packed;
 
 #endif /* _NET_BATMAN_ADV_PACKET_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 97cb23dd3e69..bf3f6f5a12c4 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -246,13 +246,13 @@ struct vis_info {
     /* this packet might be part of the vis send queue. */
     struct sk_buff *skb_packet;
     /* vis_info may follow here*/
-} __attribute__((packed));
+} __packed;
 
 struct vis_info_entry {
     uint8_t src[ETH_ALEN];
     uint8_t dest[ETH_ALEN];
     uint8_t quality;    /* quality = 0 means HNA */
-} __attribute__((packed));
+} __packed;
 
 struct recvlist_node {
     struct list_head list;
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index dc2e28bed844..ee41fef04b21 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -229,10 +229,12 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
     if (!bat_priv->primary_if)
         goto dropped;
 
-    unicast_packet = (struct unicast_packet *) skb->data;
+    frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
+    if (!frag_skb)
+        goto dropped;
+
+    unicast_packet = (struct unicast_packet *) skb->data;
     memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
-    frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
     skb_split(skb, frag_skb, data_len / 2);
 
     if (my_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 1bf0cf503796..8184c031d028 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -740,12 +740,12 @@ static int setsockopt(struct socket *sock,
         if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
             return -ENOPROTOOPT;
         lock_sock(&(cf_sk->sk));
-        cf_sk->conn_req.param.size = ol;
         if (ol > sizeof(cf_sk->conn_req.param.data) ||
             copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
             release_sock(&cf_sk->sk);
             return -EINVAL;
         }
+        cf_sk->conn_req.param.size = ol;
         release_sock(&cf_sk->sk);
         return 0;
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 21ede141018a..c665de778b60 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -191,6 +191,7 @@ int
 cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
     struct cflayer *servl = NULL;
     struct cfcnfg_phyinfo *phyinfo = NULL;
     u8 phyid = 0;
+    caif_assert(adap_layer != NULL);
     channel_id = adap_layer->id;
     if (adap_layer->dn == NULL || channel_id == 0) {
@@ -199,16 +200,16 @@ int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
         goto end;
     }
     servl = cfmuxl_remove_uplayer(cnfg->mux, channel_id);
-    if (servl == NULL)
-        goto end;
-    layer_set_up(servl, NULL);
-    ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
     if (servl == NULL) {
         pr_err("PROTOCOL ERROR - Error removing service_layer Channel_Id(%d)",
             channel_id);
         ret = -EINVAL;
         goto end;
     }
+    layer_set_up(servl, NULL);
+    ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
+    if (ret)
+        goto end;
     caif_assert(channel_id == servl->id);
     if (adap_layer->dn != NULL) {
         phyid = cfsrvl_getphyid(adap_layer->dn);
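Editor's note on the caif_socket.c hunk above: it moves the `param.size = ol` assignment below the validation, so the stored size can never disagree with the data actually copied in. A generic kernel-style sketch of that validate-then-commit pattern (struct and function names here are hypothetical):

```c
/* Hypothetical setter: validate the user buffer completely before
 * publishing any state derived from it. */
static int set_param(struct conn_req *req, const void __user *ov, size_t ol)
{
    if (ol > sizeof(req->param.data) ||
        copy_from_user(req->param.data, ov, ol))
        return -EINVAL;     /* no state was modified on failure */

    req->param.size = ol;   /* commit only after both checks passed */
    return 0;
}
```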
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 84a422c98941..fa9dab372b68 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -76,6 +76,8 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
     struct chnl_net *priv  = container_of(layr, struct chnl_net, chnl);
     int pktlen;
     int err = 0;
+    const u8 *ip_version;
+    u8 buf;
 
     priv = container_of(layr, struct chnl_net, chnl);
 
@@ -90,7 +92,21 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
      * send the packet to the net stack.
      */
     skb->dev = priv->netdev;
-    skb->protocol = htons(ETH_P_IP);
+
+    /* check the version of IP */
+    ip_version = skb_header_pointer(skb, 0, 1, &buf);
+    if (!ip_version)
+        return -EINVAL;
+    switch (*ip_version >> 4) {
+    case 4:
+        skb->protocol = htons(ETH_P_IP);
+        break;
+    case 6:
+        skb->protocol = htons(ETH_P_IPV6);
+        break;
+    default:
+        return -EINVAL;
+    }
 
     /* If we change the header in loop mode, the checksum is corrupted. */
     if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 9d5e8accfab1..092dc88a7c64 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1256,6 +1256,9 @@ static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
         struct sockaddr_can *addr =
             (struct sockaddr_can *)msg->msg_name;
 
+        if (msg->msg_namelen < sizeof(*addr))
+            return -EINVAL;
+
         if (addr->can_family != AF_CAN)
             return -EINVAL;
diff --git a/net/can/raw.c b/net/can/raw.c
index e88f610fdb7b..883e9d74fddf 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -649,6 +649,9 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
         struct sockaddr_can *addr =
             (struct sockaddr_can *)msg->msg_name;
 
+        if (msg->msg_namelen < sizeof(*addr))
+            return -EINVAL;
+
         if (addr->can_family != AF_CAN)
             return -EINVAL;
diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c
index 815ef8826796..0a1b53bce76d 100644
--- a/net/ceph/ceph_hash.c
+++ b/net/ceph/ceph_hash.c
@@ -1,5 +1,6 @@
 
 #include <linux/ceph/types.h>
+#include <linux/module.h>
 
 /*
  * Robert Jenkin's hash function.
@@ -104,6 +105,7 @@ unsigned ceph_str_hash(int type, const char *s, unsigned len)
         return -1;
     }
 }
+EXPORT_SYMBOL(ceph_str_hash);
 
 const char *ceph_str_hash_name(int type)
 {
@@ -116,3 +118,4 @@ const char *ceph_str_hash_name(int type)
         return "unknown";
     }
 }
+EXPORT_SYMBOL(ceph_str_hash_name);
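Editor's note on the chnl_net.c hunk above: the payload arrives with no link-layer header, so the IP version is read from the high nibble of byte 0 and mapped to an ethertype. A standalone sketch of just that dispatch (constants are the standard if_ether.h values):

```c
#include <stdint.h>

#define ETH_P_IP   0x0800
#define ETH_P_IPV6 0x86DD

/* Return the ethertype for a raw IP packet, or 0 if it is neither
 * IPv4 nor IPv6. The version lives in the high nibble of byte 0. */
static uint16_t ethertype_from_payload(const uint8_t *data)
{
    switch (data[0] >> 4) {
    case 4:  return ETH_P_IP;
    case 6:  return ETH_P_IPV6;
    default: return 0;
    }
}
```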
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index b6ff4a1519ab..dff633d62e5b 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -96,7 +96,7 @@ struct workqueue_struct *ceph_msgr_wq;
 
 int ceph_msgr_init(void)
 {
-    ceph_msgr_wq = create_workqueue("ceph-msgr");
+    ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
     if (!ceph_msgr_wq) {
         pr_err("msgr_init failed to create workqueue\n");
         return -ENOMEM;
@@ -1920,20 +1920,6 @@ bad_tag:
 /*
  * Atomically queue work on a connection.  Bump @con reference to
  * avoid races with connection teardown.
- *
- * There is some trickery going on with QUEUED and BUSY because we
- * only want a _single_ thread operating on each connection at any
- * point in time, but we want to use all available CPUs.
- *
- * The worker thread only proceeds if it can atomically set BUSY.  It
- * clears QUEUED and does it's thing.  When it thinks it's done, it
- * clears BUSY, then rechecks QUEUED.. if it's set again, it loops
- * (tries again to set BUSY).
- *
- * To queue work, we first set QUEUED, _then_ if BUSY isn't set, we
- * try to queue work.  If that fails (work is already queued, or BUSY)
- * we give up (work also already being done or is queued) but leave QUEUED
- * set so that the worker thread will loop if necessary.
  */
 static void queue_con(struct ceph_connection *con)
 {
@@ -1948,11 +1934,7 @@ static void queue_con(struct ceph_connection *con)
         return;
     }
 
-    set_bit(QUEUED, &con->state);
-    if (test_bit(BUSY, &con->state)) {
-        dout("queue_con %p - already BUSY\n", con);
-        con->ops->put(con);
-    } else if (!queue_work(ceph_msgr_wq, &con->work.work)) {
+    if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) {
         dout("queue_con %p - already queued\n", con);
         con->ops->put(con);
     } else {
@@ -1967,15 +1949,6 @@ static void con_work(struct work_struct *work)
 {
     struct ceph_connection *con = container_of(work, struct ceph_connection,
                                                work.work);
-    int backoff = 0;
-
-more:
-    if (test_and_set_bit(BUSY, &con->state) != 0) {
-        dout("con_work %p BUSY already set\n", con);
-        goto out;
-    }
-    dout("con_work %p start, clearing QUEUED\n", con);
-    clear_bit(QUEUED, &con->state);
 
     mutex_lock(&con->mutex);
 
@@ -1994,28 +1967,13 @@ more:
         try_read(con) < 0 ||
         try_write(con) < 0) {
         mutex_unlock(&con->mutex);
-        backoff = 1;
         ceph_fault(con);     /* error/fault path */
         goto done_unlocked;
     }
 
 done:
     mutex_unlock(&con->mutex);
-
 done_unlocked:
-    clear_bit(BUSY, &con->state);
-    dout("con->state=%lu\n", con->state);
-    if (test_bit(QUEUED, &con->state)) {
-        if (!backoff || test_bit(OPENING, &con->state)) {
-            dout("con_work %p QUEUED reset, looping\n", con);
-            goto more;
-        }
-        dout("con_work %p QUEUED reset, but just faulted\n", con);
-        clear_bit(QUEUED, &con->state);
-    }
-    dout("con_work %p done\n", con);
-
-out:
     con->ops->put(con);
 }
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index d73f3f6efa36..71603ac3dff5 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -605,8 +605,10 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
             goto bad;
         }
         err = __decode_pool(p, end, pi);
-        if (err < 0)
+        if (err < 0) {
+            kfree(pi);
             goto bad;
+        }
         __insert_pg_pool(&map->pg_pools, pi);
     }
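Editor's note on the osdmap.c hunk above: it plugs a leak: `pi` is allocated per iteration, and the old error path jumped to `bad` without freeing it, because the common cleanup only tears down pools that were already inserted into the map. A schematic of the ownership rule, with hypothetical helper names:

```c
/* Sketch with hypothetical helpers: an object is owned by this function
 * until it is linked into a container; every failure before the insert
 * must free it locally, because the common error path only tears down
 * objects that were actually inserted. */
static int decode_one_pool(void **p, void *end, struct pool_map *map)
{
    struct pool *pi = kzalloc(sizeof(*pi), GFP_NOFS);
    int err;

    if (!pi)
        return -ENOMEM;           /* nothing to undo yet */

    err = decode_pool(p, end, pi);
    if (err < 0) {
        kfree(pi);                /* not inserted: nobody else frees it */
        return err;
    }

    insert_pool(&map->pools, pi); /* ownership moves to the map */
    return 0;
}
```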
diff --git a/net/core/dev.c b/net/core/dev.c
index a215269d2e35..7741507429f4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1732,33 +1732,6 @@ void netif_device_attach(struct net_device *dev)
 }
 EXPORT_SYMBOL(netif_device_attach);
 
-static bool can_checksum_protocol(unsigned long features, __be16 protocol)
-{
-    return ((features & NETIF_F_NO_CSUM) ||
-        ((features & NETIF_F_V4_CSUM) &&
-         protocol == htons(ETH_P_IP)) ||
-        ((features & NETIF_F_V6_CSUM) &&
-         protocol == htons(ETH_P_IPV6)) ||
-        ((features & NETIF_F_FCOE_CRC) &&
-         protocol == htons(ETH_P_FCOE)));
-}
-
-static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
-{
-    __be16 protocol = skb->protocol;
-    int features = dev->features;
-
-    if (vlan_tx_tag_present(skb)) {
-        features &= dev->vlan_features;
-    } else if (protocol == htons(ETH_P_8021Q)) {
-        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
-        protocol = veh->h_vlan_encapsulated_proto;
-        features &= dev->vlan_features;
-    }
-
-    return can_checksum_protocol(features, protocol);
-}
-
 /**
  * skb_dev_set -- assign a new device to a buffer
  * @skb: buffer for the new device
@@ -1971,16 +1944,14 @@ static void dev_gso_skb_destructor(struct sk_buff *skb)
 /**
  *  dev_gso_segment - Perform emulated hardware segmentation on skb.
  *  @skb: buffer to segment
+ *  @features: device features as applicable to this skb
  *
  *  This function segments the given skb and stores the list of segments
  *  in skb->next.
  */
-static int dev_gso_segment(struct sk_buff *skb)
+static int dev_gso_segment(struct sk_buff *skb, int features)
 {
-    struct net_device *dev = skb->dev;
     struct sk_buff *segs;
-    int features = dev->features & ~(illegal_highdma(dev, skb) ?
-                     NETIF_F_SG : 0);
 
     segs = skb_gso_segment(skb, features);
@@ -2017,22 +1988,52 @@ static inline void skb_orphan_try(struct sk_buff *skb)
     }
 }
 
-int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev)
+static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+{
+    return ((features & NETIF_F_GEN_CSUM) ||
+        ((features & NETIF_F_V4_CSUM) &&
+         protocol == htons(ETH_P_IP)) ||
+        ((features & NETIF_F_V6_CSUM) &&
+         protocol == htons(ETH_P_IPV6)) ||
+        ((features & NETIF_F_FCOE_CRC) &&
+         protocol == htons(ETH_P_FCOE)));
+}
+
+static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features)
+{
+    if (!can_checksum_protocol(protocol, features)) {
+        features &= ~NETIF_F_ALL_CSUM;
+        features &= ~NETIF_F_SG;
+    } else if (illegal_highdma(skb->dev, skb)) {
+        features &= ~NETIF_F_SG;
+    }
+
+    return features;
+}
+
+int netif_skb_features(struct sk_buff *skb)
 {
     __be16 protocol = skb->protocol;
+    int features = skb->dev->features;
 
     if (protocol == htons(ETH_P_8021Q)) {
         struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
         protocol = veh->h_vlan_encapsulated_proto;
-    } else if (!skb->vlan_tci)
-        return dev->features;
+    } else if (!vlan_tx_tag_present(skb)) {
+        return harmonize_features(skb, protocol, features);
+    }
 
-    if (protocol != htons(ETH_P_8021Q))
-        return dev->features & dev->vlan_features;
-    else
-        return 0;
+    features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
+
+    if (protocol != htons(ETH_P_8021Q)) {
+        return harmonize_features(skb, protocol, features);
+    } else {
+        features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
+            NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
+        return harmonize_features(skb, protocol, features);
+    }
 }
-EXPORT_SYMBOL(netif_get_vlan_features);
+EXPORT_SYMBOL(netif_skb_features);
 
 /*
  * Returns true if either:
@@ -2042,22 +2043,13 @@ EXPORT_SYMBOL(netif_get_vlan_features);
  *  support DMA from it.
  */
 static inline int skb_needs_linearize(struct sk_buff *skb,
-                      struct net_device *dev)
+                      int features)
 {
-    if (skb_is_nonlinear(skb)) {
-        int features = dev->features;
-
-        if (vlan_tx_tag_present(skb))
-            features &= dev->vlan_features;
-
-        return (skb_has_frag_list(skb) &&
-            !(features & NETIF_F_FRAGLIST)) ||
+    return skb_is_nonlinear(skb) &&
+           ((skb_has_frag_list(skb) &&
+         !(features & NETIF_F_FRAGLIST)) ||
         (skb_shinfo(skb)->nr_frags &&
-         (!(features & NETIF_F_SG) ||
-          illegal_highdma(dev, skb))));
-    }
-
-    return 0;
+         !(features & NETIF_F_SG)));
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -2067,6 +2059,8 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
     int rc = NETDEV_TX_OK;
 
     if (likely(!skb->next)) {
+        int features;
+
         /*
          * If device doesnt need skb->dst, release it right now while
          * its hot in this cpu cache
@@ -2079,8 +2073,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 
         skb_orphan_try(skb);
 
+        features = netif_skb_features(skb);
+
         if (vlan_tx_tag_present(skb) &&
-            !(dev->features & NETIF_F_HW_VLAN_TX)) {
+            !(features & NETIF_F_HW_VLAN_TX)) {
             skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
             if (unlikely(!skb))
                 goto out;
@@ -2088,13 +2084,13 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
             skb->vlan_tci = 0;
         }
 
-        if (netif_needs_gso(dev, skb)) {
-            if (unlikely(dev_gso_segment(skb)))
+        if (netif_needs_gso(skb, features)) {
+            if (unlikely(dev_gso_segment(skb, features)))
                 goto out_kfree_skb;
             if (skb->next)
                 goto gso;
         } else {
-            if (skb_needs_linearize(skb, dev) &&
+            if (skb_needs_linearize(skb, features) &&
                 __skb_linearize(skb))
                 goto out_kfree_skb;
 
@@ -2105,7 +2101,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
             if (skb->ip_summed == CHECKSUM_PARTIAL) {
                 skb_set_transport_header(skb,
                     skb_checksum_start_offset(skb));
-                if (!dev_can_checksum(dev, skb) &&
+                if (!(features & NETIF_F_ALL_CSUM) &&
                     skb_checksum_help(skb))
                     goto out_kfree_skb;
             }
@@ -2301,7 +2297,10 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
      */
     if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
         skb_dst_force(skb);
-        __qdisc_update_bstats(q, skb->len);
+
+        qdisc_skb_cb(skb)->pkt_len = skb->len;
+        qdisc_bstats_update(q, skb);
+
         if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
             if (unlikely(contended)) {
                 spin_unlock(&q->busylock);
@@ -5524,34 +5523,6 @@ void netdev_run_todo(void)
     }
 }
 
-/**
- *  dev_txq_stats_fold - fold tx_queues stats
- *  @dev: device to get statistics from
- *  @stats: struct rtnl_link_stats64 to hold results
- */
-void dev_txq_stats_fold(const struct net_device *dev,
-            struct rtnl_link_stats64 *stats)
-{
-    u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
-    unsigned int i;
-    struct netdev_queue *txq;
-
-    for (i = 0; i < dev->num_tx_queues; i++) {
-        txq = netdev_get_tx_queue(dev, i);
-        spin_lock_bh(&txq->_xmit_lock);
-        tx_bytes += txq->tx_bytes;
-        tx_packets += txq->tx_packets;
-        tx_dropped += txq->tx_dropped;
-        spin_unlock_bh(&txq->_xmit_lock);
-    }
-    if (tx_bytes || tx_packets || tx_dropped) {
-        stats->tx_bytes = tx_bytes;
-        stats->tx_packets = tx_packets;
-        stats->tx_dropped = tx_dropped;
-    }
-}
-EXPORT_SYMBOL(dev_txq_stats_fold);
-
 /* Convert net_device_stats to rtnl_link_stats64.  They have the same
  * fields in the same order, with only the type differing.
  */
@@ -5595,7 +5566,6 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
         netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
     } else {
         netdev_stats_to_stats64(storage, &dev->stats);
-        dev_txq_stats_fold(dev, storage);
     }
     storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
     return storage;
@@ -5621,18 +5591,20 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
 }
 
 /**
- *  alloc_netdev_mq - allocate network device
+ *  alloc_netdev_mqs - allocate network device
  *  @sizeof_priv:   size of private data to allocate space for
  *  @name:      device name format string
  *  @setup:     callback to initialize device
- *  @queue_count:   the number of subqueues to allocate
+ *  @txqs:      the number of TX subqueues to allocate
+ *  @rxqs:      the number of RX subqueues to allocate
  *
  *  Allocates a struct net_device with private data area for driver use
  *  and performs basic initialization.  Also allocates subquue structs
- *  for each queue on the device at the end of the netdevice.
+ *  for each queue on the device.
  */
-struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
-        void (*setup)(struct net_device *), unsigned int queue_count)
+struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+        void (*setup)(struct net_device *),
+        unsigned int txqs, unsigned int rxqs)
 {
     struct net_device *dev;
     size_t alloc_size;
@@ -5640,12 +5612,20 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
     BUG_ON(strlen(name) >= sizeof(dev->name));
 
-    if (queue_count < 1) {
+    if (txqs < 1) {
         pr_err("alloc_netdev: Unable to allocate device "
                "with zero queues.\n");
         return NULL;
     }
 
+#ifdef CONFIG_RPS
+    if (rxqs < 1) {
+        pr_err("alloc_netdev: Unable to allocate device "
+               "with zero RX queues.\n");
+        return NULL;
+    }
+#endif
+
     alloc_size = sizeof(struct net_device);
     if (sizeof_priv) {
         /* ensure 32-byte alignment of private area */
@@ -5676,14 +5656,14 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
     dev_net_set(dev, &init_net);
 
-    dev->num_tx_queues = queue_count;
-    dev->real_num_tx_queues = queue_count;
+    dev->num_tx_queues = txqs;
+    dev->real_num_tx_queues = txqs;
     if (netif_alloc_netdev_queues(dev))
         goto free_pcpu;
 
 #ifdef CONFIG_RPS
-    dev->num_rx_queues = queue_count;
-    dev->real_num_rx_queues = queue_count;
+    dev->num_rx_queues = rxqs;
+    dev->real_num_rx_queues = rxqs;
     if (netif_alloc_rx_queues(dev))
         goto free_pcpu;
 #endif
@@ -5711,7 +5691,7 @@ free_p:
     kfree(p);
     return NULL;
 }
-EXPORT_SYMBOL(alloc_netdev_mq);
+EXPORT_SYMBOL(alloc_netdev_mqs);
 
 /**
  *  free_netdev - free network device
@@ -6209,7 +6189,7 @@ static void __net_exit default_device_exit(struct net *net)
 static void __net_exit default_device_exit_batch(struct list_head *net_list)
 {
     /* At exit all network devices most be removed from a network
-     * namespace. Do this in the reverse order of registeration.
+     * namespace. Do this in the reverse order of registration.
      * Do this across as many network namespaces as possible to
      * improve batching efficiency.
      */
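Editor's note on the net/core/dev.c refactor above: the old dev_can_checksum()/netif_get_vlan_features() pair becomes a single netif_skb_features() that computes the effective offload set for one packet and then "harmonizes" it: if the device cannot checksum this protocol, checksum offload and scatter-gather are both dropped; if high DMA is illegal for this buffer, scatter-gather alone is dropped. A condensed userspace model of that decision tree (the flag values here are hypothetical, not the NETIF_F_* bits):

```c
#include <stdio.h>

#define F_SG      0x01   /* scatter-gather */
#define F_CSUM    0x02   /* checksum offload (any kind) */
#define F_HIGHDMA 0x04

/* Model of harmonize_features(): mask out what this packet can't use. */
static int harmonize(int features, int can_csum_proto, int highdma_ok)
{
    if (!can_csum_proto)
        features &= ~(F_CSUM | F_SG);  /* SG without csum offload is dropped too */
    else if (!highdma_ok)
        features &= ~F_SG;
    return features;
}

int main(void)
{
    printf("%#x\n", harmonize(F_SG | F_CSUM | F_HIGHDMA, 0, 1)); /* 0x4 */
    printf("%#x\n", harmonize(F_SG | F_CSUM | F_HIGHDMA, 1, 0)); /* 0x6 */
    return 0;
}
```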
diff --git a/net/core/filter.c b/net/core/filter.c
index 2b27d4efdd48..afc58374ca96 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -158,7 +158,7 @@ EXPORT_SYMBOL(sk_filter);
 /**
  *  sk_run_filter - run a filter on a socket
  *  @skb: buffer to run the filter on
- *  @filter: filter to apply
+ *  @fentry: filter to apply
  *
  *  Decode and apply filter instructions to the skb->data.
  *  Return length to keep, 0 for none. @skb is the data we are
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 750db57f3bb3..a5f7535aab5b 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1820,7 +1820,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
     if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN))
         return -EPERM;
 
-    if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
+    if (kind == 2 && (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
         struct sock *rtnl;
         rtnl_dumpit_func dumpit;
diff --git a/net/core/sock.c b/net/core/sock.c
index a658aeb6d554..7dfed792434d 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -157,7 +157,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = {
   "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
   "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
-  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" ,
+  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
   "sk_lock-AF_MAX"
 };
 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
@@ -173,7 +173,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
   "slock-27" , "slock-28" , "slock-AF_CAN" ,
   "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
   "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
-  "slock-AF_IEEE802154", "slock-AF_CAIF" ,
+  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
   "slock-AF_MAX"
 };
 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
@@ -189,7 +189,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
   "clock-27" , "clock-28" , "clock-AF_CAN" ,
   "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
   "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
-  "clock-AF_IEEE802154", "clock-AF_CAIF" ,
+  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
   "clock-AF_MAX"
 };
diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig
index ad6dffd9070e..b75968a04017 100644
--- a/net/dccp/Kconfig
+++ b/net/dccp/Kconfig
@@ -49,7 +49,9 @@ config NET_DCCPPROBE
       what was just said, you don't need it: say N.
 
       Documentation on how to use DCCP connection probing can be found
-      at http://linux-net.osdl.org/index.php/DccpProbe
+      at:
+
+        http://www.linuxfoundation.org/collaborate/workgroups/networking/dccpprobe
 
       To compile this code as a module, choose M here: the module
       will be called dccp_probe.
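Editor's note: several hunks in this merge (rtnetlink above, and inet_diag, ctnetlink, and genetlink below) change `flags & NLM_F_DUMP` to `(flags & NLM_F_DUMP) == NLM_F_DUMP`. The reason is that NLM_F_DUMP is a two-bit mask, NLM_F_ROOT | NLM_F_MATCH, so the plain AND is true when either bit is set, while the comparison requires both. A standalone demonstration using the uapi netlink.h values:

```c
#include <stdio.h>

#define NLM_F_ROOT  0x100
#define NLM_F_MATCH 0x200
#define NLM_F_DUMP  (NLM_F_ROOT | NLM_F_MATCH)

int main(void)
{
    unsigned flags = NLM_F_ROOT;    /* only half of a dump request */

    printf("loose test:  %d\n", !!(flags & NLM_F_DUMP));             /* 1 */
    printf("strict test: %d\n", (flags & NLM_F_DUMP) == NLM_F_DUMP); /* 0 */
    return 0;
}
```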
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 45087052d894..5fdb07229017 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -426,7 +426,8 @@ static inline void dccp_update_gsr(struct sock *sk, u64 seq)
 {
     struct dccp_sock *dp = dccp_sk(sk);
 
-    dp->dccps_gsr = seq;
+    if (after48(seq, dp->dccps_gsr))
+        dp->dccps_gsr = seq;
     /* Sequence validity window depends on remote Sequence Window (7.5.1) */
     dp->dccps_swl = SUB48(ADD48(dp->dccps_gsr, 1), dp->dccps_r_seq_win / 4);
     /*
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 15af247ea007..8cde009e8b85 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -260,7 +260,7 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
          */
         if (time_before(now, (dp->dccps_rate_last +
                       sysctl_dccp_sync_ratelimit)))
-            return 0;
+            return -1;
 
         DCCP_WARN("Step 6 failed for %s packet, "
               "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c
index 563943822e58..42348824ee31 100644
--- a/net/dccp/sysctl.c
+++ b/net/dccp/sysctl.c
@@ -21,7 +21,8 @@
 
 /* Boundary values */
 static int      zero = 0,
         u8_max = 0xFF;
-static unsigned long    seqw_min = 32;
+static unsigned long    seqw_min = DCCPF_SEQ_WMIN,
+            seqw_max = 0xFFFFFFFF; /* maximum on 32 bit */
 
 static struct ctl_table dccp_default_table[] = {
     {
@@ -31,6 +32,7 @@ static struct ctl_table dccp_default_table[] = {
         .mode       = 0644,
         .proc_handler   = proc_doulongvec_minmax,
         .extra1     = &seqw_min, /* RFC 4340, 7.5.2 */
+        .extra2     = &seqw_max,
     },
     {
         .procname   = "rx_ccid",
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 0ba15633c418..0dcaa903e00e 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -1130,7 +1130,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
 /*
  * This processes a device up event. We only start up
  * the loopback device & ethernet devices with correct
- * MAC addreses automatically. Others must be started
+ * MAC addresses automatically. Others must be started
  * specifically.
  *
  * FIXME: How should we configure the loopback address ? If we could dispense
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index f00ef2f1d814..44d2b42fda56 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -347,10 +347,11 @@ void ether_setup(struct net_device *dev)
 EXPORT_SYMBOL(ether_setup);
 
 /**
- * alloc_etherdev_mq - Allocates and sets up an Ethernet device
+ * alloc_etherdev_mqs - Allocates and sets up an Ethernet device
  * @sizeof_priv: Size of additional driver-private structure to be allocated
  *  for this Ethernet device
- * @queue_count: The number of queues this device has.
+ * @txqs: The number of TX queues this device has.
+ * @rxqs: The number of RX queues this device has.
  *
  * Fill in the fields of the device structure with Ethernet-generic
  * values. Basically does everything except registering the device.
@@ -360,11 +361,12 @@ EXPORT_SYMBOL(ether_setup);
  * this private data area.
  */
-struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count)
+struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
+                      unsigned int rxqs)
 {
-    return alloc_netdev_mq(sizeof_priv, "eth%d", ether_setup, queue_count);
+    return alloc_netdev_mqs(sizeof_priv, "eth%d", ether_setup, txqs, rxqs);
 }
-EXPORT_SYMBOL(alloc_etherdev_mq);
+EXPORT_SYMBOL(alloc_etherdev_mqs);
 
 static size_t _format_mac_addr(char *buf, int buflen,
                    const unsigned char *addr, int len)
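Editor's note on the dccp.h hunk above: dccp_update_gsr() now only moves the greatest-sequence-received forward, using the 48-bit circular comparison after48(). A standalone sketch of such a comparison for sequence numbers modulo 2^48; the helper name mirrors the kernel's, but this particular implementation is an assumption, not the kernel source:

```c
#include <stdint.h>
#include <stdio.h>

/* Circular "a comes after b" for 48-bit sequence numbers: shift both
 * into the top 48 bits of a 64-bit value so the wrapped difference
 * can be compared as a signed quantity. */
static int after48(uint64_t a, uint64_t b)
{
    return (int64_t)((a << 16) - (b << 16)) > 0;
}

int main(void)
{
    printf("%d\n", after48(5, 2));                 /* 1 */
    printf("%d\n", after48(1, 0xFFFFFFFFFFFFULL)); /* 1: wrapped around */
    printf("%d\n", after48(0xFFFFFFFFFFFFULL, 1)); /* 0 */
    return 0;
}
```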
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index dcb2e18f6f8e..8949a05ac307 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -435,7 +435,9 @@ config INET_DIAG
     ---help---
       Support for INET (TCP, DCCP, etc) socket monitoring interface used by
       native Linux tools such as ss. ss is included in iproute2, currently
-      downloadable at <http://linux-net.osdl.org/index.php/Iproute2>.
+      downloadable at:
+
+        http://www.linuxfoundation.org/collaborate/workgroups/networking/iproute2
 
       If unsure, say Y.
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 880a5ec6dce0..86961bec70ab 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -314,14 +314,15 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
 
     skb->ip_summed = CHECKSUM_NONE;
 
-    ah = (struct ip_auth_hdr *)skb->data;
-    iph = ip_hdr(skb);
-    ihl = ip_hdrlen(skb);
 
     if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
         goto out;
     nfrags = err;
 
+    ah = (struct ip_auth_hdr *)skb->data;
+    iph = ip_hdr(skb);
+    ihl = ip_hdrlen(skb);
+
     work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len);
     if (!work_iph)
         goto out;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index a2fc7b961dbc..04c8b69fd426 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1143,6 +1143,23 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
     return err;
 }
 
+int arp_invalidate(struct net_device *dev, __be32 ip)
+{
+    struct neighbour *neigh = neigh_lookup(&arp_tbl, &ip, dev);
+    int err = -ENXIO;
+
+    if (neigh) {
+        if (neigh->nud_state & ~NUD_NOARP)
+            err = neigh_update(neigh, NULL, NUD_FAILED,
+                       NEIGH_UPDATE_F_OVERRIDE|
+                       NEIGH_UPDATE_F_ADMIN);
+        neigh_release(neigh);
+    }
+
+    return err;
+}
+EXPORT_SYMBOL(arp_invalidate);
+
 static int arp_req_delete_public(struct net *net, struct arpreq *r,
                  struct net_device *dev)
 {
@@ -1163,7 +1180,6 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
 {
     int err;
     __be32 ip;
-    struct neighbour *neigh;
 
     if (r->arp_flags & ATF_PUBL)
         return arp_req_delete_public(net, r, dev);
@@ -1181,16 +1197,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
         if (!dev)
             return -EINVAL;
     }
-    err = -ENXIO;
-    neigh = neigh_lookup(&arp_tbl, &ip, dev);
-    if (neigh) {
-        if (neigh->nud_state & ~NUD_NOARP)
-            err = neigh_update(neigh, NULL, NUD_FAILED,
-                       NEIGH_UPDATE_F_OVERRIDE|
-                       NEIGH_UPDATE_F_ADMIN);
-        neigh_release(neigh);
-    }
-    return err;
+    return arp_invalidate(dev, ip);
 }
 
 /*
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 25e318153f14..97e5fb765265 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -73,7 +73,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
              !sk2->sk_bound_dev_if ||
              sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
             if (!reuse || !sk2->sk_reuse ||
-                sk2->sk_state == TCP_LISTEN) {
+                ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) {
                 const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
                 if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
                     sk2_rcv_saddr == sk_rcv_saddr(sk))
@@ -122,7 +122,8 @@ again:
             (tb->num_owners < smallest_size || smallest_size == -1)) {
                 smallest_size = tb->num_owners;
                 smallest_rover = rover;
-                if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
+                if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
+                    !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
                     spin_unlock(&head->lock);
                     snum = smallest_rover;
                     goto have_snum;
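Editor's note on the bind-conflict hunks (inet_connection_sock above, inet6_connection_sock below): `sk2->sk_state == TCP_LISTEN` becomes `(1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)`. The TCPF_* constants are one-hot encodings of the TCP_* state numbers, so shifting builds a one-bit set and the AND tests membership in a set of states in a single operation. Standalone, with the real state numbers:

```c
#include <stdio.h>

enum { TCP_ESTABLISHED = 1, TCP_CLOSE = 7, TCP_LISTEN = 10 };

#define TCPF_CLOSE  (1 << TCP_CLOSE)
#define TCPF_LISTEN (1 << TCP_LISTEN)

/* True when the state is in the {LISTEN, CLOSE} set. */
static int in_conflict_state(int state)
{
    return (1 << state) & (TCPF_LISTEN | TCPF_CLOSE);
}

int main(void)
{
    printf("LISTEN: %d\n", !!in_conflict_state(TCP_LISTEN));           /* 1 */
    printf("CLOSE: %d\n", !!in_conflict_state(TCP_CLOSE));             /* 1 */
    printf("ESTABLISHED: %d\n", !!in_conflict_state(TCP_ESTABLISHED)); /* 0 */
    return 0;
}
```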
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 2ada17129fce..2746c1fa6417 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -858,7 +858,7 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
         nlmsg_len(nlh) < hdrlen)
         return -EINVAL;
 
-    if (nlh->nlmsg_flags & NLM_F_DUMP) {
+    if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
         if (nlmsg_attrlen(nlh, hdrlen)) {
             struct nlattr *attr;
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 47e5178b998b..e95054c690c6 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -710,42 +710,25 @@ static void get_counters(const struct xt_table_info *t,
     struct arpt_entry *iter;
     unsigned int cpu;
     unsigned int i;
-    unsigned int curcpu = get_cpu();
-
-    /* Instead of clearing (by a previous call to memset())
-     * the counters and using adds, we set the counters
-     * with data used by 'current' CPU
-     *
-     * Bottom half has to be disabled to prevent deadlock
-     * if new softirq were to run and call ipt_do_table
-     */
-    local_bh_disable();
-    i = 0;
-    xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-        SET_COUNTER(counters[i], iter->counters.bcnt,
-                iter->counters.pcnt);
-        ++i;
-    }
-    local_bh_enable();
-    /* Processing counters from other cpus, we can let bottom half enabled,
-     * (preemption is disabled)
-     */
 
     for_each_possible_cpu(cpu) {
-        if (cpu == curcpu)
-            continue;
+        seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
         i = 0;
-        local_bh_disable();
-        xt_info_wrlock(cpu);
         xt_entry_foreach(iter, t->entries[cpu], t->size) {
-            ADD_COUNTER(counters[i], iter->counters.bcnt,
-                    iter->counters.pcnt);
+            u64 bcnt, pcnt;
+            unsigned int start;
+
+            do {
+                start = read_seqbegin(lock);
+                bcnt = iter->counters.bcnt;
+                pcnt = iter->counters.pcnt;
+            } while (read_seqretry(lock, start));
+
+            ADD_COUNTER(counters[i], bcnt, pcnt);
             ++i;
         }
-        xt_info_wrunlock(cpu);
-        local_bh_enable();
     }
-    put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -759,7 +742,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
      * about). */
     countersize = sizeof(struct xt_counters) * private->number;
-    counters = vmalloc(countersize);
+    counters = vzalloc(countersize);
     if (counters == NULL)
         return ERR_PTR(-ENOMEM);
@@ -1008,7 +991,7 @@ static int __do_replace(struct net *net, const char *name,
     struct arpt_entry *iter;
 
     ret = 0;
-    counters = vmalloc(num_counters * sizeof(struct xt_counters));
+    counters = vzalloc(num_counters * sizeof(struct xt_counters));
     if (!counters) {
         ret = -ENOMEM;
         goto out;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index c5a75d70970f..ef7d7b9680ea 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -884,42 +884,25 @@ get_counters(const struct xt_table_info *t,
     struct ipt_entry *iter;
     unsigned int cpu;
     unsigned int i;
-    unsigned int curcpu = get_cpu();
-
-    /* Instead of clearing (by a previous call to memset())
-     * the counters and using adds, we set the counters
-     * with data used by 'current' CPU.
-     *
-     * Bottom half has to be disabled to prevent deadlock
-     * if new softirq were to run and call ipt_do_table
-     */
-    local_bh_disable();
-    i = 0;
-    xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-        SET_COUNTER(counters[i], iter->counters.bcnt,
-                iter->counters.pcnt);
-        ++i;
-    }
-    local_bh_enable();
-    /* Processing counters from other cpus, we can let bottom half enabled,
-     * (preemption is disabled)
-     */
 
     for_each_possible_cpu(cpu) {
-        if (cpu == curcpu)
-            continue;
+        seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
         i = 0;
-        local_bh_disable();
-        xt_info_wrlock(cpu);
         xt_entry_foreach(iter, t->entries[cpu], t->size) {
-            ADD_COUNTER(counters[i], iter->counters.bcnt,
-                    iter->counters.pcnt);
+            u64 bcnt, pcnt;
+            unsigned int start;
+
+            do {
+                start = read_seqbegin(lock);
+                bcnt = iter->counters.bcnt;
+                pcnt = iter->counters.pcnt;
+            } while (read_seqretry(lock, start));
+
+            ADD_COUNTER(counters[i], bcnt, pcnt);
             ++i; /* macro does multi eval of i */
         }
-        xt_info_wrunlock(cpu);
-        local_bh_enable();
     }
-    put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -932,7 +915,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
        (other than comefrom, which userspace doesn't care about). */
     countersize = sizeof(struct xt_counters) * private->number;
-    counters = vmalloc(countersize);
+    counters = vzalloc(countersize);
     if (counters == NULL)
         return ERR_PTR(-ENOMEM);
@@ -1204,7 +1187,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
     struct ipt_entry *iter;
 
     ret = 0;
-    counters = vmalloc(num_counters * sizeof(struct xt_counters));
+    counters = vzalloc(num_counters * sizeof(struct xt_counters));
     if (!counters) {
         ret = -ENOMEM;
         goto out;
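Editor's note on the get_counters() rewrite (repeated for arp_tables, ip_tables, and ip6_tables in this merge): per-CPU write locking is replaced by a seqlock retry loop, so a stats dump snapshots each 64-bit counter pair and simply retries if a writer raced with it; packet processing never blocks on the dump. A minimal userspace model of the retry protocol using C11 atomics: this is a simplified sketch, the kernel's read_seqbegin()/read_seqretry() additionally handle memory ordering and preemption details.

```c
#include <stdatomic.h>
#include <stdint.h>

struct counters {
    atomic_uint seq;         /* odd while a writer is mid-update */
    uint64_t bytes, packets; /* protected payload */
};

static void writer_update(struct counters *c, uint64_t b)
{
    atomic_fetch_add_explicit(&c->seq, 1, memory_order_acquire); /* now odd */
    c->bytes += b;
    c->packets += 1;
    atomic_fetch_add_explicit(&c->seq, 1, memory_order_release); /* even again */
}

static void reader_snapshot(struct counters *c, uint64_t *b, uint64_t *p)
{
    unsigned start;

    do {
        /* wait out an in-progress writer, then record the sequence */
        while ((start = atomic_load_explicit(&c->seq,
                        memory_order_acquire)) & 1)
            ;
        *b = c->bytes;
        *p = c->packets;
        /* retry if the sequence moved while we were reading */
    } while (atomic_load_explicit(&c->seq, memory_order_acquire) != start);
}
```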
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index dc7c096ddfef..406f320336e6 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1350,7 +1350,7 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
     return 0;
 }
 
-/* Intialize TSO state of a skb.
+/* Initialize TSO state of a skb.
  * This must be invoked the first time we consider transmitting
  * SKB onto the wire.
  */
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 5b189c97c2fc..24a1cf110d80 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -420,9 +420,6 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
             dev->type == ARPHRD_TUNNEL6 ||
             dev->type == ARPHRD_SIT ||
             dev->type == ARPHRD_NONE) {
-            printk(KERN_INFO
-                   "%s: Disabled Privacy Extensions\n",
-                   dev->name);
             ndev->cnf.use_tempaddr = -1;
         } else {
             in6_dev_hold(ndev);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 059a3de647db..978e80e2c4a8 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -300,7 +300,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
             goto out;
         }
 
-        /* Reproduce AF_INET checks to make the bindings consitant */
+        /* Reproduce AF_INET checks to make the bindings consistent */
         v4addr = addr->sin6_addr.s6_addr32[3];
         chk_addr_ret = inet_addr_type(net, v4addr);
         if (!sysctl_ip_nonlocal_bind &&
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index ee82d4ef26ce..1aba54ae53c4 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -538,14 +538,16 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
     if (!pskb_may_pull(skb, ah_hlen))
         goto out;
 
-    ip6h = ipv6_hdr(skb);
-
-    skb_push(skb, hdr_len);
 
     if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
         goto out;
     nfrags = err;
 
+    ah = (struct ip_auth_hdr *)skb->data;
+    ip6h = ipv6_hdr(skb);
+
+    skb_push(skb, hdr_len);
+
     work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len);
     if (!work_iph)
         goto out;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index e46305d1815a..d144e629d2b4 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -44,7 +44,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
              !sk2->sk_bound_dev_if ||
              sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
             (!sk->sk_reuse || !sk2->sk_reuse ||
-             sk2->sk_state == TCP_LISTEN) &&
+             ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) &&
              ipv6_rcv_saddr_equal(sk, sk2))
             break;
     }
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 94b5bf132b2e..5f8d242be3f3 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -401,6 +401,9 @@ int ip6_forward(struct sk_buff *skb)
         goto drop;
     }
 
+    if (skb->pkt_type != PACKET_HOST)
+        goto drop;
+
     skb_forward_csum(skb);
 
     /*
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 0c9973a657fb..47b7b8df7fac 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -897,42 +897,25 @@ get_counters(const struct xt_table_info *t,
     struct ip6t_entry *iter;
     unsigned int cpu;
     unsigned int i;
-    unsigned int curcpu = get_cpu();
-
-    /* Instead of clearing (by a previous call to memset())
-     * the counters and using adds, we set the counters
-     * with data used by 'current' CPU
-     *
-     * Bottom half has to be disabled to prevent deadlock
-     * if new softirq were to run and call ipt_do_table
-     */
-    local_bh_disable();
-    i = 0;
-    xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-        SET_COUNTER(counters[i], iter->counters.bcnt,
-                iter->counters.pcnt);
-        ++i;
-    }
-    local_bh_enable();
-    /* Processing counters from other cpus, we can let bottom half enabled,
-     * (preemption is disabled)
-     */
 
     for_each_possible_cpu(cpu) {
-        if (cpu == curcpu)
-            continue;
+        seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
         i = 0;
-        local_bh_disable();
-        xt_info_wrlock(cpu);
         xt_entry_foreach(iter, t->entries[cpu], t->size) {
-            ADD_COUNTER(counters[i], iter->counters.bcnt,
-                    iter->counters.pcnt);
+            u64 bcnt, pcnt;
+            unsigned int start;
+
+            do {
+                start = read_seqbegin(lock);
+                bcnt = iter->counters.bcnt;
+                pcnt = iter->counters.pcnt;
+            } while (read_seqretry(lock, start));
+
+            ADD_COUNTER(counters[i], bcnt, pcnt);
             ++i;
         }
-        xt_info_wrunlock(cpu);
-        local_bh_enable();
     }
-    put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -945,7 +928,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
        (other than comefrom, which userspace doesn't care about). */
     countersize = sizeof(struct xt_counters) * private->number;
-    counters = vmalloc(countersize);
+    counters = vzalloc(countersize);
     if (counters == NULL)
         return ERR_PTR(-ENOMEM);
@@ -1217,7 +1200,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
     struct ip6t_entry *iter;
 
     ret = 0;
-    counters = vmalloc(num_counters * sizeof(struct xt_counters));
+    counters = vzalloc(num_counters * sizeof(struct xt_counters));
     if (!counters) {
         ret = -ENOMEM;
         goto out;
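Editor's note on the ah4.c/ah6.c hunks: skb_cow_data() may reallocate or relink the packet's data area, so any pointer computed before the call (the AH and IP header pointers here) can be left dangling. The fix is purely an ordering one; as a fragment mirroring the hunks above:

```c
/* Sketch: derive header pointers only after skb_cow_data() has run,
 * since it may have moved the data. Names follow the hunks above. */
err = skb_cow_data(skb, 0, &trailer);
if (err < 0)
    goto out;
nfrags = err;

/* Safe now: these point into the (possibly new) data area. */
ah  = (struct ip_auth_hdr *)skb->data;
iph = ip_hdr(skb);
ihl = ip_hdrlen(skb);
```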
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index f138b195d657..227ca82eef72 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -185,8 +185,6 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
                      struct ieee80211_mgmt *mgmt,
                      size_t len)
 {
-    struct ieee80211_hw *hw = &local->hw;
-    struct ieee80211_conf *conf = &hw->conf;
     struct tid_ampdu_rx *tid_agg_rx;
     u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status;
     u8 dialog_token;
@@ -231,13 +229,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
         goto end_no_lock;
     }
     /* determine default buffer size */
-    if (buf_size == 0) {
-        struct ieee80211_supported_band *sband;
-
-        sband = local->hw.wiphy->bands[conf->channel->band];
-        buf_size = IEEE80211_MIN_AMPDU_BUF;
-        buf_size = buf_size << sband->ht_cap.ampdu_factor;
-    }
+    if (buf_size == 0)
+        buf_size = IEEE80211_MAX_AMPDU_BUF;
 
     /* examine state machine */
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 485d36bc9a46..a46ff06d7cb8 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -39,6 +39,8 @@ module_param(ieee80211_disable_40mhz_24ghz, bool, 0644);
 MODULE_PARM_DESC(ieee80211_disable_40mhz_24ghz,
          "Disable 40MHz support in the 2.4GHz band");
 
+static struct lock_class_key ieee80211_rx_skb_queue_class;
+
 void ieee80211_configure_filter(struct ieee80211_local *local)
 {
     u64 mc;
@@ -569,7 +571,15 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
     spin_lock_init(&local->filter_lock);
     spin_lock_init(&local->queue_stop_reason_lock);
 
-    skb_queue_head_init(&local->rx_skb_queue);
+    /*
+     * The rx_skb_queue is only accessed from tasklets,
+     * but other SKB queues are used from within IRQ
+     * context. Therefore, this one needs a different
+     * locking class so our direct, non-irq-safe use of
+     * the queue's lock doesn't throw lockdep warnings.
+     */
+    skb_queue_head_init_class(&local->rx_skb_queue,
+                  &ieee80211_rx_skb_queue_class);
 
     INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
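Editor's note on the mac80211/main.c hunk above: lockdep classifies locks by initialization site, so every sk_buff_head would normally share one class. Giving rx_skb_queue its own lock_class_key tells lockdep that its tasklet-only, non-irq-safe locking pattern is a separate class, not a violation of the rules learned from other skb queues. The minimal pattern, as used in the hunk:

```c
/* One static key per distinct locking pattern; lockdep then treats
 * this queue's lock as its own class rather than lumping it with
 * every other sk_buff_head. */
static struct lock_class_key my_queue_class;

skb_queue_head_init_class(&local->rx_skb_queue, &my_queue_class);
```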
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 715d56c85475..61c73945bb94 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -688,25 +688,23 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
     struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
     u_int8_t l3proto = nfmsg->nfgen_family;
 
-    rcu_read_lock();
+    spin_lock_bh(&nf_conntrack_lock);
     last = (struct nf_conn *)cb->args[1];
     for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
 restart:
-        hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]],
+        hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
                        hnnode) {
             if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
                 continue;
             ct = nf_ct_tuplehash_to_ctrack(h);
-            if (!atomic_inc_not_zero(&ct->ct_general.use))
-                continue;
             /* Dump entries of a given L3 protocol number.
              * If it is not specified, ie. l3proto == 0,
              * then dump everything. */
             if (l3proto && nf_ct_l3num(ct) != l3proto)
-                goto releasect;
+                continue;
             if (cb->args[1]) {
                 if (ct != last)
-                    goto releasect;
+                    continue;
                 cb->args[1] = 0;
             }
             if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
@@ -724,8 +722,6 @@ restart:
                 if (acct)
                     memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
             }
-releasect:
-        nf_ct_put(ct);
         }
         if (cb->args[1]) {
             cb->args[1] = 0;
@@ -733,7 +729,7 @@ releasect:
         }
     }
 out:
-    rcu_read_unlock();
+    spin_unlock_bh(&nf_conntrack_lock);
     if (last)
         nf_ct_put(last);
 
@@ -971,7 +967,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
     u16 zone;
     int err;
 
-    if (nlh->nlmsg_flags & NLM_F_DUMP)
+    if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP)
         return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
                       ctnetlink_done);
 
@@ -1019,7 +1015,8 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
 free:
     kfree_skb(skb2);
 out:
-    return err;
+    /* this avoids a loop in nfnetlink. */
+    return err == -EAGAIN ? -ENOBUFS : err;
 }
 
 #ifdef CONFIG_NF_NAT_NEEDED
@@ -1835,7 +1832,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
     u16 zone;
     int err;
 
-    if (nlh->nlmsg_flags & NLM_F_DUMP) {
+    if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
         return netlink_dump_start(ctnl, skb, nlh,
                       ctnetlink_exp_dump_table,
                       ctnetlink_exp_done);
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index fbc2b72091e0..0a77d2ff2154 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1355,7 +1355,8 @@ static int __init xt_init(void)
 
     for_each_possible_cpu(i) {
         struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
-        spin_lock_init(&lock->lock);
+
+        seqlock_init(&lock->lock);
         lock->readers = 0;
     }
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 1781d99145e2..f83cb370292b 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -519,7 +519,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
         security_netlink_recv(skb, CAP_NET_ADMIN))
         return -EPERM;
 
-    if (nlh->nlmsg_flags & NLM_F_DUMP) {
+    if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
         if (ops->dumpit == NULL)
             return -EOPNOTSUPP;
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index fd95beb72f5d..1072b2c19d31 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -37,7 +37,7 @@
 /* Transport protocol registration */
 static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly;
 
-static struct phonet_protocol *phonet_proto_get(int protocol)
+static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
 {
     struct phonet_protocol *pp;
@@ -458,7 +458,7 @@ static struct packet_type phonet_packet_type __read_mostly = {
 
 static DEFINE_MUTEX(proto_tab_lock);
 
-int __init_or_module phonet_proto_register(int protocol,
+int __init_or_module phonet_proto_register(unsigned int protocol,
                         struct phonet_protocol *pp)
 {
     int err = 0;
@@ -481,7 +481,7 @@ int __init_or_module phonet_proto_register(int protocol,
 }
 EXPORT_SYMBOL(phonet_proto_register);
 
-void phonet_proto_unregister(int protocol, struct phonet_protocol *pp)
+void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp)
 {
     mutex_lock(&proto_tab_lock);
     BUG_ON(proto_tab[protocol] != pp);
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 0b9bb2085ce4..74c064c0dfdd 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -808,7 +808,7 @@ static int __init af_rxrpc_init(void)
         goto error_call_jar;
     }
 
-    rxrpc_workqueue = create_workqueue("krxrpcd");
+    rxrpc_workqueue = alloc_workqueue("krxrpcd", 0, 1);
     if (!rxrpc_workqueue) {
         printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n");
         goto error_work_queue;
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 4b753ef70bb7..204055302992 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -24,7 +24,7 @@ menuconfig NET_SCHED
       To administer these schedulers, you'll need the user-level utilities
       from the package iproute2+tc at <ftp://ftp.tux.org/pub/net/ip-routing/>.
       That package also contains some documentation; for more, check out
-      <http://linux-net.osdl.org/index.php/Iproute2>.
+      <http://www.linuxfoundation.org/collaborate/workgroups/networking/iproute2>.
 
       This Quality of Service (QoS) support will enable you to use
       Differentiated Services (diffserv) and Resource Reservation Protocol
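Editor's note: two call sites in this merge (ceph's messenger above, rxrpc here) move from create_workqueue() to alloc_workqueue(), whose flags and max_active arguments make the concurrency contract explicit rather than implied. The two call shapes, exactly as they appear in the hunks:

```c
/* ceph: any CPU may run the work, but no work item runs concurrently
 * with itself (WQ_NON_REENTRANT); 0 selects the default max_active. */
wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);

/* rxrpc: no special flags, at most one item in flight (max_active = 1),
 * which preserves the old single-thread ordering. */
wq = alloc_workqueue("krxrpcd", 0, 1);
```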
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 67dc7ce9b63a..83ddfc07e45d 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -508,8 +508,7 @@ static int tcf_csum(struct sk_buff *skb,
     spin_lock(&p->tcf_lock);
     p->tcf_tm.lastuse = jiffies;
-    p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    p->tcf_bstats.packets++;
+    bstats_update(&p->tcf_bstats, skb);
     action = p->tcf_action;
     update_flags = p->update_flags;
     spin_unlock(&p->tcf_lock);
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 8daef9632255..c2a7c20e81c1 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -209,8 +209,7 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
     spin_lock(&ipt->tcf_lock);
 
     ipt->tcf_tm.lastuse = jiffies;
-    ipt->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    ipt->tcf_bstats.packets++;
+    bstats_update(&ipt->tcf_bstats, skb);
 
     /* yes, we have to worry about both in and out dev
        worry later - danger - this API seems to have changed
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 0c311be92827..d765067e99db 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -165,8 +165,7 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
 
     spin_lock(&m->tcf_lock);
     m->tcf_tm.lastuse = jiffies;
-    m->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    m->tcf_bstats.packets++;
+    bstats_update(&m->tcf_bstats, skb);
 
     dev = m->tcfm_dev;
     if (!dev) {
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 186eb837e600..178a4bd7b7cb 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -125,8 +125,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
     egress = p->flags & TCA_NAT_FLAG_EGRESS;
     action = p->tcf_action;
 
-    p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    p->tcf_bstats.packets++;
+    bstats_update(&p->tcf_bstats, skb);
 
     spin_unlock(&p->tcf_lock);
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index a0593c9640db..445bef716f77 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -187,8 +187,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 bad:
     p->tcf_qstats.overlimits++;
 done:
-    p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    p->tcf_bstats.packets++;
+    bstats_update(&p->tcf_bstats, skb);
     spin_unlock(&p->tcf_lock);
     return p->tcf_action;
 }
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 7ebf7439b478..e2f08b1e2e58 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -298,8 +298,7 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
 
     spin_lock(&police->tcf_lock);
 
-    police->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    police->tcf_bstats.packets++;
+    bstats_update(&police->tcf_bstats, skb);
 
     if (police->tcfp_ewma_rate &&
         police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 97e84f3ee775..7287cff7af3e 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -42,8 +42,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result
 
     spin_lock(&d->tcf_lock);
     d->tcf_tm.lastuse = jiffies;
-    d->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    d->tcf_bstats.packets++;
+    bstats_update(&d->tcf_bstats, skb);
 
     /* print policy string followed by _ then packet count
      * Example if this was the 3rd packet and the string was "hello"
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 66cbf4eb8855..836f5fee9e58 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -46,8 +46,7 @@ static int tcf_skbedit(struct sk_buff *skb, struct tc_action *a,
 
     spin_lock(&d->tcf_lock);
     d->tcf_tm.lastuse = jiffies;
-    d->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    d->tcf_bstats.packets++;
+    bstats_update(&d->tcf_bstats, skb);
 
     if (d->flags & SKBEDIT_F_PRIORITY)
         skb->priority = d->priority;
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 282540778aa8..943d733409d0 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -422,10 +422,8 @@ drop: __maybe_unused
         }
         return ret;
     }
-    sch->bstats.bytes += qdisc_pkt_len(skb);
-    sch->bstats.packets++;
-    flow->bstats.bytes += qdisc_pkt_len(skb);
-    flow->bstats.packets++;
+    qdisc_bstats_update(sch, skb);
+    bstats_update(&flow->bstats, skb);
     /*
      * Okay, this may seem weird. We pretend we've dropped the packet if
      * it goes via ATM. The reason for this is that the outer qdisc
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index eb7631590865..c80d1c210c5d 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -390,8 +390,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
     ret = qdisc_enqueue(skb, cl->q);
     if (ret == NET_XMIT_SUCCESS) {
         sch->q.qlen++;
-        sch->bstats.packets++;
-        sch->bstats.bytes += qdisc_pkt_len(skb);
+        qdisc_bstats_update(sch, skb);
         cbq_mark_toplevel(q, cl);
         if (!cl->next_alive)
             cbq_activate_class(cl);
@@ -650,8 +649,7 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
         ret = qdisc_enqueue(skb, cl->q);
         if (ret == NET_XMIT_SUCCESS) {
             sch->q.qlen++;
-            sch->bstats.packets++;
-            sch->bstats.bytes += qdisc_pkt_len(skb);
+            qdisc_bstats_update(sch, skb);
             if (!cl->next_alive)
                 cbq_activate_class(cl);
             return 0;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index aa8b5313f8cf..de55e642eafc 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -351,7 +351,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
     struct drr_sched *q = qdisc_priv(sch);
     struct drr_class *cl;
-    unsigned int len;
     int err;
 
     cl = drr_classify(skb, sch, &err);
@@ -362,7 +361,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         return err;
     }
 
-    len = qdisc_pkt_len(skb);
     err = qdisc_enqueue(skb, cl->qdisc);
     if (unlikely(err != NET_XMIT_SUCCESS)) {
         if (net_xmit_drop_count(err)) {
@@ -377,10 +375,8 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         cl->deficit = cl->quantum;
     }
 
-    cl->bstats.packets++;
-    cl->bstats.bytes += len;
-    sch->bstats.packets++;
-    sch->bstats.bytes += len;
+    bstats_update(&cl->bstats, skb);
+    qdisc_bstats_update(sch, skb);
     sch->q.qlen++;
     return err;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 1d295d62bb5c..60f4bdd4408e 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -260,8 +260,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         return err;
     }
 
-    sch->bstats.bytes += qdisc_pkt_len(skb);
-    sch->bstats.packets++;
+    qdisc_bstats_update(sch, skb);
     sch->q.qlen++;
 
     return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 069c62b7bb36..2e45791d4f6c 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1599,10 +1599,8 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
     if (cl->qdisc->q.qlen == 1)
         set_active(cl, qdisc_pkt_len(skb));
 
-    cl->bstats.packets++;
-    cl->bstats.bytes += qdisc_pkt_len(skb);
-    sch->bstats.packets++;
-    sch->bstats.bytes += qdisc_pkt_len(skb);
+    bstats_update(&cl->bstats, skb);
+    qdisc_bstats_update(sch, skb);
     sch->q.qlen++;
 
     return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 01b519d6c52d..984c1b0c6836 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -569,15 +569,12 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         }
         return ret;
     } else {
-        cl->bstats.packets +=
-            skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-        cl->bstats.bytes += qdisc_pkt_len(skb);
+        bstats_update(&cl->bstats, skb);
         htb_activate(q, cl);
     }
 
     sch->q.qlen++;
-    sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-    sch->bstats.bytes += qdisc_pkt_len(skb);
+    qdisc_bstats_update(sch, skb);
     return NET_XMIT_SUCCESS;
 }
@@ -648,12 +645,10 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
             htb_add_to_wait_tree(q, cl, diff);
         }
 
-        /* update byte stats except for leaves which are already updated */
-        if (cl->level) {
-            cl->bstats.bytes += bytes;
-            cl->bstats.packets += skb_is_gso(skb)?
-                    skb_shinfo(skb)->gso_segs:1;
-        }
+        /* update basic stats except for leaves which are already updated */
+        if (cl->level)
+            bstats_update(&cl->bstats, skb);
+
         cl = cl->parent;
     }
 }
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index f10e34a68445..bce1665239b8 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -63,8 +63,7 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
     result = tc_classify(skb, p->filter_list, &res);
 
-    sch->bstats.packets++;
-    sch->bstats.bytes += qdisc_pkt_len(skb);
+    qdisc_bstats_update(sch, skb);
     switch (result) {
     case TC_ACT_SHOT:
         result = TC_ACT_SHOT;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 32690deab5d0..21f13da24763 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -83,8 +83,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
     ret = qdisc_enqueue(skb, qdisc);
     if (ret == NET_XMIT_SUCCESS) {
-        sch->bstats.bytes += qdisc_pkt_len(skb);
-        sch->bstats.packets++;
+        qdisc_bstats_update(sch, skb);
         sch->q.qlen++;
         return NET_XMIT_SUCCESS;
     }
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index e5593c083a78..1c4bce863479 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -240,8 +240,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
     if (likely(ret == NET_XMIT_SUCCESS)) {
         sch->q.qlen++;
-        sch->bstats.bytes += qdisc_pkt_len(skb);
-        sch->bstats.packets++;
+        qdisc_bstats_update(sch, skb);
     } else if (net_xmit_drop_count(ret)) {
         sch->qstats.drops++;
     }
@@ -477,8 +476,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
         __skb_queue_after(list, skb, nskb);
 
         sch->qstats.backlog += qdisc_pkt_len(nskb);
-        sch->bstats.bytes += qdisc_pkt_len(nskb);
-        sch->bstats.packets++;
+        qdisc_bstats_update(sch, nskb);
 
         return NET_XMIT_SUCCESS;
     }
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index b1c95bce33ce..966158d49dd1 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -84,8 +84,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
     ret = qdisc_enqueue(skb, qdisc);
     if (ret == NET_XMIT_SUCCESS) {
-        sch->bstats.bytes += qdisc_pkt_len(skb);
-        sch->bstats.packets++;
+        qdisc_bstats_update(sch, skb);
         sch->q.qlen++;
         return NET_XMIT_SUCCESS;
     }
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index a67ba3c5a0cc..a6009c5a2c97 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -94,8 +94,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
     ret = qdisc_enqueue(skb, child);
     if (likely(ret == NET_XMIT_SUCCESS)) {
-        sch->bstats.bytes += qdisc_pkt_len(skb);
-        sch->bstats.packets++;
+        qdisc_bstats_update(sch, skb);
         sch->q.qlen++;
     } else if (net_xmit_drop_count(ret)) {
         q->stats.pdrop++;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index d54ac94066c2..239ec53a634d 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -403,8 +403,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         slot->allot = q->scaled_quantum;
     }
     if (++sch->q.qlen <= q->limit) {
-        sch->bstats.bytes += qdisc_pkt_len(skb);
-        sch->bstats.packets++;
+        qdisc_bstats_update(sch, skb);
         return NET_XMIT_SUCCESS;
     }
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 641a30d64635..77565e721811 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -134,8 +134,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
     }
 
     sch->q.qlen++;
-    sch->bstats.bytes += qdisc_pkt_len(skb);
-    sch->bstats.packets++;
+    qdisc_bstats_update(sch, skb);
     return NET_XMIT_SUCCESS;
 }
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 106479a7c94a..84ce48eadff4 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -59,6 +59,10 @@ struct teql_master
     struct net_device *dev;
     struct Qdisc *slaves;
     struct list_head master_list;
+    unsigned long   tx_bytes;
+    unsigned long   tx_packets;
+    unsigned long   tx_errors;
+    unsigned long   tx_dropped;
 };
 
 struct teql_sched_data
@@ -83,8 +87,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
     if (q->q.qlen < dev->tx_queue_len) {
         __skb_queue_tail(&q->q, skb);
-        sch->bstats.bytes += qdisc_pkt_len(skb);
-        sch->bstats.packets++;
+        qdisc_bstats_update(sch, skb);
         return NET_XMIT_SUCCESS;
     }
 
@@ -275,7 +278,6 @@ static inline int teql_resolve(struct sk_buff *skb,
 static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
 {
     struct teql_master *master = netdev_priv(dev);
-    struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
     struct Qdisc *start, *q;
     int busy;
     int nores;
@@ -315,8 +317,8 @@ restart:
                 __netif_tx_unlock(slave_txq);
                 master->slaves = NEXT_SLAVE(q);
                 netif_wake_queue(dev);
-                txq->tx_packets++;
-                txq->tx_bytes += length;
+                master->tx_packets++;
+                master->tx_bytes += length;
                 return NETDEV_TX_OK;
             }
             __netif_tx_unlock(slave_txq);
@@ -343,10 +345,10 @@ restart:
         netif_stop_queue(dev);
         return NETDEV_TX_BUSY;
     }
-    dev->stats.tx_errors++;
+    master->tx_errors++;
 
 drop:
-    txq->tx_dropped++;
+    master->tx_dropped++;
     dev_kfree_skb(skb);
     return NETDEV_TX_OK;
 }
@@ -399,6 +401,18 @@ static int teql_master_close(struct net_device *dev)
     return 0;
 }
 
+static struct rtnl_link_stats64 *teql_master_stats64(struct net_device *dev,
+                             struct rtnl_link_stats64 *stats)
+{
+    struct teql_master *m = netdev_priv(dev);
+
+    stats->tx_packets   = m->tx_packets;
+    stats->tx_bytes     = m->tx_bytes;
+    stats->tx_errors    = m->tx_errors;
+    stats->tx_dropped   = m->tx_dropped;
+    return stats;
+}
+
 static int teql_master_mtu(struct net_device *dev, int new_mtu)
 {
     struct teql_master *m = netdev_priv(dev);
@@ -423,6 +437,7 @@ static const struct net_device_ops teql_netdev_ops = {
     .ndo_open       = teql_master_open,
     .ndo_stop       = teql_master_close,
     .ndo_start_xmit = teql_master_xmit,
+    .ndo_get_stats64 = teql_master_stats64,
     .ndo_change_mtu = teql_master_mtu,
 };
diff --git a/net/socket.c b/net/socket.c
index ccc576a6a508..ac2219f90d5d 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -306,20 +306,6 @@ static const struct super_operations sockfs_ops = {
     .statfs = simple_statfs,
 };
 
-static struct dentry *sockfs_mount(struct file_system_type *fs_type,
-             int flags, const char *dev_name, void *data)
-{
-    return mount_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC);
-}
-
-static struct vfsmount *sock_mnt
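Editor's note: every act_*.c and sch_*.c hunk in the remainder of this merge collapses the open-coded pair `bytes += qdisc_pkt_len(skb); packets++` into bstats_update()/qdisc_bstats_update(). Inferred from the call sites here (notably sch_htb.c, which previously counted GSO segments by hand and loses that open-coding without changing behavior), the helper plausibly looks like the following; treat this as a reconstruction, not a quote of the header:

```c
/* Sketch of the consolidated helper: byte count from the qdisc-visible
 * packet length, packet count honouring GSO segmentation. */
static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
                 struct sk_buff *skb)
{
    bstats->bytes += qdisc_pkt_len(skb);
    bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}

static inline void qdisc_bstats_update(struct Qdisc *sch, struct sk_buff *skb)
{
    bstats_update(&sch->bstats, skb);
}
```

Centralizing the update also means qdiscs that never accounted GSO segments correctly (most of the ones touched below) now get that for free.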
__read_mostly; - -static struct file_system_type sock_fs_type = { - .name = "sockfs", - .mount = sockfs_mount, - .kill_sb = kill_anon_super, -}; - /* * sockfs_dname() is called from d_path(). */ @@ -333,6 +319,21 @@ static const struct dentry_operations sockfs_dentry_operations = { .d_dname = sockfs_dname, }; +static struct dentry *sockfs_mount(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data) +{ + return mount_pseudo(fs_type, "socket:", &sockfs_ops, + &sockfs_dentry_operations, SOCKFS_MAGIC); +} + +static struct vfsmount *sock_mnt __read_mostly; + +static struct file_system_type sock_fs_type = { + .name = "sockfs", + .mount = sockfs_mount, + .kill_sb = kill_anon_super, +}; + /* * Obtains the first available file descriptor and sets it up for use. * @@ -368,7 +369,6 @@ static int sock_alloc_file(struct socket *sock, struct file **f, int flags) } path.mnt = mntget(sock_mnt); - d_set_d_op(path.dentry, &sockfs_dentry_operations); d_instantiate(path.dentry, SOCK_INODE(sock)); SOCK_INODE(sock)->i_fop = &socket_file_ops; diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index afe67849269f..67e31276682a 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -563,8 +563,17 @@ rpcauth_checkverf(struct rpc_task *task, __be32 *p) return cred->cr_ops->crvalidate(task, p); } +static void rpcauth_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp, + __be32 *data, void *obj) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &rqstp->rq_snd_buf, data); + encode(rqstp, &xdr, obj); +} + int -rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, +rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, __be32 *data, void *obj) { struct rpc_cred *cred = task->tk_rqstp->rq_cred; @@ -574,11 +583,22 @@ rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, if (cred->cr_ops->crwrap_req) return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj); /* By default, we encode the arguments normally. */ - return encode(rqstp, data, obj); + rpcauth_wrap_req_encode(encode, rqstp, data, obj); + return 0; +} + +static int +rpcauth_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp, + __be32 *data, void *obj) +{ + struct xdr_stream xdr; + + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, data); + return decode(rqstp, &xdr, obj); } int -rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, +rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, __be32 *data, void *obj) { struct rpc_cred *cred = task->tk_rqstp->rq_cred; @@ -589,7 +609,7 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, return cred->cr_ops->crunwrap_resp(task, decode, rqstp, data, obj); /* By default, we decode the arguments normally. 
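The kxdrproc_t conversion running through the sunrpc hunks splits the old dual-purpose callback type into distinct encode and decode types that operate on an xdr_stream rather than a raw buffer pointer. A sketch of the split, matching the signatures visible in the rpcproc_encode_null()/rpcproc_decode_null() hunk further down:

    /* Old: one type for both directions, working on a raw __be32 pointer. */
    typedef int  (*kxdrproc_t)(void *rqstp, __be32 *data, void *obj);

    /* New: encoders write into an xdr_stream and return void; decoders
     * operate on an xdr_stream and still return an error code. */
    typedef void (*kxdreproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj);
    typedef int  (*kxdrdproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj);

Encoders can become void because the send buffer is sized up front from p_arglen; running out of reserved space is then treated as a bug (see the BUG_ON() in encode_rpcb_string() below) rather than a recoverable -EIO.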
*/ - return decode(rqstp, data, obj); + return rpcauth_unwrap_req_decode(decode, rqstp, data, obj); } int diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 3835ce35e224..45dbf1521b9a 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -1231,9 +1231,19 @@ out_bad: return NULL; } +static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp, + __be32 *p, void *obj) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p); + encode(rqstp, &xdr, obj); +} + static inline int gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, - kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj) + kxdreproc_t encode, struct rpc_rqst *rqstp, + __be32 *p, void *obj) { struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; struct xdr_buf integ_buf; @@ -1249,9 +1259,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; *p++ = htonl(rqstp->rq_seqno); - status = encode(rqstp, p, obj); - if (status) - return status; + gss_wrap_req_encode(encode, rqstp, p, obj); if (xdr_buf_subsegment(snd_buf, &integ_buf, offset, snd_buf->len - offset)) @@ -1325,7 +1333,8 @@ out: static inline int gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, - kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj) + kxdreproc_t encode, struct rpc_rqst *rqstp, + __be32 *p, void *obj) { struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; u32 offset; @@ -1342,9 +1351,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; *p++ = htonl(rqstp->rq_seqno); - status = encode(rqstp, p, obj); - if (status) - return status; + gss_wrap_req_encode(encode, rqstp, p, obj); status = alloc_enc_pages(rqstp); if (status) @@ -1394,7 +1401,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, static int gss_wrap_req(struct rpc_task *task, - kxdrproc_t encode, void *rqstp, __be32 *p, void *obj) + kxdreproc_t encode, void *rqstp, __be32 *p, void *obj) { struct rpc_cred *cred = task->tk_rqstp->rq_cred; struct gss_cred *gss_cred = container_of(cred, struct gss_cred, @@ -1407,12 +1414,14 @@ gss_wrap_req(struct rpc_task *task, /* The spec seems a little ambiguous here, but I think that not * wrapping context destruction requests makes the most sense. 
*/ - status = encode(rqstp, p, obj); + gss_wrap_req_encode(encode, rqstp, p, obj); + status = 0; goto out; } switch (gss_cred->gc_service) { case RPC_GSS_SVC_NONE: - status = encode(rqstp, p, obj); + gss_wrap_req_encode(encode, rqstp, p, obj); + status = 0; break; case RPC_GSS_SVC_INTEGRITY: status = gss_wrap_req_integ(cred, ctx, encode, @@ -1494,10 +1503,19 @@ gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, return 0; } +static int +gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp, + __be32 *p, void *obj) +{ + struct xdr_stream xdr; + + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); + return decode(rqstp, &xdr, obj); +} static int gss_unwrap_resp(struct rpc_task *task, - kxdrproc_t decode, void *rqstp, __be32 *p, void *obj) + kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj) { struct rpc_cred *cred = task->tk_rqstp->rq_cred; struct gss_cred *gss_cred = container_of(cred, struct gss_cred, @@ -1528,7 +1546,7 @@ gss_unwrap_resp(struct rpc_task *task, cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp) + (savedlen - head->iov_len); out_decode: - status = decode(rqstp, p, obj); + status = gss_unwrap_req_decode(decode, rqstp, p, obj); out: gss_put_ctx(ctx); dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid, diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index 75ee993ea057..9576f35ab701 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -137,7 +137,7 @@ arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4]) ms_usage = 13; break; default: - return EINVAL;; + return -EINVAL; } salt[0] = (ms_usage >> 0) & 0xff; salt[1] = (ms_usage >> 8) & 0xff; diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index dec2a6fc7c12..bcdae78fdfc6 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -67,7 +67,6 @@ static int netobj_equal(struct xdr_netobj *a, struct xdr_netobj *b) #define RSI_HASHBITS 6 #define RSI_HASHMAX (1<<RSI_HASHBITS) -#define RSI_HASHMASK (RSI_HASHMAX-1) struct rsi { struct cache_head h; @@ -319,7 +318,6 @@ static struct rsi *rsi_update(struct rsi *new, struct rsi *old) #define RSC_HASHBITS 10 #define RSC_HASHMAX (1<<RSC_HASHBITS) -#define RSC_HASHMASK (RSC_HASHMAX-1) #define GSS_SEQ_WIN 128 diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c index 7dcfe0cc3500..1dd1a6890007 100644 --- a/net/sunrpc/bc_svc.c +++ b/net/sunrpc/bc_svc.c @@ -59,8 +59,8 @@ int bc_send(struct rpc_rqst *req) ret = task->tk_status; rpc_put_task(task); } - return ret; dprintk("RPC: bc_send ret= %d\n", ret); + return ret; } #endif /* CONFIG_NFS_V4_1 */ diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index e433e7580e27..72ad836e4fe0 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -37,7 +37,7 @@ #define RPCDBG_FACILITY RPCDBG_CACHE -static void cache_defer_req(struct cache_req *req, struct cache_head *item); +static bool cache_defer_req(struct cache_req *req, struct cache_head *item); static void cache_revisit_request(struct cache_head *item); static void cache_init(struct cache_head *h) @@ -128,6 +128,7 @@ static void cache_fresh_locked(struct cache_head *head, time_t expiry) { head->expiry_time = expiry; head->last_refresh = seconds_since_boot(); + smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */ set_bit(CACHE_VALID, &head->flags); } @@ -208,11 +209,36 @@ static inline int cache_is_valid(struct cache_detail *detail, struct cache_head /* entry is valid */ if 
(test_bit(CACHE_NEGATIVE, &h->flags)) return -ENOENT; - else + else { + /* + * In combination with write barrier in + * sunrpc_cache_update, ensures that anyone + * using the cache entry after this sees the + * updated contents: + */ + smp_rmb(); return 0; + } } } +static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h) +{ + int rv; + + write_lock(&detail->hash_lock); + rv = cache_is_valid(detail, h); + if (rv != -EAGAIN) { + write_unlock(&detail->hash_lock); + return rv; + } + set_bit(CACHE_NEGATIVE, &h->flags); + cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY); + write_unlock(&detail->hash_lock); + cache_fresh_unlocked(h, detail); + return -ENOENT; +} + /* * This is the generic cache management routine for all * the authentication caches. @@ -251,14 +277,8 @@ int cache_check(struct cache_detail *detail, case -EINVAL: clear_bit(CACHE_PENDING, &h->flags); cache_revisit_request(h); - if (rv == -EAGAIN) { - set_bit(CACHE_NEGATIVE, &h->flags); - cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY); - cache_fresh_unlocked(h, detail); - rv = -ENOENT; - } + rv = try_to_negate_entry(detail, h); break; - case -EAGAIN: clear_bit(CACHE_PENDING, &h->flags); cache_revisit_request(h); @@ -268,9 +288,11 @@ int cache_check(struct cache_detail *detail, } if (rv == -EAGAIN) { - cache_defer_req(rqstp, h); - if (!test_bit(CACHE_PENDING, &h->flags)) { - /* Request is not deferred */ + if (!cache_defer_req(rqstp, h)) { + /* + * Request was not deferred; handle it as best + * we can ourselves: + */ rv = cache_is_valid(detail, h); if (rv == -EAGAIN) rv = -ETIMEDOUT; @@ -618,18 +640,19 @@ static void cache_limit_defers(void) discard->revisit(discard, 1); } -static void cache_defer_req(struct cache_req *req, struct cache_head *item) +/* Return true if and only if a deferred request is queued. 
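The smp_wmb()/smp_rmb() pair added in cache_fresh_locked() and cache_is_valid() above is the standard publish/consume pattern: the writer must make the entry contents globally visible before setting the valid bit, and the reader must order the bit test before it reads those contents. Reduced to a generic sketch (names here are illustrative, not sunrpc API):

    /* Writer: publish the data, then the flag. */
    item->payload = new_value;
    smp_wmb();                          /* pairs with smp_rmb() below */
    set_bit(CACHE_VALID, &item->flags);

    /* Reader: test the flag, then consume the data. */
    if (test_bit(CACHE_VALID, &item->flags)) {
            smp_rmb();                  /* pairs with smp_wmb() above */
            use(item->payload);
    }

Without the pair, a reader on another CPU could observe CACHE_VALID set while still seeing the entry's old contents.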
*/ +static bool cache_defer_req(struct cache_req *req, struct cache_head *item) { struct cache_deferred_req *dreq; if (req->thread_wait) { cache_wait_req(req, item); if (!test_bit(CACHE_PENDING, &item->flags)) - return; + return false; } dreq = req->defer(req); if (dreq == NULL) - return; + return false; setup_deferral(dreq, item, 1); if (!test_bit(CACHE_PENDING, &item->flags)) /* Bit could have been cleared before we managed to @@ -638,6 +661,7 @@ static void cache_defer_req(struct cache_req *req, struct cache_head *item) cache_revisit_request(item); cache_limit_defers(); + return true; } static void cache_revisit_request(struct cache_head *item) diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 92ce94f5146b..57d344cf2256 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1095,7 +1095,7 @@ static void rpc_xdr_encode(struct rpc_task *task) { struct rpc_rqst *req = task->tk_rqstp; - kxdrproc_t encode; + kxdreproc_t encode; __be32 *p; dprint_status(task); @@ -1535,7 +1535,7 @@ call_decode(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; struct rpc_rqst *req = task->tk_rqstp; - kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode; + kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode; __be32 *p; dprintk("RPC: %5u call_decode (status %d)\n", @@ -1776,12 +1776,11 @@ out_overflow: goto out_garbage; } -static int rpcproc_encode_null(void *rqstp, __be32 *data, void *obj) +static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj) { - return 0; } -static int rpcproc_decode_null(void *rqstp, __be32 *data, void *obj) +static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj) { return 0; } @@ -1830,23 +1829,15 @@ static void rpc_show_task(const struct rpc_clnt *clnt, const struct rpc_task *task) { const char *rpc_waitq = "none"; - char *p, action[KSYM_SYMBOL_LEN]; if (RPC_IS_QUEUED(task)) rpc_waitq = rpc_qname(task->tk_waitqueue); - /* map tk_action pointer to a function name; then trim off - * the "+0x0 [sunrpc]" */ - sprint_symbol(action, (unsigned long)task->tk_action); - p = strchr(action, '+'); - if (p) - *p = '\0'; - - printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%s q:%s\n", + printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n", task->tk_pid, task->tk_flags, task->tk_status, clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops, clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task), - action, rpc_waitq); + task->tk_action, rpc_waitq); } void rpc_show_tasks(void) diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 09f01f41e55a..72bc53683965 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -474,7 +474,7 @@ static int __rpc_create_common(struct inode *dir, struct dentry *dentry, { struct inode *inode; - BUG_ON(!d_unhashed(dentry)); + d_drop(dentry); inode = rpc_get_inode(dir->i_sb, mode); if (!inode) goto out_err; diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index fa6d7ca2c851..c652e4cc9fe9 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c @@ -57,10 +57,6 @@ enum { RPCBPROC_GETSTAT, }; -#define RPCB_HIGHPROC_2 RPCBPROC_CALLIT -#define RPCB_HIGHPROC_3 RPCBPROC_TADDR2UADDR -#define RPCB_HIGHPROC_4 RPCBPROC_GETSTAT - /* * r_owner * @@ -693,46 +689,37 @@ static void rpcb_getport_done(struct rpc_task *child, void *data) * XDR functions for rpcbind */ -static int rpcb_enc_mapping(struct rpc_rqst *req, __be32 *p, - const struct rpcbind_args *rpcb) +static void rpcb_enc_mapping(struct rpc_rqst *req, struct xdr_stream *xdr, + const struct 
rpcbind_args *rpcb) { struct rpc_task *task = req->rq_task; - struct xdr_stream xdr; + __be32 *p; dprintk("RPC: %5u encoding PMAP_%s call (%u, %u, %d, %u)\n", task->tk_pid, task->tk_msg.rpc_proc->p_name, rpcb->r_prog, rpcb->r_vers, rpcb->r_prot, rpcb->r_port); - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - - p = xdr_reserve_space(&xdr, sizeof(__be32) * RPCB_mappingargs_sz); - if (unlikely(p == NULL)) - return -EIO; - - *p++ = htonl(rpcb->r_prog); - *p++ = htonl(rpcb->r_vers); - *p++ = htonl(rpcb->r_prot); - *p = htonl(rpcb->r_port); - - return 0; + p = xdr_reserve_space(xdr, RPCB_mappingargs_sz << 2); + *p++ = cpu_to_be32(rpcb->r_prog); + *p++ = cpu_to_be32(rpcb->r_vers); + *p++ = cpu_to_be32(rpcb->r_prot); + *p = cpu_to_be32(rpcb->r_port); } -static int rpcb_dec_getport(struct rpc_rqst *req, __be32 *p, +static int rpcb_dec_getport(struct rpc_rqst *req, struct xdr_stream *xdr, struct rpcbind_args *rpcb) { struct rpc_task *task = req->rq_task; - struct xdr_stream xdr; unsigned long port; - - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + __be32 *p; rpcb->r_port = 0; - p = xdr_inline_decode(&xdr, sizeof(__be32)); + p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) return -EIO; - port = ntohl(*p); + port = be32_to_cpup(p); dprintk("RPC: %5u PMAP_%s result: %lu\n", task->tk_pid, task->tk_msg.rpc_proc->p_name, port); if (unlikely(port > USHRT_MAX)) @@ -742,20 +729,18 @@ static int rpcb_dec_getport(struct rpc_rqst *req, __be32 *p, return 0; } -static int rpcb_dec_set(struct rpc_rqst *req, __be32 *p, +static int rpcb_dec_set(struct rpc_rqst *req, struct xdr_stream *xdr, unsigned int *boolp) { struct rpc_task *task = req->rq_task; - struct xdr_stream xdr; - - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + __be32 *p; - p = xdr_inline_decode(&xdr, sizeof(__be32)); + p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) return -EIO; *boolp = 0; - if (*p) + if (*p != xdr_zero) *boolp = 1; dprintk("RPC: %5u RPCB_%s call %s\n", @@ -764,73 +749,53 @@ static int rpcb_dec_set(struct rpc_rqst *req, __be32 *p, return 0; } -static int encode_rpcb_string(struct xdr_stream *xdr, const char *string, - const u32 maxstrlen) +static void encode_rpcb_string(struct xdr_stream *xdr, const char *string, + const u32 maxstrlen) { - u32 len; __be32 *p; + u32 len; - if (unlikely(string == NULL)) - return -EIO; len = strlen(string); - if (unlikely(len > maxstrlen)) - return -EIO; - - p = xdr_reserve_space(xdr, sizeof(__be32) + len); - if (unlikely(p == NULL)) - return -EIO; + BUG_ON(len > maxstrlen); + p = xdr_reserve_space(xdr, 4 + len); xdr_encode_opaque(p, string, len); - - return 0; } -static int rpcb_enc_getaddr(struct rpc_rqst *req, __be32 *p, - const struct rpcbind_args *rpcb) +static void rpcb_enc_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr, + const struct rpcbind_args *rpcb) { struct rpc_task *task = req->rq_task; - struct xdr_stream xdr; + __be32 *p; dprintk("RPC: %5u encoding RPCB_%s call (%u, %u, '%s', '%s')\n", task->tk_pid, task->tk_msg.rpc_proc->p_name, rpcb->r_prog, rpcb->r_vers, rpcb->r_netid, rpcb->r_addr); - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - - p = xdr_reserve_space(&xdr, - sizeof(__be32) * (RPCB_program_sz + RPCB_version_sz)); - if (unlikely(p == NULL)) - return -EIO; - *p++ = htonl(rpcb->r_prog); - *p = htonl(rpcb->r_vers); - - if (encode_rpcb_string(&xdr, rpcb->r_netid, RPCBIND_MAXNETIDLEN)) - return -EIO; - if (encode_rpcb_string(&xdr, rpcb->r_addr, RPCBIND_MAXUADDRLEN)) - return -EIO; - if (encode_rpcb_string(&xdr, rpcb->r_owner, RPCB_MAXOWNERLEN)) - return -EIO; + p = 
xdr_reserve_space(xdr, (RPCB_program_sz + RPCB_version_sz) << 2); + *p++ = cpu_to_be32(rpcb->r_prog); + *p = cpu_to_be32(rpcb->r_vers); - return 0; + encode_rpcb_string(xdr, rpcb->r_netid, RPCBIND_MAXNETIDLEN); + encode_rpcb_string(xdr, rpcb->r_addr, RPCBIND_MAXUADDRLEN); + encode_rpcb_string(xdr, rpcb->r_owner, RPCB_MAXOWNERLEN); } -static int rpcb_dec_getaddr(struct rpc_rqst *req, __be32 *p, +static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr, struct rpcbind_args *rpcb) { struct sockaddr_storage address; struct sockaddr *sap = (struct sockaddr *)&address; struct rpc_task *task = req->rq_task; - struct xdr_stream xdr; + __be32 *p; u32 len; rpcb->r_port = 0; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - - p = xdr_inline_decode(&xdr, sizeof(__be32)); + p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) goto out_fail; - len = ntohl(*p); + len = be32_to_cpup(p); /* * If the returned universal address is a null string, @@ -845,7 +810,7 @@ static int rpcb_dec_getaddr(struct rpc_rqst *req, __be32 *p, if (unlikely(len > RPCBIND_MAXUADDRLEN)) goto out_fail; - p = xdr_inline_decode(&xdr, len); + p = xdr_inline_decode(xdr, len); if (unlikely(p == NULL)) goto out_fail; dprintk("RPC: %5u RPCB_%s reply: %s\n", task->tk_pid, @@ -871,8 +836,8 @@ out_fail: static struct rpc_procinfo rpcb_procedures2[] = { [RPCBPROC_SET] = { .p_proc = RPCBPROC_SET, - .p_encode = (kxdrproc_t)rpcb_enc_mapping, - .p_decode = (kxdrproc_t)rpcb_dec_set, + .p_encode = (kxdreproc_t)rpcb_enc_mapping, + .p_decode = (kxdrdproc_t)rpcb_dec_set, .p_arglen = RPCB_mappingargs_sz, .p_replen = RPCB_setres_sz, .p_statidx = RPCBPROC_SET, @@ -881,8 +846,8 @@ static struct rpc_procinfo rpcb_procedures2[] = { }, [RPCBPROC_UNSET] = { .p_proc = RPCBPROC_UNSET, - .p_encode = (kxdrproc_t)rpcb_enc_mapping, - .p_decode = (kxdrproc_t)rpcb_dec_set, + .p_encode = (kxdreproc_t)rpcb_enc_mapping, + .p_decode = (kxdrdproc_t)rpcb_dec_set, .p_arglen = RPCB_mappingargs_sz, .p_replen = RPCB_setres_sz, .p_statidx = RPCBPROC_UNSET, @@ -891,8 +856,8 @@ static struct rpc_procinfo rpcb_procedures2[] = { }, [RPCBPROC_GETPORT] = { .p_proc = RPCBPROC_GETPORT, - .p_encode = (kxdrproc_t)rpcb_enc_mapping, - .p_decode = (kxdrproc_t)rpcb_dec_getport, + .p_encode = (kxdreproc_t)rpcb_enc_mapping, + .p_decode = (kxdrdproc_t)rpcb_dec_getport, .p_arglen = RPCB_mappingargs_sz, .p_replen = RPCB_getportres_sz, .p_statidx = RPCBPROC_GETPORT, @@ -904,8 +869,8 @@ static struct rpc_procinfo rpcb_procedures2[] = { static struct rpc_procinfo rpcb_procedures3[] = { [RPCBPROC_SET] = { .p_proc = RPCBPROC_SET, - .p_encode = (kxdrproc_t)rpcb_enc_getaddr, - .p_decode = (kxdrproc_t)rpcb_dec_set, + .p_encode = (kxdreproc_t)rpcb_enc_getaddr, + .p_decode = (kxdrdproc_t)rpcb_dec_set, .p_arglen = RPCB_getaddrargs_sz, .p_replen = RPCB_setres_sz, .p_statidx = RPCBPROC_SET, @@ -914,8 +879,8 @@ static struct rpc_procinfo rpcb_procedures3[] = { }, [RPCBPROC_UNSET] = { .p_proc = RPCBPROC_UNSET, - .p_encode = (kxdrproc_t)rpcb_enc_getaddr, - .p_decode = (kxdrproc_t)rpcb_dec_set, + .p_encode = (kxdreproc_t)rpcb_enc_getaddr, + .p_decode = (kxdrdproc_t)rpcb_dec_set, .p_arglen = RPCB_getaddrargs_sz, .p_replen = RPCB_setres_sz, .p_statidx = RPCBPROC_UNSET, @@ -924,8 +889,8 @@ static struct rpc_procinfo rpcb_procedures3[] = { }, [RPCBPROC_GETADDR] = { .p_proc = RPCBPROC_GETADDR, - .p_encode = (kxdrproc_t)rpcb_enc_getaddr, - .p_decode = (kxdrproc_t)rpcb_dec_getaddr, + .p_encode = (kxdreproc_t)rpcb_enc_getaddr, + .p_decode = (kxdrdproc_t)rpcb_dec_getaddr, .p_arglen = RPCB_getaddrargs_sz, 
.p_replen = RPCB_getaddrres_sz, .p_statidx = RPCBPROC_GETADDR, @@ -937,8 +902,8 @@ static struct rpc_procinfo rpcb_procedures3[] = { static struct rpc_procinfo rpcb_procedures4[] = { [RPCBPROC_SET] = { .p_proc = RPCBPROC_SET, - .p_encode = (kxdrproc_t)rpcb_enc_getaddr, - .p_decode = (kxdrproc_t)rpcb_dec_set, + .p_encode = (kxdreproc_t)rpcb_enc_getaddr, + .p_decode = (kxdrdproc_t)rpcb_dec_set, .p_arglen = RPCB_getaddrargs_sz, .p_replen = RPCB_setres_sz, .p_statidx = RPCBPROC_SET, @@ -947,8 +912,8 @@ static struct rpc_procinfo rpcb_procedures4[] = { }, [RPCBPROC_UNSET] = { .p_proc = RPCBPROC_UNSET, - .p_encode = (kxdrproc_t)rpcb_enc_getaddr, - .p_decode = (kxdrproc_t)rpcb_dec_set, + .p_encode = (kxdreproc_t)rpcb_enc_getaddr, + .p_decode = (kxdrdproc_t)rpcb_dec_set, .p_arglen = RPCB_getaddrargs_sz, .p_replen = RPCB_setres_sz, .p_statidx = RPCBPROC_UNSET, @@ -957,8 +922,8 @@ static struct rpc_procinfo rpcb_procedures4[] = { }, [RPCBPROC_GETADDR] = { .p_proc = RPCBPROC_GETADDR, - .p_encode = (kxdrproc_t)rpcb_enc_getaddr, - .p_decode = (kxdrproc_t)rpcb_dec_getaddr, + .p_encode = (kxdreproc_t)rpcb_enc_getaddr, + .p_decode = (kxdrdproc_t)rpcb_dec_getaddr, .p_arglen = RPCB_getaddrargs_sz, .p_replen = RPCB_getaddrres_sz, .p_statidx = RPCBPROC_GETADDR, @@ -993,19 +958,19 @@ static struct rpcb_info rpcb_next_version6[] = { static struct rpc_version rpcb_version2 = { .number = RPCBVERS_2, - .nrprocs = RPCB_HIGHPROC_2, + .nrprocs = ARRAY_SIZE(rpcb_procedures2), .procs = rpcb_procedures2 }; static struct rpc_version rpcb_version3 = { .number = RPCBVERS_3, - .nrprocs = RPCB_HIGHPROC_3, + .nrprocs = ARRAY_SIZE(rpcb_procedures3), .procs = rpcb_procedures3 }; static struct rpc_version rpcb_version4 = { .number = RPCBVERS_4, - .nrprocs = RPCB_HIGHPROC_4, + .nrprocs = ARRAY_SIZE(rpcb_procedures4), .procs = rpcb_procedures4 }; diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 6359c42c4941..08e05a8ce025 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -488,10 +488,6 @@ svc_destroy(struct svc_serv *serv) if (svc_serv_is_pooled(serv)) svc_pool_map_put(); -#if defined(CONFIG_NFS_V4_1) - svc_sock_destroy(serv->bc_xprt); -#endif /* CONFIG_NFS_V4_1 */ - svc_unregister(serv); kfree(serv->sv_pools); kfree(serv); @@ -1005,6 +1001,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) rqstp->rq_splice_ok = 1; /* Will be turned off only when NFSv4 Sessions are used */ rqstp->rq_usedeferral = 1; + rqstp->rq_dropme = false; /* Setup reply header */ rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp); @@ -1106,7 +1103,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) *statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); /* Encode reply */ - if (*statp == rpc_drop_reply) { + if (rqstp->rq_dropme) { if (procp->pc_release) procp->pc_release(rqstp, NULL, rqstp->rq_resp); goto dropit; @@ -1147,7 +1144,6 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) dropit: svc_authorise(rqstp); /* doesn't hurt to call this twice */ dprintk("svc: svc_process dropit\n"); - svc_drop(rqstp); return 0; err_short_len: @@ -1218,7 +1214,6 @@ svc_process(struct svc_rqst *rqstp) struct kvec *resv = &rqstp->rq_res.head[0]; struct svc_serv *serv = rqstp->rq_server; u32 dir; - int error; /* * Setup response xdr_buf. 
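The switch from the RPCB_HIGHPROC_* macros to ARRAY_SIZE() in the rpcb_version tables above is worth spelling out: the old macros named the highest procedure number, not the number of table entries, so .nrprocs could overstate the table size and let a bogus procedure index read past the end of the array. ARRAY_SIZE() is the standard kernel macro; simplified (the real definition in include/linux/kernel.h adds a compile-time array-type check):

    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))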
@@ -1246,11 +1241,13 @@ svc_process(struct svc_rqst *rqstp) return 0; } - error = svc_process_common(rqstp, argv, resv); - if (error <= 0) - return error; - - return svc_send(rqstp); + /* Returns 1 for send, 0 for drop */ + if (svc_process_common(rqstp, argv, resv)) + return svc_send(rqstp); + else { + svc_drop(rqstp); + return 0; + } } #if defined(CONFIG_NFS_V4_1) @@ -1264,10 +1261,9 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req, { struct kvec *argv = &rqstp->rq_arg.head[0]; struct kvec *resv = &rqstp->rq_res.head[0]; - int error; /* Build the svc_rqst used by the common processing routine */ - rqstp->rq_xprt = serv->bc_xprt; + rqstp->rq_xprt = serv->sv_bc_xprt; rqstp->rq_xid = req->rq_xid; rqstp->rq_prot = req->rq_xprt->prot; rqstp->rq_server = serv; @@ -1292,12 +1288,15 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req, svc_getu32(argv); /* XID */ svc_getnl(argv); /* CALLDIR */ - error = svc_process_common(rqstp, argv, resv); - if (error <= 0) - return error; - - memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf)); - return bc_send(req); + /* Returns 1 for send, 0 for drop */ + if (svc_process_common(rqstp, argv, resv)) { + memcpy(&req->rq_snd_buf, &rqstp->rq_res, + sizeof(req->rq_snd_buf)); + return bc_send(req); + } else { + /* Nothing to do to drop request */ + return 0; + } } EXPORT_SYMBOL(bc_svc_process); #endif /* CONFIG_NFS_V4_1 */ diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 3f2c5559ca1a..ab86b7927f84 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -13,6 +13,7 @@ #include <linux/sunrpc/stats.h> #include <linux/sunrpc/svc_xprt.h> #include <linux/sunrpc/svcsock.h> +#include <linux/sunrpc/xprt.h> #define RPCDBG_FACILITY RPCDBG_SVCXPRT @@ -128,6 +129,9 @@ static void svc_xprt_free(struct kref *kref) if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) svcauth_unix_info_release(xprt); put_net(xprt->xpt_net); + /* See comment on corresponding get in xs_setup_bc_tcp(): */ + if (xprt->xpt_bc_xprt) + xprt_put(xprt->xpt_bc_xprt); xprt->xpt_ops->xpo_free(xprt); module_put(owner); } @@ -303,6 +307,15 @@ static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp) list_del(&rqstp->rq_list); } +static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt) +{ + if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE))) + return true; + if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED))) + return xprt->xpt_ops->xpo_has_wspace(xprt); + return false; +} + /* * Queue up a transport with data pending. If there are idle nfsd * processes, wake 'em up. 
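The xprt_put() added to svc_xprt_free() above is one half of a reference pairing; the other half is the xprt_get() in the xs_setup_bc_tcp() hunk near the end of this diff. The backchannel rpc_xprt is pinned to the server connection when it is first associated with it, and the pin is dropped only when the svc_xprt itself is freed. Schematically (condensed from the two hunks):

    /* Setup (xs_setup_bc_tcp): pin the rpc_xprt to the connection. */
    xprt_get(xprt);
    args->bc_xprt->xpt_bc_xprt = xprt;

    /* Teardown (svc_xprt_free): drop the pin with the connection. */
    if (xprt->xpt_bc_xprt)
            xprt_put(xprt->xpt_bc_xprt);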
@@ -315,8 +328,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) struct svc_rqst *rqstp; int cpu; - if (!(xprt->xpt_flags & - ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED)))) + if (!svc_xprt_has_something_to_do(xprt)) return; cpu = get_cpu(); @@ -343,28 +355,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) dprintk("svc: transport %p busy, not enqueued\n", xprt); goto out_unlock; } - BUG_ON(xprt->xpt_pool != NULL); - xprt->xpt_pool = pool; - - /* Handle pending connection */ - if (test_bit(XPT_CONN, &xprt->xpt_flags)) - goto process; - - /* Handle close in-progress */ - if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) - goto process; - - /* Check if we have space to reply to a request */ - if (!xprt->xpt_ops->xpo_has_wspace(xprt)) { - /* Don't enqueue while not enough space for reply */ - dprintk("svc: no write space, transport %p not enqueued\n", - xprt); - xprt->xpt_pool = NULL; - clear_bit(XPT_BUSY, &xprt->xpt_flags); - goto out_unlock; - } - process: if (!list_empty(&pool->sp_threads)) { rqstp = list_entry(pool->sp_threads.next, struct svc_rqst, @@ -381,13 +372,11 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) rqstp->rq_reserved = serv->sv_max_mesg; atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); pool->sp_stats.threads_woken++; - BUG_ON(xprt->xpt_pool != pool); wake_up(&rqstp->rq_wait); } else { dprintk("svc: transport %p put into queue\n", xprt); list_add_tail(&xprt->xpt_ready, &pool->sp_sockets); pool->sp_stats.sockets_queued++; - BUG_ON(xprt->xpt_pool != pool); } out_unlock: @@ -426,7 +415,6 @@ static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool) void svc_xprt_received(struct svc_xprt *xprt) { BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags)); - xprt->xpt_pool = NULL; /* As soon as we clear busy, the xprt could be closed and * 'put', so we need a reference to call svc_xprt_enqueue with: */ @@ -722,7 +710,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { dprintk("svc_recv: found XPT_CLOSE\n"); svc_delete_xprt(xprt); - } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { + /* Leave XPT_BUSY set on the dead xprt: */ + goto out; + } + if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { struct svc_xprt *newxpt; newxpt = xprt->xpt_ops->xpo_accept(xprt); if (newxpt) { @@ -747,28 +738,23 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) spin_unlock_bh(&serv->sv_lock); svc_xprt_received(newxpt); } - svc_xprt_received(xprt); - } else { + } else if (xprt->xpt_ops->xpo_has_wspace(xprt)) { dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", rqstp, pool->sp_id, xprt, atomic_read(&xprt->xpt_ref.refcount)); rqstp->rq_deferred = svc_deferred_dequeue(xprt); - if (rqstp->rq_deferred) { - svc_xprt_received(xprt); + if (rqstp->rq_deferred) len = svc_deferred_recv(rqstp); - } else { + else len = xprt->xpt_ops->xpo_recvfrom(rqstp); - svc_xprt_received(xprt); - } dprintk("svc: got len=%d\n", len); } + svc_xprt_received(xprt); /* No data, incomplete (TCP) read, or accept() */ - if (len == 0 || len == -EAGAIN) { - rqstp->rq_res.len = 0; - svc_xprt_release(rqstp); - return -EAGAIN; - } + if (len == 0 || len == -EAGAIN) + goto out; + clear_bit(XPT_OLD, &xprt->xpt_flags); rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp)); @@ -777,6 +763,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) if (serv->sv_stats) serv->sv_stats->netcnt++; return len; +out: + rqstp->rq_res.len = 0; + svc_xprt_release(rqstp); + return -EAGAIN; } EXPORT_SYMBOL_GPL(svc_recv); @@ -935,7 +925,12 @@ void svc_close_xprt(struct svc_xprt *xprt) 
if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) /* someone else will have to effect the close */ return; - + /* + * We expect svc_close_xprt() to work even when no threads are + * running (e.g., while configuring the server before starting + * any threads), so if the transport isn't busy, we delete + * it ourself: + */ svc_delete_xprt(xprt); } EXPORT_SYMBOL_GPL(svc_close_xprt); @@ -945,16 +940,16 @@ void svc_close_all(struct list_head *xprt_list) struct svc_xprt *xprt; struct svc_xprt *tmp; + /* + * The server is shutting down, and no more threads are running. + * svc_xprt_enqueue() might still be running, but at worst it + * will re-add the xprt to sp_sockets, which will soon get + * freed. So we don't bother with any more locking, and don't + * leave the close to the (nonexistent) server threads: + */ list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) { set_bit(XPT_CLOSE, &xprt->xpt_flags); - if (test_bit(XPT_BUSY, &xprt->xpt_flags)) { - /* Waiting to be processed, but no threads left, - * So just remove it from the waiting list - */ - list_del_init(&xprt->xpt_ready); - clear_bit(XPT_BUSY, &xprt->xpt_flags); - } - svc_close_xprt(xprt); + svc_delete_xprt(xprt); } } @@ -1028,6 +1023,7 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req) } svc_xprt_get(rqstp->rq_xprt); dr->xprt = rqstp->rq_xprt; + rqstp->rq_dropme = true; dr->handle.revisit = svc_revisit; return &dr->handle; @@ -1065,14 +1061,13 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt) if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags)) return NULL; spin_lock(&xprt->xpt_lock); - clear_bit(XPT_DEFERRED, &xprt->xpt_flags); if (!list_empty(&xprt->xpt_deferred)) { dr = list_entry(xprt->xpt_deferred.next, struct svc_deferred_req, handle.recent); list_del_init(&dr->handle.recent); - set_bit(XPT_DEFERRED, &xprt->xpt_flags); - } + } else + clear_bit(XPT_DEFERRED, &xprt->xpt_flags); spin_unlock(&xprt->xpt_lock); return dr; } diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c index 4e9393c24687..7963569fc04f 100644 --- a/net/sunrpc/svcauth.c +++ b/net/sunrpc/svcauth.c @@ -118,7 +118,6 @@ EXPORT_SYMBOL_GPL(svc_auth_unregister); #define DN_HASHBITS 6 #define DN_HASHMAX (1<<DN_HASHBITS) -#define DN_HASHMASK (DN_HASHMAX-1) static struct hlist_head auth_domain_table[DN_HASHMAX]; static spinlock_t auth_domain_lock = diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index 560677d187f1..30916b06c12b 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c @@ -30,7 +30,9 @@ struct unix_domain { struct auth_domain h; +#ifdef CONFIG_NFSD_DEPRECATED int addr_changes; +#endif /* CONFIG_NFSD_DEPRECATED */ /* other stuff later */ }; @@ -64,7 +66,9 @@ struct auth_domain *unix_domain_find(char *name) return NULL; } new->h.flavour = &svcauth_unix; +#ifdef CONFIG_NFSD_DEPRECATED new->addr_changes = 0; +#endif /* CONFIG_NFSD_DEPRECATED */ rv = auth_domain_lookup(name, &new->h); } } @@ -85,14 +89,15 @@ static void svcauth_unix_domain_release(struct auth_domain *dom) */ #define IP_HASHBITS 8 #define IP_HASHMAX (1<<IP_HASHBITS) -#define IP_HASHMASK (IP_HASHMAX-1) struct ip_map { struct cache_head h; char m_class[8]; /* e.g. 
"nfsd" */ struct in6_addr m_addr; struct unix_domain *m_client; +#ifdef CONFIG_NFSD_DEPRECATED int m_add_change; +#endif /* CONFIG_NFSD_DEPRECATED */ }; static void ip_map_put(struct kref *kref) @@ -146,7 +151,9 @@ static void update(struct cache_head *cnew, struct cache_head *citem) kref_get(&item->m_client->h.ref); new->m_client = item->m_client; +#ifdef CONFIG_NFSD_DEPRECATED new->m_add_change = item->m_add_change; +#endif /* CONFIG_NFSD_DEPRECATED */ } static struct cache_head *ip_map_alloc(void) { @@ -331,6 +338,7 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, ip.h.flags = 0; if (!udom) set_bit(CACHE_NEGATIVE, &ip.h.flags); +#ifdef CONFIG_NFSD_DEPRECATED else { ip.m_add_change = udom->addr_changes; /* if this is from the legacy set_client system call, @@ -339,6 +347,7 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, if (expiry == NEVER) ip.m_add_change++; } +#endif /* CONFIG_NFSD_DEPRECATED */ ip.h.expiry_time = expiry; ch = sunrpc_cache_update(cd, &ip.h, &ipm->h, hash_str(ipm->m_class, IP_HASHBITS) ^ @@ -358,6 +367,7 @@ static inline int ip_map_update(struct net *net, struct ip_map *ipm, return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry); } +#ifdef CONFIG_NFSD_DEPRECATED int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom) { struct unix_domain *udom; @@ -402,8 +412,7 @@ struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr) return NULL; if ((ipm->m_client->addr_changes - ipm->m_add_change) >0) { - if (test_and_set_bit(CACHE_NEGATIVE, &ipm->h.flags) == 0) - auth_domain_put(&ipm->m_client->h); + sunrpc_invalidate(&ipm->h, sn->ip_map_cache); rv = NULL; } else { rv = &ipm->m_client->h; @@ -413,6 +422,7 @@ struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr) return rv; } EXPORT_SYMBOL_GPL(auth_unix_lookup); +#endif /* CONFIG_NFSD_DEPRECATED */ void svcauth_unix_purge(void) { @@ -497,7 +507,6 @@ svcauth_unix_info_release(struct svc_xprt *xpt) */ #define GID_HASHBITS 8 #define GID_HASHMAX (1<<GID_HASHBITS) -#define GID_HASHMASK (GID_HASHMAX - 1) struct unix_gid { struct cache_head h; diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 07919e16be3e..7bd3bbba4710 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -66,6 +66,13 @@ static void svc_sock_free(struct svc_xprt *); static struct svc_xprt *svc_create_socket(struct svc_serv *, int, struct net *, struct sockaddr *, int, int); +#if defined(CONFIG_NFS_V4_1) +static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int, + struct net *, struct sockaddr *, + int, int); +static void svc_bc_sock_free(struct svc_xprt *xprt); +#endif /* CONFIG_NFS_V4_1 */ + #ifdef CONFIG_DEBUG_LOCK_ALLOC static struct lock_class_key svc_key[2]; static struct lock_class_key svc_slock_key[2]; @@ -324,19 +331,21 @@ int svc_sock_names(struct svc_serv *serv, char *buf, const size_t buflen, len = onelen; break; } - if (toclose && strcmp(toclose, buf + len) == 0) + if (toclose && strcmp(toclose, buf + len) == 0) { closesk = svsk; - else + svc_xprt_get(&closesk->sk_xprt); + } else len += onelen; } spin_unlock_bh(&serv->sv_lock); - if (closesk) + if (closesk) { /* Should unregister with portmap, but you cannot * unregister just one protocol... 
*/ svc_close_xprt(&closesk->sk_xprt); - else if (toclose) + svc_xprt_put(&closesk->sk_xprt); + } else if (toclose) return -ENOENT; return len; } @@ -985,15 +994,17 @@ static int svc_process_calldir(struct svc_sock *svsk, struct svc_rqst *rqstp, vec[0] = rqstp->rq_arg.head[0]; } else { /* REPLY */ - if (svsk->sk_bc_xprt) - req = xprt_lookup_rqst(svsk->sk_bc_xprt, xid); + struct rpc_xprt *bc_xprt = svsk->sk_xprt.xpt_bc_xprt; + + if (bc_xprt) + req = xprt_lookup_rqst(bc_xprt, xid); if (!req) { printk(KERN_NOTICE "%s: Got unrecognized reply: " - "calldir 0x%x sk_bc_xprt %p xid %08x\n", + "calldir 0x%x xpt_bc_xprt %p xid %08x\n", __func__, ntohl(calldir), - svsk->sk_bc_xprt, xid); + bc_xprt, xid); vec[0] = rqstp->rq_arg.head[0]; goto out; } @@ -1184,6 +1195,57 @@ static struct svc_xprt *svc_tcp_create(struct svc_serv *serv, return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags); } +#if defined(CONFIG_NFS_V4_1) +static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int, + struct net *, struct sockaddr *, + int, int); +static void svc_bc_sock_free(struct svc_xprt *xprt); + +static struct svc_xprt *svc_bc_tcp_create(struct svc_serv *serv, + struct net *net, + struct sockaddr *sa, int salen, + int flags) +{ + return svc_bc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags); +} + +static void svc_bc_tcp_sock_detach(struct svc_xprt *xprt) +{ +} + +static struct svc_xprt_ops svc_tcp_bc_ops = { + .xpo_create = svc_bc_tcp_create, + .xpo_detach = svc_bc_tcp_sock_detach, + .xpo_free = svc_bc_sock_free, + .xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr, +}; + +static struct svc_xprt_class svc_tcp_bc_class = { + .xcl_name = "tcp-bc", + .xcl_owner = THIS_MODULE, + .xcl_ops = &svc_tcp_bc_ops, + .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP, +}; + +static void svc_init_bc_xprt_sock(void) +{ + svc_reg_xprt_class(&svc_tcp_bc_class); +} + +static void svc_cleanup_bc_xprt_sock(void) +{ + svc_unreg_xprt_class(&svc_tcp_bc_class); +} +#else /* CONFIG_NFS_V4_1 */ +static void svc_init_bc_xprt_sock(void) +{ +} + +static void svc_cleanup_bc_xprt_sock(void) +{ +} +#endif /* CONFIG_NFS_V4_1 */ + static struct svc_xprt_ops svc_tcp_ops = { .xpo_create = svc_tcp_create, .xpo_recvfrom = svc_tcp_recvfrom, @@ -1207,12 +1269,14 @@ void svc_init_xprt_sock(void) { svc_reg_xprt_class(&svc_tcp_class); svc_reg_xprt_class(&svc_udp_class); + svc_init_bc_xprt_sock(); } void svc_cleanup_xprt_sock(void) { svc_unreg_xprt_class(&svc_tcp_class); svc_unreg_xprt_class(&svc_udp_class); + svc_cleanup_bc_xprt_sock(); } static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv) @@ -1509,41 +1573,45 @@ static void svc_sock_free(struct svc_xprt *xprt) kfree(svsk); } +#if defined(CONFIG_NFS_V4_1) /* - * Create a svc_xprt. - * - * For internal use only (e.g. nfsv4.1 backchannel). - * Callers should typically use the xpo_create() method. + * Create a back channel svc_xprt which shares the fore channel socket. 
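The replacement svc_bc_create_socket() just below reports failure with ERR_PTR() instead of NULL, matching the other xpo_create methods. For readers less familiar with the idiom, the error is encoded in the pointer value itself (helpers from include/linux/err.h); a caller-side sketch:

    xprt = svc_bc_create_socket(serv, protocol, net, sin, len, flags);
    if (IS_ERR(xprt))
            return PTR_ERR(xprt);   /* recover the negative errno */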
*/ -struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot) +static struct svc_xprt *svc_bc_create_socket(struct svc_serv *serv, + int protocol, + struct net *net, + struct sockaddr *sin, int len, + int flags) { struct svc_sock *svsk; - struct svc_xprt *xprt = NULL; + struct svc_xprt *xprt; + + if (protocol != IPPROTO_TCP) { + printk(KERN_WARNING "svc: only TCP sockets" + " supported on shared back channel\n"); + return ERR_PTR(-EINVAL); + } - dprintk("svc: %s\n", __func__); svsk = kzalloc(sizeof(*svsk), GFP_KERNEL); if (!svsk) - goto out; + return ERR_PTR(-ENOMEM); xprt = &svsk->sk_xprt; - if (prot == IPPROTO_TCP) - svc_xprt_init(&svc_tcp_class, xprt, serv); - else if (prot == IPPROTO_UDP) - svc_xprt_init(&svc_udp_class, xprt, serv); - else - BUG(); -out: - dprintk("svc: %s return %p\n", __func__, xprt); + svc_xprt_init(&svc_tcp_bc_class, xprt, serv); + + serv->sv_bc_xprt = xprt; + return xprt; } -EXPORT_SYMBOL_GPL(svc_sock_create); /* - * Destroy a svc_sock. + * Free a back channel svc_sock. */ -void svc_sock_destroy(struct svc_xprt *xprt) +static void svc_bc_sock_free(struct svc_xprt *xprt) { - if (xprt) + if (xprt) { + kfree(xprt->xpt_bc_sid); kfree(container_of(xprt, struct svc_sock, sk_xprt)); + } } -EXPORT_SYMBOL_GPL(svc_sock_destroy); +#endif /* CONFIG_NFS_V4_1 */ diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index cd9e841e7492..679cd674b81d 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -552,6 +552,74 @@ void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int b } EXPORT_SYMBOL_GPL(xdr_write_pages); +static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov, + __be32 *p, unsigned int len) +{ + if (len > iov->iov_len) + len = iov->iov_len; + if (p == NULL) + p = (__be32*)iov->iov_base; + xdr->p = p; + xdr->end = (__be32*)(iov->iov_base + len); + xdr->iov = iov; + xdr->page_ptr = NULL; +} + +static int xdr_set_page_base(struct xdr_stream *xdr, + unsigned int base, unsigned int len) +{ + unsigned int pgnr; + unsigned int maxlen; + unsigned int pgoff; + unsigned int pgend; + void *kaddr; + + maxlen = xdr->buf->page_len; + if (base >= maxlen) + return -EINVAL; + maxlen -= base; + if (len > maxlen) + len = maxlen; + + base += xdr->buf->page_base; + + pgnr = base >> PAGE_SHIFT; + xdr->page_ptr = &xdr->buf->pages[pgnr]; + kaddr = page_address(*xdr->page_ptr); + + pgoff = base & ~PAGE_MASK; + xdr->p = (__be32*)(kaddr + pgoff); + + pgend = pgoff + len; + if (pgend > PAGE_SIZE) + pgend = PAGE_SIZE; + xdr->end = (__be32*)(kaddr + pgend); + xdr->iov = NULL; + return 0; +} + +static void xdr_set_next_page(struct xdr_stream *xdr) +{ + unsigned int newbase; + + newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT; + newbase -= xdr->buf->page_base; + + if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0) + xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len); +} + +static bool xdr_set_next_buffer(struct xdr_stream *xdr) +{ + if (xdr->page_ptr != NULL) + xdr_set_next_page(xdr); + else if (xdr->iov == xdr->buf->head) { + if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0) + xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len); + } + return xdr->p != xdr->end; +} + /** * xdr_init_decode - Initialize an xdr_stream for decoding data. 
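The arithmetic in xdr_set_page_base() above deserves a worked example: a linear byte offset into the receive buffer's page array is split into a page number and an in-page offset with a shift and a mask. A standalone illustration (userspace C, hard-coding the common 4 KiB page size; in the kernel these constants come from asm/page.h):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long base = 5000;
            unsigned long pgnr = base >> PAGE_SHIFT;   /* page 1 */
            unsigned long pgoff = base & ~PAGE_MASK;   /* offset 904 */

            printf("page %lu, offset %lu\n", pgnr, pgoff);
            return 0;
    }

page_address(pages[pgnr]) + pgoff then yields the kernel address that xdr->p points at.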
* @xdr: pointer to xdr_stream struct @@ -560,41 +628,67 @@ EXPORT_SYMBOL_GPL(xdr_write_pages); */ void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p) { - struct kvec *iov = buf->head; - unsigned int len = iov->iov_len; - - if (len > buf->len) - len = buf->len; xdr->buf = buf; - xdr->iov = iov; - xdr->p = p; - xdr->end = (__be32 *)((char *)iov->iov_base + len); + xdr->scratch.iov_base = NULL; + xdr->scratch.iov_len = 0; + if (buf->head[0].iov_len != 0) + xdr_set_iov(xdr, buf->head, p, buf->len); + else if (buf->page_len != 0) + xdr_set_page_base(xdr, 0, buf->len); } EXPORT_SYMBOL_GPL(xdr_init_decode); -/** - * xdr_inline_peek - Allow read-ahead in the XDR data stream - * @xdr: pointer to xdr_stream struct - * @nbytes: number of bytes of data to decode - * - * Check if the input buffer is long enough to enable us to decode - * 'nbytes' more bytes of data starting at the current position. - * If so return the current pointer without updating the current - * pointer position. - */ -__be32 * xdr_inline_peek(struct xdr_stream *xdr, size_t nbytes) +static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes) { __be32 *p = xdr->p; __be32 *q = p + XDR_QUADLEN(nbytes); if (unlikely(q > xdr->end || q < p)) return NULL; + xdr->p = q; return p; } -EXPORT_SYMBOL_GPL(xdr_inline_peek); /** - * xdr_inline_decode - Retrieve non-page XDR data to decode + * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data. + * @xdr: pointer to xdr_stream struct + * @buf: pointer to an empty buffer + * @buflen: size of 'buf' + * + * The scratch buffer is used when decoding from an array of pages. + * If an xdr_inline_decode() call spans across page boundaries, then + * we copy the data into the scratch buffer in order to allow linear + * access. + */ +void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen) +{ + xdr->scratch.iov_base = buf; + xdr->scratch.iov_len = buflen; +} +EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer); + +static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes) +{ + __be32 *p; + void *cpdest = xdr->scratch.iov_base; + size_t cplen = (char *)xdr->end - (char *)xdr->p; + + if (nbytes > xdr->scratch.iov_len) + return NULL; + memcpy(cpdest, xdr->p, cplen); + cpdest += cplen; + nbytes -= cplen; + if (!xdr_set_next_buffer(xdr)) + return NULL; + p = __xdr_inline_decode(xdr, nbytes); + if (p == NULL) + return NULL; + memcpy(cpdest, p, nbytes); + return xdr->scratch.iov_base; +} + +/** + * xdr_inline_decode - Retrieve XDR data to decode * @xdr: pointer to xdr_stream struct * @nbytes: number of bytes of data to decode * @@ -605,13 +699,16 @@ EXPORT_SYMBOL_GPL(xdr_inline_peek); */ __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes) { - __be32 *p = xdr->p; - __be32 *q = p + XDR_QUADLEN(nbytes); + __be32 *p; - if (unlikely(q > xdr->end || q < p)) + if (nbytes == 0) + return xdr->p; + if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr)) return NULL; - xdr->p = q; - return p; + p = __xdr_inline_decode(xdr, nbytes); + if (p != NULL) + return p; + return xdr_copy_to_scratch(xdr, nbytes); } EXPORT_SYMBOL_GPL(xdr_inline_decode); @@ -671,16 +768,12 @@ EXPORT_SYMBOL_GPL(xdr_read_pages); */ void xdr_enter_page(struct xdr_stream *xdr, unsigned int len) { - char * kaddr = page_address(xdr->buf->pages[0]); xdr_read_pages(xdr, len); /* * Position current pointer at beginning of tail, and * set remaining message length. 
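The scratch-buffer machinery above lets xdr_inline_decode() hand back a linear pointer even when the requested object straddles a page boundary: the two halves are copied into caller-supplied scratch space and the scratch address is returned instead. A hypothetical decode path would use it like this (the 64-byte buffer is an arbitrary illustrative choice; it must cover the largest single inline read, and real callers often dedicate a page):

    struct xdr_stream xdr;
    char scratch[64];               /* caller-owned, valid while decoding */
    __be32 *p;

    xdr_init_decode(&xdr, &req->rq_rcv_buf, data);
    xdr_set_scratch_buffer(&xdr, scratch, sizeof(scratch));

    p = xdr_inline_decode(&xdr, 12);  /* linear view, even across pages */
    if (p == NULL)
            return -EIO;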
 */
-	if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
-		len = PAGE_CACHE_SIZE - xdr->buf->page_base;
-	xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
-	xdr->end = (__be32 *)((char *)xdr->p + len);
+	xdr_set_page_base(xdr, 0, len);
 }
 EXPORT_SYMBOL_GPL(xdr_enter_page);
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 4c8f18aff7c3..856274d7e85c 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -965,6 +965,7 @@ struct rpc_xprt *xprt_alloc(struct net *net, int size, int max_req)
 	xprt = kzalloc(size, GFP_KERNEL);
 	if (xprt == NULL)
 		goto out;
+	kref_init(&xprt->kref);
 
 	xprt->max_reqs = max_req;
 	xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL);
@@ -1101,8 +1102,10 @@ found:
 				-PTR_ERR(xprt));
 		return xprt;
 	}
+	if (test_and_set_bit(XPRT_INITIALIZED, &xprt->state))
+		/* ->setup returned a pre-initialized xprt: */
+		return xprt;
 
-	kref_init(&xprt->kref);
 	spin_lock_init(&xprt->transport_lock);
 	spin_lock_init(&xprt->reserve_lock);
 
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 96549df836ee..c431f5a57960 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2359,6 +2359,15 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 	struct svc_sock *bc_sock;
 	struct rpc_xprt *ret;
 
+	if (args->bc_xprt->xpt_bc_xprt) {
+		/*
+		 * This server connection already has a backchannel
+		 * export; we can't create a new one, as we wouldn't be
+		 * able to match replies based on xid any more.  So,
+		 * reuse the already-existing one:
+		 */
+		return args->bc_xprt->xpt_bc_xprt;
+	}
 	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
 	if (IS_ERR(xprt))
 		return xprt;
@@ -2375,16 +2384,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 	xprt->reestablish_timeout = 0;
 	xprt->idle_timeout = 0;
 
-	/*
-	 * The backchannel uses the same socket connection as the
-	 * forechannel
-	 */
-	xprt->bc_xprt = args->bc_xprt;
-	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
-	bc_sock->sk_bc_xprt = xprt;
-	transport->sock = bc_sock->sk_sock;
-	transport->inet = bc_sock->sk_sk;
-
 	xprt->ops = &bc_tcp_ops;
 
 	switch (addr->sa_family) {
@@ -2407,6 +2406,20 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 			xprt->address_strings[RPC_DISPLAY_PROTO]);
 
 	/*
+	 * Once we've associated a backchannel xprt with a connection,
+	 * we want to keep it around as long as the connection
+	 * lasts, in case we need to start using it for a backchannel
+	 * again; this reference won't be dropped until bc_xprt is
+	 * destroyed.
+	 */
+	xprt_get(xprt);
+	args->bc_xprt->xpt_bc_xprt = xprt;
+	xprt->bc_xprt = args->bc_xprt;
+	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
+	transport->sock = bc_sock->sk_sock;
+	transport->inet = bc_sock->sk_sk;
+
+	/*
 	 * Since we don't want connections for the backchannel, we set
 	 * the xprt status to connected
 	 */
@@ -2415,6 +2428,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 
 	if (try_module_get(THIS_MODULE))
 		return xprt;
+	xprt_put(xprt);
 	ret = ERR_PTR(-EINVAL);
 out_err:
 	xprt_free(xprt);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 8eb889510916..d5e1e0b08890 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -26,6 +26,7 @@
 #include <net/sock.h>
 #include <net/xfrm.h>
 #include <net/netlink.h>
+#include <net/ah.h>
 #include <asm/uaccess.h>
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 #include <linux/in6.h>
@@ -302,7 +303,8 @@ static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
 	algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
 	if (!algo)
 		return -ENOSYS;
-	if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
+	if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN ||
+	    ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
 		return -EINVAL;
 
 	*props = algo->desc.sadb_alg_id;
@@ -2187,7 +2189,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 
 	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
 	     type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
-	    (nlh->nlmsg_flags & NLM_F_DUMP)) {
+	    (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
 		if (link->dump == NULL)
 			return -EINVAL;
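Finally, the xfrm_user.c NLM_F_DUMP fix above is subtle enough to spell out: NLM_F_DUMP is not a single bit but the OR of NLM_F_ROOT and NLM_F_MATCH, so a plain mask test also fires when only one of the two bits is set. The fix tests for equality, requiring both (flag values from include/linux/netlink.h):

    #define NLM_F_ROOT  0x100                       /* specify tree root */
    #define NLM_F_MATCH 0x200                       /* return all matching */
    #define NLM_F_DUMP  (NLM_F_ROOT | NLM_F_MATCH)

    /* Wrong: also true for ROOT-only or MATCH-only requests. */
    if (nlh->nlmsg_flags & NLM_F_DUMP)
            handle_dump();

    /* Right: both bits must be present for a genuine dump request. */
    if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP)
            handle_dump();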