-rw-r--r--  Documentation/networking/xfrm_proc.txt  |  20
-rw-r--r--  include/linux/netdevice.h                |   8
-rw-r--r--  include/net/xfrm.h                       |  29
-rw-r--r--  net/core/dev.c                           |  19
-rw-r--r--  net/ipv4/esp4.c                          |  36
-rw-r--r--  net/ipv4/esp4_offload.c                  |  73
-rw-r--r--  net/ipv4/xfrm4_mode_tunnel.c             |   5
-rw-r--r--  net/ipv6/esp6.c                          |  36
-rw-r--r--  net/ipv6/esp6_offload.c                  |  80
-rw-r--r--  net/ipv6/xfrm6_mode_tunnel.c             |   5
-rw-r--r--  net/packet/af_packet.c                   |   3
-rw-r--r--  net/sched/sch_generic.c                  |  16
-rw-r--r--  net/xfrm/xfrm_device.c                   | 186
13 files changed, 326 insertions(+), 190 deletions(-)
diff --git a/Documentation/networking/xfrm_proc.txt b/Documentation/networking/xfrm_proc.txt
index d0d8bafa9016..2eae619ab67b 100644
--- a/Documentation/networking/xfrm_proc.txt
+++ b/Documentation/networking/xfrm_proc.txt
@@ -5,13 +5,15 @@ Masahide NAKAMURA <nakam@linux-ipv6.org>
Transformation Statistics
-------------------------
-xfrm_proc is a statistics shown factor dropped by transformation
-for developer.
-It is a counter designed from current transformation source code
-and defined like linux private MIB.
-Inbound statistics
-~~~~~~~~~~~~~~~~~~
+The xfrm_proc code is a set of statistics showing numbers of packets
+dropped by the transformation code and why. These counters are defined
+as part of the linux private MIB. These counters can be viewed in
+/proc/net/xfrm_stat.
+
+
+Inbound errors
+~~~~~~~~~~~~~~
XfrmInError:
All errors which is not matched others
XfrmInBufferError:
@@ -46,6 +48,10 @@ XfrmInPolBlock:
Policy discards
XfrmInPolError:
Policy error
+XfrmAcquireError:
+ State hasn't been fully acquired before use
+XfrmFwdHdrError:
+ Forward routing of a packet is not allowed
Outbound errors
~~~~~~~~~~~~~~~
@@ -72,3 +78,5 @@ XfrmOutPolDead:
Policy is dead
XfrmOutPolError:
Policy error
+XfrmOutStateInvalid:
+ State is invalid, perhaps expired
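All of the counters described above surface in /proc/net/xfrm_stat as one
"Name value" pair per line. A minimal userspace sketch for dumping them,
assuming that layout (not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		char name[64];
		unsigned long long val;
		FILE *f = fopen("/proc/net/xfrm_stat", "r");

		if (!f) {
			perror("/proc/net/xfrm_stat");
			return 1;
		}
		/* each line is "CounterName <value>" */
		while (fscanf(f, "%63s %llu", name, &val) == 2)
			printf("%-24s %llu\n", name, val);
		fclose(f);
		return 0;
	}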
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index cc4ce7456e38..352066e4eeef 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1726,7 +1726,7 @@ struct net_device {
const struct ndisc_ops *ndisc_ops;
#endif
-#ifdef CONFIG_XFRM
+#ifdef CONFIG_XFRM_OFFLOAD
const struct xfrmdev_ops *xfrmdev_ops;
#endif
@@ -2793,7 +2793,9 @@ struct softnet_data {
struct Qdisc *output_queue;
struct Qdisc **output_queue_tailp;
struct sk_buff *completion_queue;
-
+#ifdef CONFIG_XFRM_OFFLOAD
+ struct sk_buff_head xfrm_backlog;
+#endif
#ifdef CONFIG_RPS
/* input_queue_head should be written by cpu owning this struct,
* and only read by other cpus. Worth using a cache line.
@@ -3325,7 +3327,7 @@ int dev_get_phys_port_id(struct net_device *dev,
int dev_get_phys_port_name(struct net_device *dev,
char *name, size_t len);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
-struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
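The sk_buff_head added to softnet_data above gives every CPU a private
parking queue for packets whose transmission must resume after asynchronous
crypto. A condensed sketch of the enqueue side, mirroring xfrm_dev_resume()
in net/xfrm/xfrm_device.c further down (the helper name is hypothetical):

	#include <linux/interrupt.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Hypothetical helper; condensed from xfrm_dev_resume() below. */
	static void park_on_xfrm_backlog(struct sk_buff *skb)
	{
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = this_cpu_ptr(&softnet_data);
		skb_queue_tail(&sd->xfrm_backlog, skb);
		raise_softirq_irqoff(NET_TX_SOFTIRQ);	/* net_tx_action() drains */
		local_irq_restore(flags);
	}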
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 1ec0c4760646..079ea9455bcd 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1051,6 +1051,7 @@ struct xfrm_offload {
#define XFRM_GSO_SEGMENT 16
#define XFRM_GRO 32
#define XFRM_ESP_NO_TRAILER 64
+#define XFRM_DEV_RESUME 128
__u32 status;
#define CRYPTO_SUCCESS 1
@@ -1874,21 +1875,28 @@ static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
{
return skb->sp->xvec[skb->sp->len - 1];
}
+#endif
+
static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
{
+#ifdef CONFIG_XFRM
struct sec_path *sp = skb->sp;
if (!sp || !sp->olen || sp->len != sp->olen)
return NULL;
return &sp->ovec[sp->olen - 1];
-}
+#else
+ return NULL;
#endif
+}
void __net_init xfrm_dev_init(void);
#ifdef CONFIG_XFRM_OFFLOAD
-int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features);
+void xfrm_dev_resume(struct sk_buff *skb);
+void xfrm_dev_backlog(struct softnet_data *sd);
+struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
struct xfrm_user_offload *xuo);
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
@@ -1902,6 +1910,8 @@ static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
return false;
xdst = (struct xfrm_dst *) dst;
+ if (!x->xso.offload_handle && !xdst->child->xfrm)
+ return true;
if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) &&
!xdst->child->xfrm)
return true;
@@ -1923,15 +1933,24 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
struct net_device *dev = xso->dev;
if (dev && dev->xfrmdev_ops) {
- dev->xfrmdev_ops->xdo_dev_state_free(x);
+ if (dev->xfrmdev_ops->xdo_dev_state_free)
+ dev->xfrmdev_ops->xdo_dev_state_free(x);
xso->dev = NULL;
dev_put(dev);
}
}
#else
-static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+static inline void xfrm_dev_resume(struct sk_buff *skb)
{
- return 0;
+}
+
+static inline void xfrm_dev_backlog(struct softnet_data *sd)
+{
+}
+
+static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
+{
+ return skb;
}
static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo)
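Note the contract change: validate_xmit_xfrm() now consumes and returns the
skb rather than reporting an int. NULL means the packet was dropped or handed
to async crypto, and *again is set when this CPU's xfrm backlog still holds
packets that must go out first. A hedged caller sketch (wrapper name and
error mapping are hypothetical):

	/* Hypothetical wrapper showing the three possible outcomes. */
	static int xmit_prepare(struct sk_buff **pskb, netdev_features_t features)
	{
		bool again = false;

		*pskb = validate_xmit_xfrm(*pskb, features, &again);
		if (!*pskb)
			return -EINPROGRESS;	/* dropped, or async crypto owns it */
		if (again)
			return -EAGAIN;		/* backlog pending: requeue for later */
		return 0;			/* ready for dev_hard_start_xmit() */
	}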
diff --git a/net/core/dev.c b/net/core/dev.c
index 59ead3910ab7..2eb66c0d9cdb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3059,7 +3059,7 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
}
EXPORT_SYMBOL(skb_csum_hwoffload_help);
-static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
{
netdev_features_t features;
@@ -3083,9 +3083,6 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
__skb_linearize(skb))
goto out_kfree_skb;
- if (validate_xmit_xfrm(skb, features))
- goto out_kfree_skb;
-
/* If packet is not checksummed and device does not
* support checksumming for this protocol, complete
* checksumming here.
@@ -3102,6 +3099,8 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
}
}
+ skb = validate_xmit_xfrm(skb, features, again);
+
return skb;
out_kfree_skb:
@@ -3111,7 +3110,7 @@ out_null:
return NULL;
}
-struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
{
struct sk_buff *next, *head = NULL, *tail;
@@ -3122,7 +3121,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
/* in case skb wont be segmented, point to itself */
skb->prev = skb;
- skb = validate_xmit_skb(skb, dev);
+ skb = validate_xmit_skb(skb, dev, again);
if (!skb)
continue;
@@ -3449,6 +3448,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
struct netdev_queue *txq;
struct Qdisc *q;
int rc = -ENOMEM;
+ bool again = false;
skb_reset_mac_header(skb);
@@ -3510,7 +3510,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
XMIT_RECURSION_LIMIT))
goto recursion_alert;
- skb = validate_xmit_skb(skb, dev);
+ skb = validate_xmit_skb(skb, dev, &again);
if (!skb)
goto out;
@@ -4194,6 +4194,8 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
spin_unlock(root_lock);
}
}
+
+ xfrm_dev_backlog(sd);
}
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
@@ -8875,6 +8877,9 @@ static int __init net_dev_init(void)
skb_queue_head_init(&sd->input_pkt_queue);
skb_queue_head_init(&sd->process_queue);
+#ifdef CONFIG_XFRM_OFFLOAD
+ skb_queue_head_init(&sd->xfrm_backlog);
+#endif
INIT_LIST_HEAD(&sd->poll_list);
sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
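Two pieces cooperate here: net_dev_init() initializes the per-cpu
xfrm_backlog queue, and net_tx_action() drains it at the end of every TX
softirq. A condensed view of the softirq side (most of the function elided):

	/* Condensed from net_tx_action() as modified above. */
	static __latent_entropy void net_tx_action(struct softirq_action *h)
	{
		struct softnet_data *sd = this_cpu_ptr(&softnet_data);

		/* ... completion_queue freeing and output_queue qdisc runs ... */

		xfrm_dev_backlog(sd);	/* retry skbs parked by xfrm_dev_resume() */
	}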
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index d57aa64fa7c7..6f00e43120a8 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -121,14 +121,32 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
static void esp_output_done(struct crypto_async_request *base, int err)
{
struct sk_buff *skb = base->data;
+ struct xfrm_offload *xo = xfrm_offload(skb);
void *tmp;
- struct dst_entry *dst = skb_dst(skb);
- struct xfrm_state *x = dst->xfrm;
+ struct xfrm_state *x;
+
+ if (xo && (xo->flags & XFRM_DEV_RESUME))
+ x = skb->sp->xvec[skb->sp->len - 1];
+ else
+ x = skb_dst(skb)->xfrm;
tmp = ESP_SKB_CB(skb)->tmp;
esp_ssg_unref(x, tmp);
kfree(tmp);
- xfrm_output_resume(skb, err);
+
+ if (xo && (xo->flags & XFRM_DEV_RESUME)) {
+ if (err) {
+ XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+ kfree_skb(skb);
+ return;
+ }
+
+ skb_push(skb, skb->data - skb_mac_header(skb));
+ secpath_reset(skb);
+ xfrm_dev_resume(skb);
+ } else {
+ xfrm_output_resume(skb, err);
+ }
}
/* Move ESP header back into place. */
@@ -825,17 +843,13 @@ static int esp_init_aead(struct xfrm_state *x)
char aead_name[CRYPTO_MAX_ALG_NAME];
struct crypto_aead *aead;
int err;
- u32 mask = 0;
err = -ENAMETOOLONG;
if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
goto error;
- if (x->xso.offload_handle)
- mask |= CRYPTO_ALG_ASYNC;
-
- aead = crypto_alloc_aead(aead_name, 0, mask);
+ aead = crypto_alloc_aead(aead_name, 0, 0);
err = PTR_ERR(aead);
if (IS_ERR(aead))
goto error;
@@ -865,7 +879,6 @@ static int esp_init_authenc(struct xfrm_state *x)
char authenc_name[CRYPTO_MAX_ALG_NAME];
unsigned int keylen;
int err;
- u32 mask = 0;
err = -EINVAL;
if (!x->ealg)
@@ -891,10 +904,7 @@ static int esp_init_authenc(struct xfrm_state *x)
goto error;
}
- if (x->xso.offload_handle)
- mask |= CRYPTO_ALG_ASYNC;
-
- aead = crypto_alloc_aead(authenc_name, 0, mask);
+ aead = crypto_alloc_aead(authenc_name, 0, 0);
err = PTR_ERR(aead);
if (IS_ERR(aead))
goto error;
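Dropping the CRYPTO_ALG_ASYNC mask means an offloaded state can now be
backed by a genuinely asynchronous cipher, which is why esp_output_done()
above learns the XFRM_DEV_RESUME path. A condensed sketch of the request
setup this relies on (the real code lives in esp_output_tail()):

	/* Hypothetical helper condensed from the ESP output path. */
	static int esp_issue_encrypt(struct aead_request *req, struct sk_buff *skb)
	{
		/* esp_output_done() runs from softirq once the cipher finishes */
		aead_request_set_callback(req, 0, esp_output_done, skb);

		return crypto_aead_encrypt(req);	/* may return -EINPROGRESS */
	}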
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index f8b918c766b0..c359f3cfeec3 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -108,75 +108,36 @@ static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
- __u32 seq;
- int err = 0;
- struct sk_buff *skb2;
struct xfrm_state *x;
struct ip_esp_hdr *esph;
struct crypto_aead *aead;
- struct sk_buff *segs = ERR_PTR(-EINVAL);
netdev_features_t esp_features = features;
struct xfrm_offload *xo = xfrm_offload(skb);
if (!xo)
- goto out;
-
- seq = xo->seq.low;
+ return ERR_PTR(-EINVAL);
x = skb->sp->xvec[skb->sp->len - 1];
aead = x->data;
esph = ip_esp_hdr(skb);
if (esph->spi != x->id.spi)
- goto out;
+ return ERR_PTR(-EINVAL);
if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
- goto out;
+ return ERR_PTR(-EINVAL);
__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
skb->encap_hdr_csum = 1;
- if (!(features & NETIF_F_HW_ESP))
+ if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
+ (x->xso.dev != skb->dev))
esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
- segs = x->outer_mode->gso_segment(x, skb, esp_features);
- if (IS_ERR_OR_NULL(segs))
- goto out;
-
- __skb_pull(skb, skb->data - skb_mac_header(skb));
-
- skb2 = segs;
- do {
- struct sk_buff *nskb = skb2->next;
-
- xo = xfrm_offload(skb2);
- xo->flags |= XFRM_GSO_SEGMENT;
- xo->seq.low = seq;
- xo->seq.hi = xfrm_replay_seqhi(x, seq);
+ xo->flags |= XFRM_GSO_SEGMENT;
- if(!(features & NETIF_F_HW_ESP))
- xo->flags |= CRYPTO_FALLBACK;
-
- x->outer_mode->xmit(x, skb2);
-
- err = x->type_offload->xmit(x, skb2, esp_features);
- if (err) {
- kfree_skb_list(segs);
- return ERR_PTR(err);
- }
-
- if (!skb_is_gso(skb2))
- seq++;
- else
- seq += skb_shinfo(skb2)->gso_segs;
-
- skb_push(skb2, skb2->mac_len);
- skb2 = nskb;
- } while (skb2);
-
-out:
- return segs;
+ return x->outer_mode->gso_segment(x, skb, esp_features);
}
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
@@ -203,6 +164,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
struct crypto_aead *aead;
struct esp_info esp;
bool hw_offload = true;
+ __u32 seq;
esp.inplace = true;
@@ -241,23 +203,30 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
return esp.nfrags;
}
+ seq = xo->seq.low;
+
esph = esp.esph;
esph->spi = x->id.spi;
skb_push(skb, -skb_network_offset(skb));
if (xo->flags & XFRM_GSO_SEGMENT) {
- esph->seq_no = htonl(xo->seq.low);
- } else {
- ip_hdr(skb)->tot_len = htons(skb->len);
- ip_send_check(ip_hdr(skb));
+ esph->seq_no = htonl(seq);
+
+ if (!skb_is_gso(skb))
+ xo->seq.low++;
+ else
+ xo->seq.low += skb_shinfo(skb)->gso_segs;
}
+ esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));
+
+ ip_hdr(skb)->tot_len = htons(skb->len);
+ ip_send_check(ip_hdr(skb));
+
if (hw_offload)
return 0;
- esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
-
err = esp_output_tail(x, skb, &esp);
if (err)
return err;
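The per-segment loop that esp4_gso_segment() used to run is gone, so the
sequence accounting moves here: the whole GSO train is stamped with the
current number and the state advances by gso_segs. A worked example of the
lines above, assuming xo->seq.low starts at 100 and the train carries three
segments:

	/* Annotated excerpt with assumed values. */
	seq = xo->seq.low;				/* 100, stamped below */
	esph->seq_no = htonl(seq);			/* train uses seq 100 */
	if (!skb_is_gso(skb))
		xo->seq.low++;				/* single packet: 101 */
	else
		xo->seq.low += skb_shinfo(skb)->gso_segs; /* three segments: 103 */
	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32)); /* 64-bit ESN */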
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index 7d885a44dc9d..8affc6d83d58 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -105,18 +105,15 @@ static struct sk_buff *xfrm4_mode_tunnel_gso_segment(struct xfrm_state *x,
{
__skb_push(skb, skb->mac_len);
return skb_mac_gso_segment(skb, features);
-
}
static void xfrm4_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb)
{
struct xfrm_offload *xo = xfrm_offload(skb);
- if (xo->flags & XFRM_GSO_SEGMENT) {
- skb->network_header = skb->network_header - x->props.header_len;
+ if (xo->flags & XFRM_GSO_SEGMENT)
skb->transport_header = skb->network_header +
sizeof(struct iphdr);
- }
skb_reset_mac_len(skb);
pskb_pull(skb, skb->mac_len + x->props.header_len);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index a902ff8f59be..7c888c6e53a9 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -141,14 +141,32 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
static void esp_output_done(struct crypto_async_request *base, int err)
{
struct sk_buff *skb = base->data;
+ struct xfrm_offload *xo = xfrm_offload(skb);
void *tmp;
- struct dst_entry *dst = skb_dst(skb);
- struct xfrm_state *x = dst->xfrm;
+ struct xfrm_state *x;
+
+ if (xo && (xo->flags & XFRM_DEV_RESUME))
+ x = skb->sp->xvec[skb->sp->len - 1];
+ else
+ x = skb_dst(skb)->xfrm;
tmp = ESP_SKB_CB(skb)->tmp;
esp_ssg_unref(x, tmp);
kfree(tmp);
- xfrm_output_resume(skb, err);
+
+ if (xo && (xo->flags & XFRM_DEV_RESUME)) {
+ if (err) {
+ XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+ kfree_skb(skb);
+ return;
+ }
+
+ skb_push(skb, skb->data - skb_mac_header(skb));
+ secpath_reset(skb);
+ xfrm_dev_resume(skb);
+ } else {
+ xfrm_output_resume(skb, err);
+ }
}
/* Move ESP header back into place. */
@@ -734,17 +752,13 @@ static int esp_init_aead(struct xfrm_state *x)
char aead_name[CRYPTO_MAX_ALG_NAME];
struct crypto_aead *aead;
int err;
- u32 mask = 0;
err = -ENAMETOOLONG;
if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
goto error;
- if (x->xso.offload_handle)
- mask |= CRYPTO_ALG_ASYNC;
-
- aead = crypto_alloc_aead(aead_name, 0, mask);
+ aead = crypto_alloc_aead(aead_name, 0, 0);
err = PTR_ERR(aead);
if (IS_ERR(aead))
goto error;
@@ -774,7 +788,6 @@ static int esp_init_authenc(struct xfrm_state *x)
char authenc_name[CRYPTO_MAX_ALG_NAME];
unsigned int keylen;
int err;
- u32 mask = 0;
err = -EINVAL;
if (!x->ealg)
@@ -800,10 +813,7 @@ static int esp_init_authenc(struct xfrm_state *x)
goto error;
}
- if (x->xso.offload_handle)
- mask |= CRYPTO_ALG_ASYNC;
-
- aead = crypto_alloc_aead(authenc_name, 0, mask);
+ aead = crypto_alloc_aead(authenc_name, 0, 0);
err = PTR_ERR(aead);
if (IS_ERR(aead))
goto error;
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index 333a478aa161..0bb7d54cf2cb 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -135,75 +135,36 @@ static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
- __u32 seq;
- int err = 0;
- struct sk_buff *skb2;
struct xfrm_state *x;
struct ip_esp_hdr *esph;
struct crypto_aead *aead;
- struct sk_buff *segs = ERR_PTR(-EINVAL);
netdev_features_t esp_features = features;
struct xfrm_offload *xo = xfrm_offload(skb);
if (!xo)
- goto out;
-
- seq = xo->seq.low;
+ return ERR_PTR(-EINVAL);
x = skb->sp->xvec[skb->sp->len - 1];
aead = x->data;
esph = ip_esp_hdr(skb);
if (esph->spi != x->id.spi)
- goto out;
+ return ERR_PTR(-EINVAL);
if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
- goto out;
+ return ERR_PTR(-EINVAL);
__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
skb->encap_hdr_csum = 1;
- if (!(features & NETIF_F_HW_ESP))
+ if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
+ (x->xso.dev != skb->dev))
esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
- segs = x->outer_mode->gso_segment(x, skb, esp_features);
- if (IS_ERR_OR_NULL(segs))
- goto out;
-
- __skb_pull(skb, skb->data - skb_mac_header(skb));
-
- skb2 = segs;
- do {
- struct sk_buff *nskb = skb2->next;
-
- xo = xfrm_offload(skb2);
- xo->flags |= XFRM_GSO_SEGMENT;
- xo->seq.low = seq;
- xo->seq.hi = xfrm_replay_seqhi(x, seq);
-
- if(!(features & NETIF_F_HW_ESP))
- xo->flags |= CRYPTO_FALLBACK;
-
- x->outer_mode->xmit(x, skb2);
-
- err = x->type_offload->xmit(x, skb2, esp_features);
- if (err) {
- kfree_skb_list(segs);
- return ERR_PTR(err);
- }
-
- if (!skb_is_gso(skb2))
- seq++;
- else
- seq += skb_shinfo(skb2)->gso_segs;
-
- skb_push(skb2, skb2->mac_len);
- skb2 = nskb;
- } while (skb2);
+ xo->flags |= XFRM_GSO_SEGMENT;
-out:
- return segs;
+ return x->outer_mode->gso_segment(x, skb, esp_features);
}
static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
@@ -222,6 +183,7 @@ static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
+ int len;
int err;
int alen;
int blksize;
@@ -230,6 +192,7 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
struct crypto_aead *aead;
struct esp_info esp;
bool hw_offload = true;
+ __u32 seq;
esp.inplace = true;
@@ -265,28 +228,33 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
return esp.nfrags;
}
+ seq = xo->seq.low;
+
esph = ip_esp_hdr(skb);
esph->spi = x->id.spi;
skb_push(skb, -skb_network_offset(skb));
if (xo->flags & XFRM_GSO_SEGMENT) {
- esph->seq_no = htonl(xo->seq.low);
- } else {
- int len;
-
- len = skb->len - sizeof(struct ipv6hdr);
- if (len > IPV6_MAXPLEN)
- len = 0;
+ esph->seq_no = htonl(seq);
- ipv6_hdr(skb)->payload_len = htons(len);
+ if (!skb_is_gso(skb))
+ xo->seq.low++;
+ else
+ xo->seq.low += skb_shinfo(skb)->gso_segs;
}
+ esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
+
+ len = skb->len - sizeof(struct ipv6hdr);
+ if (len > IPV6_MAXPLEN)
+ len = 0;
+
+ ipv6_hdr(skb)->payload_len = htons(len);
+
if (hw_offload)
return 0;
- esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
-
err = esp6_output_tail(x, skb, &esp);
if (err)
return err;
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index e66b94f46532..4e12859bc2ee 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -105,17 +105,14 @@ static struct sk_buff *xfrm6_mode_tunnel_gso_segment(struct xfrm_state *x,
{
__skb_push(skb, skb->mac_len);
return skb_mac_gso_segment(skb, features);
-
}
static void xfrm6_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb)
{
struct xfrm_offload *xo = xfrm_offload(skb);
- if (xo->flags & XFRM_GSO_SEGMENT) {
- skb->network_header = skb->network_header - x->props.header_len;
+ if (xo->flags & XFRM_GSO_SEGMENT)
skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
- }
skb_reset_mac_len(skb);
pskb_pull(skb, skb->mac_len + x->props.header_len);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index da215e5c1399..ee7aa0ba3a67 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -247,12 +247,13 @@ static int packet_direct_xmit(struct sk_buff *skb)
struct sk_buff *orig_skb = skb;
struct netdev_queue *txq;
int ret = NETDEV_TX_BUSY;
+ bool again = false;
if (unlikely(!netif_running(dev) ||
!netif_carrier_ok(dev)))
goto drop;
- skb = validate_xmit_skb_list(skb, dev);
+ skb = validate_xmit_skb_list(skb, dev, &again);
if (skb != orig_skb)
goto drop;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 10aaa3b615ce..28b2a7964133 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -32,6 +32,7 @@
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <trace/events/qdisc.h>
+#include <net/xfrm.h>
/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
@@ -230,6 +231,8 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
/* skb in gso_skb were already validated */
*validate = false;
+ if (xfrm_offload(skb))
+ *validate = true;
/* check the reason of requeuing without tx lock first */
txq = skb_get_tx_queue(txq->dev, skb);
if (!netif_xmit_frozen_or_stopped(txq)) {
@@ -285,6 +288,7 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
spinlock_t *root_lock, bool validate)
{
int ret = NETDEV_TX_BUSY;
+ bool again = false;
/* And release qdisc */
if (root_lock)
@@ -292,7 +296,17 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
/* Note that we validate skb (GSO, checksum, ...) outside of locks */
if (validate)
- skb = validate_xmit_skb_list(skb, dev);
+ skb = validate_xmit_skb_list(skb, dev, &again);
+
+#ifdef CONFIG_XFRM_OFFLOAD
+ if (unlikely(again)) {
+ if (root_lock)
+ spin_lock(root_lock);
+
+ dev_requeue_skb(skb, q);
+ return false;
+ }
+#endif
if (likely(skb)) {
HARD_TX_LOCK(dev, txq, smp_processor_id());
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 00641b611aed..75982506617b 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -23,32 +23,114 @@
#include <linux/notifier.h>
#ifdef CONFIG_XFRM_OFFLOAD
-int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
int err;
+ unsigned long flags;
struct xfrm_state *x;
+ struct sk_buff *skb2;
+ struct softnet_data *sd;
+ netdev_features_t esp_features = features;
struct xfrm_offload *xo = xfrm_offload(skb);
- if (skb_is_gso(skb))
- return 0;
+ if (!xo)
+ return skb;
- if (xo) {
- x = skb->sp->xvec[skb->sp->len - 1];
- if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
- return 0;
+ if (!(features & NETIF_F_HW_ESP))
+ esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+
+ x = skb->sp->xvec[skb->sp->len - 1];
+ if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
+ return skb;
+
+ local_irq_save(flags);
+ sd = this_cpu_ptr(&softnet_data);
+ err = !skb_queue_empty(&sd->xfrm_backlog);
+ local_irq_restore(flags);
+ if (err) {
+ *again = true;
+ return skb;
+ }
+
+ if (skb_is_gso(skb)) {
+ struct net_device *dev = skb->dev;
+
+ if (unlikely(!x->xso.offload_handle || (x->xso.dev != dev))) {
+ struct sk_buff *segs;
+
+ /* Packet got rerouted, fixup features and segment it. */
+ esp_features = esp_features & ~(NETIF_F_HW_ESP
+ | NETIF_F_GSO_ESP);
+
+ segs = skb_gso_segment(skb, esp_features);
+ if (IS_ERR(segs)) {
+ kfree_skb(skb);
+ atomic_long_inc(&dev->tx_dropped);
+ return NULL;
+ } else {
+ consume_skb(skb);
+ skb = segs;
+ }
+ }
+ }
+
+ if (!skb->next) {
x->outer_mode->xmit(x, skb);
- err = x->type_offload->xmit(x, skb, features);
+ xo->flags |= XFRM_DEV_RESUME;
+
+ err = x->type_offload->xmit(x, skb, esp_features);
if (err) {
+ if (err == -EINPROGRESS)
+ return NULL;
+
XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
- return err;
+ kfree_skb(skb);
+ return NULL;
}
skb_push(skb, skb->data - skb_mac_header(skb));
+
+ return skb;
}
- return 0;
+ skb2 = skb;
+
+ do {
+ struct sk_buff *nskb = skb2->next;
+ skb2->next = NULL;
+
+ xo = xfrm_offload(skb2);
+ xo->flags |= XFRM_DEV_RESUME;
+
+ x->outer_mode->xmit(x, skb2);
+
+ err = x->type_offload->xmit(x, skb2, esp_features);
+ if (!err) {
+ skb2->next = nskb;
+ } else if (err != -EINPROGRESS) {
+ XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+ skb2->next = nskb;
+ kfree_skb_list(skb2);
+ return NULL;
+ } else {
+ if (skb == skb2)
+ skb = nskb;
+
+ if (!skb)
+ return NULL;
+
+ goto skip_push;
+ }
+
+ skb_push(skb2, skb2->data - skb_mac_header(skb2));
+
+skip_push:
+ skb2 = nskb;
+ } while (skb2);
+
+ return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
@@ -120,8 +202,8 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
if (!x->type_offload || x->encap)
return false;
- if ((x->xso.offload_handle && (dev == xfrm_dst_path(dst)->dev)) &&
- !xdst->child->xfrm && x->type->get_mtu) {
+ if ((!dev || (x->xso.offload_handle && (dev == xfrm_dst_path(dst)->dev))) &&
+ (!xdst->child->xfrm && x->type->get_mtu)) {
mtu = x->type->get_mtu(x, xdst->child_mtu_cached);
if (skb->len <= mtu)
@@ -140,19 +222,82 @@ ok:
return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
+
+void xfrm_dev_resume(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ int ret = NETDEV_TX_BUSY;
+ struct netdev_queue *txq;
+ struct softnet_data *sd;
+ unsigned long flags;
+
+ rcu_read_lock();
+ txq = netdev_pick_tx(dev, skb, NULL);
+
+ HARD_TX_LOCK(dev, txq, smp_processor_id());
+ if (!netif_xmit_frozen_or_stopped(txq))
+ skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+ HARD_TX_UNLOCK(dev, txq);
+
+ if (!dev_xmit_complete(ret)) {
+ local_irq_save(flags);
+ sd = this_cpu_ptr(&softnet_data);
+ skb_queue_tail(&sd->xfrm_backlog, skb);
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(xfrm_dev_resume);
+
+void xfrm_dev_backlog(struct softnet_data *sd)
+{
+ struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
+ struct sk_buff_head list;
+ struct sk_buff *skb;
+
+ if (skb_queue_empty(xfrm_backlog))
+ return;
+
+ __skb_queue_head_init(&list);
+
+ spin_lock(&xfrm_backlog->lock);
+ skb_queue_splice_init(xfrm_backlog, &list);
+ spin_unlock(&xfrm_backlog->lock);
+
+ while (!skb_queue_empty(&list)) {
+ skb = __skb_dequeue(&list);
+ xfrm_dev_resume(skb);
+ }
+
+}
#endif
-static int xfrm_dev_register(struct net_device *dev)
+static int xfrm_api_check(struct net_device *dev)
{
- if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
- return NOTIFY_BAD;
+#ifdef CONFIG_XFRM_OFFLOAD
if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
!(dev->features & NETIF_F_HW_ESP))
return NOTIFY_BAD;
+ if ((dev->features & NETIF_F_HW_ESP) &&
+ (!(dev->xfrmdev_ops &&
+ dev->xfrmdev_ops->xdo_dev_state_add &&
+ dev->xfrmdev_ops->xdo_dev_state_delete)))
+ return NOTIFY_BAD;
+#else
+ if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
+ return NOTIFY_BAD;
+#endif
+
return NOTIFY_DONE;
}
+static int xfrm_dev_register(struct net_device *dev)
+{
+ return xfrm_api_check(dev);
+}
+
static int xfrm_dev_unregister(struct net_device *dev)
{
xfrm_policy_cache_flush();
@@ -161,16 +306,7 @@ static int xfrm_dev_unregister(struct net_device *dev)
static int xfrm_dev_feat_change(struct net_device *dev)
{
- if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
- return NOTIFY_BAD;
- else if (!(dev->features & NETIF_F_HW_ESP))
- dev->xfrmdev_ops = NULL;
-
- if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
- !(dev->features & NETIF_F_HW_ESP))
- return NOTIFY_BAD;
-
- return NOTIFY_DONE;
+ return xfrm_api_check(dev);
}
static int xfrm_dev_down(struct net_device *dev)
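For driver authors, the consolidated xfrm_api_check() makes the minimum
surface explicit: a device advertising NETIF_F_HW_ESP must provide at least
xdo_dev_state_add and xdo_dev_state_delete, while xdo_dev_state_free becomes
optional (see the guarded call in xfrm_dev_state_free() above). A hedged
registration sketch, with all foo_* names hypothetical:

	static const struct xfrmdev_ops foo_xfrmdev_ops = {
		.xdo_dev_state_add	= foo_xfrm_add_state,	/* required */
		.xdo_dev_state_delete	= foo_xfrm_del_state,	/* required */
		.xdo_dev_state_free	= foo_xfrm_free_state,	/* now optional */
		.xdo_dev_offload_ok	= foo_xfrm_offload_ok,
	};

	static void foo_setup_xfrm(struct net_device *dev)
	{
		dev->xfrmdev_ops = &foo_xfrmdev_ops;
		dev->features    |= NETIF_F_HW_ESP;
		dev->hw_features |= NETIF_F_HW_ESP;
	}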