author     Florian Westphal <fw@strlen.de>      2011-01-18 15:27:28 +0100
committer  Patrick McHardy <kaber@trash.net>    2011-01-18 15:27:28 +0100
commit     f15850861860636c905b33a9a5be3dcbc2b0d56a
tree       463d73647de2a43138bdd8259c259137a3bb3e3b   /net/netfilter/nf_queue.c
parent     5f2cafe73671d865af88494159f3e8c1b322e1c5
netfilter: nfnetlink_queue: return error number to caller
instead of returning -1 on error, return an error number to allow the
caller to handle some errors differently.
ECANCELED is used to indicate that the hook is going away and should be
ignored.
A followup patch will introduce more 'ignore this hook' conditions
(depending on queue settings) and will move kfree_skb responsibility
to the caller.
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Patrick McHardy <kaber@trash.net>
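
For context, the nf_reinject() hunk below shows how the new return value is meant
to be consumed: -ECANCELED means the hook has vanished and traversal should simply
continue. A minimal caller sketch of that convention follows; apart from __nf_queue
and the error codes, the surrounding names and labels are invented for illustration
and are not part of this patch.

        /* Illustrative only: in this patch __nf_queue() still frees the skb on
         * failure, so a caller merely decides whether to keep walking the chain.
         */
        err = __nf_queue(skb, elem, pf, hook, indev, outdev, okfn, queuenum);
        if (err == -ECANCELED)
                goto next_hook;         /* hook is going away: ignore it */
        if (err < 0)
                return err;             /* real failure, e.g. -ENOMEM or -ENOENT */
        /* err == 0: the packet was queued successfully */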
Diffstat (limited to 'net/netfilter/nf_queue.c')
-rw-r--r--   net/netfilter/nf_queue.c   44
1 file changed, 31 insertions(+), 13 deletions(-)
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 1876f7411561..ad25c7e726bc 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -125,7 +125,7 @@ static int __nf_queue(struct sk_buff *skb,
                      int (*okfn)(struct sk_buff *),
                      unsigned int queuenum)
 {
-       int status;
+       int status = -ENOENT;
        struct nf_queue_entry *entry = NULL;
 #ifdef CONFIG_BRIDGE_NETFILTER
        struct net_device *physindev;
@@ -146,8 +146,10 @@ static int __nf_queue(struct sk_buff *skb,
                goto err_unlock;
 
        entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
-       if (!entry)
+       if (!entry) {
+               status = -ENOMEM;
                goto err_unlock;
+       }
 
        *entry = (struct nf_queue_entry) {
                .skb    = skb,
@@ -163,9 +165,8 @@ static int __nf_queue(struct sk_buff *skb,
        if (!try_module_get(entry->elem->owner)) {
                rcu_read_unlock();
                kfree(entry);
-               return 0;
+               return -ECANCELED;
        }
-
        /* Bump dev refs so they don't vanish while packet is out */
        if (indev)
                dev_hold(indev);
@@ -192,14 +193,14 @@ static int __nf_queue(struct sk_buff *skb,
                goto err;
        }
 
-       return 1;
+       return 0;
 
 err_unlock:
        rcu_read_unlock();
 err:
        kfree_skb(skb);
        kfree(entry);
-       return 1;
+       return status;
 }
 
 int nf_queue(struct sk_buff *skb,
@@ -211,6 +212,8 @@ int nf_queue(struct sk_buff *skb,
             unsigned int queuenum)
 {
        struct sk_buff *segs;
+       int err;
+       unsigned int queued;
 
        if (!skb_is_gso(skb))
                return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
@@ -227,19 +230,32 @@ int nf_queue(struct sk_buff *skb,
 
        segs = skb_gso_segment(skb, 0);
        kfree_skb(skb);
+       /* Does not use PTR_ERR to limit the number of error codes that can be
+        * returned by nf_queue. For instance, callers rely on -ECANCELED to mean
+        * 'ignore this hook'.
+        */
        if (IS_ERR(segs))
-               return 1;
+               return -EINVAL;
 
+       queued = 0;
+       err = 0;
        do {
                struct sk_buff *nskb = segs->next;
 
                segs->next = NULL;
-               if (!__nf_queue(segs, elem, pf, hook, indev, outdev, okfn,
-                               queuenum))
+               if (err == 0)
+                       err = __nf_queue(segs, elem, pf, hook, indev,
+                                        outdev, okfn, queuenum);
+               if (err == 0)
+                       queued++;
+               else
                        kfree_skb(segs);
                segs = nskb;
        } while (segs);
-       return 1;
+
+       if (unlikely(err && queued))
+               err = 0;
+       return err;
 }
 
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
@@ -247,6 +263,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
        struct sk_buff *skb = entry->skb;
        struct list_head *elem = &entry->elem->list;
        const struct nf_afinfo *afinfo;
+       int err;
 
        rcu_read_lock();
 
@@ -280,9 +297,10 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
                local_bh_enable();
                break;
        case NF_QUEUE:
-               if (!__nf_queue(skb, elem, entry->pf, entry->hook,
-                               entry->indev, entry->outdev, entry->okfn,
-                               verdict >> NF_VERDICT_BITS))
+               err = __nf_queue(skb, elem, entry->pf, entry->hook,
+                                entry->indev, entry->outdev, entry->okfn,
+                                verdict >> NF_VERDICT_BITS);
+               if (err == -ECANCELED)
                        goto next_hook;
                break;
        case NF_STOLEN:
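
The GSO path in nf_queue() above queues each segment in turn; the first failure
stops further queueing, but if at least one segment was already handed off the
whole call still reports success, since those segments now own the packet's fate.
A self-contained userspace sketch of that aggregation pattern follows; queue_one()
and the loop bound are invented for illustration and are not part of the patch.

        /* Sketch of the "first error wins, unless something was already queued"
         * aggregation used for GSO segments.  Compile with any C compiler.
         */
        #include <errno.h>
        #include <stdio.h>

        static int queue_one(unsigned int i)
        {
                return (i == 2) ? -ENOMEM : 0;  /* pretend the 3rd segment fails */
        }

        int main(void)
        {
                int err = 0;
                unsigned int i, queued = 0;

                for (i = 0; i < 4; i++) {
                        if (err == 0)
                                err = queue_one(i);
                        if (err == 0)
                                queued++;
                        /* else: the segment would be freed here */
                }

                if (err && queued)      /* some segments already queued */
                        err = 0;

                printf("queued=%u err=%d\n", queued, err);
                return 0;
        }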