path: root/net/netfilter/nf_flow_table_xdp.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/flow_offload.h>
#include <net/netfilter/nf_flow_table.h>

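/* Map a net_device to the nf_flowtable(s) it is bound to for XDP
 * offload: a global hashtable keyed by the device pointer value (no
 * reference is taken on the device) holds one flow_offload_xdp bucket
 * per device, each carrying an RCU list of flow_offload_xdp_ft entries
 * pointing at the bound flowtables. Updates are serialized by
 * nf_xdp_hashtable_lock; lookups run locklessly under the RCU read
 * lock.
 */
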
struct flow_offload_xdp_ft {
	struct list_head head;
	struct nf_flowtable *ft;
	struct rcu_head rcuhead;
};

struct flow_offload_xdp {
	struct hlist_node hnode;
	unsigned long net_device_addr;
	struct list_head head;
};

#define NF_XDP_HT_BITS	4
static DEFINE_HASHTABLE(nf_xdp_hashtable, NF_XDP_HT_BITS);
static DEFINE_MUTEX(nf_xdp_hashtable_lock);

/* Caller must hold the RCU read lock. */
struct nf_flowtable *nf_flowtable_by_dev(const struct net_device *dev)
{
	unsigned long key = (unsigned long)dev;
	struct flow_offload_xdp *iter;

	hash_for_each_possible_rcu(nf_xdp_hashtable, iter, hnode, key) {
		if (key == iter->net_device_addr) {
			struct flow_offload_xdp_ft *ft_elem;

			/* A given net_device is expected to be inserted
			 * into only a single nf_flowtable, so we always
			 * return the first element here.
			 */
			ft_elem = list_first_or_null_rcu(&iter->head,
							 struct flow_offload_xdp_ft,
							 head);
			return ft_elem ? ft_elem->ft : NULL;
		}
	}

	return NULL;
}
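
/* A minimal usage sketch (hypothetical caller, not part of this file):
 * resolve the flowtable bound to a packet's ingress device and run a
 * tuple lookup while still under the RCU read lock. flow_offload_lookup()
 * is the existing flowtable tuple lookup; building the tuple is elided.
 *
 *	struct flow_offload_tuple_rhash *tuplehash;
 *	struct nf_flowtable *ft;
 *
 *	rcu_read_lock();
 *	ft = nf_flowtable_by_dev(dev);
 *	if (ft)
 *		tuplehash = flow_offload_lookup(ft, &tuple);
 *	... use tuplehash while still under the RCU read lock ...
 *	rcu_read_unlock();
 */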

static int nf_flowtable_by_dev_insert(struct nf_flowtable *ft,
				      const struct net_device *dev)
{
	struct flow_offload_xdp *iter, *elem = NULL;
	unsigned long key = (unsigned long)dev;
	struct flow_offload_xdp_ft *ft_elem;

	ft_elem = kzalloc(sizeof(*ft_elem), GFP_KERNEL_ACCOUNT);
	if (!ft_elem)
		return -ENOMEM;

	ft_elem->ft = ft;

	mutex_lock(&nf_xdp_hashtable_lock);

	hash_for_each_possible(nf_xdp_hashtable, iter, hnode, key) {
		if (key == iter->net_device_addr) {
			elem = iter;
			break;
		}
	}

	if (!elem) {
		elem = kzalloc(sizeof(*elem), GFP_KERNEL_ACCOUNT);
		if (!elem)
			goto err_unlock;

		elem->net_device_addr = key;
		INIT_LIST_HEAD(&elem->head);
		hash_add_rcu(nf_xdp_hashtable, &elem->hnode, key);
	}
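	/* A device may end up with several flowtables chained on its
	 * bucket; the lookup side only ever returns the first one.
	 */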
	list_add_tail_rcu(&ft_elem->head, &elem->head);

	mutex_unlock(&nf_xdp_hashtable_lock);

	return 0;

err_unlock:
	mutex_unlock(&nf_xdp_hashtable_lock);
	kfree(ft_elem);

	return -ENOMEM;
}

static void nf_flowtable_by_dev_remove(struct nf_flowtable *ft,
				       const struct net_device *dev)
{
	struct flow_offload_xdp *iter, *elem = NULL;
	unsigned long key = (unsigned long)dev;

	mutex_lock(&nf_xdp_hashtable_lock);

	hash_for_each_possible(nf_xdp_hashtable, iter, hnode, key) {
		if (key == iter->net_device_addr) {
			elem = iter;
			break;
		}
	}

	if (elem) {
		struct flow_offload_xdp_ft *ft_elem, *ft_next;

		list_for_each_entry_safe(ft_elem, ft_next, &elem->head, head) {
			if (ft_elem->ft == ft) {
				list_del_rcu(&ft_elem->head);
				kfree_rcu(ft_elem, rcuhead);
			}
		}

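		/* Drop the per-device bucket only once its flowtable list
		 * is empty; otherwise keep it and skip the free below.
		 */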
		if (list_empty(&elem->head))
			hash_del_rcu(&elem->hnode);
		else
			elem = NULL;
	}

	mutex_unlock(&nf_xdp_hashtable_lock);

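	/* elem is non-NULL here only if it was unhashed above: wait for
	 * concurrent nf_flowtable_by_dev() walkers to drop off the bucket
	 * before freeing it.
	 */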
	if (elem) {
		synchronize_rcu();
		kfree(elem);
	}
}

int nf_flow_offload_xdp_setup(struct nf_flowtable *flowtable,
			      struct net_device *dev,
			      enum flow_block_command cmd)
{
	switch (cmd) {
	case FLOW_BLOCK_BIND:
		return nf_flowtable_by_dev_insert(flowtable, dev);
	case FLOW_BLOCK_UNBIND:
		nf_flowtable_by_dev_remove(flowtable, dev);
		return 0;
	}

	WARN_ON_ONCE(1);
	return 0;
}
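
/* A minimal sketch of how the offload path is expected to drive this
 * (hypothetical; the in-tree caller presumably lives in the flowtable
 * offload setup code): bind the flowtable to a device when the flow
 * block is installed, and unbind it on teardown.
 *
 *	err = nf_flow_offload_xdp_setup(flowtable, dev, FLOW_BLOCK_BIND);
 *	if (err)
 *		return err;
 *	...
 *	nf_flow_offload_xdp_setup(flowtable, dev, FLOW_BLOCK_UNBIND);
 */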