author     David S. Miller <davem@davemloft.net>    2018-01-10 11:17:21 -0500
committer  David S. Miller <davem@davemloft.net>    2018-01-10 11:17:21 -0500
commit     661e4e33a984fbd05e6b573ce4bb639ca699c130 (patch)
tree       421a7e3828bc73daae816e5cd8aa91403aa5a12d /include
parent     4512c43eac7e007d982e7ea45152ea6f3f4d1921 (diff)
parent     290af86629b25ffd1ed6232c4e9107da031705cb (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:
====================
pull-request: bpf 2018-01-09
The following pull-request contains BPF updates for your *net* tree.
The main changes are:
1) Prevent out-of-bounds speculation in BPF maps by masking the
index after bounds checks in order to fix spectre v1, and add an
option BPF_JIT_ALWAYS_ON into Kconfig that allows for removing
the BPF interpreter from the kernel in favor of JIT-only mode to
make spectre v2 harder, from Alexei. (A sketch of the masking
pattern follows the quoted message below.)
2) Remove false sharing of the map refcount with max_entries,
which was used in spectre v1, from Daniel. (The resulting
include/linux/bpf.h changes are the diff shown below.)
3) Add a missing NULL psock check in sockmap in order to fix
a race, from John.
4) Fix the test_align BPF selftest, since a recent verifier
change rejects bit-wise arithmetic on pointers earlier but the
corresponding test_align update was missing, from Alexei.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
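
The index masking referenced in 1) deserves a concrete illustration. The sketch below is modeled on the array map lookup fast path in kernel/bpf/arraymap.c as changed by this series; it is a simplified rendering of the pattern, not the verbatim patch, and it relies on the index_mask member added to struct bpf_array in the diff further down:

/* Sketch of the spectre v1 mitigation pattern: index_mask is
 * roundup_pow_of_two(max_entries) - 1, and for unprivileged users
 * the array is sized to index_mask + 1 elements, so the AND below
 * keeps even a speculatively executed access inside the allocation.
 */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(index >= array->map.max_entries))
                return NULL;

        /* The bounds check above can be mispredicted and bypassed
         * under speculation; the mask cannot, since it is applied
         * unconditionally on the data path.
         */
        return array->value + array->elem_size * (index & array->index_mask);
}

BPF_JIT_ALWAYS_ON, the second half of 1), is orthogonal: it compiles the interpreter out entirely so that JITed code is the only execution path, shrinking the spectre v2 gadget surface.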
Diffstat (limited to 'include')
-rw-r--r--  include/linux/bpf.h | 26 ++++++++++++++++++--------
1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index b63a592ad29d..0b25cf87b6d6 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -43,7 +43,14 @@ struct bpf_map_ops {
 };
 
 struct bpf_map {
-        atomic_t refcnt;
+        /* 1st cacheline with read-mostly members of which some
+         * are also accessed in fast-path (e.g. ops, max_entries).
+         */
+        const struct bpf_map_ops *ops ____cacheline_aligned;
+        struct bpf_map *inner_map_meta;
+#ifdef CONFIG_SECURITY
+        void *security;
+#endif
         enum bpf_map_type map_type;
         u32 key_size;
         u32 value_size;
@@ -52,15 +59,17 @@ struct bpf_map {
         u32 pages;
         u32 id;
         int numa_node;
-        struct user_struct *user;
-        const struct bpf_map_ops *ops;
-        struct work_struct work;
+        bool unpriv_array;
+        /* 7 bytes hole */
+
+        /* 2nd cacheline with misc members to avoid false sharing
+         * particularly with refcounting.
+         */
+        struct user_struct *user ____cacheline_aligned;
+        atomic_t refcnt;
         atomic_t usercnt;
-        struct bpf_map *inner_map_meta;
+        struct work_struct work;
         char name[BPF_OBJ_NAME_LEN];
-#ifdef CONFIG_SECURITY
-        void *security;
-#endif
 };
 
 /* function argument constraints */
@@ -221,6 +230,7 @@ struct bpf_prog_aux {
 struct bpf_array {
         struct bpf_map map;
         u32 elem_size;
+        u32 index_mask;
         /* 'ownership' of prog_array is claimed by the first program that
          * is going to use this map or by the first program which FD is stored
          * in the map to make sure that all callers and callees have the same
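
The diff above is the include/linux/bpf.h part of 2): struct bpf_map is split into a read-mostly first cacheline (ops, the size fields, max_entries) and a second cacheline that takes the write-heavy reference counts, so bumping refcnt no longer dirties the line that lookup fast paths read. A hypothetical compile-time check, not part of the patch (the helper name is invented for illustration), states the invariant the two ____cacheline_aligned markers establish:

#include <linux/bpf.h>
#include <linux/build_bug.h>
#include <linux/cache.h>
#include <linux/stddef.h>

/* Invented for illustration: refcnt (written on every map get/put)
 * and max_entries (read on every lookup) must not share an L1
 * cacheline, otherwise a refcount update on one CPU keeps
 * invalidating the line that other CPUs' lookups read, i.e. the
 * false sharing this patch removes.
 */
static inline void bpf_map_layout_check(void)
{
        BUILD_BUG_ON(offsetof(struct bpf_map, refcnt) / L1_CACHE_BYTES ==
                     offsetof(struct bpf_map, max_entries) / L1_CACHE_BYTES);
}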