author     Jakub Kicinski <kuba@kernel.org>  2024-03-11 18:06:04 -0700
committer  Jakub Kicinski <kuba@kernel.org>  2024-03-11 18:06:04 -0700
commit     5f20e6ab1f65aaaaae248e6946d5cb6d039e7de8 (patch)
tree       826f8a879f1d954b00e4b07a8afe271aed02992e /mm
parent     f095fefacdd35b4ea97dc6d88d054f2749a73d07 (diff)
parent     66c8473135c62f478301a0e5b3012f203562dfa6 (diff)
Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:
====================
pull-request: bpf-next 2024-03-11
We've added 59 non-merge commits during the last 9 day(s) which contain
a total of 88 files changed, 4181 insertions(+), 590 deletions(-).
The main changes are:
1) Enforce VM_IOREMAP flag and range in ioremap_page_range and introduce
   VM_SPARSE kind and vm_area_[un]map_pages to be used in bpf_arena
   (caller-side sketches of both APIs follow the diffstat and the diff
   below), from Alexei.
2) Introduce bpf_arena, a sparse shared memory region between a bpf
   program and user space, where structures inside the arena can hold
   pointers to other parts of the arena, and those pointers work
   seamlessly for both user-space programs and bpf programs, from
   Alexei and Andrii.
3) Introduce the may_goto instruction, a contract between the verifier
   and the program: the verifier allows the program to loop assuming it's
   behaving well, but reserves the right to terminate it (a hedged
   program-side sketch follows the commit trailer below), from Alexei.
4) Use IETF format for field definitions in the BPF standard
document, from Dave.
5) Extend struct_ops libbpf APIs to allow specifying version suffixes for
   struct_ops map types, to share the same BPF program between several map
   definitions, and other improvements, from Eduard.
6) Enable struct_ops support for more than one page in trampolines,
from Kui-Feng.
7) Support kCFI + BPF on riscv64, from Puranjay.
8) Use bpf_prog_pack for arm64 bpf trampoline, from Puranjay.
9) Fix roundup_pow_of_two undefined behavior on 32-bit archs, from Toke.
====================
Link: https://lore.kernel.org/r/20240312003646.8692-1-alexei.starovoitov@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
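
As an illustration of the contract described in item 3, a minimal
program-side sketch follows. It is not part of this merge: it assumes the
cond_break helper (and its bpf_experimental.h home) from the BPF
selftests, which is expected to expand to the new may_goto instruction,
so the names and header paths here are assumptions rather than code from
this series.

/* Hypothetical sketch (not from this series): a loop whose trip count
 * the verifier cannot bound, made acceptable by cond_break, assumed
 * here to expand to the new may_goto instruction.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"	/* assumed home of the cond_break macro */

char LICENSE[] SEC("license") = "GPL";

int sum;	/* global so the loop body isn't optimized away */

SEC("raw_tp/sys_enter")
int big_loop(void *ctx)
{
	int i;

	for (i = 0; i < 1000000; i++) {
		/* kernel may force an early break out of the loop here */
		cond_break;
		sum += i;
	}
	return 0;
}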
Diffstat (limited to 'mm')
-rw-r--r--  mm/vmalloc.c  83
1 file changed, 79 insertions(+), 4 deletions(-)
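
Before the mm/vmalloc.c hunks below, a minimal caller-side sketch of the
new sparse-mapping API (items 1 and 2 above). It is hypothetical,
assuming a kernel-side user that reserves a VM_SPARSE region with
get_vm_area() and maps pages on demand; it is not the bpf_arena code
itself, and error/teardown handling is abbreviated.

/* Hypothetical sketch: reserve a large sparse region with no backing
 * pages, then map a single page into a window of it on demand.
 */
#include <linux/vmalloc.h>
#include <linux/gfp.h>
#include <linux/sizes.h>
#include <linux/mm.h>

static int demo_sparse_map(void)
{
	struct vm_struct *region;
	struct page *pg;
	unsigned long start, end;
	int err;

	/* Reserve 1 GiB of kernel address space; nothing is mapped yet. */
	region = get_vm_area(SZ_1G, VM_SPARSE);
	if (!region)
		return -ENOMEM;

	pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!pg) {
		free_vm_area(region);
		return -ENOMEM;
	}

	/* Map one page somewhere inside the sparse region. */
	start = (unsigned long)region->addr + PAGE_SIZE;
	end = start + PAGE_SIZE;
	err = vm_area_map_pages(region, start, end, &pg);
	if (err)
		goto out;

	/* ... the page is now addressable at 'start' ... */

	vm_area_unmap_pages(region, start, end);
out:
	__free_page(pg);
	free_vm_area(region);
	return err;
}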
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d12a17fc0c17..1e36322d83d8 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -304,8 +304,8 @@ static int vmap_range_noflush(unsigned long addr, unsigned long end,
 	return err;
 }
 
-int ioremap_page_range(unsigned long addr, unsigned long end,
-		phys_addr_t phys_addr, pgprot_t prot)
+int vmap_page_range(unsigned long addr, unsigned long end,
+		    phys_addr_t phys_addr, pgprot_t prot)
 {
 	int err;
 
@@ -318,6 +318,26 @@ int ioremap_page_range(unsigned long addr, unsigned long end,
 	return err;
 }
 
+int ioremap_page_range(unsigned long addr, unsigned long end,
+		phys_addr_t phys_addr, pgprot_t prot)
+{
+	struct vm_struct *area;
+
+	area = find_vm_area((void *)addr);
+	if (!area || !(area->flags & VM_IOREMAP)) {
+		WARN_ONCE(1, "vm_area at addr %lx is not marked as VM_IOREMAP\n", addr);
+		return -EINVAL;
+	}
+	if (addr != (unsigned long)area->addr ||
+	    (void *)end != area->addr + get_vm_area_size(area)) {
+		WARN_ONCE(1, "ioremap request [%lx,%lx) doesn't match vm_area [%lx, %lx)\n",
+			  addr, end, (long)area->addr,
+			  (long)area->addr + get_vm_area_size(area));
+		return -ERANGE;
+	}
+	return vmap_page_range(addr, end, phys_addr, prot);
+}
+
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 			     pgtbl_mod_mask *mask)
 {
@@ -635,6 +655,58 @@ static int vmap_pages_range(unsigned long addr, unsigned long end,
 	return err;
 }
 
+static int check_sparse_vm_area(struct vm_struct *area, unsigned long start,
+				unsigned long end)
+{
+	might_sleep();
+	if (WARN_ON_ONCE(area->flags & VM_FLUSH_RESET_PERMS))
+		return -EINVAL;
+	if (WARN_ON_ONCE(area->flags & VM_NO_GUARD))
+		return -EINVAL;
+	if (WARN_ON_ONCE(!(area->flags & VM_SPARSE)))
+		return -EINVAL;
+	if ((end - start) >> PAGE_SHIFT > totalram_pages())
+		return -E2BIG;
+	if (start < (unsigned long)area->addr ||
+	    (void *)end > area->addr + get_vm_area_size(area))
+		return -ERANGE;
+	return 0;
+}
+
+/**
+ * vm_area_map_pages - map pages inside given sparse vm_area
+ * @area: vm_area
+ * @start: start address inside vm_area
+ * @end: end address inside vm_area
+ * @pages: pages to map (always PAGE_SIZE pages)
+ */
+int vm_area_map_pages(struct vm_struct *area, unsigned long start,
+		      unsigned long end, struct page **pages)
+{
+	int err;
+
+	err = check_sparse_vm_area(area, start, end);
+	if (err)
+		return err;
+
+	return vmap_pages_range(start, end, PAGE_KERNEL, pages, PAGE_SHIFT);
+}
+
+/**
+ * vm_area_unmap_pages - unmap pages inside given sparse vm_area
+ * @area: vm_area
+ * @start: start address inside vm_area
+ * @end: end address inside vm_area
+ */
+void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
+			 unsigned long end)
+{
+	if (check_sparse_vm_area(area, start, end))
+		return;
+
+	vunmap_range(start, end);
+}
+
 int is_vmalloc_or_module_addr(const void *x)
 {
 	/*
@@ -3809,9 +3881,9 @@ long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
 
 		if (flags & VMAP_RAM)
 			copied = vmap_ram_vread_iter(iter, addr, n, flags);
-		else if (!(vm && (vm->flags & VM_IOREMAP)))
+		else if (!(vm && (vm->flags & (VM_IOREMAP | VM_SPARSE))))
 			copied = aligned_vread_iter(iter, addr, n);
-		else /* IOREMAP area is treated as memory hole */
+		else /* IOREMAP | SPARSE area is treated as memory hole */
 			copied = zero_iter(iter, n);
 
 		addr += copied;
@@ -4402,6 +4474,9 @@ static int s_show(struct seq_file *m, void *p)
 	if (v->flags & VM_IOREMAP)
 		seq_puts(m, " ioremap");
 
+	if (v->flags & VM_SPARSE)
+		seq_puts(m, " sparse");
+
 	if (v->flags & VM_ALLOC)
 		seq_puts(m, " vmalloc");
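
For context on the ioremap_page_range() enforcement added above: a caller
is now expected to pass in exactly the VM_IOREMAP area it reserved. The
sketch below is a hypothetical conforming caller in the style of the
generic ioremap path; demo_ioremap and its simplifying assumptions
(page-aligned phys_addr, noncached mapping) are illustrative, not code
from this merge.

/* Hypothetical conforming caller: reserve a VM_IOREMAP area and map the
 * full [vaddr, vaddr + size) range, matching the new checks above.
 */
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/mm.h>

static void __iomem *demo_ioremap(phys_addr_t phys_addr, size_t size)
{
	struct vm_struct *area;
	unsigned long vaddr;

	size = PAGE_ALIGN(size);	/* assume phys_addr is page aligned */

	area = get_vm_area_caller(size, VM_IOREMAP,
				  __builtin_return_address(0));
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	/* Both the VM_IOREMAP flag and the exact range now match the area. */
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
			       pgprot_noncached(PAGE_KERNEL))) {
		free_vm_area(area);
		return NULL;
	}
	return (void __iomem *)vaddr;
}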