author     Linus Torvalds <torvalds@linux-foundation.org>  2023-03-05 11:07:58 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-03-05 11:07:58 -0800
commit     1a8d05a726dc5b82e608f0962511e15fcbcab1ab
tree       770da7ce6c15abc4622066cf9f0b388d2ea71763 /arch
parent     95207db8166ab95c42a03fdc5e3abd212c9987dc
parent     caa82ae7ef52b7cf5f80a2b2fbcbdbcfd16426cc
Merge tag 'pull-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull VM_FAULT_RETRY fixes from Al Viro:
"Some of the page fault handlers do not deal with the following case
correctly:
- handle_mm_fault() has returned VM_FAULT_RETRY
- there is a pending fatal signal
- the fault happened in kernel mode
The correct action in such a case is not to "return unconditionally" -
fatal signals are handled only upon return to userland, so something
like copy_to_user() would end up retrying the faulting instruction and
triggering the same fault again and again.
What we need to do in such a case is make the caller treat it as a
failed uaccess attempt - handle the exception if there is an exception
handler for the faulting instruction, or oops if there isn't one.
Over the years some architectures have been fixed and now handle that
case properly; some still do not. This series should fix the
remaining ones.
Status:
- m68k, riscv, hexagon, parisc: tested/acked by maintainers.
- alpha, sparc32, sparc64: tested locally - bug has been reproduced
on the unpatched kernel and verified to be fixed by this series.
- ia64, microblaze, nios2, openrisc: build, but otherwise completely
untested"
* tag 'pull-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
openrisc: fix livelock in uaccess
nios2: fix livelock in uaccess
microblaze: fix livelock in uaccess
ia64: fix livelock in uaccess
sparc: fix livelock in uaccess
alpha: fix livelock in uaccess
parisc: fix livelock in uaccess
hexagon: fix livelock in uaccess
riscv: fix livelock in uaccess
m68k: fix livelock in uaccess
Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/mm/fault.c       | 5
-rw-r--r--  arch/hexagon/mm/vm_fault.c  | 5
-rw-r--r--  arch/ia64/mm/fault.c        | 5
-rw-r--r--  arch/m68k/mm/fault.c        | 5
-rw-r--r--  arch/microblaze/mm/fault.c  | 5
-rw-r--r--  arch/nios2/mm/fault.c       | 5
-rw-r--r--  arch/openrisc/mm/fault.c    | 5
-rw-r--r--  arch/parisc/mm/fault.c      | 7
-rw-r--r--  arch/riscv/mm/fault.c       | 5
-rw-r--r--  arch/sparc/mm/fault_32.c    | 5
-rw-r--r--  arch/sparc/mm/fault_64.c    | 7
11 files changed, 48 insertions, 11 deletions
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index ef427a6bdd1a..7b01ae4f3bc6 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -152,8 +152,11 @@ retry:
            the fault.  */
         fault = handle_mm_fault(vma, address, flags, regs);
 
-        if (fault_signal_pending(fault, regs))
+        if (fault_signal_pending(fault, regs)) {
+                if (!user_mode(regs))
+                        goto no_context;
                 return;
+        }
 
         /* The fault is fully completed (including releasing mmap lock) */
         if (fault & VM_FAULT_COMPLETED)
diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c
index f73c7cbfe326..4b578d02fd01 100644
--- a/arch/hexagon/mm/vm_fault.c
+++ b/arch/hexagon/mm/vm_fault.c
@@ -93,8 +93,11 @@ good_area:
 
         fault = handle_mm_fault(vma, address, flags, regs);
 
-        if (fault_signal_pending(fault, regs))
+        if (fault_signal_pending(fault, regs)) {
+                if (!user_mode(regs))
+                        goto no_context;
                 return;
+        }
 
         /* The fault is fully completed (including releasing mmap lock) */
         if (fault & VM_FAULT_COMPLETED)
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index ef78c2d66cdd..85c4d9ac8686 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -136,8 +136,11 @@ retry:
          */
         fault = handle_mm_fault(vma, address, flags, regs);
 
-        if (fault_signal_pending(fault, regs))
+        if (fault_signal_pending(fault, regs)) {
+                if (!user_mode(regs))
+                        goto no_context;
                 return;
+        }
 
         /* The fault is fully completed (including releasing mmap lock) */
         if (fault & VM_FAULT_COMPLETED)
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 4d2837eb3e2a..228128e45c67 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -138,8 +138,11 @@ good_area:
         fault = handle_mm_fault(vma, address, flags, regs);
         pr_debug("handle_mm_fault returns %x\n", fault);
 
-        if (fault_signal_pending(fault, regs))
+        if (fault_signal_pending(fault, regs)) {
+                if (!user_mode(regs))
+                        goto no_context;
                 return 0;
+        }
 
         /* The fault is fully completed (including releasing mmap lock) */
         if (fault & VM_FAULT_COMPLETED)
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index 5c40c3ebe52f..687714db6f4d 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -219,8 +219,11 @@ good_area:
          */
         fault = handle_mm_fault(vma, address, flags, regs);
 
-        if (fault_signal_pending(fault, regs))
+        if (fault_signal_pending(fault, regs)) {
+                if (!user_mode(regs))
+                        bad_page_fault(regs, address, SIGBUS);
                 return;
+        }
 
         /* The fault is fully completed (including releasing mmap lock) */
         if (fault & VM_FAULT_COMPLETED)
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index edaca0a6c1c1..ca64eccea551 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -136,8 +136,11 @@ good_area:
          */
         fault = handle_mm_fault(vma, address, flags, regs);
 
-        if (fault_signal_pending(fault, regs))
+        if (fault_signal_pending(fault, regs)) {
+                if (!user_mode(regs))
+                        goto no_context;
                 return;
+        }
 
         /* The fault is fully completed (including releasing mmap lock) */
         if (fault & VM_FAULT_COMPLETED)
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index b4762d66e9ef..6734fee3134f 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -162,8 +162,11 @@ good_area:
 
         fault = handle_mm_fault(vma, address, flags, regs);
 
-        if (fault_signal_pending(fault, regs))
+        if (fault_signal_pending(fault, regs)) {
+                if (!user_mode(regs))
+                        goto no_context;
                 return;
+        }
 
         /* The fault is fully completed (including releasing mmap lock) */
         if (fault & VM_FAULT_COMPLETED)
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 869204e97ec9..6941fdbf2517 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -308,8 +308,13 @@ good_area:
 
         fault = handle_mm_fault(vma, address, flags, regs);
 
-        if (fault_signal_pending(fault, regs))
+        if (fault_signal_pending(fault, regs)) {
+                if (!user_mode(regs)) {
+                        msg = "Page fault: fault signal on kernel memory";
+                        goto no_context;
+                }
                 return;
+        }
 
         /* The fault is fully completed (including releasing mmap lock) */
         if (fault & VM_FAULT_COMPLETED)
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index eb0774d9c03b..460f785f6e09 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -326,8 +326,11 @@ good_area:
          * signal first. We do not need to release the mmap_lock because it
          * would already be released in __lock_page_or_retry in mm/filemap.c.
          */
-        if (fault_signal_pending(fault, regs))
+        if (fault_signal_pending(fault, regs)) {
+                if (!user_mode(regs))
+                        no_context(regs, addr);
                 return;
+        }
 
         /* The fault is fully completed (including releasing mmap lock) */
         if (fault & VM_FAULT_COMPLETED)
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 91259f291c54..179295b14664 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -187,8 +187,11 @@ good_area:
          */
         fault = handle_mm_fault(vma, address, flags, regs);
 
-        if (fault_signal_pending(fault, regs))
+        if (fault_signal_pending(fault, regs)) {
+                if (!from_user)
+                        goto no_context;
                 return;
+        }
 
         /* The fault is fully completed (including releasing mmap lock) */
         if (fault & VM_FAULT_COMPLETED)
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 4acc12eafbf5..d91305de694c 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -424,8 +424,13 @@ good_area:
 
         fault = handle_mm_fault(vma, address, flags, regs);
 
-        if (fault_signal_pending(fault, regs))
+        if (fault_signal_pending(fault, regs)) {
+                if (regs->tstate & TSTATE_PRIV) {
+                        insn = get_fault_insn(regs, insn);
+                        goto handle_kernel_fault;
+                }
                 goto exit_exception;
+        }
 
         /* The fault is fully completed (including releasing mmap lock) */
         if (fault & VM_FAULT_COMPLETED)