author		Kefeng Wang <wangkefeng.wang@huawei.com>	2024-04-03 16:37:59 +0800
committer	Andrew Morton <akpm@linux-foundation.org>	2024-04-25 20:56:38 -0700
commit		6ea02ee489799317c6640ac014c49b1d1b7124c5 (patch)
tree		b372790fba7541e3ab35ff4de6c24ac2882b3d18 /arch
parent		3931b871c4936c00c4e27c469056d8da47a3493f (diff)
arm64: mm: cleanup __do_page_fault()
Patch series "arch/mm/fault: accelerate pagefault when badaccess", v2.

With VMA lock-based page fault handling enabled, a bad access that is detected under the per-VMA lock still falls back to mmap_lock-based handling, which takes the mmap lock and looks up the VMA again unnecessarily.  A test from lmbench shows a 34% improvement on arm64 after these changes:

  lat_sig -P 1 prot lat_sig	0.29194 -> 0.19198

This patch (of 7):

__do_page_fault() only calls handle_mm_fault() after vm_flags has been checked, and it is only called by do_page_fault(), so squash it into do_page_fault() to clean up the code.

Link: https://lkml.kernel.org/r/20240403083805.1818160-1-wangkefeng.wang@huawei.com
Link: https://lkml.kernel.org/r/20240403083805.1818160-2-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
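For illustration only (not part of the commit): a minimal, standalone C sketch of the permission check that this patch moves from __do_page_fault() into do_page_fault() itself. The names struct vma_model, handle_mm_fault_model(), page_fault_model() and VM_FAULT_HANDLED are made-up stand-ins for the kernel's definitions; only the "if (!(vma->vm_flags & vm_flags)) ... else handle_mm_fault(...)" shape is taken from the diff below.

/*
 * Illustrative userspace model only -- NOT the kernel code.  It mirrors
 * the shape of the check this patch inlines into do_page_fault(): if the
 * VMA's permissions do not allow the faulting access, report
 * VM_FAULT_BADACCESS without calling handle_mm_fault().
 */
#include <stdio.h>

/* Simplified stand-ins for the kernel's vm_flags bits and fault codes. */
#define VM_READ			0x1UL
#define VM_WRITE		0x2UL
#define VM_FAULT_BADACCESS	0x020000U
#define VM_FAULT_HANDLED	0x0U	/* placeholder "normal fault" result */

struct vma_model {
	unsigned long vm_flags;
};

/* Stand-in for handle_mm_fault(): always "succeeds" in this model. */
static unsigned int handle_mm_fault_model(struct vma_model *vma)
{
	(void)vma;
	return VM_FAULT_HANDLED;
}

/* The inlined check, as it reads in do_page_fault() after this patch. */
static unsigned int page_fault_model(struct vma_model *vma, unsigned long vm_flags)
{
	unsigned int fault;

	if (!(vma->vm_flags & vm_flags))
		fault = VM_FAULT_BADACCESS;
	else
		fault = handle_mm_fault_model(vma);

	return fault;
}

int main(void)
{
	struct vma_model ro_vma = { .vm_flags = VM_READ };

	/* Write fault on a read-only VMA: bad access, no handle_mm_fault(). */
	printf("write: %#x\n", page_fault_model(&ro_vma, VM_WRITE));
	/* Read fault: permitted, goes through handle_mm_fault(). */
	printf("read:  %#x\n", page_fault_model(&ro_vma, VM_READ));
	return 0;
}

Built with any C compiler, the write case prints 0x20000 and the read case prints 0: the bad-access case returns immediately without calling the fault handler, which is the property the rest of the series exploits to avoid the mmap_lock fallback.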
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm64/mm/fault.c	27
1 file changed, 7 insertions, 20 deletions
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 8251e2fea9c7..9bb9f395351a 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -486,25 +486,6 @@ static void do_bad_area(unsigned long far, unsigned long esr,
 	}
 }
 
-#define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
-#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)
-
-static vm_fault_t __do_page_fault(struct mm_struct *mm,
-				  struct vm_area_struct *vma, unsigned long addr,
-				  unsigned int mm_flags, unsigned long vm_flags,
-				  struct pt_regs *regs)
-{
-	/*
-	 * Ok, we have a good vm_area for this memory access, so we can handle
-	 * it.
-	 * Check that the permissions on the VMA allow for the fault which
-	 * occurred.
-	 */
-	if (!(vma->vm_flags & vm_flags))
-		return VM_FAULT_BADACCESS;
-	return handle_mm_fault(vma, addr, mm_flags, regs);
-}
-
 static bool is_el0_instruction_abort(unsigned long esr)
 {
 	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
@@ -519,6 +500,9 @@ static bool is_write_abort(unsigned long esr)
 	return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
 }
 
+#define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
+#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)
+
 static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 				   struct pt_regs *regs)
 {
@@ -617,7 +601,10 @@ retry:
 		goto done;
 	}
 
-	fault = __do_page_fault(mm, vma, addr, mm_flags, vm_flags, regs);
+	if (!(vma->vm_flags & vm_flags))
+		fault = VM_FAULT_BADACCESS;
+	else
+		fault = handle_mm_fault(vma, addr, mm_flags, regs);
 
 	/* Quick path to respond to signals */
 	if (fault_signal_pending(fault, regs)) {