author		Andy Lutomirski <luto@kernel.org>	2017-05-28 10:00:10 -0700
committer	Ingo Molnar <mingo@kernel.org>		2017-06-05 09:59:35 +0200
commit		a2055abe9c6789cedef29abbdaa488a087faccc3
tree		8d90aaffe9b58a290bb02e6abeaf39e60447b128 /arch/x86/mm/tlb.c
parent		4241119eeb14f762e2d7f6e7c52afd22cea1de37
x86/mm: Pass flush_tlb_info to flush_tlb_others() etc
Rather than passing all the contents of flush_tlb_info to
flush_tlb_others(), pass a pointer to the structure directly. For
consistency, this also removes the unnecessary cpu parameter from
uv_flush_tlb_others() to make its signature match the other
*flush_tlb_others() functions.
This serves two purposes:
- It will dramatically simplify future patches that change struct
flush_tlb_info, which I'm planning to do.
- struct flush_tlb_info is an adequate description of what to do
for a local flush, too, so by reusing it we can remove duplicated
code between local and remote flushes in a future patch (a rough
sketch of the new calling convention follows this list).
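
As a rough sketch only (not part of the patch itself): the flush_tlb_info
fields shown below are inferred from their use in the diff further down,
since the structure's definition moves out of tlb.c and is not covered by
this diffstat-limited view.

	/* Sketch only: fields inferred from their use in the diff below. */
	struct flush_tlb_info {
		struct mm_struct	*mm;
		unsigned long		start;
		unsigned long		end;
	};

	/* Before: every flush parameter is passed separately. */
	void native_flush_tlb_others(const struct cpumask *cpumask,
				     struct mm_struct *mm, unsigned long start,
				     unsigned long end);

	/* After: a single const pointer describes the whole flush. */
	void native_flush_tlb_others(const struct cpumask *cpumask,
				     const struct flush_tlb_info *info);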
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Nadav Amit <namit@vmware.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
[ Fix build warning. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/mm/tlb.c')
 -rw-r--r--  arch/x86/mm/tlb.c | 64
 1 file changed, 32 insertions(+), 32 deletions(-)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 743e4c6b4529..776469cc54e0 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -30,12 +30,6 @@
 
 #ifdef CONFIG_SMP
 
-struct flush_tlb_info {
-	struct mm_struct *flush_mm;
-	unsigned long flush_start;
-	unsigned long flush_end;
-};
-
 /*
  * We cannot call mmdrop() because we are in interrupt context,
  * instead update mm->cpu_vm_mask.
@@ -229,11 +223,11 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
  */
 static void flush_tlb_func(void *info)
 {
-	struct flush_tlb_info *f = info;
+	const struct flush_tlb_info *f = info;
 
 	inc_irq_stat(irq_tlb_count);
 
-	if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
+	if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.active_mm))
 		return;
 
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
@@ -243,15 +237,15 @@ static void flush_tlb_func(void *info)
 		return;
 	}
 
-	if (f->flush_end == TLB_FLUSH_ALL) {
+	if (f->end == TLB_FLUSH_ALL) {
 		local_flush_tlb();
 		trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
 	} else {
 		unsigned long addr;
 		unsigned long nr_pages =
-			(f->flush_end - f->flush_start) / PAGE_SIZE;
-		addr = f->flush_start;
-		while (addr < f->flush_end) {
+			(f->end - f->start) / PAGE_SIZE;
+		addr = f->start;
+		while (addr < f->end) {
 			__flush_tlb_single(addr);
 			addr += PAGE_SIZE;
 		}
@@ -260,33 +254,27 @@ static void flush_tlb_func(void *info)
 }
 
 void native_flush_tlb_others(const struct cpumask *cpumask,
-				 struct mm_struct *mm, unsigned long start,
-				 unsigned long end)
+			     const struct flush_tlb_info *info)
 {
-	struct flush_tlb_info info;
-
-	info.flush_mm = mm;
-	info.flush_start = start;
-	info.flush_end = end;
-
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
-	if (end == TLB_FLUSH_ALL)
+	if (info->end == TLB_FLUSH_ALL)
 		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
 	else
 		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
-				(end - start) >> PAGE_SHIFT);
+				(info->end - info->start) >> PAGE_SHIFT);
 
 	if (is_uv_system()) {
 		unsigned int cpu;
 
 		cpu = smp_processor_id();
-		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
+		cpumask = uv_flush_tlb_others(cpumask, info);
 		if (cpumask)
 			smp_call_function_many(cpumask, flush_tlb_func,
-								&info, 1);
+					       (void *)info, 1);
 		return;
 	}
-	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
+	smp_call_function_many(cpumask, flush_tlb_func,
+			       (void *)info, 1);
 }
 
 /*
@@ -305,6 +293,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned long vmflag)
 {
 	unsigned long addr;
+	struct flush_tlb_info info;
 	/* do a global flush by default */
 	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
 
@@ -347,15 +336,20 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 	}
 	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
 out:
+	info.mm = mm;
 	if (base_pages_to_flush == TLB_FLUSH_ALL) {
-		start = 0UL;
-		end = TLB_FLUSH_ALL;
+		info.start = 0UL;
+		info.end = TLB_FLUSH_ALL;
+	} else {
+		info.start = start;
+		info.end = end;
 	}
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), mm, start, end);
+		flush_tlb_others(mm_cpumask(mm), &info);
 	preempt_enable();
 }
 
+
 static void do_flush_tlb_all(void *info)
 {
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
@@ -376,7 +370,7 @@ static void do_kernel_range_flush(void *info)
 	unsigned long addr;
 
 	/* flush range by one by one 'invlpg' */
-	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
+	for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
 		__flush_tlb_single(addr);
 }
 
@@ -389,14 +383,20 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 		on_each_cpu(do_flush_tlb_all, NULL, 1);
 	} else {
 		struct flush_tlb_info info;
-		info.flush_start = start;
-		info.flush_end = end;
+		info.start = start;
+		info.end = end;
 		on_each_cpu(do_kernel_range_flush, &info, 1);
 	}
 }
 
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 {
+	struct flush_tlb_info info = {
+		.mm = NULL,
+		.start = 0UL,
+		.end = TLB_FLUSH_ALL,
+	};
+
 	int cpu = get_cpu();
 
 	if (cpumask_test_cpu(cpu, &batch->cpumask)) {
@@ -406,7 +406,7 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 	}
 
 	if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
-		flush_tlb_others(&batch->cpumask, NULL, 0, TLB_FLUSH_ALL);
+		flush_tlb_others(&batch->cpumask, &info);
 
 	cpumask_clear(&batch->cpumask);
 	put_cpu();