| author | Guo Ren <guoren@linux.alibaba.com> | 2020-01-31 20:33:10 +0800 |
|---|---|---|
| committer | Guo Ren <guoren@linux.alibaba.com> | 2020-02-21 15:43:24 +0800 |
| commit | 997153b9a75c08d545ad45e6f8ceb432435d2425 | |
| tree | 830915f73d05e7d094d2f293a712e05c0da1e6f5 /arch/csky/abiv2 | |
| parent | cc1f6563a92ced0889775d0587316d725b6e1a68 | |
csky: Add flush_icache_mm to defer flush icache all
Some CPUs don't support the icache.va instruction needed to maintain the
icache of all SMP cores. Falling back on icache.all plus an IPI costs a
lot of performance, and deferring the flush reduces the number of
icache-flush-all calls.
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
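
The idea behind the patch, sketched below in plain C, is: when code is modified, mark every core's icache as stale for that mm, flush only the local icache immediately, and let each other core invalidate its own icache lazily the next time it switches to that mm. All names in the sketch (`stale_mask`, `on_switch_to_mm`, `invalidate_local_icache`) are hypothetical stand-ins, and it elides the memory barrier and the IPI that the real flush_icache_mm_range() below still sends to harts concurrently running the mm.

```c
/*
 * Minimal user-space sketch of the deferred icache-flush pattern.
 * Hypothetical names; see flush_icache_mm_range() in the diff for
 * the real csky implementation.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

struct mm {
        /* One "icache is stale" bit per CPU. */
        atomic_uint icache_stale_mask;
};

static void invalidate_local_icache(int cpu)
{
        /* Stand-in for local_icache_inv_all(). */
        printf("cpu%d: icache invalidated\n", cpu);
}

/* Called by the CPU that just modified code in @mm. */
static void flush_icache_mm(struct mm *mm, int this_cpu)
{
        /* Mark every CPU's icache stale for this mm... */
        atomic_store(&mm->icache_stale_mask, (1u << NR_CPUS) - 1);

        /* ...but flush our own icache right away and clear our bit. */
        atomic_fetch_and(&mm->icache_stale_mask, ~(1u << this_cpu));
        invalidate_local_icache(this_cpu);
        /*
         * No broadcast here: CPUs not currently running @mm pick up
         * the flush lazily in on_switch_to_mm().
         */
}

/* Called on context switch, before user code from @mm runs. */
static void on_switch_to_mm(struct mm *mm, int this_cpu)
{
        unsigned int bit = 1u << this_cpu;

        if (atomic_load(&mm->icache_stale_mask) & bit) {
                atomic_fetch_and(&mm->icache_stale_mask, ~bit);
                invalidate_local_icache(this_cpu);
        }
}

int main(void)
{
        struct mm mm = { .icache_stale_mask = 0 };

        flush_icache_mm(&mm, 0);   /* cpu0 patches code, flushes itself */
        on_switch_to_mm(&mm, 1);   /* cpu1 flushes lazily on switch-in  */
        on_switch_to_mm(&mm, 1);   /* second switch-in: nothing to do   */
        return 0;
}
```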
Diffstat (limited to 'arch/csky/abiv2')
-rw-r--r-- | arch/csky/abiv2/cacheflush.c | 55 |
-rw-r--r-- | arch/csky/abiv2/inc/abi/cacheflush.h | 14 |
2 files changed, 66 insertions(+), 3 deletions(-)
```diff
diff --git a/arch/csky/abiv2/cacheflush.c b/arch/csky/abiv2/cacheflush.c
index ba469953a16e..790f1ebfba44 100644
--- a/arch/csky/abiv2/cacheflush.c
+++ b/arch/csky/abiv2/cacheflush.c
@@ -28,3 +28,58 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 
         kunmap_atomic((void *) addr);
 }
+
+void flush_icache_deferred(struct mm_struct *mm)
+{
+        unsigned int cpu = smp_processor_id();
+        cpumask_t *mask = &mm->context.icache_stale_mask;
+
+        if (cpumask_test_cpu(cpu, mask)) {
+                cpumask_clear_cpu(cpu, mask);
+                /*
+                 * Ensure the remote hart's writes are visible to this hart.
+                 * This pairs with a barrier in flush_icache_mm.
+                 */
+                smp_mb();
+                local_icache_inv_all(NULL);
+        }
+}
+
+void flush_icache_mm_range(struct mm_struct *mm,
+                unsigned long start, unsigned long end)
+{
+        unsigned int cpu;
+        cpumask_t others, *mask;
+
+        preempt_disable();
+
+#ifdef CONFIG_CPU_HAS_ICACHE_INS
+        if (mm == current->mm) {
+                icache_inv_range(start, end);
+                preempt_enable();
+                return;
+        }
+#endif
+
+        /* Mark every hart's icache as needing a flush for this MM. */
+        mask = &mm->context.icache_stale_mask;
+        cpumask_setall(mask);
+
+        /* Flush this hart's I$ now, and mark it as flushed. */
+        cpu = smp_processor_id();
+        cpumask_clear_cpu(cpu, mask);
+        local_icache_inv_all(NULL);
+
+        /*
+         * Flush the I$ of other harts concurrently executing, and mark them
+         * as flushed.
+         */
+        cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+
+        if (mm != current->active_mm || !cpumask_empty(&others)) {
+                on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
+                cpumask_clear(mask);
+        }
+
+        preempt_enable();
+}
diff --git a/arch/csky/abiv2/inc/abi/cacheflush.h b/arch/csky/abiv2/inc/abi/cacheflush.h
index 28b7c3233175..a565e00c3f70 100644
--- a/arch/csky/abiv2/inc/abi/cacheflush.h
+++ b/arch/csky/abiv2/inc/abi/cacheflush.h
@@ -31,15 +31,23 @@ static inline void flush_dcache_page(struct page *page)
 #define flush_icache_range(start, end) cache_wbinv_range(start, end)
 
+void flush_icache_mm_range(struct mm_struct *mm,
+                unsigned long start, unsigned long end);
+void flush_icache_deferred(struct mm_struct *mm);
+
 #define flush_cache_vmap(start, end)           do { } while (0)
 #define flush_cache_vunmap(start, end)         do { } while (0)
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 do { \
         memcpy(dst, src, len); \
-        if (vma->vm_flags & VM_EXEC) \
-                cache_wbinv_range((unsigned long)dst, \
-                                (unsigned long)dst + len); \
+        if (vma->vm_flags & VM_EXEC) { \
+                dcache_wb_range((unsigned long)dst, \
+                                (unsigned long)dst + len); \
+                flush_icache_mm_range(current->mm, \
+                                (unsigned long)dst, \
+                                (unsigned long)dst + len); \
+        } \
 } while (0)
 
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
         memcpy(dst, src, len)
```
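
This diff adds only the producer side; the deferred flush is consumed at context-switch time by flush_icache_deferred(). That call site lives outside arch/csky/abiv2 and is therefore not in this diffstat; the fragment below is only an illustration of how a switch_mm() implementation might drive it, not the committed code.

```c
/*
 * Illustration only: the real csky switch_mm() is not part of this
 * diff. It shows where flush_icache_deferred() is expected to run --
 * after the hart has switched to @next's address space and before
 * any user code from @next executes.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned int cpu = smp_processor_id();

        if (prev != next) {
                cpumask_clear_cpu(cpu, mm_cpumask(prev));
                cpumask_set_cpu(cpu, mm_cpumask(next));
                /* ASID allocation and TLB handling elided. */
        }

        /*
         * If flush_icache_mm_range() marked this hart's icache stale
         * while it was running another mm, invalidate it now.
         */
        flush_icache_deferred(next);
}
```

The smp_mb() inside flush_icache_deferred() then ensures the hart observes the new instructions that were written before its stale bit was set.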