| author | Guo Ren <guoren@linux.alibaba.com> | 2020-01-27 01:20:36 +0800 |
|---|---|---|
| committer | Guo Ren <guoren@linux.alibaba.com> | 2020-02-21 15:43:24 +0800 |
| commit | d936a7e708dcf22344c4420e8b0e36f5d5f8c073 | |
| tree | b9b305756f2fb2696bb14f649b9ec5ccebb2858c | |
| parent | a1176734132c630b50908c36563e05fb3599682c | |
csky: Enable defer flush_dcache_page for abiv2 cpus (807/810/860)
Instead of flushing the cache on every update_mmu_cache() call, use
flush_dcache_page() to reduce how often the cache is flushed. As the
abiv2 cpus are all PIPT for both icache and dcache, we needn't handle
the dcache aliasing problem. But their icache can't snoop the dcache,
so we still need to sync icache with dcache in update_mmu_cache().
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
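In other words, the patch turns the flush into a two-step handshake between the generic mm code and the architecture: flush_dcache_page() only records that the page was dirtied through its kernel mapping by clearing PG_dcache_clean, and update_mmu_cache() pays for the actual writeback at most once per dirtying, when the page is next mapped into a user address space. A sketch of the resulting code, assembled from the patch below for readability (the comments are mine, not from the patch):

```c
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/cache.h>

#define PG_dcache_clean	PG_arch_1

static inline void flush_dcache_page(struct page *page)
{
	/* Defer: just mark the page as dirtied via its kernel mapping. */
	if (test_bit(PG_dcache_clean, &page->flags))
		clear_bit(PG_dcache_clean, &page->flags);
}

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *pte)
{
	unsigned long addr;
	struct page *page = pfn_to_page(pte_pfn(*pte));

	if (page == ZERO_PAGE(0))
		return;

	/*
	 * Bit already set: no flush was deferred, nothing to do.
	 * Otherwise set it atomically so exactly one caller flushes.
	 */
	if (test_and_set_bit(PG_dcache_clean, &page->flags))
		return;

	addr = (unsigned long) kmap_atomic(page);

	/* PIPT dcache: write back so the new user mapping sees the data. */
	dcache_wb_range(addr, addr + PAGE_SIZE);

	/* The icache can't snoop the dcache; invalidate for exec mappings. */
	if (vma->vm_flags & VM_EXEC)
		icache_inv_range(addr, addr + PAGE_SIZE);

	kunmap_atomic((void *) addr);
}
```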
| -rw-r--r-- | arch/csky/abiv2/cacheflush.c | 14 |
| -rw-r--r-- | arch/csky/abiv2/inc/abi/cacheflush.h | 12 |

2 files changed, 18 insertions(+), 8 deletions(-)
diff --git a/arch/csky/abiv2/cacheflush.c b/arch/csky/abiv2/cacheflush.c
index f64b415f6fde..ba469953a16e 100644
--- a/arch/csky/abiv2/cacheflush.c
+++ b/arch/csky/abiv2/cacheflush.c
@@ -9,20 +9,22 @@
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 	pte_t *pte)
 {
-	unsigned long addr, pfn;
+	unsigned long addr;
 	struct page *page;
 
-	pfn = pte_pfn(*pte);
-	if (unlikely(!pfn_valid(pfn)))
+	page = pfn_to_page(pte_pfn(*pte));
+	if (page == ZERO_PAGE(0))
 		return;
 
-	page = pfn_to_page(pfn);
-	if (page == ZERO_PAGE(0))
+	if (test_and_set_bit(PG_dcache_clean, &page->flags))
 		return;
 
 	addr = (unsigned long) kmap_atomic(page);
 
-	cache_wbinv_range(addr, addr + PAGE_SIZE);
+	dcache_wb_range(addr, addr + PAGE_SIZE);
+
+	if (vma->vm_flags & VM_EXEC)
+		icache_inv_range(addr, addr + PAGE_SIZE);
 
 	kunmap_atomic((void *) addr);
 }
diff --git a/arch/csky/abiv2/inc/abi/cacheflush.h b/arch/csky/abiv2/inc/abi/cacheflush.h
index 62a9031fffd8..acd7c6c55d61 100644
--- a/arch/csky/abiv2/inc/abi/cacheflush.h
+++ b/arch/csky/abiv2/inc/abi/cacheflush.h
@@ -15,8 +15,16 @@
 #define flush_cache_dup_mm(mm)			do { } while (0)
 #define flush_cache_range(vma, start, end)	do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)		do { } while (0)
+
+#define PG_dcache_clean		PG_arch_1
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+static inline void flush_dcache_page(struct page *page)
+{
+	if (test_bit(PG_dcache_clean, &page->flags))
+		clear_bit(PG_dcache_clean, &page->flags);
+}
+
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 #define flush_icache_page(vma, page)		do { } while (0)
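For completeness, here is where the deferred bit typically gets cleared: any kernel path that writes a page through its kernel mapping is expected to call flush_dcache_page() afterwards. A hypothetical producer-side example (my_fill_page() and its arguments are illustrative only, not part of this patch or of the kernel API):

```c
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Hypothetical producer: the kernel fills a page that userspace
 * may later mmap() and possibly execute.
 */
static void my_fill_page(struct page *page, const void *src, size_t len)
{
	void *dst = kmap_atomic(page);

	memcpy(dst, src, len);
	kunmap_atomic(dst);

	/*
	 * After this patch, on csky abiv2 this only clears
	 * PG_dcache_clean; the real dcache writeback (plus the icache
	 * invalidate for VM_EXEC vmas) is deferred to update_mmu_cache()
	 * when the page is faulted into a user address space.
	 */
	flush_dcache_page(page);
}
```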