Diffstat (limited to 'arch/mips/include/asm/pgtable.h')
-rw-r--r--	arch/mips/include/asm/pgtable.h	108
1 file changed, 91 insertions, 17 deletions
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index f1801e7a4b15..85b39c9fd09e 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -400,7 +400,7 @@ static inline pte_t pte_mkwrite(pte_t pte)
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
-	pte_val(pte) |= _PAGE_MODIFIED;
+	pte_val(pte) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
 	if (pte_val(pte) & _PAGE_WRITE)
 		pte_val(pte) |= _PAGE_SILENT_WRITE;
 	return pte;
@@ -414,6 +414,8 @@ static inline pte_t pte_mkyoung(pte_t pte)
 	return pte;
 }
 
+#define pte_sw_mkyoung	pte_mkyoung
+
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }
 
@@ -423,6 +425,30 @@ static inline pte_t pte_mkhuge(pte_t pte)
 	return pte;
 }
 #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+static inline bool pte_soft_dirty(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_SOFT_DIRTY;
+}
+#define pte_swp_soft_dirty pte_soft_dirty
+
+static inline pte_t pte_mksoft_dirty(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_SOFT_DIRTY;
+	return pte;
+}
+#define pte_swp_mksoft_dirty pte_mksoft_dirty
+
+static inline pte_t pte_clear_soft_dirty(pte_t pte)
+{
+	pte_val(pte) &= ~(_PAGE_SOFT_DIRTY);
+	return pte;
+}
+#define pte_swp_clear_soft_dirty pte_clear_soft_dirty
+
+#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
+
 #endif
 
 /*
@@ -454,6 +480,31 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
 	return __pgprot(prot);
 }
 
+static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
+						unsigned long address)
+{
+}
+
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+	return pte_val(pte_a) == pte_val(pte_b);
+}
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+static inline int ptep_set_access_flags(struct vm_area_struct *vma,
+					unsigned long address, pte_t *ptep,
+					pte_t entry, int dirty)
+{
+	if (!pte_same(*ptep, entry))
+		set_pte_at(vma->vm_mm, address, ptep, entry);
+	/*
+	 * update_mmu_cache will unconditionally execute, handling both
+	 * the case that the PTE changed and the spurious fault case.
+	 */
+	return true;
+}
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
@@ -481,8 +532,11 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #else
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
-		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
+	pte_val(pte) &= _PAGE_CHG_MASK;
+	pte_val(pte) |= pgprot_val(newprot) & ~_PAGE_CHG_MASK;
+	if ((pte_val(pte) & _PAGE_ACCESSED) && !(pte_val(pte) & _PAGE_NO_READ))
+		pte_val(pte) |= _PAGE_SILENT_READ;
+	return pte;
 }
 #endif
 
@@ -497,6 +551,9 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 	__update_tlb(vma, address, pte);
 }
 
+#define	__HAVE_ARCH_UPDATE_MMU_TLB
+#define update_mmu_tlb	update_mmu_cache
+
 static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
 	unsigned long address, pmd_t *pmdp)
 {
@@ -507,20 +564,17 @@ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
 
 #define kern_addr_valid(addr)	(1)
 
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
-extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);
-
-static inline int io_remap_pfn_range(struct vm_area_struct *vma,
-		unsigned long vaddr,
-		unsigned long pfn,
-		unsigned long size,
-		pgprot_t prot)
-{
-	phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
-	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
-}
+/*
+ * Allow physical addresses to be fixed up to help 36-bit peripherals.
+ */
+#ifdef CONFIG_MIPS_FIXUP_BIGPHYS_ADDR
+phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size);
+int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long vaddr,
+		unsigned long pfn, unsigned long size, pgprot_t prot);
 #define io_remap_pfn_range io_remap_pfn_range
-#endif
+#else
+#define fixup_bigphys_addr(addr, size)	(addr)
+#endif /* CONFIG_MIPS_FIXUP_BIGPHYS_ADDR */
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
@@ -579,7 +633,7 @@ static inline pmd_t pmd_mkclean(pmd_t pmd)
 
 static inline pmd_t pmd_mkdirty(pmd_t pmd)
 {
-	pmd_val(pmd) |= _PAGE_MODIFIED;
+	pmd_val(pmd) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
 	if (pmd_val(pmd) & _PAGE_WRITE)
 		pmd_val(pmd) |= _PAGE_SILENT_WRITE;
 
@@ -608,6 +662,26 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
 	return pmd;
 }
 
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+static inline int pmd_soft_dirty(pmd_t pmd)
+{
+	return !!(pmd_val(pmd) & _PAGE_SOFT_DIRTY);
+}
+
+static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
+{
+	pmd_val(pmd) |= _PAGE_SOFT_DIRTY;
+	return pmd;
+}
+
+static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
+{
+	pmd_val(pmd) &= ~(_PAGE_SOFT_DIRTY);
+	return pmd;
+}
+
+#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
+
 /* Extern to avoid header file madness */
 extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
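
The pte_*soft_dirty and pmd_*soft_dirty helpers added above are what allow CONFIG_HAVE_ARCH_SOFT_DIRTY to be selected for MIPS. Once a kernel is built with that option, soft-dirty tracking is consumed from userspace through the standard procfs interface documented in Documentation/admin-guide/mm/soft-dirty.rst: writing "4" to /proc/<pid>/clear_refs clears the soft-dirty bits for the task, and bit 55 of the matching /proc/<pid>/pagemap entry then reports whether a page has been written since. The program below is a minimal illustrative sketch of that flow, not part of this patch; error handling is kept to a minimum and it assumes it runs on a kernel with soft-dirty support enabled.

	/* soft-dirty demo: clear the bits, touch a page, observe bit 55 flip */
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/types.h>
	#include <unistd.h>

	/* Return 1 if the page containing addr is soft-dirty, 0 if not, -1 on error. */
	static int page_soft_dirty(void *addr)
	{
		uint64_t entry;
		long psize = sysconf(_SC_PAGESIZE);
		off_t off = ((uintptr_t)addr / psize) * sizeof(entry);
		int fd = open("/proc/self/pagemap", O_RDONLY);

		if (fd < 0)
			return -1;
		if (pread(fd, &entry, sizeof(entry), off) != sizeof(entry)) {
			close(fd);
			return -1;
		}
		close(fd);
		return (entry >> 55) & 1;	/* pagemap bit 55: soft-dirty */
	}

	int main(void)
	{
		long psize = sysconf(_SC_PAGESIZE);
		char *buf = aligned_alloc(psize, psize);
		int fd = open("/proc/self/clear_refs", O_WRONLY);

		memset(buf, 0, psize);		/* populate the page */
		write(fd, "4", 1);		/* "4": clear soft-dirty bits for this task */
		close(fd);

		printf("after clear: %d\n", page_soft_dirty(buf));	/* expected: 0 */
		buf[0] = 1;			/* write fault marks the PTE soft-dirty again */
		printf("after write: %d\n", page_soft_dirty(buf));	/* expected: 1 */
		return 0;
	}

On MIPS with this change applied, the second write is what exercises the new pte_mkdirty() path, which now sets _PAGE_SOFT_DIRTY alongside _PAGE_MODIFIED.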