| author | Nick Piggin <npiggin@suse.de> | 2008-07-30 15:23:13 +1000 |
|---|---|---|
| committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2008-07-30 15:26:54 +1000 |
| commit | ce0ad7f0952581ba75ab6aee55bb1ed9bb22cf4f (patch) | |
| tree | bf2a8845a031cb685219db2ddcb3d296b4a9ffab /include | |
| parent | 7d2a175b9bf6e9422bebe95130a3c79a25ff4602 (diff) | |
powerpc/mm: Lockless get_user_pages_fast() for 64-bit (v3)
Implement lockless get_user_pages_fast for 64-bit powerpc.
Page table existence is guaranteed with RCU, and speculative page references
are used to take a reference on the pages without any prior guarantee that
they will continue to exist.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
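For context, the lockless walk the message describes follows a snapshot/pin/recheck pattern. The sketch below is a minimal illustration under assumed names (`gup_pte_range_sketch` and its signature are hypothetical, not the actual powerpc implementation, which also handles hugepages and disables interrupts around the walk so RCU-deferred page-table freeing cannot complete underneath it):

```c
/*
 * Minimal sketch of the lockless pattern described above; names and
 * details are illustrative, not the actual powerpc implementation.
 * Assumed to run with interrupts disabled, which blocks the RCU
 * grace period that would free the page tables being walked.
 */
static int gup_pte_range_sketch(pte_t *ptep, unsigned long addr,
				unsigned long end, int write,
				struct page **pages, int *nr)
{
	do {
		pte_t pte = *ptep;	/* snapshot; may go stale */
		struct page *page;

		if (!pte_present(pte) || (write && !pte_write(pte)))
			return 0;
		page = pte_page(pte);

		/* Speculative ref: fails if the page is being freed. */
		if (!page_cache_get_speculative(page))
			return 0;

		/* Recheck: if the PTE changed, we pinned the wrong page. */
		if (unlikely(!pte_same(pte, *ptep))) {
			put_page(page);
			return 0;
		}
		pages[(*nr)++] = page;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}
```

The key property is that the speculative reference (atomic_add_unless on the refcount, refusing to go from 0) can never resurrect a page whose count has already dropped to zero, so a stale PTE can never hand out a reference to freed memory.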
Diffstat (limited to 'include')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | include/asm-powerpc/pgtable-ppc64.h | 2 |
| -rw-r--r-- | include/linux/pagemap.h | 23 |
2 files changed, 25 insertions, 0 deletions
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
index 5fc78c0be302..74c6f380b805 100644
--- a/include/asm-powerpc/pgtable-ppc64.h
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -461,6 +461,8 @@ void pgtable_cache_init(void);
 	return pt;
 }
 
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long address);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index a39b38ccdc97..69ed3cb1197a 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -143,6 +143,29 @@ static inline int page_cache_get_speculative(struct page *page)
 	return 1;
 }
 
+/*
+ * Same as above, but add instead of inc (could just be merged)
+ */
+static inline int page_cache_add_speculative(struct page *page, int count)
+{
+	VM_BUG_ON(in_interrupt());
+
+#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
+# ifdef CONFIG_PREEMPT
+	VM_BUG_ON(!in_atomic());
+# endif
+	VM_BUG_ON(page_count(page) == 0);
+	atomic_add(count, &page->_count);
+
+#else
+	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
+		return 0;
+#endif
+	VM_BUG_ON(PageCompound(page) && page != compound_head(page));
+
+	return 1;
+}
+
 static inline int page_freeze_refs(struct page *page, int count)
 {
 	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
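The new page_cache_add_speculative() is the batched form of the function above it: a walker that has found several consecutive PTEs mapping subpages of one compound page can take all the references with a single atomic add on the head page. A hedged sketch of such a caller (the helper and its locals are illustrative, not part of this diff):

```c
/*
 * Illustrative caller, not from the patch: a huge-page walk has
 * stored 'refs' subpages into pages[] and now takes all of the
 * references at once on the compound head.
 */
static int grab_hugepage_refs(struct page *head, int refs, int *nr)
{
	if (!page_cache_add_speculative(head, refs)) {
		/* Page was being freed; undo bookkeeping, fall back. */
		*nr -= refs;
		return 0;
	}
	return 1;
}
```

As the comment in the patch notes, the two helpers could be merged, since page_cache_get_speculative() is just the count == 1 case.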