| author | Paul Mundt <lethal@linux-sh.org> | 2008-09-20 20:21:33 +0900 |
|---|---|---|
| committer | Paul Mundt <lethal@linux-sh.org> | 2008-09-20 20:21:33 +0900 |
| commit | c15c5f8c2bf0b00d036c5c6b67264764a6e5dffc (patch) | |
| tree | fb4aca715702ba76edb95424fd2a573fa3ed6a28 /arch/sh/mm/init.c | |
| parent | b817f7e020958c8f79842076c137daa6f72eb366 (diff) | |
sh: Support kernel stacks smaller than a page.
This follows the powerpc commit f6a616800e68b61807d0f7bb0d5dc70665ef8046
('[POWERPC] Fix kernel stack allocation alignment').
SH has traditionally forced the thread order to track the page size, so
there was never a situation in which the same SLUB alignment bug could be
triggered. Regardless, using stacks larger than 8kB just because the page
size is larger is overkill, so we switch to slab allocations there, as per
the powerpc change; the sketch after the sign-off works through the
arithmetic.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
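
To make the "overkill" concrete, here is a back-of-the-envelope sketch.
The 64kB/8kB figures are illustrative assumptions (a CONFIG_PAGE_SIZE_64KB
build with 8kB stacks), not values taken from this patch:

```c
/*
 * Back-of-the-envelope sketch (plain userspace C, hypothetical numbers):
 * with 64kB pages and 8kB stacks, page-order stack allocation burns a
 * whole page per thread, while a THREAD_SIZE-aligned slab cache packs
 * several stacks into each page.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long page_size   = 64 * 1024;	/* PAGE_SHIFT = 16 */
	const unsigned long thread_size = 8 * 1024;	/* THREAD_SHIFT = 13 */

	/* Page-based allocation: one stack per (whole) page. */
	unsigned long wasted = page_size - thread_size;

	/* Slab-based allocation: stacks packed back to back in a page. */
	unsigned long per_page = page_size / thread_size;

	printf("page-order stacks waste %lu kB per thread\n", wasted / 1024);
	printf("slab-backed stacks fit %lu per 64kB page\n", per_page);
	return 0;
}
```

With page-order stacks each thread pays for 64kB and wastes 56kB; with the
slab cache eight 8kB stacks share a single page.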
Diffstat (limited to 'arch/sh/mm/init.c')
| -rw-r--r-- | arch/sh/mm/init.c | 29 |
1 file changed, 29 insertions, 0 deletions
```diff
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 31211bfdc6d8..2a53943924b2 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -265,6 +265,35 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
+#if THREAD_SHIFT < PAGE_SHIFT
+static struct kmem_cache *thread_info_cache;
+
+struct thread_info *alloc_thread_info(struct task_struct *tsk)
+{
+	struct thread_info *ti;
+
+	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
+	if (unlikely(ti == NULL))
+		return NULL;
+#ifdef CONFIG_DEBUG_STACK_USAGE
+	memset(ti, 0, THREAD_SIZE);
+#endif
+	return ti;
+}
+
+void free_thread_info(struct thread_info *ti)
+{
+	kmem_cache_free(thread_info_cache, ti);
+}
+
+void thread_info_cache_init(void)
+{
+	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+					      THREAD_SIZE, 0, NULL);
+	BUG_ON(thread_info_cache == NULL);
+}
+#endif /* THREAD_SHIFT < PAGE_SHIFT */
+
 #ifdef CONFIG_MEMORY_HOTPLUG
 int arch_add_memory(int nid, u64 start, u64 size)
 {
```
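
For context on why this override takes effect at all, here is a simplified
reconstruction of the 2.6.27-era generic code, not part of this patch:
kernel/fork.c only falls back to whole-page stack allocation when the
architecture does not define __HAVE_ARCH_THREAD_INFO_ALLOCATOR, and
start_kernel() reaches thread_info_cache_init() through a __weak empty
default, so only architectures wanting a slab-backed cache override it.
Roughly:

```c
/*
 * Context, not part of the patch: simplified sketch of the generic
 * fallback in kernel/fork.c of this era.  Unless the architecture
 * defines __HAVE_ARCH_THREAD_INFO_ALLOCATOR and supplies its own
 * alloc_thread_info()/free_thread_info() (as this patch does for SH),
 * every stack comes from whole pages, i.e. at least PAGE_SIZE each.
 */
#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
static struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	/* Rounds up to a full page even when THREAD_SIZE < PAGE_SIZE. */
	return (struct thread_info *)__get_free_pages(GFP_KERNEL,
						      THREAD_SIZE_ORDER);
}

static void free_thread_info(struct thread_info *ti)
{
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif
```

The patch above supplies the out-of-line overrides; the matching
__HAVE_ARCH_THREAD_INFO_ALLOCATOR define presumably lives in SH's
asm/thread_info.h, which this diffstat-limited view does not show.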