author    Jeremy Kerr <jk@ozlabs.org>    2007-12-05 13:49:31 +1100
committer Arnd Bergmann <arnd@arndb.de>  2007-12-19 01:00:05 +0100
commit    684bd614015188561197342fd336292e9e2ce196 (patch)
tree      89307cd386307b6bdfa9c65165a8d0fc95eb77d5 /include/asm-powerpc
parent    f6eb7d7ffef3e2fa40b0161c30486cb87203758d (diff)
[POWERPC] cell: handle SPE kernel mappings that cross segment boundaries
Currently, we have a possibility that the SLBs set up during context switch
don't cover the entirety of the necessary lscsa and code regions, if these
regions cross a segment boundary.

This change checks the start and end of each region, and inserts an SLB entry
for each, if unique. We also remove the assumption that spu_save_code and
spu_restore_code reside in the same segment, by using the specific code array
for save and restore.

Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
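For illustration, a minimal sketch of the boundary check described above. This
is not the code from this commit: the helper spu_insert_slb_entry(), the
NR_KERNEL_SLBS limit, and the 256MB segment layout are assumptions made for the
sketch.

/*
 * Sketch: ensure every segment touched by the lscsa and code regions gets
 * an SLB entry, even when a region crosses a 256MB segment boundary.
 * spu_insert_slb_entry() and NR_KERNEL_SLBS are hypothetical names.
 */
#define SKETCH_SID_SHIFT	28	/* 256MB segments */
#define NR_KERNEL_SLBS		4

static void setup_kernel_slbs_sketch(struct spu *spu,
		void *lscsa, int lscsa_size, void *code, int code_size)
{
	unsigned long addrs[] = {
		/* check both ends of each region */
		(unsigned long)lscsa,
		(unsigned long)lscsa + lscsa_size - 1,
		(unsigned long)code,
		(unsigned long)code + code_size - 1,
	};
	unsigned long esids[NR_KERNEL_SLBS];
	int i, j, nr_esids = 0;

	for (i = 0; i < ARRAY_SIZE(addrs); i++) {
		unsigned long esid = addrs[i] >> SKETCH_SID_SHIFT;
		int seen = 0;

		/* only insert an SLB entry for a segment we haven't seen yet */
		for (j = 0; j < nr_esids; j++)
			if (esids[j] == esid)
				seen = 1;

		if (!seen && nr_esids < NR_KERNEL_SLBS) {
			esids[nr_esids++] = esid;
			spu_insert_slb_entry(spu, esid << SKETCH_SID_SHIFT);
		}
	}
}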
Diffstat (limited to 'include/asm-powerpc')
-rw-r--r--  include/asm-powerpc/spu.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
index 3308ed4933e0..314aad357d98 100644
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -201,8 +201,8 @@ int spu_irq_class_0_bottom(struct spu *spu);
 int spu_irq_class_1_bottom(struct spu *spu);
 void spu_irq_setaffinity(struct spu *spu, int cpu);
 
-void spu_setup_kernel_slbs(struct spu *spu,
-		struct spu_lscsa *lscsa, void *code);
+void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
+		void *code, int code_size);
 
 #ifdef CONFIG_KEXEC
 void crash_register_spus(struct list_head *list);
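With the extra code_size argument, a caller can pass the size of the specific
save or restore code array instead of assuming both arrays share one segment.
A hypothetical call site is sketched below; 'csa' is assumed to be a
context-save area holding the lscsa pointer, and is not taken from this diff.

	/* sketch of a caller under the new prototype */
	spu_setup_kernel_slbs(spu, csa->lscsa, &spu_save_code[0],
			      sizeof(spu_save_code));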