author	Sven Schnelle <svens@linux.ibm.com>	2022-04-06 08:35:26 +0200
committer	Heiko Carstens <hca@linux.ibm.com>	2022-04-25 13:54:14 +0200
commit	57761da4dc5cd60bed2c81ba0edb7495c3c740b8 (patch)
tree	abbdfb74d74e91abed372c2ad22f3ca29d7061d2
parent	f2f47d0ef72c30622e62471903ea19446ea79ee2 (diff)
s390/vdso: move vdso mapping to its own function
This is a preparation patch for adding vdso randomization to s390. It adds
a function vdso_size(), which will be used later when calculating the
STACK_TOP value. It also moves the vdso mapping into a new function
map_vdso(), to keep the code similar to other architectures.

Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
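For context on why map_vdso() now takes an address argument, the sketch below
shows one way a follow-up patch could feed it a randomized base together with
vdso_size(). It is not taken from this commit; the placement window above the
stack is a made-up placeholder, only vdso_size() and map_vdso() come from the
patch itself.

	/*
	 * Illustrative sketch only, not part of this commit: randomize the
	 * vdso base within a small window above the process stack and hand
	 * the result to map_vdso(). The window size is a placeholder.
	 */
	int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
	{
		unsigned long addr = 0;		/* 0: let get_unmapped_area() choose */
		unsigned long size = vdso_size();

		if (current->flags & PF_RANDOMIZE)
			addr = PAGE_ALIGN(current->mm->start_stack) +
			       (get_random_u32() % 16) * PAGE_SIZE;
		return map_vdso(addr, size);
	}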
 arch/s390/include/asm/processor.h |  1 +
 arch/s390/kernel/vdso.c           | 24 +++++++++++++++++++-----
 2 files changed, 20 insertions(+), 5 deletions(-)
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index ff1e25d515a8..a3ab8cbcc5e4 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -83,6 +83,7 @@ void cpu_detect_mhz_feature(void);
extern const struct seq_operations cpuinfo_op;
extern void execve_tail(void);
extern void __bpon(void);
+unsigned long vdso_size(void);
/*
* User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 99694260cac9..22cb727d5821 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -160,10 +160,9 @@ int vdso_getcpu_init(void)
}
early_initcall(vdso_getcpu_init); /* Must be called before SMP init */
-int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len)
{
- unsigned long vdso_text_len, vdso_mapping_len;
- unsigned long vvar_start, vdso_text_start;
+ unsigned long vvar_start, vdso_text_start, vdso_text_len;
struct vm_special_mapping *vdso_mapping;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -180,8 +179,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
vdso_text_len = vdso64_end - vdso64_start;
vdso_mapping = &vdso64_mapping;
}
- vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;
- vvar_start = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
+ vvar_start = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0);
rc = vvar_start;
if (IS_ERR_VALUE(vvar_start))
goto out;
@@ -210,6 +208,22 @@ out:
return rc;
}
+unsigned long vdso_size(void)
+{
+ unsigned long size = VVAR_NR_PAGES * PAGE_SIZE;
+
+ if (is_compat_task())
+ size += vdso32_end - vdso32_start;
+ else
+ size += vdso64_end - vdso64_start;
+ return PAGE_ALIGN(size);
+}
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ return map_vdso(0, vdso_size());
+}
+
static struct page ** __init vdso_setup_pages(void *start, void *end)
{
int pages = (end - start) >> PAGE_SHIFT;