From c78d0c7484f0a8fc4da0047b81900d00cd26488b Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Wed, 4 Aug 2021 13:40:31 +0200
Subject: s390: rename dma section to amode31

The dma section name is confusing, since the code which resides within
that section has nothing to do with direct memory access. The actual
limitation is that the code has to run in 31-bit addressing mode, and
therefore has to reside below 2GB; the name was chosen only because
ZONE_DMA covers the same region. To reduce confusion, rename the section
to amode31, which describes better what this is about.

Note: this also changes the vmcoreinfo strings
- SDMA=... gets renamed to SAMODE31=...
- EDMA=... gets renamed to EAMODE31=...

Acked-by: Vasily Gorbik
Reviewed-by: Alexander Egorenkov
Signed-off-by: Heiko Carstens
---
 arch/s390/hypfs/hypfs_diag0c.c   |   2 +-
 arch/s390/include/asm/diag.h     |  19 +++--
 arch/s390/include/asm/extable.h  |   4 +-
 arch/s390/include/asm/linkage.h  |   4 +-
 arch/s390/include/asm/sections.h |   4 +-
 arch/s390/kernel/Makefile        |   2 +-
 arch/s390/kernel/diag.c          |  26 +++----
 arch/s390/kernel/entry.h         |  14 ++--
 arch/s390/kernel/ipl.c           |   2 +-
 arch/s390/kernel/machine_kexec.c |   4 +-
 arch/s390/kernel/setup.c         |  86 ++++++++++-----------
 arch/s390/kernel/smp.c           |   2 +-
 arch/s390/kernel/text_amode31.S  | 158 +++++++++++++++++++++++++++++++++++++++
 arch/s390/kernel/text_dma.S      | 158 ---------------------------------------
 arch/s390/kernel/traps.c         |   2 +-
 arch/s390/kernel/vmlinux.lds.S   |  38 +++++-----
 arch/s390/mm/fault.c             |   4 +-
 arch/s390/mm/vmem.c              |   2 +-
 18 files changed, 267 insertions(+), 264 deletions(-)
 create mode 100644 arch/s390/kernel/text_amode31.S
 delete mode 100644 arch/s390/kernel/text_dma.S

diff --git a/arch/s390/hypfs/hypfs_diag0c.c b/arch/s390/hypfs/hypfs_diag0c.c
index 5cd3d8478ac1..9a2786079e3a 100644
--- a/arch/s390/hypfs/hypfs_diag0c.c
+++ b/arch/s390/hypfs/hypfs_diag0c.c
@@ -21,7 +21,7 @@ static void diag0c_fn(void *data)
 {
 	diag_stat_inc(DIAG_STAT_X00C);
-	diag_dma_ops.diag0c(((void **) data)[smp_processor_id()]);
+	diag_amode31_ops.diag0c(((void **)data)[smp_processor_id()]);
 }
 
 /*
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index 0e943d753fcc..b3a8cb4daed6 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -309,7 +309,10 @@ int diag26c(void *req, void *resp, enum diag26c_sc subcode);
 
 struct hypfs_diag0c_entry;
 
-/* This struct must contain only pointers/references into the text DMA section. */
+/*
+ * This structure must contain only pointers/references into
+ * the AMODE31 text section.
+ */
 struct diag_ops {
 	int (*diag210)(struct diag210 *addr);
 	int (*diag26c)(void *req, void *resp, enum diag26c_sc subcode);
@@ -318,13 +321,13 @@ struct diag_ops {
 	void (*diag308_reset)(void);
 };
 
-extern struct diag_ops diag_dma_ops;
-extern struct diag210 *__diag210_tmp_dma;
+extern struct diag_ops diag_amode31_ops;
+extern struct diag210 *__diag210_tmp_amode31;
 
-int _diag210_dma(struct diag210 *addr);
-int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode);
-int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode);
-void _diag0c_dma(struct hypfs_diag0c_entry *entry);
-void _diag308_reset_dma(void);
+int _diag210_amode31(struct diag210 *addr);
+int _diag26c_amode31(void *req, void *resp, enum diag26c_sc subcode);
+int _diag14_amode31(unsigned long rx, unsigned long ry1, unsigned long subcode);
+void _diag0c_amode31(struct hypfs_diag0c_entry *entry);
+void _diag308_reset_amode31(void);
 
 #endif /* _ASM_S390_DIAG_H */
diff --git a/arch/s390/include/asm/extable.h b/arch/s390/include/asm/extable.h
index 3beb294fd553..16dc57dd90b3 100644
--- a/arch/s390/include/asm/extable.h
+++ b/arch/s390/include/asm/extable.h
@@ -28,8 +28,8 @@ struct exception_table_entry
 	long handler;
 };
 
-extern struct exception_table_entry *__start_dma_ex_table;
-extern struct exception_table_entry *__stop_dma_ex_table;
+extern struct exception_table_entry *__start_amode31_ex_table;
+extern struct exception_table_entry *__stop_amode31_ex_table;
 
 const struct exception_table_entry *s390_search_extables(unsigned long addr);
 
diff --git a/arch/s390/include/asm/linkage.h b/arch/s390/include/asm/linkage.h
index 24e8fed150cf..1ffea75b8ebc 100644
--- a/arch/s390/include/asm/linkage.h
+++ b/arch/s390/include/asm/linkage.h
@@ -22,7 +22,7 @@
 #define EX_TABLE(_fault, _target)	\
 	__EX_TABLE(__ex_table, _fault, _target)
 
-#define EX_TABLE_DMA(_fault, _target)	\
-	__EX_TABLE(.dma.ex_table, _fault, _target)
+#define EX_TABLE_AMODE31(_fault, _target)	\
+	__EX_TABLE(.amode31.ex_table, _fault, _target)
 
 #endif
diff --git a/arch/s390/include/asm/sections.h b/arch/s390/include/asm/sections.h
index 0c2151451ba5..85881dd48022 100644
--- a/arch/s390/include/asm/sections.h
+++ b/arch/s390/include/asm/sections.h
@@ -35,7 +35,7 @@ static inline int arch_is_kernel_initmem_freed(unsigned long addr)
  */
 #define __bootdata_preserved(var) __section(".boot.preserved.data." #var) var
 
-extern unsigned long __sdma, __edma;
-extern unsigned long __stext_dma, __etext_dma;
+extern unsigned long __samode31, __eamode31;
+extern unsigned long __stext_amode31, __etext_amode31;
 
 #endif
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 389a3d7690c4..80f500ffb55c 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -40,7 +40,7 @@ obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o
 obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
 obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
 obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
-obj-y += smp.o text_dma.o
+obj-y += smp.o text_amode31.o
 
 extra-y += head64.o vmlinux.lds
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index e212037cd473..76a656b2146f 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -51,16 +51,16 @@ static const struct diag_desc diag_map[NR_DIAG_STAT] = {
 	[DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" },
 };
 
-struct diag_ops __dma_ref diag_dma_ops = {
-	.diag210 = _diag210_dma,
-	.diag26c = _diag26c_dma,
-	.diag14 = _diag14_dma,
-	.diag0c = _diag0c_dma,
-	.diag308_reset = _diag308_reset_dma
+struct diag_ops __amode31_ref diag_amode31_ops = {
+	.diag210 = _diag210_amode31,
+	.diag26c = _diag26c_amode31,
+	.diag14 = _diag14_amode31,
+	.diag0c = _diag0c_amode31,
+	.diag308_reset = _diag308_reset_amode31
 };
 
-static struct diag210 _diag210_tmp_dma __section(".dma.data");
-struct diag210 __dma_ref *__diag210_tmp_dma = &_diag210_tmp_dma;
+static struct diag210 _diag210_tmp_amode31 __section(".amode31.data");
+struct diag210 __amode31_ref *__diag210_tmp_amode31 = &_diag210_tmp_amode31;
 
 static int show_diag_stat(struct seq_file *m, void *v)
 {
@@ -144,7 +144,7 @@ EXPORT_SYMBOL(diag_stat_inc_norecursion);
 int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
 {
 	diag_stat_inc(DIAG_STAT_X014);
-	return diag_dma_ops.diag14(rx, ry1, subcode);
+	return diag_amode31_ops.diag14(rx, ry1, subcode);
 }
 EXPORT_SYMBOL(diag14);
 
@@ -181,12 +181,12 @@ int diag210(struct diag210 *addr)
 	int ccode;
 
 	spin_lock_irqsave(&diag210_lock, flags);
-	*__diag210_tmp_dma = *addr;
+	*__diag210_tmp_amode31 = *addr;
 
 	diag_stat_inc(DIAG_STAT_X210);
-	ccode = diag_dma_ops.diag210(__diag210_tmp_dma);
+	ccode = diag_amode31_ops.diag210(__diag210_tmp_amode31);
 
-	*addr = *__diag210_tmp_dma;
+	*addr = *__diag210_tmp_amode31;
 	spin_unlock_irqrestore(&diag210_lock, flags);
 
 	return ccode;
@@ -214,6 +214,6 @@ EXPORT_SYMBOL(diag224);
 int diag26c(void *req, void *resp, enum diag26c_sc subcode)
 {
 	diag_stat_inc(DIAG_STAT_X26C);
-	return diag_dma_ops.diag26c(req, resp, subcode);
+	return diag_amode31_ops.diag26c(req, resp, subcode);
 }
 EXPORT_SYMBOL(diag26c);
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 80ef613815df..a41ddd462594 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -64,13 +64,13 @@ void stack_free(unsigned long stack);
 
 extern char kprobes_insn_page[];
 
-extern char _sdma[], _edma[];
-extern char _stext_dma[], _etext_dma[];
-extern struct exception_table_entry _start_dma_ex_table[];
-extern struct exception_table_entry _stop_dma_ex_table[];
+extern char _samode31[], _eamode31[];
+extern char _stext_amode31[], _etext_amode31[];
+extern struct exception_table_entry _start_amode31_ex_table[];
+extern struct exception_table_entry _stop_amode31_ex_table[];
 
-#define __dma_data __section(".dma.data")
-#define __dma_ref __section(".dma.refs")
-extern long _start_dma_refs[], _end_dma_refs[];
+#define __amode31_data __section(".amode31.data")
+#define __amode31_ref __section(".amode31.refs")
+extern long _start_amode31_refs[], _end_amode31_refs[];
 
 #endif /* _ENTRY_H */
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 50e2c21e0ec9..546d729292cf 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2082,7 +2082,7 @@ void s390_reset_system(void)
 
 	/* Disable lowcore protection */
 	__ctl_clear_bit(0, 28);
-	diag_dma_ops.diag308_reset();
+	diag_amode31_ops.diag308_reset();
 }
 
 #ifdef CONFIG_KEXEC_FILE
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 1005a6935fbe..0b2f14da830f 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -224,8 +224,8 @@ void arch_crash_save_vmcoreinfo(void)
 	VMCOREINFO_SYMBOL(lowcore_ptr);
 	VMCOREINFO_SYMBOL(high_memory);
 	VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
-	vmcoreinfo_append_str("SDMA=%lx\n", __sdma);
-	vmcoreinfo_append_str("EDMA=%lx\n", __edma);
+	vmcoreinfo_append_str("SAMODE31=%lx\n", __samode31);
+	vmcoreinfo_append_str("EAMODE31=%lx\n", __eamode31);
 	vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
 	mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
 }
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 957a94619795..3364ebfae215 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -92,36 +92,36 @@ EXPORT_SYMBOL(console_irq);
 
 /*
  * Some code and data needs to stay below 2 GB, even when the kernel would be
  * relocated above 2 GB, because it has to use 31 bit addresses.
- * Such code and data is part of the .dma section.
+ * Such code and data is part of the .amode31 section.
  */
-unsigned long __dma_ref __sdma = __pa(&_sdma);
-unsigned long __dma_ref __edma = __pa(&_edma);
-unsigned long __dma_ref __stext_dma = __pa(&_stext_dma);
-unsigned long __dma_ref __etext_dma = __pa(&_etext_dma);
-struct exception_table_entry __dma_ref *__start_dma_ex_table = _start_dma_ex_table;
-struct exception_table_entry __dma_ref *__stop_dma_ex_table = _stop_dma_ex_table;
+unsigned long __amode31_ref __samode31 = __pa(&_samode31);
+unsigned long __amode31_ref __eamode31 = __pa(&_eamode31);
+unsigned long __amode31_ref __stext_amode31 = __pa(&_stext_amode31);
+unsigned long __amode31_ref __etext_amode31 = __pa(&_etext_amode31);
+struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table;
+struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table;
 
 /*
  * Control registers CR2, CR5 and CR15 are initialized with addresses
- * of tables that must be placed below 2G which is handled by the DMA
+ * of tables that must be placed below 2G which is handled by the AMODE31
  * sections.
- * Because the DMA sections are relocated below 2G at startup,
+ * Because the AMODE31 sections are relocated below 2G at startup,
  * the content of control registers CR2, CR5 and CR15 must be updated
  * with new addresses after the relocation. The initial initialization of
- * control registers occurs in head64.S and then gets updated again after DMA
- * relocation. We must access the relevant DMA tables indirectly via
- * pointers placed in the .dma.refs linker section. Those pointers get
- * updated automatically during DMA relocation and always contain a valid
- * address within DMA sections.
+ * control registers occurs in head64.S and then gets updated again after AMODE31
+ * relocation. We must access the relevant AMODE31 tables indirectly via
+ * pointers placed in the .amode31.refs linker section. Those pointers get
+ * updated automatically during AMODE31 relocation and always contain a valid
+ * address within AMODE31 sections.
  */
-static __dma_data u32 __ctl_duct_dma[16] __aligned(64);
+static __amode31_data u32 __ctl_duct_amode31[16] __aligned(64);
 
-static __dma_data u64 __ctl_aste_dma[8] __aligned(64) = {
+static __amode31_data u64 __ctl_aste_amode31[8] __aligned(64) = {
 	[1] = 0xffffffffffffffff
 };
 
-static __dma_data u32 __ctl_duald_dma[32] __aligned(128) = {
+static __amode31_data u32 __ctl_duald_amode31[32] __aligned(128) = {
 	0x80000000, 0, 0, 0,
 	0x80000000, 0, 0, 0,
 	0x80000000, 0, 0, 0,
@@ -132,15 +132,15 @@ static __dma_data u32 __ctl_duald_dma[32] __aligned(128) = {
 	0x80000000, 0, 0, 0
 };
 
-static __dma_data u32 __ctl_linkage_stack_dma[8] __aligned(64) = {
+static __amode31_data u32 __ctl_linkage_stack_amode31[8] __aligned(64) = {
 	0, 0, 0x89000000, 0,
 	0, 0, 0x8a000000, 0
 };
 
-static u64 __dma_ref *__ctl_aste = __ctl_aste_dma;
-static u32 __dma_ref *__ctl_duald = __ctl_duald_dma;
-static u32 __dma_ref *__ctl_linkage_stack = __ctl_linkage_stack_dma;
-static u32 __dma_ref *__ctl_duct = __ctl_duct_dma;
+static u64 __amode31_ref *__ctl_aste = __ctl_aste_amode31;
+static u32 __amode31_ref *__ctl_duald = __ctl_duald_amode31;
+static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31;
+static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;
 
 int __bootdata(noexec_disabled);
 unsigned long __bootdata(ident_map_size);
@@ -814,31 +814,31 @@ static void __init setup_memory(void)
 	memblock_enforce_memory_limit(memblock_end_of_DRAM());
 }
 
-static void __init relocate_dma_section(void)
+static void __init relocate_amode31_section(void)
 {
-	unsigned long dma_addr, dma_size;
-	long dma_offset;
+	unsigned long amode31_addr, amode31_size;
+	long amode31_offset;
 	long *ptr;
 
-	/* Allocate a new DMA capable memory region */
-	dma_size = __edma - __sdma;
-	pr_info("Relocating DMA section of size 0x%08lx\n", dma_size);
-	dma_addr = (unsigned long)memblock_alloc_low(dma_size, PAGE_SIZE);
-	if (!dma_addr)
-		panic("Failed to allocate memory for DMA section\n");
-	dma_offset = dma_addr - __sdma;
-
-	/* Move original DMA section to the new one */
-	memmove((void *)dma_addr, (void *)__sdma, dma_size);
-	/* Zero out the old DMA section to catch invalid accesses within it */
-	memset((void *)__sdma, 0, dma_size);
-
-	/* Update all DMA region references */
-	for (ptr = _start_dma_refs; ptr != _end_dma_refs; ptr++)
-		*ptr += dma_offset;
+	/* Allocate a new AMODE31 capable memory region */
+	amode31_size = __eamode31 - __samode31;
+	pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);
+	amode31_addr = (unsigned long)memblock_alloc_low(amode31_size, PAGE_SIZE);
+	if (!amode31_addr)
+		panic("Failed to allocate memory for AMODE31 section\n");
+	amode31_offset = amode31_addr - __samode31;
+
+	/* Move original AMODE31 section to the new one */
+	memmove((void *)amode31_addr, (void *)__samode31, amode31_size);
+	/* Zero out the old AMODE31 section to catch invalid accesses within it */
+	memset((void *)__samode31, 0, amode31_size);
+
+	/* Update all AMODE31 region references */
+	for (ptr = _start_amode31_refs; ptr != _end_amode31_refs; ptr++)
+		*ptr += amode31_offset;
 }
 
-/* This must be called after DMA relocation */
+/* This must be called after AMODE31 relocation */
 static void __init setup_cr(void)
 {
 	union ctlreg2 cr2;
@@ -1002,7 +1002,7 @@ void __init setup_arch(char **cmdline_p)
 
 	free_mem_detect_info();
 
-	relocate_dma_section();
+	relocate_amode31_section();
 	setup_cr();
 
 	setup_uv();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index a96729a89eec..09dc13a8d390 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -709,7 +709,7 @@ void __init smp_save_dump_cpus(void)
 			smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
 	}
 	memblock_free(page, PAGE_SIZE);
-	diag_dma_ops.diag308_reset();
+	diag_amode31_ops.diag308_reset();
 	pcpu_set_smt(0);
 }
 #endif /* CONFIG_CRASH_DUMP */
diff --git a/arch/s390/kernel/text_amode31.S b/arch/s390/kernel/text_amode31.S
new file mode 100644
index 000000000000..672a5f63c92e
--- /dev/null
+++ b/arch/s390/kernel/text_amode31.S
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Code that needs to run below 2 GB.
+ *
+ * Copyright IBM Corp. 2019
+ */
+
+#include <linux/linkage.h>
+#include <asm/errno.h>
+#include <asm/sigp.h>
+
+	.section .amode31.text,"ax"
+/*
+ * Simplified version of expoline thunk. The normal thunks can not be used here,
+ * because they might be more than 2 GB away, and not reachable by the relative
+ * branch. No comdat, exrl, etc. optimizations used here, because it only
+ * affects a few functions that are not performance-relevant.
+ */
+	.macro BR_EX_AMODE31_r14
+	larl	%r1,0f
+	ex	0,0(%r1)
+	j	.
+0:	br	%r14
+	.endm
+
+/*
+ * int _diag14_amode31(unsigned long rx, unsigned long ry1, unsigned long subcode)
+ */
+ENTRY(_diag14_amode31)
+	lgr	%r1,%r2
+	lgr	%r2,%r3
+	lgr	%r3,%r4
+	lhi	%r5,-EIO
+	sam31
+	diag	%r1,%r2,0x14
+.Ldiag14_ex:
+	ipm	%r5
+	srl	%r5,28
+.Ldiag14_fault:
+	sam64
+	lgfr	%r2,%r5
+	BR_EX_AMODE31_r14
+	EX_TABLE_AMODE31(.Ldiag14_ex, .Ldiag14_fault)
+ENDPROC(_diag14_amode31)
+
+/*
+ * int _diag210_amode31(struct diag210 *addr)
+ */
+ENTRY(_diag210_amode31)
+	lgr	%r1,%r2
+	lhi	%r2,-1
+	sam31
+	diag	%r1,%r0,0x210
+.Ldiag210_ex:
+	ipm	%r2
+	srl	%r2,28
+.Ldiag210_fault:
+	sam64
+	lgfr	%r2,%r2
+	BR_EX_AMODE31_r14
+	EX_TABLE_AMODE31(.Ldiag210_ex, .Ldiag210_fault)
+ENDPROC(_diag210_amode31)
+
+/*
+ * int _diag26c_amode31(void *req, void *resp, enum diag26c_sc subcode)
+ */
+ENTRY(_diag26c_amode31)
+	lghi	%r5,-EOPNOTSUPP
+	sam31
+	diag	%r2,%r4,0x26c
+.Ldiag26c_ex:
+	sam64
+	lgfr	%r2,%r5
+	BR_EX_AMODE31_r14
+	EX_TABLE_AMODE31(.Ldiag26c_ex, .Ldiag26c_ex)
+ENDPROC(_diag26c_amode31)
+
+/*
+ * void _diag0c_amode31(struct hypfs_diag0c_entry *entry)
+ */
+ENTRY(_diag0c_amode31)
+	sam31
+	diag	%r2,%r2,0x0c
+	sam64
+	BR_EX_AMODE31_r14
+ENDPROC(_diag0c_amode31)
+
+/*
+ * void _diag308_reset_amode31(void)
+ *
+ * Calls diag 308 subcode 1 and continues execution
+ */
+ENTRY(_diag308_reset_amode31)
+	larl	%r4,.Lctlregs		# Save control registers
+	stctg	%c0,%c15,0(%r4)
+	lg	%r2,0(%r4)		# Disable lowcore protection
+	nilh	%r2,0xefff
+	larl	%r4,.Lctlreg0
+	stg	%r2,0(%r4)
+	lctlg	%c0,%c0,0(%r4)
+	larl	%r4,.Lfpctl		# Floating point control register
+	stfpc	0(%r4)
+	larl	%r4,.Lprefix		# Save prefix register
+	stpx	0(%r4)
+	larl	%r4,.Lprefix_zero	# Set prefix register to 0
+	spx	0(%r4)
+	larl	%r4,.Lcontinue_psw	# Save PSW flags
+	epsw	%r2,%r3
+	stm	%r2,%r3,0(%r4)
+	larl	%r4,restart_part2	# Setup restart PSW at absolute 0
+	larl	%r3,.Lrestart_diag308_psw
+	og	%r4,0(%r3)		# Save PSW
+	lghi	%r3,0
+	sturg	%r4,%r3			# Use sturg, because of large pages
+	lghi	%r1,1
+	lghi	%r0,0
+	diag	%r0,%r1,0x308
+restart_part2:
+	lhi	%r0,0			# Load r0 with zero
+	lhi	%r1,2			# Use mode 2 = ESAME (dump)
+	sigp	%r1,%r0,SIGP_SET_ARCHITECTURE	# Switch to ESAME mode
+	sam64				# Switch to 64 bit addressing mode
+	larl	%r4,.Lctlregs		# Restore control registers
+	lctlg	%c0,%c15,0(%r4)
+	larl	%r4,.Lfpctl		# Restore floating point ctl register
+	lfpc	0(%r4)
+	larl	%r4,.Lprefix		# Restore prefix register
+	spx	0(%r4)
+	larl	%r4,.Lcontinue_psw	# Restore PSW flags
+	larl	%r2,.Lcontinue
+	stg	%r2,8(%r4)
+	lpswe	0(%r4)
+.Lcontinue:
+	BR_EX_AMODE31_r14
+ENDPROC(_diag308_reset_amode31)
+
+	.section .amode31.data,"aw",@progbits
+.align 8
+.Lrestart_diag308_psw:
+	.long	0x00080000,0x80000000
+
+.align 8
+.Lcontinue_psw:
+	.quad	0,0
+
+.align 8
+.Lctlreg0:
+	.quad	0
+.Lctlregs:
+	.rept	16
+	.quad	0
+	.endr
+.Lfpctl:
+	.long	0
+.Lprefix:
+	.long	0
+.Lprefix_zero:
+	.long	0
diff --git a/arch/s390/kernel/text_dma.S b/arch/s390/kernel/text_dma.S
deleted file mode 100644
index 65e037ab7df5..000000000000
--- a/arch/s390/kernel/text_dma.S
+++ /dev/null
@@ -1,158 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Code that needs to run below 2 GB.
- *
- * Copyright IBM Corp. 2019
- */
-
-#include <linux/linkage.h>
-#include <asm/errno.h>
-#include <asm/sigp.h>
-
-	.section .dma.text,"ax"
-/*
- * Simplified version of expoline thunk. The normal thunks can not be used here,
- * because they might be more than 2 GB away, and not reachable by the relative
- * branch. No comdat, exrl, etc. optimizations used here, because it only
- * affects a few functions that are not performance-relevant.
- */
-	.macro BR_EX_DMA_r14
-	larl	%r1,0f
-	ex	0,0(%r1)
-	j	.
-0:	br	%r14
-	.endm
-
-/*
- * int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode)
- */
-ENTRY(_diag14_dma)
-	lgr	%r1,%r2
-	lgr	%r2,%r3
-	lgr	%r3,%r4
-	lhi	%r5,-EIO
-	sam31
-	diag	%r1,%r2,0x14
-.Ldiag14_ex:
-	ipm	%r5
-	srl	%r5,28
-.Ldiag14_fault:
-	sam64
-	lgfr	%r2,%r5
-	BR_EX_DMA_r14
-	EX_TABLE_DMA(.Ldiag14_ex, .Ldiag14_fault)
-ENDPROC(_diag14_dma)
-
-/*
- * int _diag210_dma(struct diag210 *addr)
- */
-ENTRY(_diag210_dma)
-	lgr	%r1,%r2
-	lhi	%r2,-1
-	sam31
-	diag	%r1,%r0,0x210
-.Ldiag210_ex:
-	ipm	%r2
-	srl	%r2,28
-.Ldiag210_fault:
-	sam64
-	lgfr	%r2,%r2
-	BR_EX_DMA_r14
-	EX_TABLE_DMA(.Ldiag210_ex, .Ldiag210_fault)
-ENDPROC(_diag210_dma)
-
-/*
- * int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode)
- */
-ENTRY(_diag26c_dma)
-	lghi	%r5,-EOPNOTSUPP
-	sam31
-	diag	%r2,%r4,0x26c
-.Ldiag26c_ex:
-	sam64
-	lgfr	%r2,%r5
-	BR_EX_DMA_r14
-	EX_TABLE_DMA(.Ldiag26c_ex, .Ldiag26c_ex)
-ENDPROC(_diag26c_dma)
-
-/*
- * void _diag0c_dma(struct hypfs_diag0c_entry *entry)
- */
-ENTRY(_diag0c_dma)
-	sam31
-	diag	%r2,%r2,0x0c
-	sam64
-	BR_EX_DMA_r14
-ENDPROC(_diag0c_dma)
-
-/*
- * void _diag308_reset_dma(void)
- *
- * Calls diag 308 subcode 1 and continues execution
- */
-ENTRY(_diag308_reset_dma)
-	larl	%r4,.Lctlregs		# Save control registers
-	stctg	%c0,%c15,0(%r4)
-	lg	%r2,0(%r4)		# Disable lowcore protection
-	nilh	%r2,0xefff
-	larl	%r4,.Lctlreg0
-	stg	%r2,0(%r4)
-	lctlg	%c0,%c0,0(%r4)
-	larl	%r4,.Lfpctl		# Floating point control register
-	stfpc	0(%r4)
-	larl	%r4,.Lprefix		# Save prefix register
-	stpx	0(%r4)
-	larl	%r4,.Lprefix_zero	# Set prefix register to 0
-	spx	0(%r4)
-	larl	%r4,.Lcontinue_psw	# Save PSW flags
-	epsw	%r2,%r3
-	stm	%r2,%r3,0(%r4)
-	larl	%r4,restart_part2	# Setup restart PSW at absolute 0
-	larl	%r3,.Lrestart_diag308_psw
-	og	%r4,0(%r3)		# Save PSW
-	lghi	%r3,0
-	sturg	%r4,%r3			# Use sturg, because of large pages
-	lghi	%r1,1
-	lghi	%r0,0
-	diag	%r0,%r1,0x308
-restart_part2:
-	lhi	%r0,0			# Load r0 with zero
-	lhi	%r1,2			# Use mode 2 = ESAME (dump)
-	sigp	%r1,%r0,SIGP_SET_ARCHITECTURE	# Switch to ESAME mode
-	sam64				# Switch to 64 bit addressing mode
-	larl	%r4,.Lctlregs		# Restore control registers
-	lctlg	%c0,%c15,0(%r4)
-	larl	%r4,.Lfpctl		# Restore floating point ctl register
-	lfpc	0(%r4)
-	larl	%r4,.Lprefix		# Restore prefix register
-	spx	0(%r4)
-	larl	%r4,.Lcontinue_psw	# Restore PSW flags
-	larl	%r2,.Lcontinue
-	stg	%r2,8(%r4)
-	lpswe	0(%r4)
-.Lcontinue:
-	BR_EX_DMA_r14
-ENDPROC(_diag308_reset_dma)
-
-	.section .dma.data,"aw",@progbits
-.align 8
-.Lrestart_diag308_psw:
-	.long	0x00080000,0x80000000
-
-.align 8
-.Lcontinue_psw:
-	.quad	0,0
-
-.align 8
-.Lctlreg0:
-	.quad	0
-.Lctlregs:
-	.rept	16
-	.quad	0
-	.endr
-.Lfpctl:
-	.long	0
-.Lprefix:
-	.long	0
-.Lprefix_zero:
-	.long	0
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 76947275fe8b..bcefc2173de4 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -291,7 +291,7 @@ static void __init test_monitor_call(void)
 
 void __init trap_init(void)
 {
-	sort_extable(__start_dma_ex_table, __stop_dma_ex_table);
+	sort_extable(__start_amode31_ex_table, __stop_amode31_ex_table);
 	local_mcck_enable();
 	test_monitor_call();
 }
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index b6caa810af3a..1d5394bf8c2e 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -74,10 +74,10 @@ SECTIONS
 	BOOT_DATA_PRESERVED
 
 	. = ALIGN(8);
-	.dma.refs : {
-		_start_dma_refs = .;
-		*(.dma.refs)
-		_end_dma_refs = .;
+	.amode31.refs : {
+		_start_amode31_refs = .;
+		*(.amode31.refs)
+		_end_amode31_refs = .;
 	}
 
 	_edata = .;		/* End of data section */
@@ -146,30 +146,30 @@ SECTIONS
 	BOOT_DATA
 
 	/*
-	 * .dma section for code, data, ex_table that need to stay below 2 GB,
-	 * even when the kernel is relocated above 2 GB.
+	 * .amode31 section for code, data, ex_table that need to stay
+	 * below 2 GB, even when the kernel is relocated above 2 GB.
 	 */
 	. = ALIGN(PAGE_SIZE);
-	_sdma = .;
-	.dma.text : {
-		_stext_dma = .;
-		*(.dma.text)
-		*(.dma.text.*_indirect_*)
+	_samode31 = .;
+	.amode31.text : {
+		_stext_amode31 = .;
+		*(.amode31.text)
+		*(.amode31.text.*_indirect_*)
 		. = ALIGN(PAGE_SIZE);
-		_etext_dma = .;
+		_etext_amode31 = .;
 	}
 	. = ALIGN(16);
-	.dma.ex_table : {
-		_start_dma_ex_table = .;
-		KEEP(*(.dma.ex_table))
-		_stop_dma_ex_table = .;
+	.amode31.ex_table : {
+		_start_amode31_ex_table = .;
+		KEEP(*(.amode31.ex_table))
+		_stop_amode31_ex_table = .;
 	}
 	. = ALIGN(PAGE_SIZE);
-	.dma.data : {
-		*(.dma.data)
+	.amode31.data : {
+		*(.amode31.data)
 	}
 	. = ALIGN(PAGE_SIZE);
-	_edma = .;
+	_eamode31 = .;
 
 	/* early.c uses stsi, which requires page aligned data. */
 	. = ALIGN(PAGE_SIZE);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 52d82410486e..212632d57db9 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -231,8 +231,8 @@ const struct exception_table_entry *s390_search_extables(unsigned long addr)
 {
 	const struct exception_table_entry *fixup;
 
-	fixup = search_extable(__start_dma_ex_table,
-			       __stop_dma_ex_table - __start_dma_ex_table,
+	fixup = search_extable(__start_amode31_ex_table,
+			       __stop_amode31_ex_table - __start_amode31_ex_table,
 			       addr);
 	if (!fixup)
 		fixup = search_exception_tables(addr);
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 96897fab89dc..2b1c6d916cf9 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -581,7 +581,7 @@ void __init vmem_map_init(void)
 	__set_memory((unsigned long)_sinittext,
 		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
 		     SET_MEMORY_RO | SET_MEMORY_X);
-	__set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
+	__set_memory(__stext_amode31, (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT,
 		     SET_MEMORY_RO | SET_MEMORY_X);
 
 	/* we need lowcore executable for our LPSWE instructions */
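
Usage note: the sketch below illustrates the convention renamed by this patch, i.e. how a caller keeps a buffer below 2 GB and still finds it after relocate_amode31_section() has moved the section. It mirrors the diag210()/__diag210_tmp_amode31 pattern from arch/s390/kernel/diag.c above; the names sample_buf_amode31, sample_buf, sample_lock and sample_diag210_query() are hypothetical and exist only for illustration, and the __amode31_data/__amode31_ref macros are private to arch/s390/kernel/entry.h, so this would only build inside that directory.

	#include <linux/spinlock.h>
	#include <asm/diag.h>
	#include "entry.h"	/* __amode31_data, __amode31_ref (assumed build context) */

	/* Buffer placed in .amode31.data, i.e. guaranteed to stay below 2 GB. */
	static struct diag210 sample_buf_amode31 __amode31_data;

	/*
	 * Pointer kept in .amode31.refs: relocate_amode31_section() adds the
	 * relocation offset to every long between _start_amode31_refs and
	 * _end_amode31_refs, so this pointer stays valid after the section moves.
	 */
	static struct diag210 __amode31_ref *sample_buf = &sample_buf_amode31;

	static DEFINE_SPINLOCK(sample_lock);

	/* Illustrative wrapper: copy the request in, call the 31-bit stub, copy out. */
	static int sample_diag210_query(struct diag210 *addr)
	{
		unsigned long flags;
		int ccode;

		spin_lock_irqsave(&sample_lock, flags);
		*sample_buf = *addr;	/* request must be visible below 2 GB */
		ccode = diag_amode31_ops.diag210(sample_buf);
		*addr = *sample_buf;	/* copy the response back to the caller */
		spin_unlock_irqrestore(&sample_lock, flags);

		return ccode;
	}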