From 8a5043b68320aa880d46744ab40915bee8939882 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Wed, 29 Jun 2016 04:18:45 +0300 Subject: xtensa: define ___unlock_[di]cache_all unconditionally Provide macro definitions regardless of whether caches are lockable or not, make definitions empty in latter case. Signed-off-by: Max Filippov --- arch/xtensa/include/asm/cacheasm.h | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/include/asm/cacheasm.h b/arch/xtensa/include/asm/cacheasm.h index e0f9e1109c83..2041abb10a23 100644 --- a/arch/xtensa/include/asm/cacheasm.h +++ b/arch/xtensa/include/asm/cacheasm.h @@ -69,26 +69,23 @@ .endm -#if XCHAL_DCACHE_LINE_LOCKABLE - .macro ___unlock_dcache_all ar at -#if XCHAL_DCACHE_SIZE +#if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_SIZE __loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH #endif .endm -#endif - -#if XCHAL_ICACHE_LINE_LOCKABLE .macro ___unlock_icache_all ar at +#if XCHAL_ICACHE_LINE_LOCKABLE && XCHAL_ICACHE_SIZE __loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE XCHAL_ICACHE_LINEWIDTH +#endif .endm -#endif + .macro ___flush_invalidate_dcache_all ar at -- cgit v1.2.3-58-ga151 From 12c8007dafbb7a9719d7beb04b79c9406c265f47 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 25 Apr 2016 04:06:37 +0300 Subject: xtensa: fix __ffs result type Make __ffs result type unsigned long to match generic asm implementation. This fixes the following build warning: mm/nobootmem.c: In function '__free_pages_memory': include/linux/kernel.h:742:17: warning: comparison of distinct pointer types lacks a cast (void) (&_min1 == &_min2); \ ^ mm/nobootmem.c:100:11: note: in expansion of macro 'min' order = min(MAX_ORDER - 1UL, __ffs(start)); Signed-off-by: Max Filippov --- arch/xtensa/include/asm/bitops.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h index 3f44fa2a53e9..d3490189792b 100644 --- a/arch/xtensa/include/asm/bitops.h +++ b/arch/xtensa/include/asm/bitops.h @@ -48,7 +48,7 @@ static inline int ffz(unsigned long x) * __ffs: Find first bit set in word. Return 0 for bit 0 */ -static inline int __ffs(unsigned long x) +static inline unsigned long __ffs(unsigned long x) { return 31 - __cntlz(x & -x); } -- cgit v1.2.3-58-ga151 From f1883aa7d63e3be92ad18da7a1bfc6c9b15c4f9a Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 11 Apr 2016 21:07:30 +0300 Subject: xtensa: move kernel mapping addresses into kmem_layout.h Create a header dedicated to memory layout definitions. Include it from places where these definitions are needed. Express vmalloc area address, VIRTUAL_MEMORY_ADDRESS and KERNELOFFSET through KSEG address. Signed-off-by: Max Filippov --- arch/xtensa/include/asm/kmem_layout.h | 25 +++++++++++++++++++++++++ arch/xtensa/include/asm/page.h | 10 +--------- arch/xtensa/include/asm/pgtable.h | 7 ++++--- arch/xtensa/include/asm/vectors.h | 5 +++-- arch/xtensa/mm/init.c | 2 +- 5 files changed, 34 insertions(+), 15 deletions(-) create mode 100644 arch/xtensa/include/asm/kmem_layout.h (limited to 'arch/xtensa') diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h new file mode 100644 index 000000000000..4eb43b65a34d --- /dev/null +++ b/arch/xtensa/include/asm/kmem_layout.h @@ -0,0 +1,25 @@ +/* + * Kernel virtual memory layout definitions. + * + * This file is subject to the terms and conditions of the GNU General + * Public License. 
See the file "COPYING" in the main directory of + * this archive for more details. + * + * Copyright (C) 2016 Cadence Design Systems Inc. + */ + +#ifndef _XTENSA_KMEM_LAYOUT_H +#define _XTENSA_KMEM_LAYOUT_H + +#include + +/* + * Fixed TLB translations in the processor. + */ + +#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000) +#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000) +#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x08000000) +#define XCHAL_KSEG_PADDR __XTENSA_UL_CONST(0x00000000) + +#endif diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h index ad38500471fa..fd12a1977ba8 100644 --- a/arch/xtensa/include/asm/page.h +++ b/arch/xtensa/include/asm/page.h @@ -15,15 +15,7 @@ #include #include #include - -/* - * Fixed TLB translations in the processor. - */ - -#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000) -#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000) -#define XCHAL_KSEG_PADDR __XTENSA_UL_CONST(0x00000000) -#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x08000000) +#include /* * PAGE_SHIFT determines the page size diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index fb02fdc5ecee..8aa0e0d9cbb2 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -13,6 +13,7 @@ #include #include +#include /* * We only use two ring levels, user and kernel space. @@ -68,9 +69,9 @@ * Virtual memory area. We keep a distance to other memory regions to be * on the safe side. We also use this area for cache aliasing. */ -#define VMALLOC_START 0xC0000000 -#define VMALLOC_END 0xC7FEFFFF -#define TLBTEMP_BASE_1 0xC7FF0000 +#define VMALLOC_START (XCHAL_KSEG_CACHED_VADDR - 0x10000000) +#define VMALLOC_END (VMALLOC_START + 0x07FEFFFF) +#define TLBTEMP_BASE_1 (VMALLOC_END + 1) #define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE) #if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE #define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE) diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h index 288c776736d3..482171051ff0 100644 --- a/arch/xtensa/include/asm/vectors.h +++ b/arch/xtensa/include/asm/vectors.h @@ -20,6 +20,7 @@ #include #include +#include #if XCHAL_HAVE_PTP_MMU #define XCHAL_KIO_CACHED_VADDR 0xe0000000 @@ -48,10 +49,10 @@ static inline unsigned long xtensa_get_kio_paddr(void) #if defined(CONFIG_MMU) /* Will Become VECBASE */ -#define VIRTUAL_MEMORY_ADDRESS 0xD0000000 +#define VIRTUAL_MEMORY_ADDRESS XCHAL_KSEG_CACHED_VADDR /* Image Virtual Start Address */ -#define KERNELOFFSET 0xD0003000 +#define KERNELOFFSET (XCHAL_KSEG_CACHED_VADDR + 0x3000) #if defined(XCHAL_HAVE_PTP_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY /* MMU v3 - XCHAL_HAVE_PTP_MMU == 1 */ diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 9a9a5935bd36..4d7142beac72 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -344,7 +344,7 @@ void __init mem_init(void) " fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n" #endif #ifdef CONFIG_MMU - " vmalloc : 0x%08x - 0x%08x (%5u MB)\n" + " vmalloc : 0x%08lx - 0x%08lx (%5lu MB)\n" #endif " lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n", #ifdef CONFIG_HIGHMEM -- cgit v1.2.3-58-ga151 From d39af90265feb40ec198c4ca8268724645b4b50e Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 11 Apr 2016 21:14:17 +0300 Subject: xtensa: add alternative kernel memory layouts MMUv3 is able to support low memory bigger than 128MB. 
Implement 256MB and 512MB KSEG layouts: - add Kconfig selector for KSEG layout; - add KSEG base address, size and alignment definitions to arch/xtensa/include/asm/kmem_layout.h; - use new definitions in TLB initialization; - add build time memory map consistency checks. See Documentation/xtensa/mmu.txt for the details of new memory layouts. Signed-off-by: Max Filippov --- Documentation/xtensa/mmu.txt | 124 +++++++++++++++++++++++++++++++ arch/xtensa/Kconfig | 44 +++++++++++ arch/xtensa/include/asm/fixmap.h | 5 ++ arch/xtensa/include/asm/highmem.h | 5 ++ arch/xtensa/include/asm/initialize_mmu.h | 23 ++++-- arch/xtensa/include/asm/kmem_layout.h | 46 ++++++++++++ arch/xtensa/include/asm/page.h | 6 +- arch/xtensa/include/uapi/asm/types.h | 3 +- arch/xtensa/mm/init.c | 3 +- 9 files changed, 249 insertions(+), 10 deletions(-) (limited to 'arch/xtensa') diff --git a/Documentation/xtensa/mmu.txt b/Documentation/xtensa/mmu.txt index 0312fe66475c..867c0f837e28 100644 --- a/Documentation/xtensa/mmu.txt +++ b/Documentation/xtensa/mmu.txt @@ -62,3 +62,127 @@ limitations apply: 6. The IO area covers the entire 256MB segment of parent-bus-address; the "ranges" triplet length field is ignored + + +MMUv3 address space layouts. +============================ + +Default MMUv2-compatible layout. + + Symbol VADDR Size ++------------------+ +| Userspace | 0x00000000 TASK_SIZE ++------------------+ 0x40000000 ++------------------+ +| Page table | 0x80000000 ++------------------+ 0x80400000 ++------------------+ +| KMAP area | PKMAP_BASE PTRS_PER_PTE * +| | DCACHE_N_COLORS * +| | PAGE_SIZE +| | (4MB * DCACHE_N_COLORS) ++------------------+ +| Atomic KMAP area | FIXADDR_START KM_TYPE_NR * +| | NR_CPUS * +| | DCACHE_N_COLORS * +| | PAGE_SIZE ++------------------+ FIXADDR_TOP 0xbffff000 ++------------------+ +| VMALLOC area | VMALLOC_START 0xc0000000 128MB - 64KB ++------------------+ VMALLOC_END +| Cache aliasing | TLBTEMP_BASE_1 0xc7ff0000 DCACHE_WAY_SIZE +| remap area 1 | ++------------------+ +| Cache aliasing | TLBTEMP_BASE_2 DCACHE_WAY_SIZE +| remap area 2 | ++------------------+ ++------------------+ +| Cached KSEG | XCHAL_KSEG_CACHED_VADDR 0xd0000000 128MB ++------------------+ +| Uncached KSEG | XCHAL_KSEG_BYPASS_VADDR 0xd8000000 128MB ++------------------+ +| Cached KIO | XCHAL_KIO_CACHED_VADDR 0xe0000000 256MB ++------------------+ +| Uncached KIO | XCHAL_KIO_BYPASS_VADDR 0xf0000000 256MB ++------------------+ + + +256MB cached + 256MB uncached layout. 
+ + Symbol VADDR Size ++------------------+ +| Userspace | 0x00000000 TASK_SIZE ++------------------+ 0x40000000 ++------------------+ +| Page table | 0x80000000 ++------------------+ 0x80400000 ++------------------+ +| KMAP area | PKMAP_BASE PTRS_PER_PTE * +| | DCACHE_N_COLORS * +| | PAGE_SIZE +| | (4MB * DCACHE_N_COLORS) ++------------------+ +| Atomic KMAP area | FIXADDR_START KM_TYPE_NR * +| | NR_CPUS * +| | DCACHE_N_COLORS * +| | PAGE_SIZE ++------------------+ FIXADDR_TOP 0x9ffff000 ++------------------+ +| VMALLOC area | VMALLOC_START 0xa0000000 128MB - 64KB ++------------------+ VMALLOC_END +| Cache aliasing | TLBTEMP_BASE_1 0xa7ff0000 DCACHE_WAY_SIZE +| remap area 1 | ++------------------+ +| Cache aliasing | TLBTEMP_BASE_2 DCACHE_WAY_SIZE +| remap area 2 | ++------------------+ ++------------------+ +| Cached KSEG | XCHAL_KSEG_CACHED_VADDR 0xb0000000 256MB ++------------------+ +| Uncached KSEG | XCHAL_KSEG_BYPASS_VADDR 0xc0000000 256MB ++------------------+ ++------------------+ +| Cached KIO | XCHAL_KIO_CACHED_VADDR 0xe0000000 256MB ++------------------+ +| Uncached KIO | XCHAL_KIO_BYPASS_VADDR 0xf0000000 256MB ++------------------+ + + +512MB cached + 512MB uncached layout. + + Symbol VADDR Size ++------------------+ +| Userspace | 0x00000000 TASK_SIZE ++------------------+ 0x40000000 ++------------------+ +| Page table | 0x80000000 ++------------------+ 0x80400000 ++------------------+ +| KMAP area | PKMAP_BASE PTRS_PER_PTE * +| | DCACHE_N_COLORS * +| | PAGE_SIZE +| | (4MB * DCACHE_N_COLORS) ++------------------+ +| Atomic KMAP area | FIXADDR_START KM_TYPE_NR * +| | NR_CPUS * +| | DCACHE_N_COLORS * +| | PAGE_SIZE ++------------------+ FIXADDR_TOP 0x8ffff000 ++------------------+ +| VMALLOC area | VMALLOC_START 0x90000000 128MB - 64KB ++------------------+ VMALLOC_END +| Cache aliasing | TLBTEMP_BASE_1 0x97ff0000 DCACHE_WAY_SIZE +| remap area 1 | ++------------------+ +| Cache aliasing | TLBTEMP_BASE_2 DCACHE_WAY_SIZE +| remap area 2 | ++------------------+ ++------------------+ +| Cached KSEG | XCHAL_KSEG_CACHED_VADDR 0xa0000000 512MB ++------------------+ +| Uncached KSEG | XCHAL_KSEG_BYPASS_VADDR 0xc0000000 512MB ++------------------+ +| Cached KIO | XCHAL_KIO_CACHED_VADDR 0xe0000000 256MB ++------------------+ +| Uncached KIO | XCHAL_KIO_BYPASS_VADDR 0xf0000000 256MB ++------------------+ diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 64336f666fb6..c17496bc37f0 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -236,6 +236,50 @@ config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX If in doubt, say Y. +config KSEG_PADDR + hex "Physical address of the KSEG mapping" + depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX && MMU + default 0x00000000 + help + This is the physical address where KSEG is mapped. Please refer to + the chosen KSEG layout help for the required address alignment. + Unpacked kernel image (including vectors) must be located completely + within KSEG. + Physical memory below this address is not available to linux. + + If unsure, leave the default value here. + +choice + prompt "KSEG layout" + depends on MMU + default XTENSA_KSEG_MMU_V2 + +config XTENSA_KSEG_MMU_V2 + bool "MMUv2: 128MB cached + 128MB uncached" + help + MMUv2 compatible kernel memory map: TLB way 5 maps 128MB starting + at KSEG_PADDR to 0xd0000000 with cache and to 0xd8000000 + without cache. + KSEG_PADDR must be aligned to 128MB. 
+ +config XTENSA_KSEG_256M + bool "256MB cached + 256MB uncached" + depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX + help + TLB way 6 maps 256MB starting at KSEG_PADDR to 0xb0000000 + with cache and to 0xc0000000 without cache. + KSEG_PADDR must be aligned to 256MB. + +config XTENSA_KSEG_512M + bool "512MB cached + 512MB uncached" + depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX + help + TLB way 6 maps 512MB starting at KSEG_PADDR to 0xa0000000 + with cache and to 0xc0000000 without cache. + KSEG_PADDR must be aligned to 256MB. + +endchoice + config HIGHMEM bool "High Memory Support" depends on MMU diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h index 62b507deea9d..0d30403b6c95 100644 --- a/arch/xtensa/include/asm/fixmap.h +++ b/arch/xtensa/include/asm/fixmap.h @@ -59,6 +59,11 @@ enum fixed_addresses { */ static __always_inline unsigned long fix_to_virt(const unsigned int idx) { + /* Check if this memory layout is broken because fixmap overlaps page + * table. + */ + BUILD_BUG_ON(FIXADDR_START < + XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE); BUILD_BUG_ON(idx >= __end_of_fixed_addresses); return __fix_to_virt(idx); } diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h index 01cef6b40829..6e070db1022e 100644 --- a/arch/xtensa/include/asm/highmem.h +++ b/arch/xtensa/include/asm/highmem.h @@ -68,6 +68,11 @@ void kunmap_high(struct page *page); static inline void *kmap(struct page *page) { + /* Check if this memory layout is broken because PKMAP overlaps + * page table. + */ + BUILD_BUG_ON(PKMAP_BASE < + XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE); BUG_ON(in_interrupt()); if (!PageHighMem(page)) return page_address(page); diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h index 7a1e075969a3..46d4bef61655 100644 --- a/arch/xtensa/include/asm/initialize_mmu.h +++ b/arch/xtensa/include/asm/initialize_mmu.h @@ -116,22 +116,35 @@ add a5, a5, a4 bne a5, a2, 3b - /* Step 4: Setup MMU with the old V2 mappings. */ + /* Step 4: Setup MMU with the requested static mappings. */ + movi a6, 0x01000000 wsr a6, ITLBCFG wsr a6, DTLBCFG isync - movi a5, 0xd0000005 - movi a4, CA_WRITEBACK + movi a5, XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_TLB_WAY + movi a4, XCHAL_KSEG_PADDR + CA_WRITEBACK + wdtlb a4, a5 + witlb a4, a5 + + movi a5, XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_TLB_WAY + movi a4, XCHAL_KSEG_PADDR + CA_BYPASS wdtlb a4, a5 witlb a4, a5 - movi a5, 0xd8000005 - movi a4, CA_BYPASS +#ifdef CONFIG_XTENSA_KSEG_512M + movi a5, XCHAL_KSEG_CACHED_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY + movi a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_WRITEBACK wdtlb a4, a5 witlb a4, a5 + movi a5, XCHAL_KSEG_BYPASS_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY + movi a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_BYPASS + wdtlb a4, a5 + witlb a4, a5 +#endif + movi a5, XCHAL_KIO_CACHED_VADDR + 6 movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK wdtlb a4, a5 diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h index 4eb43b65a34d..bfce7ee8efd8 100644 --- a/arch/xtensa/include/asm/kmem_layout.h +++ b/arch/xtensa/include/asm/kmem_layout.h @@ -13,13 +13,59 @@ #include +#ifdef CONFIG_MMU + /* * Fixed TLB translations in the processor. 
*/ +#define XCHAL_PAGE_TABLE_VADDR __XTENSA_UL_CONST(0x80000000) +#define XCHAL_PAGE_TABLE_SIZE __XTENSA_UL_CONST(0x00400000) + +#if defined(CONFIG_XTENSA_KSEG_MMU_V2) + #define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000) #define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000) #define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x08000000) +#define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x08000000) +#define XCHAL_KSEG_TLB_WAY 5 + +#elif defined(CONFIG_XTENSA_KSEG_256M) + +#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xb0000000) +#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xc0000000) +#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x10000000) +#define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x10000000) +#define XCHAL_KSEG_TLB_WAY 6 + +#elif defined(CONFIG_XTENSA_KSEG_512M) + +#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xa0000000) +#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xc0000000) +#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x20000000) +#define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x10000000) +#define XCHAL_KSEG_TLB_WAY 6 + +#else +#error Unsupported KSEG configuration +#endif + +#ifdef CONFIG_KSEG_PADDR +#define XCHAL_KSEG_PADDR __XTENSA_UL_CONST(CONFIG_KSEG_PADDR) +#else #define XCHAL_KSEG_PADDR __XTENSA_UL_CONST(0x00000000) +#endif + +#if XCHAL_KSEG_PADDR & (XCHAL_KSEG_ALIGNMENT - 1) +#error XCHAL_KSEG_PADDR is not properly aligned to XCHAL_KSEG_ALIGNMENT +#endif + +#else + +#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000) +#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000) +#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x08000000) + +#endif #endif diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h index fd12a1977ba8..8a02438232c4 100644 --- a/arch/xtensa/include/asm/page.h +++ b/arch/xtensa/include/asm/page.h @@ -27,10 +27,12 @@ #ifdef CONFIG_MMU #define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR -#define MAX_MEM_PFN XCHAL_KSEG_SIZE +#define MAX_LOW_PFN (PHYS_PFN(XCHAL_KSEG_PADDR) + \ + PHYS_PFN(XCHAL_KSEG_SIZE)) #else #define PAGE_OFFSET __XTENSA_UL_CONST(0) -#define MAX_MEM_PFN (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE) +#define MAX_LOW_PFN (PHYS_PFN(PLATFORM_DEFAULT_MEM_START) + \ + PHYS_PFN(PLATFORM_DEFAULT_MEM_SIZE)) #endif #define PGTABLE_START 0x80000000 diff --git a/arch/xtensa/include/uapi/asm/types.h b/arch/xtensa/include/uapi/asm/types.h index 87ec7ae73cb1..2efc921506c4 100644 --- a/arch/xtensa/include/uapi/asm/types.h +++ b/arch/xtensa/include/uapi/asm/types.h @@ -18,7 +18,8 @@ # define __XTENSA_UL_CONST(x) x #else # define __XTENSA_UL(x) ((unsigned long)(x)) -# define __XTENSA_UL_CONST(x) x##UL +# define ___XTENSA_UL_CONST(x) x##UL +# define __XTENSA_UL_CONST(x) ___XTENSA_UL_CONST(x) #endif #ifndef __ASSEMBLY__ diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 4d7142beac72..302fa72d0d5a 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -266,8 +266,7 @@ void __init bootmem_init(void) if (min_low_pfn > max_pfn) panic("No memory found!\n"); - max_low_pfn = max_pfn < MAX_MEM_PFN >> PAGE_SHIFT ? - max_pfn : MAX_MEM_PFN >> PAGE_SHIFT; + max_low_pfn = min(max_pfn, MAX_LOW_PFN); /* Find an area to use for the bootmem bitmap. */ -- cgit v1.2.3-58-ga151 From a9f2fc628e3a26a829fd79aff74eb49839d1e74b Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Wed, 13 Apr 2016 05:20:02 +0300 Subject: xtensa: cleanup MMU setup and kernel layout macros Make kernel load address explicit, independent of the selected MMU configuration and configurable from Kconfig. 
Do not restrict it to the first 512MB of the physical address space. Cleanup kernel memory layout macros: - rename VECBASE_RESET_VADDR to VECBASE_VADDR, XC_VADDR to VECTOR_VADDR; - drop VIRTUAL_MEMORY_ADDRESS and LOAD_MEMORY_ADDRESS; - introduce PHYS_OFFSET and use it in __va and __pa definitions; - synchronize MMU/noMMU vectors, drop unused NMI vector; - replace hardcoded vectors offset of 0x3000 with Kconfig symbol. Signed-off-by: Max Filippov --- Documentation/xtensa/mmu.txt | 49 +++++++++++------------- arch/xtensa/Kconfig | 26 ++++++++++--- arch/xtensa/boot/boot-elf/boot.lds.S | 2 +- arch/xtensa/boot/boot-elf/bootstrap.S | 7 +++- arch/xtensa/boot/boot-uboot/Makefile | 10 +---- arch/xtensa/include/asm/initialize_mmu.h | 21 +++++----- arch/xtensa/include/asm/kmem_layout.h | 3 ++ arch/xtensa/include/asm/page.h | 8 +++- arch/xtensa/include/asm/vectors.h | 66 +++++++++++--------------------- arch/xtensa/kernel/entry.S | 5 ++- arch/xtensa/kernel/head.S | 2 +- arch/xtensa/kernel/vmlinux.lds.S | 4 -- 12 files changed, 98 insertions(+), 105 deletions(-) (limited to 'arch/xtensa') diff --git a/Documentation/xtensa/mmu.txt b/Documentation/xtensa/mmu.txt index 867c0f837e28..222a2c6748e6 100644 --- a/Documentation/xtensa/mmu.txt +++ b/Documentation/xtensa/mmu.txt @@ -3,15 +3,8 @@ MMUv3 initialization sequence. The code in the initialize_mmu macro sets up MMUv3 memory mapping identically to MMUv2 fixed memory mapping. Depending on CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX symbol this code is -located in one of the following address ranges: - - 0xF0000000..0xFFFFFFFF (will keep same address in MMU v2 layout; - typically ROM) - 0x00000000..0x07FFFFFF (system RAM; this code is actually linked - at 0xD0000000..0xD7FFFFFF [cached] - or 0xD8000000..0xDFFFFFFF [uncached]; - in any case, initially runs elsewhere - than linked, so have to be careful) +located in addresses it was linked for (symbol undefined), or not +(symbol defined), so it needs to be position-independent. The code has the following assumptions: This code fragment is run only on an MMU v3. @@ -28,24 +21,26 @@ TLB setup proceeds along the following steps. PA = physical address (two upper nibbles of it); pc = physical range that contains this code; -After step 2, we jump to virtual address in 0x40000000..0x5fffffff -that corresponds to next instruction to execute in this code. -After step 4, we jump to intended (linked) address of this code. - - Step 0 Step1 Step 2 Step3 Step 4 Step5 - ============ ===== ============ ===== ============ ===== - VA PA PA VA PA PA VA PA PA - ------ -- -- ------ -- -- ------ -- -- - E0..FF -> E0 -> E0 E0..FF -> E0 F0..FF -> F0 -> F0 - C0..DF -> C0 -> C0 C0..DF -> C0 E0..EF -> F0 -> F0 - A0..BF -> A0 -> A0 A0..BF -> A0 D8..DF -> 00 -> 00 - 80..9F -> 80 -> 80 80..9F -> 80 D0..D7 -> 00 -> 00 - 60..7F -> 60 -> 60 60..7F -> 60 - 40..5F -> 40 40..5F -> pc -> pc 40..5F -> pc - 20..3F -> 20 -> 20 20..3F -> 20 - 00..1F -> 00 -> 00 00..1F -> 00 - -The default location of IO peripherals is above 0xf0000000. This may change +After step 2, we jump to virtual address in the range 0x40000000..0x5fffffff +or 0x00000000..0x1fffffff, depending on whether the kernel was loaded below +0x40000000 or above. That address corresponds to next instruction to execute +in this code. After step 4, we jump to intended (linked) address of this code. +The scheme below assumes that the kernel is loaded below 0x40000000. 
+ + Step0 Step1 Step2 Step3 Step4 Step5 + ===== ===== ===== ===== ===== ===== + VA PA PA PA PA VA PA PA + ------ -- -- -- -- ------ -- -- + E0..FF -> E0 -> E0 -> E0 F0..FF -> F0 -> F0 + C0..DF -> C0 -> C0 -> C0 E0..EF -> F0 -> F0 + A0..BF -> A0 -> A0 -> A0 D8..DF -> 00 -> 00 + 80..9F -> 80 -> 80 -> 80 D0..D7 -> 00 -> 00 + 60..7F -> 60 -> 60 -> 60 + 40..5F -> 40 -> pc -> pc 40..5F -> pc + 20..3F -> 20 -> 20 -> 20 + 00..1F -> 00 -> 00 -> 00 + +The default location of IO peripherals is above 0xf0000000. This may be changed using a "ranges" property in a device tree simple-bus node. See ePAPR 1.1, ยง6.5 for details on the syntax and semantic of simple-bus nodes. The following limitations apply: diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index c17496bc37f0..9b1f8c3c8cba 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -249,6 +249,25 @@ config KSEG_PADDR If unsure, leave the default value here. +config KERNEL_LOAD_ADDRESS + hex "Kernel load address" + default 0x00003000 + help + This is the address where the kernel is loaded. + It is virtual address for MMUv2 configurations and physical address + for all other configurations. + + If unsure, leave the default value here. + +config VECTORS_OFFSET + hex "Kernel vectors offset" + default 0x00003000 + help + This is the offset of the kernel image from the relocatable vectors + base. + + If unsure, leave the default value here. + choice prompt "KSEG layout" depends on MMU @@ -487,12 +506,7 @@ config DEFAULT_MEM_START used when no physical memory size is passed through DTB or through boot parameter from bootloader. - In noMMU configuration the following parameters are derived from it: - - kernel load address; - - kernel entry point address; - - relocatable vectors base address; - - uBoot load address; - - TASK_SIZE. + It's also used for TASK_SIZE calculation in noMMU configuration. If unsure, leave the default value here. diff --git a/arch/xtensa/boot/boot-elf/boot.lds.S b/arch/xtensa/boot/boot-elf/boot.lds.S index e54f2c9df63a..a30993054e9c 100644 --- a/arch/xtensa/boot/boot-elf/boot.lds.S +++ b/arch/xtensa/boot/boot-elf/boot.lds.S @@ -23,7 +23,7 @@ SECTIONS *(.ResetVector.text) } - .image KERNELOFFSET: AT (LOAD_MEMORY_ADDRESS) + .image KERNELOFFSET: AT (CONFIG_KERNEL_LOAD_ADDRESS) { _image_start = .; *(image) diff --git a/arch/xtensa/boot/boot-elf/bootstrap.S b/arch/xtensa/boot/boot-elf/bootstrap.S index e6bf313613cf..b6aa85328ac0 100644 --- a/arch/xtensa/boot/boot-elf/bootstrap.S +++ b/arch/xtensa/boot/boot-elf/bootstrap.S @@ -35,7 +35,12 @@ _ResetVector: .align 4 RomInitAddr: - .word LOAD_MEMORY_ADDRESS +#if defined(CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) && \ + XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY + .word CONFIG_KERNEL_LOAD_ADDRESS +#else + .word KERNELOFFSET +#endif RomBootParam: .word _bootparam _bootparam: diff --git a/arch/xtensa/boot/boot-uboot/Makefile b/arch/xtensa/boot/boot-uboot/Makefile index 403fcf23405c..0f4c417b4196 100644 --- a/arch/xtensa/boot/boot-uboot/Makefile +++ b/arch/xtensa/boot/boot-uboot/Makefile @@ -4,15 +4,7 @@ # for more details. 
# -ifdef CONFIG_MMU -ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX -UIMAGE_LOADADDR = 0x00003000 -else -UIMAGE_LOADADDR = 0xd0003000 -endif -else -UIMAGE_LOADADDR = $(shell printf "0x%x" $$(( ${CONFIG_DEFAULT_MEM_START} + 0x3000 )) ) -endif +UIMAGE_LOADADDR = $(CONFIG_KERNEL_LOAD_ADDRESS) UIMAGE_COMPRESSION = gzip $(obj)/../uImage: vmlinux.bin.gz FORCE diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h index 46d4bef61655..42410f253597 100644 --- a/arch/xtensa/include/asm/initialize_mmu.h +++ b/arch/xtensa/include/asm/initialize_mmu.h @@ -77,13 +77,16 @@ .align 4 1: movi a2, 0x10000000 - movi a3, 0x18000000 - add a2, a2, a0 -9: bgeu a2, a3, 9b /* PC is out of the expected range */ + +#if CONFIG_KERNEL_LOAD_ADDRESS < 0x40000000ul +#define TEMP_MAPPING_VADDR 0x40000000 +#else +#define TEMP_MAPPING_VADDR 0x00000000 +#endif /* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */ - movi a2, 0x40000000 | XCHAL_SPANNING_WAY + movi a2, TEMP_MAPPING_VADDR | XCHAL_SPANNING_WAY idtlb a2 iitlb a2 isync @@ -95,14 +98,14 @@ srli a3, a0, 27 slli a3, a3, 27 addi a3, a3, CA_BYPASS - addi a7, a2, -1 + addi a7, a2, 5 - XCHAL_SPANNING_WAY wdtlb a3, a7 witlb a3, a7 isync slli a4, a0, 5 srli a4, a4, 5 - addi a5, a2, -6 + addi a5, a2, -XCHAL_SPANNING_WAY add a4, a4, a5 jx a4 @@ -145,19 +148,19 @@ witlb a4, a5 #endif - movi a5, XCHAL_KIO_CACHED_VADDR + 6 + movi a5, XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_TLB_WAY movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK wdtlb a4, a5 witlb a4, a5 - movi a5, XCHAL_KIO_BYPASS_VADDR + 6 + movi a5, XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_TLB_WAY movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_BYPASS wdtlb a4, a5 witlb a4, a5 isync - /* Jump to self, using MMU v2 mappings. */ + /* Jump to self, using final mappings. 
*/ movi a4, 1f jx a4 diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h index bfce7ee8efd8..561f8729bcde 100644 --- a/arch/xtensa/include/asm/kmem_layout.h +++ b/arch/xtensa/include/asm/kmem_layout.h @@ -29,6 +29,7 @@ #define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x08000000) #define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x08000000) #define XCHAL_KSEG_TLB_WAY 5 +#define XCHAL_KIO_TLB_WAY 6 #elif defined(CONFIG_XTENSA_KSEG_256M) @@ -37,6 +38,7 @@ #define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x10000000) #define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x10000000) #define XCHAL_KSEG_TLB_WAY 6 +#define XCHAL_KIO_TLB_WAY 6 #elif defined(CONFIG_XTENSA_KSEG_512M) @@ -45,6 +47,7 @@ #define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x20000000) #define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x10000000) #define XCHAL_KSEG_TLB_WAY 6 +#define XCHAL_KIO_TLB_WAY 6 #else #error Unsupported KSEG configuration diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h index 8a02438232c4..3b5a49dbf8b2 100644 --- a/arch/xtensa/include/asm/page.h +++ b/arch/xtensa/include/asm/page.h @@ -27,10 +27,12 @@ #ifdef CONFIG_MMU #define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR +#define PHYS_OFFSET XCHAL_KSEG_PADDR #define MAX_LOW_PFN (PHYS_PFN(XCHAL_KSEG_PADDR) + \ PHYS_PFN(XCHAL_KSEG_SIZE)) #else #define PAGE_OFFSET __XTENSA_UL_CONST(0) +#define PHYS_OFFSET __XTENSA_UL_CONST(0) #define MAX_LOW_PFN (PHYS_PFN(PLATFORM_DEFAULT_MEM_START) + \ PHYS_PFN(PLATFORM_DEFAULT_MEM_SIZE)) #endif @@ -163,8 +165,10 @@ void copy_user_highpage(struct page *to, struct page *from, #define ARCH_PFN_OFFSET (PLATFORM_DEFAULT_MEM_START >> PAGE_SHIFT) -#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) -#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) +#define __pa(x) \ + ((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET) +#define __va(x) \ + ((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET)) #define pfn_valid(pfn) \ ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr) diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h index 482171051ff0..77d41cc7a688 100644 --- a/arch/xtensa/include/asm/vectors.h +++ b/arch/xtensa/include/asm/vectors.h @@ -48,61 +48,42 @@ static inline unsigned long xtensa_get_kio_paddr(void) #if defined(CONFIG_MMU) -/* Will Become VECBASE */ -#define VIRTUAL_MEMORY_ADDRESS XCHAL_KSEG_CACHED_VADDR - +#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY /* Image Virtual Start Address */ -#define KERNELOFFSET (XCHAL_KSEG_CACHED_VADDR + 0x3000) - -#if defined(XCHAL_HAVE_PTP_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY - /* MMU v3 - XCHAL_HAVE_PTP_MMU == 1 */ - #define LOAD_MEMORY_ADDRESS 0x00003000 +#define KERNELOFFSET (XCHAL_KSEG_CACHED_VADDR + \ + CONFIG_KERNEL_LOAD_ADDRESS - \ + XCHAL_KSEG_PADDR) #else - /* MMU V2 - XCHAL_HAVE_PTP_MMU == 0 */ - #define LOAD_MEMORY_ADDRESS 0xD0003000 +#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS #endif -#define RESET_VECTOR1_VADDR (VIRTUAL_MEMORY_ADDRESS + \ - XCHAL_RESET_VECTOR1_PADDR) - #else /* !defined(CONFIG_MMU) */ /* MMU Not being used - Virtual == Physical */ - /* VECBASE */ - #define VIRTUAL_MEMORY_ADDRESS (PLATFORM_DEFAULT_MEM_START + 0x2000) +/* Location of the start of the kernel text, _start */ +#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS - /* Location of the start of the kernel text, _start */ - #define KERNELOFFSET (PLATFORM_DEFAULT_MEM_START + 0x3000) - - /* Loaded just above possibly live vectors */ - #define LOAD_MEMORY_ADDRESS (PLATFORM_DEFAULT_MEM_START + 
0x3000) - -#define RESET_VECTOR1_VADDR (XCHAL_RESET_VECTOR1_VADDR) #endif /* CONFIG_MMU */ -#define XC_VADDR(offset) (VIRTUAL_MEMORY_ADDRESS + offset) - -/* Used to set VECBASE register */ -#define VECBASE_RESET_VADDR VIRTUAL_MEMORY_ADDRESS +#define RESET_VECTOR1_VADDR (XCHAL_RESET_VECTOR1_VADDR) +#define VECBASE_VADDR (KERNELOFFSET - CONFIG_VECTORS_OFFSET) #if defined(XCHAL_HAVE_VECBASE) && XCHAL_HAVE_VECBASE -#define USER_VECTOR_VADDR XC_VADDR(XCHAL_USER_VECOFS) -#define KERNEL_VECTOR_VADDR XC_VADDR(XCHAL_KERNEL_VECOFS) -#define DOUBLEEXC_VECTOR_VADDR XC_VADDR(XCHAL_DOUBLEEXC_VECOFS) -#define WINDOW_VECTORS_VADDR XC_VADDR(XCHAL_WINDOW_OF4_VECOFS) -#define INTLEVEL2_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL2_VECOFS) -#define INTLEVEL3_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL3_VECOFS) -#define INTLEVEL4_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL4_VECOFS) -#define INTLEVEL5_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL5_VECOFS) -#define INTLEVEL6_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL6_VECOFS) - -#define DEBUG_VECTOR_VADDR XC_VADDR(XCHAL_DEBUG_VECOFS) +#define VECTOR_VADDR(offset) (VECBASE_VADDR + offset) -#define NMI_VECTOR_VADDR XC_VADDR(XCHAL_NMI_VECOFS) - -#define INTLEVEL7_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL7_VECOFS) +#define USER_VECTOR_VADDR VECTOR_VADDR(XCHAL_USER_VECOFS) +#define KERNEL_VECTOR_VADDR VECTOR_VADDR(XCHAL_KERNEL_VECOFS) +#define DOUBLEEXC_VECTOR_VADDR VECTOR_VADDR(XCHAL_DOUBLEEXC_VECOFS) +#define WINDOW_VECTORS_VADDR VECTOR_VADDR(XCHAL_WINDOW_OF4_VECOFS) +#define INTLEVEL2_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL2_VECOFS) +#define INTLEVEL3_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL3_VECOFS) +#define INTLEVEL4_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL4_VECOFS) +#define INTLEVEL5_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL5_VECOFS) +#define INTLEVEL6_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL6_VECOFS) +#define INTLEVEL7_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL7_VECOFS) +#define DEBUG_VECTOR_VADDR VECTOR_VADDR(XCHAL_DEBUG_VECOFS) /* * These XCHAL_* #defines from varian/core.h @@ -110,7 +91,6 @@ static inline unsigned long xtensa_get_kio_paddr(void) * constants are defined above and should be used. 
*/ #undef XCHAL_VECBASE_RESET_VADDR -#undef XCHAL_RESET_VECTOR0_VADDR #undef XCHAL_USER_VECTOR_VADDR #undef XCHAL_KERNEL_VECTOR_VADDR #undef XCHAL_DOUBLEEXC_VECTOR_VADDR @@ -120,9 +100,8 @@ static inline unsigned long xtensa_get_kio_paddr(void) #undef XCHAL_INTLEVEL4_VECTOR_VADDR #undef XCHAL_INTLEVEL5_VECTOR_VADDR #undef XCHAL_INTLEVEL6_VECTOR_VADDR -#undef XCHAL_DEBUG_VECTOR_VADDR -#undef XCHAL_NMI_VECTOR_VADDR #undef XCHAL_INTLEVEL7_VECTOR_VADDR +#undef XCHAL_DEBUG_VECTOR_VADDR #else @@ -135,6 +114,7 @@ static inline unsigned long xtensa_get_kio_paddr(void) #define INTLEVEL4_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR #define INTLEVEL5_VECTOR_VADDR XCHAL_INTLEVEL5_VECTOR_VADDR #define INTLEVEL6_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR +#define INTLEVEL7_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR #define DEBUG_VECTOR_VADDR XCHAL_DEBUG_VECTOR_VADDR #endif diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index fe8f7e7efb9d..fa04d9d368a7 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -1632,10 +1632,11 @@ ENTRY(fast_second_level_miss) * The messy computation for 'pteval' above really simplifies * into the following: * - * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY + * pteval = ((pmdval - PAGE_OFFSET + PHYS_OFFSET) & PAGE_MASK) + * | PAGE_DIRECTORY */ - movi a1, (-PAGE_OFFSET) & 0xffffffff + movi a1, (PHYS_OFFSET - PAGE_OFFSET) & 0xffffffff add a0, a0, a1 # pmdval - PAGE_OFFSET extui a1, a0, 0, PAGE_SHIFT # ... & PAGE_MASK xor a0, a0, a1 diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S index bc4f4bf05099..23ce62e60435 100644 --- a/arch/xtensa/kernel/head.S +++ b/arch/xtensa/kernel/head.S @@ -113,7 +113,7 @@ ENTRY(_startup) movi a0, 0 #if XCHAL_HAVE_VECBASE - movi a2, VECBASE_RESET_VADDR + movi a2, VECBASE_VADDR wsr a2, vecbase #endif diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index c417cbe4ec87..72cfe3587dd8 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -30,10 +30,6 @@ jiffies = jiffies_64 + 4; jiffies = jiffies_64; #endif -#ifndef KERNELOFFSET -#define KERNELOFFSET 0xd0003000 -#endif - /* Note: In the following macros, it would be nice to specify only the vector name and section kind and construct "sym" and "section" using CPP concatenation, but that does not work reliably. Concatenating a -- cgit v1.2.3-58-ga151 From 3de00482b006daa110151ac6775adc52538a3d6a Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Sat, 23 Jul 2016 02:47:58 +0300 Subject: xtensa: minimize use of PLATFORM_DEFAULT_MEM_{ADDR,SIZE} Now that the kernel load address and KSEG physical base address have their own Kconfig symbols PLATFORM_DEFAULT_MEM seems redundant. It makes little sense to use it in MMU configurations instead of KSEG_PADDR. In noMMU configurations there's no explicit KSEG, so it's still useful for the early cache initialization and definition of ARCH_PFN_OFFSET, which affects mem_map size. - limit it to noMMU; MMU variants have XCHAL_KSEG_PADDR and XCHAL_KSEG_SIZE; - don't use it to define TASK_SIZE or MAX_LOW_PFN: first doesn't make any difference in noMMU, second is meaningless as there's no high memory; - don't add default physical memory region: memory layout should come from the DT, bootloader tags, or memmap= command line parameter. 
Signed-off-by: Max Filippov --- arch/xtensa/Kconfig | 18 +++++++----------- arch/xtensa/include/asm/page.h | 9 ++++----- arch/xtensa/include/asm/processor.h | 2 +- arch/xtensa/kernel/setup.c | 13 ------------- 4 files changed, 12 insertions(+), 30 deletions(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 9b1f8c3c8cba..7a4c77b39ce0 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -394,7 +394,7 @@ config XTENSA_PLATFORM_XT2000 config XTENSA_PLATFORM_XTFPGA bool "XTFPGA" select ETHOC if ETHERNET - select PLATFORM_WANT_DEFAULT_MEM + select PLATFORM_WANT_DEFAULT_MEM if !MMU select SERIAL_CONSOLE select XTENSA_CALIBRATE_CCOUNT help @@ -502,11 +502,9 @@ config DEFAULT_MEM_START default 0x00000000 if MMU default 0x60000000 if !MMU help - This is a fallback start address of the default memory area, it is - used when no physical memory size is passed through DTB or through - boot parameter from bootloader. - - It's also used for TASK_SIZE calculation in noMMU configuration. + This is the base address of the default memory area. + Default memory area has platform-specific meaning, it may be used + for e.g. early cache initialization. If unsure, leave the default value here. @@ -515,11 +513,9 @@ config DEFAULT_MEM_SIZE depends on PLATFORM_WANT_DEFAULT_MEM default 0x04000000 help - This is a fallback size of the default memory area, it is used when - no physical memory size is passed through DTB or through boot - parameter from bootloader. - - It's also used for TASK_SIZE calculation in noMMU configuration. + This is the size of the default memory area. + Default memory area has platform-specific meaning, it may be used + for e.g. early cache initialization. If unsure, leave the default value here. diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h index 3b5a49dbf8b2..976b1d70edbc 100644 --- a/arch/xtensa/include/asm/page.h +++ b/arch/xtensa/include/asm/page.h @@ -31,10 +31,9 @@ #define MAX_LOW_PFN (PHYS_PFN(XCHAL_KSEG_PADDR) + \ PHYS_PFN(XCHAL_KSEG_SIZE)) #else -#define PAGE_OFFSET __XTENSA_UL_CONST(0) -#define PHYS_OFFSET __XTENSA_UL_CONST(0) -#define MAX_LOW_PFN (PHYS_PFN(PLATFORM_DEFAULT_MEM_START) + \ - PHYS_PFN(PLATFORM_DEFAULT_MEM_SIZE)) +#define PAGE_OFFSET PLATFORM_DEFAULT_MEM_START +#define PHYS_OFFSET PLATFORM_DEFAULT_MEM_START +#define MAX_LOW_PFN PHYS_PFN(0xfffffffful) #endif #define PGTABLE_START 0x80000000 @@ -163,7 +162,7 @@ void copy_user_highpage(struct page *to, struct page *from, * addresses. 
*/ -#define ARCH_PFN_OFFSET (PLATFORM_DEFAULT_MEM_START >> PAGE_SHIFT) +#define ARCH_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT) #define __pa(x) \ ((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET) diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index d2e40d39c615..b42d68bfe3cf 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h @@ -37,7 +37,7 @@ #ifdef CONFIG_MMU #define TASK_SIZE __XTENSA_UL_CONST(0x40000000) #else -#define TASK_SIZE (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE) +#define TASK_SIZE __XTENSA_UL_CONST(0xffffffff) #endif #define STACK_TOP TASK_SIZE diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 9735691f37f1..0e59b8900e6a 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -188,7 +188,6 @@ static int __init parse_bootparam(const bp_tag_t* tag) } #ifdef CONFIG_OF -bool __initdata dt_memory_scan = false; #if !XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR; @@ -228,9 +227,6 @@ static int __init xtensa_dt_io_area(unsigned long node, const char *uname, void __init early_init_dt_add_memory_arch(u64 base, u64 size) { - if (!dt_memory_scan) - return; - size &= PAGE_MASK; add_sysmem_bank(base, base + size); } @@ -242,9 +238,6 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) void __init early_init_devtree(void *params) { - if (sysmem.nr_banks == 0) - dt_memory_scan = true; - early_init_dt_scan(params); of_scan_flat_dt(xtensa_dt_io_area, NULL); @@ -278,12 +271,6 @@ void __init init_arch(bp_tag_t *bp_start) early_init_devtree(dtb_start); #endif - if (sysmem.nr_banks == 0) { - add_sysmem_bank(PLATFORM_DEFAULT_MEM_START, - PLATFORM_DEFAULT_MEM_START + - PLATFORM_DEFAULT_MEM_SIZE); - } - #ifdef CONFIG_CMDLINE_BOOL if (!command_line[0]) strlcpy(command_line, default_command_line, COMMAND_LINE_SIZE); -- cgit v1.2.3-58-ga151 From 0e46c1115f5816949220d62dd3ff04aa68e7ac6b Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 25 Apr 2016 22:08:20 +0300 Subject: xtensa: drop sysmem and switch to memblock Memblock is the standard kernel boot-time memory tracker/allocator. Use it instead of the custom sysmem allocator. This allows using kmemleak, CMA and device tree memory reservation. 
Signed-off-by: Max Filippov --- arch/xtensa/Kconfig | 3 + arch/xtensa/include/asm/sysmem.h | 21 +-- arch/xtensa/kernel/setup.c | 36 +++--- arch/xtensa/mm/init.c | 272 +++------------------------------------ 4 files changed, 40 insertions(+), 292 deletions(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 7a4c77b39ce0..d20ddf7bf6a9 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -13,16 +13,19 @@ config XTENSA select GENERIC_IRQ_SHOW select GENERIC_PCI_IOMAP select GENERIC_SCHED_CLOCK + select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_API_DEBUG select HAVE_EXIT_THREAD select HAVE_FUNCTION_TRACER select HAVE_FUTEX_CMPXCHG if !MMU select HAVE_HW_BREAKPOINT if PERF_EVENTS select HAVE_IRQ_TIME_ACCOUNTING + select HAVE_MEMBLOCK select HAVE_OPROFILE select HAVE_PERF_EVENTS select IRQ_DOMAIN select MODULES_USE_ELF_RELA + select NO_BOOTMEM select PERF_USE_VMALLOC select VIRT_TO_BUS help diff --git a/arch/xtensa/include/asm/sysmem.h b/arch/xtensa/include/asm/sysmem.h index c015c5c8e3f7..552cdfd8590e 100644 --- a/arch/xtensa/include/asm/sysmem.h +++ b/arch/xtensa/include/asm/sysmem.h @@ -11,27 +11,8 @@ #ifndef _XTENSA_SYSMEM_H #define _XTENSA_SYSMEM_H -#define SYSMEM_BANKS_MAX 31 +#include -struct meminfo { - unsigned long start; - unsigned long end; -}; - -/* - * Bank array is sorted by .start. - * Banks don't overlap and there's at least one page gap - * between adjacent bank entries. - */ -struct sysmem_info { - int nr_banks; - struct meminfo bank[SYSMEM_BANKS_MAX]; -}; - -extern struct sysmem_info sysmem; - -int add_sysmem_bank(unsigned long start, unsigned long end); -int mem_reserve(unsigned long, unsigned long, int); void bootmem_init(void); void zones_init(void); diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 0e59b8900e6a..393206b6aabc 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -7,6 +7,7 @@ * * Copyright (C) 1995 Linus Torvalds * Copyright (C) 2001 - 2005 Tensilica Inc. + * Copyright (C) 2014 - 2016 Cadence Design Systems Inc. 
* * Chris Zankel * Joe Taylor @@ -24,6 +25,7 @@ #include #include #include +#include #include #include @@ -114,7 +116,7 @@ static int __init parse_tag_mem(const bp_tag_t *tag) if (mi->type != MEMORY_TYPE_CONVENTIONAL) return -1; - return add_sysmem_bank(mi->start, mi->end); + return memblock_add(mi->start, mi->end - mi->start); } __tagtable(BP_TAG_MEMORY, parse_tag_mem); @@ -228,7 +230,7 @@ static int __init xtensa_dt_io_area(unsigned long node, const char *uname, void __init early_init_dt_add_memory_arch(u64 base, u64 size) { size &= PAGE_MASK; - add_sysmem_bank(base, base + size); + memblock_add(base, size); } void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) @@ -440,6 +442,10 @@ static int __init check_s32c1i(void) early_initcall(check_s32c1i); #endif /* CONFIG_S32C1I_SELFTEST */ +static inline int mem_reserve(unsigned long start, unsigned long end) +{ + return memblock_reserve(start, end - start); +} void __init setup_arch(char **cmdline_p) { @@ -451,54 +457,54 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start < initrd_end) { initrd_is_mapped = mem_reserve(__pa(initrd_start), - __pa(initrd_end), 0) == 0; + __pa(initrd_end)) == 0; initrd_below_start_ok = 1; } else { initrd_start = 0; } #endif - mem_reserve(__pa(&_stext),__pa(&_end), 1); + mem_reserve(__pa(&_stext), __pa(&_end)); mem_reserve(__pa(&_WindowVectors_text_start), - __pa(&_WindowVectors_text_end), 0); + __pa(&_WindowVectors_text_end)); mem_reserve(__pa(&_DebugInterruptVector_literal_start), - __pa(&_DebugInterruptVector_text_end), 0); + __pa(&_DebugInterruptVector_text_end)); mem_reserve(__pa(&_KernelExceptionVector_literal_start), - __pa(&_KernelExceptionVector_text_end), 0); + __pa(&_KernelExceptionVector_text_end)); mem_reserve(__pa(&_UserExceptionVector_literal_start), - __pa(&_UserExceptionVector_text_end), 0); + __pa(&_UserExceptionVector_text_end)); mem_reserve(__pa(&_DoubleExceptionVector_literal_start), - __pa(&_DoubleExceptionVector_text_end), 0); + __pa(&_DoubleExceptionVector_text_end)); #if XCHAL_EXCM_LEVEL >= 2 mem_reserve(__pa(&_Level2InterruptVector_text_start), - __pa(&_Level2InterruptVector_text_end), 0); + __pa(&_Level2InterruptVector_text_end)); #endif #if XCHAL_EXCM_LEVEL >= 3 mem_reserve(__pa(&_Level3InterruptVector_text_start), - __pa(&_Level3InterruptVector_text_end), 0); + __pa(&_Level3InterruptVector_text_end)); #endif #if XCHAL_EXCM_LEVEL >= 4 mem_reserve(__pa(&_Level4InterruptVector_text_start), - __pa(&_Level4InterruptVector_text_end), 0); + __pa(&_Level4InterruptVector_text_end)); #endif #if XCHAL_EXCM_LEVEL >= 5 mem_reserve(__pa(&_Level5InterruptVector_text_start), - __pa(&_Level5InterruptVector_text_end), 0); + __pa(&_Level5InterruptVector_text_end)); #endif #if XCHAL_EXCM_LEVEL >= 6 mem_reserve(__pa(&_Level6InterruptVector_text_start), - __pa(&_Level6InterruptVector_text_end), 0); + __pa(&_Level6InterruptVector_text_end)); #endif #ifdef CONFIG_SMP mem_reserve(__pa(&_SecondaryResetVector_text_start), - __pa(&_SecondaryResetVector_text_end), 0); + __pa(&_SecondaryResetVector_text_end)); #endif parse_early_param(); bootmem_init(); diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 302fa72d0d5a..3ee7e29603af 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -8,7 +8,7 @@ * for more details. * * Copyright (C) 2001 - 2005 Tensilica Inc. - * Copyright (C) 2014 Cadence Design Systems Inc. + * Copyright (C) 2014 - 2016 Cadence Design Systems Inc. 
* * Chris Zankel * Joe Taylor @@ -31,277 +31,35 @@ #include #include -struct sysmem_info sysmem __initdata; - -static void __init sysmem_dump(void) -{ - unsigned i; - - pr_debug("Sysmem:\n"); - for (i = 0; i < sysmem.nr_banks; ++i) - pr_debug(" 0x%08lx - 0x%08lx (%ldK)\n", - sysmem.bank[i].start, sysmem.bank[i].end, - (sysmem.bank[i].end - sysmem.bank[i].start) >> 10); -} - -/* - * Find bank with maximal .start such that bank.start <= start - */ -static inline struct meminfo * __init find_bank(unsigned long start) -{ - unsigned i; - struct meminfo *it = NULL; - - for (i = 0; i < sysmem.nr_banks; ++i) - if (sysmem.bank[i].start <= start) - it = sysmem.bank + i; - else - break; - return it; -} - -/* - * Move all memory banks starting at 'from' to a new place at 'to', - * adjust nr_banks accordingly. - * Both 'from' and 'to' must be inside the sysmem.bank. - * - * Returns: 0 (success), -ENOMEM (not enough space in the sysmem.bank). - */ -static int __init move_banks(struct meminfo *to, struct meminfo *from) -{ - unsigned n = sysmem.nr_banks - (from - sysmem.bank); - - if (to > from && to - from + sysmem.nr_banks > SYSMEM_BANKS_MAX) - return -ENOMEM; - if (to != from) - memmove(to, from, n * sizeof(struct meminfo)); - sysmem.nr_banks += to - from; - return 0; -} - -/* - * Add new bank to sysmem. Resulting sysmem is the union of bytes of the - * original sysmem and the new bank. - * - * Returns: 0 (success), < 0 (error) - */ -int __init add_sysmem_bank(unsigned long start, unsigned long end) -{ - unsigned i; - struct meminfo *it = NULL; - unsigned long sz; - unsigned long bank_sz = 0; - - if (start == end || - (start < end) != (PAGE_ALIGN(start) < (end & PAGE_MASK))) { - pr_warn("Ignoring small memory bank 0x%08lx size: %ld bytes\n", - start, end - start); - return -EINVAL; - } - - start = PAGE_ALIGN(start); - end &= PAGE_MASK; - sz = end - start; - - it = find_bank(start); - - if (it) - bank_sz = it->end - it->start; - - if (it && bank_sz >= start - it->start) { - if (end - it->start > bank_sz) - it->end = end; - else - return 0; - } else { - if (!it) - it = sysmem.bank; - else - ++it; - - if (it - sysmem.bank < sysmem.nr_banks && - it->start - start <= sz) { - it->start = start; - if (it->end - it->start < sz) - it->end = end; - else - return 0; - } else { - if (move_banks(it + 1, it) < 0) { - pr_warn("Ignoring memory bank 0x%08lx size %ld bytes\n", - start, end - start); - return -EINVAL; - } - it->start = start; - it->end = end; - return 0; - } - } - sz = it->end - it->start; - for (i = it + 1 - sysmem.bank; i < sysmem.nr_banks; ++i) - if (sysmem.bank[i].start - it->start <= sz) { - if (sz < sysmem.bank[i].end - it->start) - it->end = sysmem.bank[i].end; - } else { - break; - } - - move_banks(it + 1, sysmem.bank + i); - return 0; -} - -/* - * mem_reserve(start, end, must_exist) - * - * Reserve some memory from the memory pool. - * If must_exist is set and a part of the region being reserved does not exist - * memory map is not altered. - * - * Parameters: - * start Start of region, - * end End of region, - * must_exist Must exist in memory pool. 
- * - * Returns: - * 0 (success) - * < 0 (error) - */ - -int __init mem_reserve(unsigned long start, unsigned long end, int must_exist) -{ - struct meminfo *it; - struct meminfo *rm = NULL; - unsigned long sz; - unsigned long bank_sz = 0; - - start = start & PAGE_MASK; - end = PAGE_ALIGN(end); - sz = end - start; - if (!sz) - return -EINVAL; - - it = find_bank(start); - - if (it) - bank_sz = it->end - it->start; - - if ((!it || end - it->start > bank_sz) && must_exist) { - pr_warn("mem_reserve: [0x%0lx, 0x%0lx) not in any region!\n", - start, end); - return -EINVAL; - } - - if (it && start - it->start <= bank_sz) { - if (start == it->start) { - if (end - it->start < bank_sz) { - it->start = end; - return 0; - } else { - rm = it; - } - } else { - it->end = start; - if (end - it->start < bank_sz) - return add_sysmem_bank(end, - it->start + bank_sz); - ++it; - } - } - - if (!it) - it = sysmem.bank; - - for (; it < sysmem.bank + sysmem.nr_banks; ++it) { - if (it->end - start <= sz) { - if (!rm) - rm = it; - } else { - if (it->start - start < sz) - it->start = end; - break; - } - } - - if (rm) - move_banks(rm, it); - - return 0; -} - - /* * Initialize the bootmem system and give it all low memory we have available. */ void __init bootmem_init(void) { - unsigned long pfn; - unsigned long bootmap_start, bootmap_size; - int i; - - /* Reserve all memory below PLATFORM_DEFAULT_MEM_START, as memory + /* Reserve all memory below PHYS_OFFSET, as memory * accounting doesn't work for pages below that address. * - * If PLATFORM_DEFAULT_MEM_START is zero reserve page at address 0: + * If PHYS_OFFSET is zero reserve page at address 0: * successfull allocations should never return NULL. */ - if (PLATFORM_DEFAULT_MEM_START) - mem_reserve(0, PLATFORM_DEFAULT_MEM_START, 0); + if (PHYS_OFFSET) + memblock_reserve(0, PHYS_OFFSET); else - mem_reserve(0, 1, 0); + memblock_reserve(0, 1); - sysmem_dump(); - max_low_pfn = max_pfn = 0; - min_low_pfn = ~0; - - for (i=0; i < sysmem.nr_banks; i++) { - pfn = PAGE_ALIGN(sysmem.bank[i].start) >> PAGE_SHIFT; - if (pfn < min_low_pfn) - min_low_pfn = pfn; - pfn = PAGE_ALIGN(sysmem.bank[i].end - 1) >> PAGE_SHIFT; - if (pfn > max_pfn) - max_pfn = pfn; - } - if (min_low_pfn > max_pfn) + if (!memblock_phys_mem_size()) panic("No memory found!\n"); + min_low_pfn = PFN_UP(memblock_start_of_DRAM()); + min_low_pfn = max(min_low_pfn, PFN_UP(PHYS_OFFSET)); + max_pfn = PFN_DOWN(memblock_end_of_DRAM()); max_low_pfn = min(max_pfn, MAX_LOW_PFN); - /* Find an area to use for the bootmem bitmap. 
*/ - - bootmap_size = bootmem_bootmap_pages(max_low_pfn - min_low_pfn); - bootmap_size <<= PAGE_SHIFT; - bootmap_start = ~0; - - for (i=0; i= bootmap_size) { - bootmap_start = sysmem.bank[i].start; - break; - } - - if (bootmap_start == ~0UL) - panic("Cannot find %ld bytes for bootmap\n", bootmap_size); - - /* Reserve the bootmem bitmap area */ - - mem_reserve(bootmap_start, bootmap_start + bootmap_size, 1); - bootmap_size = init_bootmem_node(NODE_DATA(0), - bootmap_start >> PAGE_SHIFT, - min_low_pfn, - max_low_pfn); - - /* Add all remaining memory pieces into the bootmem map */ - - for (i = 0; i < sysmem.nr_banks; i++) { - if (sysmem.bank[i].start >> PAGE_SHIFT < max_low_pfn) { - unsigned long end = min(max_low_pfn << PAGE_SHIFT, - sysmem.bank[i].end); - free_bootmem(sysmem.bank[i].start, - end - sysmem.bank[i].start); - } - } + memblock_set_current_limit(PFN_PHYS(max_low_pfn)); + memblock_dump_all(); } @@ -394,16 +152,16 @@ static void __init parse_memmap_one(char *p) switch (*p) { case '@': start_at = memparse(p + 1, &p); - add_sysmem_bank(start_at, start_at + mem_size); + memblock_add(start_at, mem_size); break; case '$': start_at = memparse(p + 1, &p); - mem_reserve(start_at, start_at + mem_size, 0); + memblock_reserve(start_at, mem_size); break; case 0: - mem_reserve(mem_size, 0, 0); + memblock_reserve(mem_size, -mem_size); break; default: -- cgit v1.2.3-58-ga151 From 4e7c84ec045921dacc78d36295e2e61390249665 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Tue, 19 Jul 2016 00:37:05 +0300 Subject: xtensa: support reserved-memory DT node This allows reserving regions of physical memory from the device tree. See Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt for more details. Signed-off-by: Max Filippov --- arch/xtensa/Kconfig | 1 + arch/xtensa/mm/init.c | 2 ++ 2 files changed, 3 insertions(+) (limited to 'arch/xtensa') diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index d20ddf7bf6a9..3f6659c53023 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -435,6 +435,7 @@ config USE_OF bool "Flattened Device Tree support" select OF select OF_EARLY_FLATTREE + select OF_RESERVED_MEM help Include support for flattened device tree machine descriptions. 
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 3ee7e29603af..80e4cfb2471a 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -48,6 +49,7 @@ void __init bootmem_init(void) else memblock_reserve(0, 1); + early_init_fdt_scan_reserved_mem(); if (!memblock_phys_mem_size()) panic("No memory found!\n"); -- cgit v1.2.3-58-ga151 From d8d2f7f64592f3e8c51dac6d20aed044dca4009a Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Sun, 31 Jul 2016 06:49:45 +0300 Subject: xtensa: wire up new syscalls Wire up userfaultfd, membarrier, mlock2, copy_file_range, preadv2, pwritev2 Signed-off-by: Max Filippov --- arch/xtensa/include/uapi/asm/unistd.h | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h index b95c30594355..de9b14b2d348 100644 --- a/arch/xtensa/include/uapi/asm/unistd.h +++ b/arch/xtensa/include/uapi/asm/unistd.h @@ -754,7 +754,20 @@ __SYSCALL(340, sys_bpf, 3) #define __NR_execveat 341 __SYSCALL(341, sys_execveat, 5) -#define __NR_syscall_count 342 +#define __NR_userfaultfd 342 +__SYSCALL(342, sys_userfaultfd, 1) +#define __NR_membarrier 343 +__SYSCALL(343, sys_membarrier, 2) +#define __NR_mlock2 344 +__SYSCALL(344, sys_mlock2, 3) +#define __NR_copy_file_range 345 +__SYSCALL(345, sys_copy_file_range, 6) +#define __NR_preadv2 346 +__SYSCALL(346, sys_preadv2, 6) +#define __NR_pwritev2 347 +__SYSCALL(347, sys_pwritev2, 6) + +#define __NR_syscall_count 348 /* * sysxtensa syscall handler -- cgit v1.2.3-58-ga151