
Merge tag 'xtensa-for-next-20160731' of git://github.com/jcmvbkbc/linux-xtensa into for_next

Xtensa improvements for 4.8:

- add new kernel memory layouts for MMUv3 cores: with 256MB and 512MB
  KSEG size, starting at physical address other than 0;
- make kernel load address configurable;
- clean up kernel memory layout macros;
- drop sysmem early allocator and switch to memblock;
- enable kmemleak and memory reservation from the device tree;
- wire up new syscalls: userfaultfd, membarrier, mlock2, copy_file_range,
  preadv2 and pwritev2.
Chris Zankel, 9 years ago
commit 9e8511ff7e

+ 146 - 27
Documentation/xtensa/mmu.txt

@@ -3,15 +3,8 @@ MMUv3 initialization sequence.
 The code in the initialize_mmu macro sets up MMUv3 memory mapping
 identically to MMUv2 fixed memory mapping. Depending on
 CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX symbol this code is
-located in one of the following address ranges:
-
-    0xF0000000..0xFFFFFFFF (will keep same address in MMU v2 layout;
-    			 typically ROM)
-    0x00000000..0x07FFFFFF (system RAM; this code is actually linked
-    			 at 0xD0000000..0xD7FFFFFF [cached]
-    			 or 0xD8000000..0xDFFFFFFF [uncached];
-    			 in any case, initially runs elsewhere
-    			 than linked, so have to be careful)
+located at the address it was linked for (symbol undefined) or elsewhere
+(symbol defined), so it needs to be position-independent.
 
 The code has the following assumptions:
   This code fragment is run only on an MMU v3.
@@ -28,24 +21,26 @@ TLB setup proceeds along the following steps.
     PA = physical address (two upper nibbles of it);
     pc = physical range that contains this code;
 
-After step 2, we jump to virtual address in 0x40000000..0x5fffffff
-that corresponds to next instruction to execute in this code.
-After step 4, we jump to intended (linked) address of this code.
-
-    Step 0     Step1     Step 2     Step3     Step 4     Step5
- ============  =====  ============  =====  ============  =====
-   VA      PA     PA    VA      PA     PA    VA      PA     PA
- ------    --     --  ------    --     --  ------    --     --
- E0..FF -> E0  -> E0  E0..FF -> E0         F0..FF -> F0  -> F0
- C0..DF -> C0  -> C0  C0..DF -> C0         E0..EF -> F0  -> F0
- A0..BF -> A0  -> A0  A0..BF -> A0         D8..DF -> 00  -> 00
- 80..9F -> 80  -> 80  80..9F -> 80         D0..D7 -> 00  -> 00
- 60..7F -> 60  -> 60  60..7F -> 60
- 40..5F -> 40         40..5F -> pc  -> pc  40..5F -> pc
- 20..3F -> 20  -> 20  20..3F -> 20
- 00..1F -> 00  -> 00  00..1F -> 00
-
-The default location of IO peripherals is above 0xf0000000. This may change
+After step 2, we jump to a virtual address in the range 0x40000000..0x5fffffff
+or 0x00000000..0x1fffffff, depending on whether the kernel was loaded below
+0x40000000 or above. That address corresponds to the next instruction to
+execute in this code. After step 4, we jump to the intended (linked) address.
+The scheme below assumes that the kernel is loaded below 0x40000000.
+
+        Step0  Step1  Step2  Step3          Step4  Step5
+        =====  =====  =====  =====          =====  =====
+   VA      PA     PA     PA     PA     VA      PA     PA
+ ------    --     --     --     --   ------    --     --
+ E0..FF -> E0  -> E0  -> E0          F0..FF -> F0  -> F0
+ C0..DF -> C0  -> C0  -> C0          E0..EF -> F0  -> F0
+ A0..BF -> A0  -> A0  -> A0          D8..DF -> 00  -> 00
+ 80..9F -> 80  -> 80  -> 80          D0..D7 -> 00  -> 00
+ 60..7F -> 60  -> 60  -> 60
+ 40..5F -> 40         -> pc  -> pc   40..5F -> pc
+ 20..3F -> 20  -> 20  -> 20
+ 00..1F -> 00  -> 00  -> 00
+
+The default location of IO peripherals is above 0xf0000000. This may be changed
 using a "ranges" property in a device tree simple-bus node. See ePAPR 1.1, §6.5
 for details on the syntax and semantic of simple-bus nodes. The following
 limitations apply:
@@ -62,3 +57,127 @@ limitations apply:
 
 6. The IO area covers the entire 256MB segment of parent-bus-address; the
    "ranges" triplet length field is ignored
+
+
+MMUv3 address space layouts.
+============================
+
+Default MMUv2-compatible layout.
+
+                      Symbol                   VADDR       Size
++------------------+
+| Userspace        |                           0x00000000  TASK_SIZE
++------------------+                           0x40000000
++------------------+
+| Page table       |                           0x80000000
++------------------+                           0x80400000
++------------------+
+| KMAP area        |  PKMAP_BASE                           PTRS_PER_PTE *
+|                  |                                       DCACHE_N_COLORS *
+|                  |                                       PAGE_SIZE
+|                  |                                       (4MB * DCACHE_N_COLORS)
++------------------+
+| Atomic KMAP area |  FIXADDR_START                        KM_TYPE_NR *
+|                  |                                       NR_CPUS *
+|                  |                                       DCACHE_N_COLORS *
+|                  |                                       PAGE_SIZE
++------------------+  FIXADDR_TOP              0xbffff000
++------------------+
+| VMALLOC area     |  VMALLOC_START            0xc0000000  128MB - 64KB
++------------------+  VMALLOC_END
+| Cache aliasing   |  TLBTEMP_BASE_1           0xc7ff0000  DCACHE_WAY_SIZE
+| remap area 1     |
++------------------+
+| Cache aliasing   |  TLBTEMP_BASE_2                       DCACHE_WAY_SIZE
+| remap area 2     |
++------------------+
++------------------+
+| Cached KSEG      |  XCHAL_KSEG_CACHED_VADDR  0xd0000000  128MB
++------------------+
+| Uncached KSEG    |  XCHAL_KSEG_BYPASS_VADDR  0xd8000000  128MB
++------------------+
+| Cached KIO       |  XCHAL_KIO_CACHED_VADDR   0xe0000000  256MB
++------------------+
+| Uncached KIO     |  XCHAL_KIO_BYPASS_VADDR   0xf0000000  256MB
++------------------+
+
+
+256MB cached + 256MB uncached layout.
+
+                      Symbol                   VADDR       Size
++------------------+
+| Userspace        |                           0x00000000  TASK_SIZE
++------------------+                           0x40000000
++------------------+
+| Page table       |                           0x80000000
++------------------+                           0x80400000
++------------------+
+| KMAP area        |  PKMAP_BASE                           PTRS_PER_PTE *
+|                  |                                       DCACHE_N_COLORS *
+|                  |                                       PAGE_SIZE
+|                  |                                       (4MB * DCACHE_N_COLORS)
++------------------+
+| Atomic KMAP area |  FIXADDR_START                        KM_TYPE_NR *
+|                  |                                       NR_CPUS *
+|                  |                                       DCACHE_N_COLORS *
+|                  |                                       PAGE_SIZE
++------------------+  FIXADDR_TOP              0x9ffff000
++------------------+
+| VMALLOC area     |  VMALLOC_START            0xa0000000  128MB - 64KB
++------------------+  VMALLOC_END
+| Cache aliasing   |  TLBTEMP_BASE_1           0xa7ff0000  DCACHE_WAY_SIZE
+| remap area 1     |
++------------------+
+| Cache aliasing   |  TLBTEMP_BASE_2                       DCACHE_WAY_SIZE
+| remap area 2     |
++------------------+
++------------------+
+| Cached KSEG      |  XCHAL_KSEG_CACHED_VADDR  0xb0000000  256MB
++------------------+
+| Uncached KSEG    |  XCHAL_KSEG_BYPASS_VADDR  0xc0000000  256MB
++------------------+
++------------------+
+| Cached KIO       |  XCHAL_KIO_CACHED_VADDR   0xe0000000  256MB
++------------------+
+| Uncached KIO     |  XCHAL_KIO_BYPASS_VADDR   0xf0000000  256MB
++------------------+
+
+
+512MB cached + 512MB uncached layout.
+
+                      Symbol                   VADDR       Size
++------------------+
+| Userspace        |                           0x00000000  TASK_SIZE
++------------------+                           0x40000000
++------------------+
+| Page table       |                           0x80000000
++------------------+                           0x80400000
++------------------+
+| KMAP area        |  PKMAP_BASE                           PTRS_PER_PTE *
+|                  |                                       DCACHE_N_COLORS *
+|                  |                                       PAGE_SIZE
+|                  |                                       (4MB * DCACHE_N_COLORS)
++------------------+
+| Atomic KMAP area |  FIXADDR_START                        KM_TYPE_NR *
+|                  |                                       NR_CPUS *
+|                  |                                       DCACHE_N_COLORS *
+|                  |                                       PAGE_SIZE
++------------------+  FIXADDR_TOP              0x8ffff000
++------------------+
+| VMALLOC area     |  VMALLOC_START            0x90000000  128MB - 64KB
++------------------+  VMALLOC_END
+| Cache aliasing   |  TLBTEMP_BASE_1           0x97ff0000  DCACHE_WAY_SIZE
+| remap area 1     |
++------------------+
+| Cache aliasing   |  TLBTEMP_BASE_2                       DCACHE_WAY_SIZE
+| remap area 2     |
++------------------+
++------------------+
+| Cached KSEG      |  XCHAL_KSEG_CACHED_VADDR  0xa0000000  512MB
++------------------+
+| Uncached KSEG    |  XCHAL_KSEG_BYPASS_VADDR  0xc0000000  512MB
++------------------+
+| Cached KIO       |  XCHAL_KIO_CACHED_VADDR   0xe0000000  256MB
++------------------+
+| Uncached KIO     |  XCHAL_KIO_BYPASS_VADDR   0xf0000000  256MB
++------------------+

+ 74 - 16
arch/xtensa/Kconfig

@@ -13,16 +13,19 @@ config XTENSA
 	select GENERIC_IRQ_SHOW
 	select GENERIC_PCI_IOMAP
 	select GENERIC_SCHED_CLOCK
+	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_API_DEBUG
 	select HAVE_EXIT_THREAD
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUTEX_CMPXCHG if !MMU
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS
 	select HAVE_IRQ_TIME_ACCOUNTING
+	select HAVE_MEMBLOCK
 	select HAVE_OPROFILE
 	select HAVE_PERF_EVENTS
 	select IRQ_DOMAIN
 	select MODULES_USE_ELF_RELA
+	select NO_BOOTMEM
 	select PERF_USE_VMALLOC
 	select VIRT_TO_BUS
 	help
@@ -236,6 +239,69 @@ config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
 
 	  If in doubt, say Y.
 
+config KSEG_PADDR
+	hex "Physical address of the KSEG mapping"
+	depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX && MMU
+	default 0x00000000
+	help
+	  This is the physical address where KSEG is mapped. Please refer to
+	  the chosen KSEG layout help for the required address alignment.
+	  Unpacked kernel image (including vectors) must be located completely
+	  within KSEG.
+	  Physical memory below this address is not available to linux.
+
+	  If unsure, leave the default value here.
+
+config KERNEL_LOAD_ADDRESS
+	hex "Kernel load address"
+	default 0x00003000
+	help
+	  This is the address where the kernel is loaded.
+	  It is virtual address for MMUv2 configurations and physical address
+	  for all other configurations.
+
+	  If unsure, leave the default value here.
+
+config VECTORS_OFFSET
+	hex "Kernel vectors offset"
+	default 0x00003000
+	help
+	  This is the offset of the kernel image from the relocatable vectors
+	  base.
+
+	  If unsure, leave the default value here.
+
+choice
+	prompt "KSEG layout"
+	depends on MMU
+	default XTENSA_KSEG_MMU_V2
+
+config XTENSA_KSEG_MMU_V2
+	bool "MMUv2: 128MB cached + 128MB uncached"
+	help
+	  MMUv2 compatible kernel memory map: TLB way 5 maps 128MB starting
+	  at KSEG_PADDR to 0xd0000000 with cache and to 0xd8000000
+	  without cache.
+	  KSEG_PADDR must be aligned to 128MB.
+
+config XTENSA_KSEG_256M
+	bool "256MB cached + 256MB uncached"
+	depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+	help
+	  TLB way 6 maps 256MB starting at KSEG_PADDR to 0xb0000000
+	  with cache and to 0xc0000000 without cache.
+	  KSEG_PADDR must be aligned to 256MB.
+
+config XTENSA_KSEG_512M
+	bool "512MB cached + 512MB uncached"
+	depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+	help
+	  TLB way 6 maps 512MB starting at KSEG_PADDR to 0xa0000000
+	  with cache and to 0xc0000000 without cache.
+	  KSEG_PADDR must be aligned to 256MB.
+
+endchoice
+
 config HIGHMEM
 	bool "High Memory Support"
 	depends on MMU
@@ -331,7 +397,7 @@ config XTENSA_PLATFORM_XT2000
 config XTENSA_PLATFORM_XTFPGA
 	bool "XTFPGA"
 	select ETHOC if ETHERNET
-	select PLATFORM_WANT_DEFAULT_MEM
+	select PLATFORM_WANT_DEFAULT_MEM if !MMU
 	select SERIAL_CONSOLE
 	select XTENSA_CALIBRATE_CCOUNT
 	help
@@ -369,6 +435,7 @@ config USE_OF
 	bool "Flattened Device Tree support"
 	select OF
 	select OF_EARLY_FLATTREE
+	select OF_RESERVED_MEM
 	help
 	  Include support for flattened device tree machine descriptions.
 
@@ -439,16 +506,9 @@ config DEFAULT_MEM_START
 	default 0x00000000 if MMU
 	default 0x60000000 if !MMU
 	help
-	  This is a fallback start address of the default memory area, it is
-	  used when no physical memory size is passed through DTB or through
-	  boot parameter from bootloader.
-
-	  In noMMU configuration the following parameters are derived from it:
-	  - kernel load address;
-	  - kernel entry point address;
-	  - relocatable vectors base address;
-	  - uBoot load address;
-	  - TASK_SIZE.
+	  This is the base address of the default memory area.
+	  The default memory area has platform-specific meaning; it may be
+	  used, e.g., for early cache initialization.
 
 	  If unsure, leave the default value here.
 
@@ -457,11 +517,9 @@ config DEFAULT_MEM_SIZE
 	depends on PLATFORM_WANT_DEFAULT_MEM
 	default 0x04000000
 	help
-	  This is a fallback size of the default memory area, it is used when
-	  no physical memory size is passed through DTB or through boot
-	  parameter from bootloader.
-
-	  It's also used for TASK_SIZE calculation in noMMU configuration.
+	  This is the size of the default memory area.
+	  The default memory area has platform-specific meaning; it may be
+	  used, e.g., for early cache initialization.
 
 	  If unsure, leave the default value here.
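
As a worked example for the options added above (hypothetical values, not
part of this change): a board with RAM starting at 0x10000000 could select
XTENSA_KSEG_256M and set CONFIG_KSEG_PADDR=0x10000000 (256MB aligned) and
CONFIG_KERNEL_LOAD_ADDRESS=0x10003000, so that the unpacked image sits inside
KSEG with the relocatable vectors base 0x3000 below it
(CONFIG_VECTORS_OFFSET=0x3000).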
 

+ 1 - 1
arch/xtensa/boot/boot-elf/boot.lds.S

@@ -23,7 +23,7 @@ SECTIONS
 		*(.ResetVector.text)
 	}
 
-	.image KERNELOFFSET: AT (LOAD_MEMORY_ADDRESS)
+	.image KERNELOFFSET: AT (CONFIG_KERNEL_LOAD_ADDRESS)
 	{
 		_image_start = .;
 		*(image)

+ 6 - 1
arch/xtensa/boot/boot-elf/bootstrap.S

@@ -35,7 +35,12 @@ _ResetVector:
 
 	.align 4
 RomInitAddr:
-	.word	LOAD_MEMORY_ADDRESS
+#if defined(CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) && \
+	XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+	.word	CONFIG_KERNEL_LOAD_ADDRESS
+#else
+	.word	KERNELOFFSET
+#endif
 RomBootParam:
 	.word _bootparam
 _bootparam:

+ 1 - 9
arch/xtensa/boot/boot-uboot/Makefile

@@ -4,15 +4,7 @@
 # for more details.
 #
 
-ifdef CONFIG_MMU
-ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
-UIMAGE_LOADADDR = 0x00003000
-else
-UIMAGE_LOADADDR = 0xd0003000
-endif
-else
-UIMAGE_LOADADDR = $(shell printf "0x%x" $$(( ${CONFIG_DEFAULT_MEM_START} + 0x3000 )) )
-endif
+UIMAGE_LOADADDR = $(CONFIG_KERNEL_LOAD_ADDRESS)
 UIMAGE_COMPRESSION = gzip
 
 $(obj)/../uImage: vmlinux.bin.gz FORCE

+ 1 - 1
arch/xtensa/include/asm/bitops.h

@@ -48,7 +48,7 @@ static inline int ffz(unsigned long x)
  * __ffs: Find first bit set in word. Return 0 for bit 0
  */
 
-static inline int __ffs(unsigned long x)
+static inline unsigned long __ffs(unsigned long x)
 {
 	return 31 - __cntlz(x & -x);
 }
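
The prototype matters because generic bitops code expects __ffs() to return
unsigned long. A hosted-C sanity check of the identity used above, assuming
GCC/Clang builtins (my_ffs is a hypothetical stand-in, not the kernel symbol):

    #include <assert.h>

    static unsigned long my_ffs(unsigned long x)
    {
            /* equals 31 - __cntlz(x & -x) when long is 32 bits wide */
            return (unsigned long)__builtin_ctzl(x);
    }

    int main(void)
    {
            assert(my_ffs(0x8) == 3);  /* bit 3 is the lowest set bit */
            assert(my_ffs(0x1) == 0);  /* "Return 0 for bit 0" */
            return 0;
    }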

+ 4 - 7
arch/xtensa/include/asm/cacheasm.h

@@ -69,26 +69,23 @@
 	.endm
 
 
-#if XCHAL_DCACHE_LINE_LOCKABLE
-
 	.macro	___unlock_dcache_all ar at
 
-#if XCHAL_DCACHE_SIZE
+#if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_SIZE
 	__loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
 #endif
 
 	.endm
 
-#endif
-
-#if XCHAL_ICACHE_LINE_LOCKABLE
 
 	.macro	___unlock_icache_all ar at
 
+#if XCHAL_ICACHE_LINE_LOCKABLE && XCHAL_ICACHE_SIZE
 	__loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE XCHAL_ICACHE_LINEWIDTH
+#endif
 
 	.endm
-#endif
+
 
 	.macro	___flush_invalidate_dcache_all ar at
 

+ 5 - 0
arch/xtensa/include/asm/fixmap.h

@@ -59,6 +59,11 @@ enum fixed_addresses {
  */
 static __always_inline unsigned long fix_to_virt(const unsigned int idx)
 {
+	/* Check if this memory layout is broken because fixmap overlaps page
+	 * table.
+	 */
+	BUILD_BUG_ON(FIXADDR_START <
+		     XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE);
 	BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
 	return __fix_to_virt(idx);
 }

+ 5 - 0
arch/xtensa/include/asm/highmem.h

@@ -68,6 +68,11 @@ void kunmap_high(struct page *page);
 
 static inline void *kmap(struct page *page)
 {
+	/* Check if this memory layout is broken because PKMAP overlaps
+	 * page table.
+	 */
+	BUILD_BUG_ON(PKMAP_BASE <
+		     XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE);
 	BUG_ON(in_interrupt());
 	if (!PageHighMem(page))
 		return page_address(page);

+ 30 - 14
arch/xtensa/include/asm/initialize_mmu.h

@@ -77,13 +77,16 @@
 
 	.align	4
 1:	movi	a2, 0x10000000
-	movi	a3, 0x18000000
-	add	a2, a2, a0
-9:	bgeu	a2, a3, 9b	/* PC is out of the expected range */
+
+#if CONFIG_KERNEL_LOAD_ADDRESS < 0x40000000ul
+#define TEMP_MAPPING_VADDR 0x40000000
+#else
+#define TEMP_MAPPING_VADDR 0x00000000
+#endif
 
 	/* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */
 
-	movi	a2, 0x40000000 | XCHAL_SPANNING_WAY
+	movi	a2, TEMP_MAPPING_VADDR | XCHAL_SPANNING_WAY
 	idtlb	a2
 	iitlb	a2
 	isync
@@ -95,14 +98,14 @@
 	srli	a3, a0, 27
 	slli	a3, a3, 27
 	addi	a3, a3, CA_BYPASS
-	addi	a7, a2, -1
+	addi	a7, a2, 5 - XCHAL_SPANNING_WAY
 	wdtlb	a3, a7
 	witlb	a3, a7
 	isync
 
 	slli	a4, a0, 5
 	srli	a4, a4, 5
-	addi	a5, a2, -6
+	addi	a5, a2, -XCHAL_SPANNING_WAY
 	add	a4, a4, a5
 	jx	a4
 
@@ -116,35 +119,48 @@
 	add	a5, a5, a4
 	bne	a5, a2, 3b
 
-	/* Step 4: Setup MMU with the old V2 mappings. */
+	/* Step 4: Setup MMU with the requested static mappings. */
+
 	movi	a6, 0x01000000
 	wsr	a6, ITLBCFG
 	wsr	a6, DTLBCFG
 	isync
 
-	movi	a5, 0xd0000005
-	movi	a4, CA_WRITEBACK
+	movi	a5, XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_TLB_WAY
+	movi	a4, XCHAL_KSEG_PADDR + CA_WRITEBACK
 	wdtlb	a4, a5
 	witlb	a4, a5
 
-	movi	a5, 0xd8000005
-	movi	a4, CA_BYPASS
+	movi	a5, XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_TLB_WAY
+	movi	a4, XCHAL_KSEG_PADDR + CA_BYPASS
 	wdtlb	a4, a5
 	witlb	a4, a5
 
-	movi	a5, XCHAL_KIO_CACHED_VADDR + 6
+#ifdef CONFIG_XTENSA_KSEG_512M
+	movi	a5, XCHAL_KSEG_CACHED_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
+	movi	a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_WRITEBACK
+	wdtlb	a4, a5
+	witlb	a4, a5
+
+	movi	a5, XCHAL_KSEG_BYPASS_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
+	movi	a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_BYPASS
+	wdtlb	a4, a5
+	witlb	a4, a5
+#endif
+
+	movi	a5, XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_TLB_WAY
 	movi	a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK
 	wdtlb	a4, a5
 	witlb	a4, a5
 
-	movi	a5, XCHAL_KIO_BYPASS_VADDR + 6
+	movi	a5, XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_TLB_WAY
 	movi	a4, XCHAL_KIO_DEFAULT_PADDR + CA_BYPASS
 	wdtlb	a4, a5
 	witlb	a4, a5
 
 	isync
 
-	/* Jump to self, using MMU v2 mappings. */
+	/* Jump to self, using final mappings. */
 	movi	a4, 1f
 	jx	a4
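
Each wdtlb/witlb pair above takes an (attribute, entry) register pair: the
entry word selects the virtual page and TLB way, the attribute word carries
the physical page plus the cache attribute. A sketch of the values under the
default MMUv2-compatible layout (KSEG in way 5, KSEG_PADDR = 0), assuming the
constants from kmem_layout.h:

    unsigned long entry = XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_TLB_WAY;
                                            /* 0xd0000000 | 5 = 0xd0000005 */
    unsigned long attr  = XCHAL_KSEG_PADDR + CA_WRITEBACK;
    /* "wdtlb attr, entry" (and witlb) installs the 128MB cached KSEG mapping */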
 

+ 74 - 0
arch/xtensa/include/asm/kmem_layout.h

@@ -0,0 +1,74 @@
+/*
+ * Kernel virtual memory layout definitions.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2016 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_KMEM_LAYOUT_H
+#define _XTENSA_KMEM_LAYOUT_H
+
+#include <asm/types.h>
+
+#ifdef CONFIG_MMU
+
+/*
+ * Fixed TLB translations in the processor.
+ */
+
+#define XCHAL_PAGE_TABLE_VADDR	__XTENSA_UL_CONST(0x80000000)
+#define XCHAL_PAGE_TABLE_SIZE	__XTENSA_UL_CONST(0x00400000)
+
+#if defined(CONFIG_XTENSA_KSEG_MMU_V2)
+
+#define XCHAL_KSEG_CACHED_VADDR	__XTENSA_UL_CONST(0xd0000000)
+#define XCHAL_KSEG_BYPASS_VADDR	__XTENSA_UL_CONST(0xd8000000)
+#define XCHAL_KSEG_SIZE		__XTENSA_UL_CONST(0x08000000)
+#define XCHAL_KSEG_ALIGNMENT	__XTENSA_UL_CONST(0x08000000)
+#define XCHAL_KSEG_TLB_WAY	5
+#define XCHAL_KIO_TLB_WAY	6
+
+#elif defined(CONFIG_XTENSA_KSEG_256M)
+
+#define XCHAL_KSEG_CACHED_VADDR	__XTENSA_UL_CONST(0xb0000000)
+#define XCHAL_KSEG_BYPASS_VADDR	__XTENSA_UL_CONST(0xc0000000)
+#define XCHAL_KSEG_SIZE		__XTENSA_UL_CONST(0x10000000)
+#define XCHAL_KSEG_ALIGNMENT	__XTENSA_UL_CONST(0x10000000)
+#define XCHAL_KSEG_TLB_WAY	6
+#define XCHAL_KIO_TLB_WAY	6
+
+#elif defined(CONFIG_XTENSA_KSEG_512M)
+
+#define XCHAL_KSEG_CACHED_VADDR	__XTENSA_UL_CONST(0xa0000000)
+#define XCHAL_KSEG_BYPASS_VADDR	__XTENSA_UL_CONST(0xc0000000)
+#define XCHAL_KSEG_SIZE		__XTENSA_UL_CONST(0x20000000)
+#define XCHAL_KSEG_ALIGNMENT	__XTENSA_UL_CONST(0x10000000)
+#define XCHAL_KSEG_TLB_WAY	6
+#define XCHAL_KIO_TLB_WAY	6
+
+#else
+#error Unsupported KSEG configuration
+#endif
+
+#ifdef CONFIG_KSEG_PADDR
+#define XCHAL_KSEG_PADDR        __XTENSA_UL_CONST(CONFIG_KSEG_PADDR)
+#else
+#define XCHAL_KSEG_PADDR	__XTENSA_UL_CONST(0x00000000)
+#endif
+
+#if XCHAL_KSEG_PADDR & (XCHAL_KSEG_ALIGNMENT - 1)
+#error XCHAL_KSEG_PADDR is not properly aligned to XCHAL_KSEG_ALIGNMENT
+#endif
+
+#else
+
+#define XCHAL_KSEG_CACHED_VADDR	__XTENSA_UL_CONST(0xd0000000)
+#define XCHAL_KSEG_BYPASS_VADDR	__XTENSA_UL_CONST(0xd8000000)
+#define XCHAL_KSEG_SIZE		__XTENSA_UL_CONST(0x08000000)
+
+#endif
+
+#endif

+ 12 - 15
arch/xtensa/include/asm/page.h

@@ -15,15 +15,7 @@
 #include <asm/types.h>
 #include <asm/cache.h>
 #include <platform/hardware.h>
-
-/*
- * Fixed TLB translations in the processor.
- */
-
-#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000)
-#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000)
-#define XCHAL_KSEG_PADDR        __XTENSA_UL_CONST(0x00000000)
-#define XCHAL_KSEG_SIZE         __XTENSA_UL_CONST(0x08000000)
+#include <asm/kmem_layout.h>
 
 /*
  * PAGE_SHIFT determines the page size
@@ -35,10 +27,13 @@
 
 #ifdef CONFIG_MMU
 #define PAGE_OFFSET	XCHAL_KSEG_CACHED_VADDR
-#define MAX_MEM_PFN	XCHAL_KSEG_SIZE
+#define PHYS_OFFSET	XCHAL_KSEG_PADDR
+#define MAX_LOW_PFN	(PHYS_PFN(XCHAL_KSEG_PADDR) + \
+			 PHYS_PFN(XCHAL_KSEG_SIZE))
 #else
-#define PAGE_OFFSET	__XTENSA_UL_CONST(0)
-#define MAX_MEM_PFN	(PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
+#define PAGE_OFFSET	PLATFORM_DEFAULT_MEM_START
+#define PHYS_OFFSET	PLATFORM_DEFAULT_MEM_START
+#define MAX_LOW_PFN	PHYS_PFN(0xfffffffful)
 #endif
 
 #define PGTABLE_START	0x80000000
@@ -167,10 +162,12 @@ void copy_user_highpage(struct page *to, struct page *from,
  * addresses.
  */
 
-#define ARCH_PFN_OFFSET		(PLATFORM_DEFAULT_MEM_START >> PAGE_SHIFT)
+#define ARCH_PFN_OFFSET		(PHYS_OFFSET >> PAGE_SHIFT)
 
-#define __pa(x)			((unsigned long) (x) - PAGE_OFFSET)
-#define __va(x)			((void *)((unsigned long) (x) + PAGE_OFFSET))
+#define __pa(x)	\
+	((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
+#define __va(x)	\
+	((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
 #define pfn_valid(pfn) \
 	((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
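
With PHYS_OFFSET folded in, __pa()/__va() are no longer a fixed shift by
0xd0000000. A worked example with hypothetical values for the 256MB layout
(PAGE_OFFSET = 0xb0000000, PHYS_OFFSET = 0x10000000):

    __pa(0xb0003000) == 0xb0003000 - 0xb0000000 + 0x10000000 == 0x10003000
    __va(0x10003000) == 0x10003000 - 0x10000000 + 0xb0000000 == 0xb0003000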
 

+ 4 - 3
arch/xtensa/include/asm/pgtable.h

@@ -13,6 +13,7 @@
 
 #include <asm-generic/pgtable-nopmd.h>
 #include <asm/page.h>
+#include <asm/kmem_layout.h>
 
 /*
  * We only use two ring levels, user and kernel space.
@@ -68,9 +69,9 @@
  * Virtual memory area. We keep a distance to other memory regions to be
  * on the safe side. We also use this area for cache aliasing.
  */
-#define VMALLOC_START		0xC0000000
-#define VMALLOC_END		0xC7FEFFFF
-#define TLBTEMP_BASE_1		0xC7FF0000
+#define VMALLOC_START		(XCHAL_KSEG_CACHED_VADDR - 0x10000000)
+#define VMALLOC_END		(VMALLOC_START + 0x07FEFFFF)
+#define TLBTEMP_BASE_1		(VMALLOC_END + 1)
 #define TLBTEMP_BASE_2		(TLBTEMP_BASE_1 + DCACHE_WAY_SIZE)
 #if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE
 #define TLBTEMP_SIZE		(2 * DCACHE_WAY_SIZE)

+ 1 - 1
arch/xtensa/include/asm/processor.h

@@ -37,7 +37,7 @@
 #ifdef CONFIG_MMU
 #define TASK_SIZE	__XTENSA_UL_CONST(0x40000000)
 #else
-#define TASK_SIZE	(PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
+#define TASK_SIZE	__XTENSA_UL_CONST(0xffffffff)
 #endif
 
 #define STACK_TOP	TASK_SIZE

+ 1 - 20
arch/xtensa/include/asm/sysmem.h

@@ -11,27 +11,8 @@
 #ifndef _XTENSA_SYSMEM_H
 #define _XTENSA_SYSMEM_H
 
-#define SYSMEM_BANKS_MAX 31
+#include <linux/memblock.h>
 
-struct meminfo {
-	unsigned long start;
-	unsigned long end;
-};
-
-/*
- * Bank array is sorted by .start.
- * Banks don't overlap and there's at least one page gap
- * between adjacent bank entries.
- */
-struct sysmem_info {
-	int nr_banks;
-	struct meminfo bank[SYSMEM_BANKS_MAX];
-};
-
-extern struct sysmem_info sysmem;
-
-int add_sysmem_bank(unsigned long start, unsigned long end);
-int mem_reserve(unsigned long, unsigned long, int);
 void bootmem_init(void);
 void zones_init(void);
 

+ 24 - 43
arch/xtensa/include/asm/vectors.h

@@ -20,6 +20,7 @@
 
 #include <variant/core.h>
 #include <platform/hardware.h>
+#include <asm/kmem_layout.h>
 
 #if XCHAL_HAVE_PTP_MMU
 #define XCHAL_KIO_CACHED_VADDR		0xe0000000
@@ -47,61 +48,42 @@ static inline unsigned long xtensa_get_kio_paddr(void)
 
 #if defined(CONFIG_MMU)
 
-/* Will Become VECBASE */
-#define VIRTUAL_MEMORY_ADDRESS		0xD0000000
-
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
 /* Image Virtual Start Address */
-#define KERNELOFFSET			0xD0003000
-
-#if defined(XCHAL_HAVE_PTP_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
-  /* MMU v3  - XCHAL_HAVE_PTP_MMU  == 1 */
-  #define LOAD_MEMORY_ADDRESS		0x00003000
+#define KERNELOFFSET			(XCHAL_KSEG_CACHED_VADDR + \
+					 CONFIG_KERNEL_LOAD_ADDRESS - \
+					 XCHAL_KSEG_PADDR)
 #else
-  /* MMU V2 -  XCHAL_HAVE_PTP_MMU  == 0 */
-  #define LOAD_MEMORY_ADDRESS		0xD0003000
+#define KERNELOFFSET			CONFIG_KERNEL_LOAD_ADDRESS
 #endif
 
-#define RESET_VECTOR1_VADDR		(VIRTUAL_MEMORY_ADDRESS + \
-					 XCHAL_RESET_VECTOR1_PADDR)
-
 #else /* !defined(CONFIG_MMU) */
   /* MMU Not being used - Virtual == Physical */
 
-  /* VECBASE */
-  #define VIRTUAL_MEMORY_ADDRESS	(PLATFORM_DEFAULT_MEM_START + 0x2000)
+/* Location of the start of the kernel text, _start */
+#define KERNELOFFSET			CONFIG_KERNEL_LOAD_ADDRESS
 
-  /* Location of the start of the kernel text, _start */
-  #define KERNELOFFSET			(PLATFORM_DEFAULT_MEM_START + 0x3000)
-
-  /* Loaded just above possibly live vectors */
-  #define LOAD_MEMORY_ADDRESS		(PLATFORM_DEFAULT_MEM_START + 0x3000)
-
-#define RESET_VECTOR1_VADDR		(XCHAL_RESET_VECTOR1_VADDR)
 
 #endif /* CONFIG_MMU */
 
-#define XC_VADDR(offset)		(VIRTUAL_MEMORY_ADDRESS  + offset)
-
-/* Used to set VECBASE register */
-#define VECBASE_RESET_VADDR		VIRTUAL_MEMORY_ADDRESS
+#define RESET_VECTOR1_VADDR		(XCHAL_RESET_VECTOR1_VADDR)
+#define VECBASE_VADDR			(KERNELOFFSET - CONFIG_VECTORS_OFFSET)
 
 #if defined(XCHAL_HAVE_VECBASE) && XCHAL_HAVE_VECBASE
 
-#define USER_VECTOR_VADDR		XC_VADDR(XCHAL_USER_VECOFS)
-#define KERNEL_VECTOR_VADDR		XC_VADDR(XCHAL_KERNEL_VECOFS)
-#define DOUBLEEXC_VECTOR_VADDR		XC_VADDR(XCHAL_DOUBLEEXC_VECOFS)
-#define WINDOW_VECTORS_VADDR		XC_VADDR(XCHAL_WINDOW_OF4_VECOFS)
-#define INTLEVEL2_VECTOR_VADDR		XC_VADDR(XCHAL_INTLEVEL2_VECOFS)
-#define INTLEVEL3_VECTOR_VADDR		XC_VADDR(XCHAL_INTLEVEL3_VECOFS)
-#define INTLEVEL4_VECTOR_VADDR		XC_VADDR(XCHAL_INTLEVEL4_VECOFS)
-#define INTLEVEL5_VECTOR_VADDR		XC_VADDR(XCHAL_INTLEVEL5_VECOFS)
-#define INTLEVEL6_VECTOR_VADDR		XC_VADDR(XCHAL_INTLEVEL6_VECOFS)
-
-#define DEBUG_VECTOR_VADDR		XC_VADDR(XCHAL_DEBUG_VECOFS)
+#define VECTOR_VADDR(offset)		(VECBASE_VADDR + offset)
 
-#define NMI_VECTOR_VADDR		XC_VADDR(XCHAL_NMI_VECOFS)
-
-#define INTLEVEL7_VECTOR_VADDR		XC_VADDR(XCHAL_INTLEVEL7_VECOFS)
+#define USER_VECTOR_VADDR		VECTOR_VADDR(XCHAL_USER_VECOFS)
+#define KERNEL_VECTOR_VADDR		VECTOR_VADDR(XCHAL_KERNEL_VECOFS)
+#define DOUBLEEXC_VECTOR_VADDR		VECTOR_VADDR(XCHAL_DOUBLEEXC_VECOFS)
+#define WINDOW_VECTORS_VADDR		VECTOR_VADDR(XCHAL_WINDOW_OF4_VECOFS)
+#define INTLEVEL2_VECTOR_VADDR		VECTOR_VADDR(XCHAL_INTLEVEL2_VECOFS)
+#define INTLEVEL3_VECTOR_VADDR		VECTOR_VADDR(XCHAL_INTLEVEL3_VECOFS)
+#define INTLEVEL4_VECTOR_VADDR		VECTOR_VADDR(XCHAL_INTLEVEL4_VECOFS)
+#define INTLEVEL5_VECTOR_VADDR		VECTOR_VADDR(XCHAL_INTLEVEL5_VECOFS)
+#define INTLEVEL6_VECTOR_VADDR		VECTOR_VADDR(XCHAL_INTLEVEL6_VECOFS)
+#define INTLEVEL7_VECTOR_VADDR		VECTOR_VADDR(XCHAL_INTLEVEL7_VECOFS)
+#define DEBUG_VECTOR_VADDR		VECTOR_VADDR(XCHAL_DEBUG_VECOFS)
 
 /*
  * These XCHAL_* #defines from varian/core.h
@@ -109,7 +91,6 @@ static inline unsigned long xtensa_get_kio_paddr(void)
  * constants are defined above and should be used.
  */
 #undef  XCHAL_VECBASE_RESET_VADDR
-#undef  XCHAL_RESET_VECTOR0_VADDR
 #undef  XCHAL_USER_VECTOR_VADDR
 #undef  XCHAL_KERNEL_VECTOR_VADDR
 #undef  XCHAL_DOUBLEEXC_VECTOR_VADDR
@@ -119,9 +100,8 @@ static inline unsigned long xtensa_get_kio_paddr(void)
 #undef  XCHAL_INTLEVEL4_VECTOR_VADDR
 #undef  XCHAL_INTLEVEL5_VECTOR_VADDR
 #undef  XCHAL_INTLEVEL6_VECTOR_VADDR
-#undef  XCHAL_DEBUG_VECTOR_VADDR
-#undef  XCHAL_NMI_VECTOR_VADDR
 #undef  XCHAL_INTLEVEL7_VECTOR_VADDR
+#undef  XCHAL_DEBUG_VECTOR_VADDR
 
 #else
 
@@ -134,6 +114,7 @@ static inline unsigned long xtensa_get_kio_paddr(void)
 #define INTLEVEL4_VECTOR_VADDR		XCHAL_INTLEVEL4_VECTOR_VADDR
 #define INTLEVEL5_VECTOR_VADDR		XCHAL_INTLEVEL5_VECTOR_VADDR
 #define INTLEVEL6_VECTOR_VADDR		XCHAL_INTLEVEL6_VECTOR_VADDR
+#define INTLEVEL7_VECTOR_VADDR		XCHAL_INTLEVEL7_VECTOR_VADDR
 #define DEBUG_VECTOR_VADDR		XCHAL_DEBUG_VECTOR_VADDR
 
 #endif
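
Under the default MMUv2-compatible layout these formulas reproduce the old
hardcoded constants. A worked example assuming CONFIG_KERNEL_LOAD_ADDRESS =
0x00003000, CONFIG_VECTORS_OFFSET = 0x3000 and XCHAL_KSEG_PADDR = 0:

    KERNELOFFSET      == 0xd0000000 + 0x00003000 - 0 == 0xd0003000
    VECBASE_VADDR     == 0xd0003000 - 0x3000         == 0xd0000000
    USER_VECTOR_VADDR == VECBASE_VADDR + XCHAL_USER_VECOFS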

+ 2 - 1
arch/xtensa/include/uapi/asm/types.h

@@ -18,7 +18,8 @@
 # define __XTENSA_UL_CONST(x)	x
 #else
 # define __XTENSA_UL(x)		((unsigned long)(x))
-# define __XTENSA_UL_CONST(x)	x##UL
+# define ___XTENSA_UL_CONST(x)	x##UL
+# define __XTENSA_UL_CONST(x)	___XTENSA_UL_CONST(x)
 #endif
 
 #ifndef __ASSEMBLY__
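
The extra indirection is the standard preprocessor trick that forces macro
arguments to expand before '##' pastes them; with a single level, passing a
Kconfig symbol would paste the unexpanded name. A sketch with hypothetical
UL_1/UL_2 macros:

    #define CONFIG_KSEG_PADDR 0x10000000    /* normally comes from Kconfig */

    #define UL_1(x)  x##UL          /* one level: pastes the raw parameter */
    #define UL_2_(x) x##UL
    #define UL_2(x)  UL_2_(x)       /* two levels: argument expands first */

    UL_1(CONFIG_KSEG_PADDR)         /* -> CONFIG_KSEG_PADDRUL (broken) */
    UL_2(CONFIG_KSEG_PADDR)         /* -> 0x10000000UL */

This is what lets kmem_layout.h write __XTENSA_UL_CONST(CONFIG_KSEG_PADDR).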

+ 14 - 1
arch/xtensa/include/uapi/asm/unistd.h

@@ -754,7 +754,20 @@ __SYSCALL(340, sys_bpf, 3)
 #define __NR_execveat				341
 __SYSCALL(341, sys_execveat, 5)
 
-#define __NR_syscall_count			342
+#define __NR_userfaultfd			342
+__SYSCALL(342, sys_userfaultfd, 1)
+#define __NR_membarrier				343
+__SYSCALL(343, sys_membarrier, 2)
+#define __NR_mlock2				344
+__SYSCALL(344, sys_mlock2, 3)
+#define __NR_copy_file_range			345
+__SYSCALL(345, sys_copy_file_range, 6)
+#define __NR_preadv2				346
+__SYSCALL(346, sys_preadv2, 6)
+#define __NR_pwritev2				347
+__SYSCALL(347, sys_pwritev2, 6)
+
+#define __NR_syscall_count			348
 
 /*
  * sysxtensa syscall handler
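
A minimal userspace sketch (not part of this commit) exercising one of the
newly wired syscalls through the generic syscall(2) wrapper, assuming libc
headers that define __NR_membarrier; MEMBARRIER_CMD_QUERY (0) returns the
mask of supported membarrier commands:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    int main(void)
    {
            long mask = syscall(__NR_membarrier, 0 /* MEMBARRIER_CMD_QUERY */, 0);
            printf("membarrier command mask: %ld\n", mask);
            return 0;
    }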

+ 3 - 2
arch/xtensa/kernel/entry.S

@@ -1632,10 +1632,11 @@ ENTRY(fast_second_level_miss)
 	 * The messy computation for 'pteval' above really simplifies
 	 * into the following:
 	 *
-	 * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY
+	 * pteval = ((pmdval - PAGE_OFFSET + PHYS_OFFSET) & PAGE_MASK)
+	 *                 | PAGE_DIRECTORY
 	 */
 
-	movi	a1, (-PAGE_OFFSET) & 0xffffffff
+	movi	a1, (PHYS_OFFSET - PAGE_OFFSET) & 0xffffffff
 	add	a0, a0, a1		# pmdval - PAGE_OFFSET
 	extui	a1, a0, 0, PAGE_SHIFT	# ... & PAGE_MASK
 	xor	a0, a0, a1
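
The same computation in C, with hypothetical example values (PAGE_OFFSET =
0xb0000000, PHYS_OFFSET = 0x10000000, 4KB pages):

    unsigned long pmdval = 0xb0123456;
    unsigned long pteval = (pmdval - 0xb0000000 + 0x10000000) & ~0xfffUL;
    /* pteval == 0x10123000; the code then ORs in PAGE_DIRECTORY */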

+ 1 - 1
arch/xtensa/kernel/head.S

@@ -113,7 +113,7 @@ ENTRY(_startup)
 	movi	a0, 0
 
 #if XCHAL_HAVE_VECBASE
-	movi    a2, VECBASE_RESET_VADDR
+	movi    a2, VECBASE_VADDR
 	wsr	a2, vecbase
 #endif
 

+ 21 - 28
arch/xtensa/kernel/setup.c

@@ -7,6 +7,7 @@
  *
  * Copyright (C) 1995  Linus Torvalds
  * Copyright (C) 2001 - 2005  Tensilica Inc.
+ * Copyright (C) 2014 - 2016  Cadence Design Systems Inc.
  *
  * Chris Zankel	<chris@zankel.net>
  * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
@@ -24,6 +25,7 @@
 #include <linux/percpu.h>
 #include <linux/clk-provider.h>
 #include <linux/cpu.h>
+#include <linux/of.h>
 #include <linux/of_fdt.h>
 #include <linux/of_platform.h>
 
@@ -114,7 +116,7 @@ static int __init parse_tag_mem(const bp_tag_t *tag)
 	if (mi->type != MEMORY_TYPE_CONVENTIONAL)
 		return -1;
 
-	return add_sysmem_bank(mi->start, mi->end);
+	return memblock_add(mi->start, mi->end - mi->start);
 }
 
 __tagtable(BP_TAG_MEMORY, parse_tag_mem);
@@ -188,7 +190,6 @@ static int __init parse_bootparam(const bp_tag_t* tag)
 }
 
 #ifdef CONFIG_OF
-bool __initdata dt_memory_scan = false;
 
 #if !XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY
 unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR;
@@ -228,11 +229,8 @@ static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
 
 void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 {
-	if (!dt_memory_scan)
-		return;
-
 	size &= PAGE_MASK;
-	add_sysmem_bank(base, base + size);
+	memblock_add(base, size);
 }
 
 void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
@@ -242,9 +240,6 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 
 void __init early_init_devtree(void *params)
 {
-	if (sysmem.nr_banks == 0)
-		dt_memory_scan = true;
-
 	early_init_dt_scan(params);
 	of_scan_flat_dt(xtensa_dt_io_area, NULL);
 
@@ -278,12 +273,6 @@ void __init init_arch(bp_tag_t *bp_start)
 	early_init_devtree(dtb_start);
 #endif
 
-	if (sysmem.nr_banks == 0) {
-		add_sysmem_bank(PLATFORM_DEFAULT_MEM_START,
-				PLATFORM_DEFAULT_MEM_START +
-				PLATFORM_DEFAULT_MEM_SIZE);
-	}
-
 #ifdef CONFIG_CMDLINE_BOOL
 	if (!command_line[0])
 		strlcpy(command_line, default_command_line, COMMAND_LINE_SIZE);
@@ -453,6 +442,10 @@ static int __init check_s32c1i(void)
 early_initcall(check_s32c1i);
 #endif /* CONFIG_S32C1I_SELFTEST */
 
+static inline int mem_reserve(unsigned long start, unsigned long end)
+{
+	return memblock_reserve(start, end - start);
+}
 
 void __init setup_arch(char **cmdline_p)
 {
@@ -464,54 +457,54 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (initrd_start < initrd_end) {
 		initrd_is_mapped = mem_reserve(__pa(initrd_start),
-					       __pa(initrd_end), 0) == 0;
+					       __pa(initrd_end)) == 0;
 		initrd_below_start_ok = 1;
 	} else {
 		initrd_start = 0;
 	}
 #endif
 
-	mem_reserve(__pa(&_stext),__pa(&_end), 1);
+	mem_reserve(__pa(&_stext), __pa(&_end));
 
 	mem_reserve(__pa(&_WindowVectors_text_start),
-		    __pa(&_WindowVectors_text_end), 0);
+		    __pa(&_WindowVectors_text_end));
 
 	mem_reserve(__pa(&_DebugInterruptVector_literal_start),
-		    __pa(&_DebugInterruptVector_text_end), 0);
+		    __pa(&_DebugInterruptVector_text_end));
 
 	mem_reserve(__pa(&_KernelExceptionVector_literal_start),
-		    __pa(&_KernelExceptionVector_text_end), 0);
+		    __pa(&_KernelExceptionVector_text_end));
 
 	mem_reserve(__pa(&_UserExceptionVector_literal_start),
-		    __pa(&_UserExceptionVector_text_end), 0);
+		    __pa(&_UserExceptionVector_text_end));
 
 	mem_reserve(__pa(&_DoubleExceptionVector_literal_start),
-		    __pa(&_DoubleExceptionVector_text_end), 0);
+		    __pa(&_DoubleExceptionVector_text_end));
 
 #if XCHAL_EXCM_LEVEL >= 2
 	mem_reserve(__pa(&_Level2InterruptVector_text_start),
-		    __pa(&_Level2InterruptVector_text_end), 0);
+		    __pa(&_Level2InterruptVector_text_end));
 #endif
 #if XCHAL_EXCM_LEVEL >= 3
 	mem_reserve(__pa(&_Level3InterruptVector_text_start),
-		    __pa(&_Level3InterruptVector_text_end), 0);
+		    __pa(&_Level3InterruptVector_text_end));
 #endif
 #if XCHAL_EXCM_LEVEL >= 4
 	mem_reserve(__pa(&_Level4InterruptVector_text_start),
-		    __pa(&_Level4InterruptVector_text_end), 0);
+		    __pa(&_Level4InterruptVector_text_end));
 #endif
 #if XCHAL_EXCM_LEVEL >= 5
 	mem_reserve(__pa(&_Level5InterruptVector_text_start),
-		    __pa(&_Level5InterruptVector_text_end), 0);
+		    __pa(&_Level5InterruptVector_text_end));
 #endif
 #if XCHAL_EXCM_LEVEL >= 6
 	mem_reserve(__pa(&_Level6InterruptVector_text_start),
-		    __pa(&_Level6InterruptVector_text_end), 0);
+		    __pa(&_Level6InterruptVector_text_end));
 #endif
 
 #ifdef CONFIG_SMP
 	mem_reserve(__pa(&_SecondaryResetVector_text_start),
-		    __pa(&_SecondaryResetVector_text_end), 0);
+		    __pa(&_SecondaryResetVector_text_end));
 #endif
 	parse_early_param();
 	bootmem_init();

+ 0 - 4
arch/xtensa/kernel/vmlinux.lds.S

@@ -30,10 +30,6 @@ jiffies = jiffies_64 + 4;
 jiffies = jiffies_64;
 #endif
 
-#ifndef KERNELOFFSET
-#define KERNELOFFSET 0xd0003000
-#endif
-
 /* Note: In the following macros, it would be nice to specify only the
    vector name and section kind and construct "sym" and "section" using
    CPP concatenation, but that does not work reliably.  Concatenating a

+ 19 - 260
arch/xtensa/mm/init.c

@@ -8,7 +8,7 @@
  * for more details.
  *
  * Copyright (C) 2001 - 2005 Tensilica Inc.
- * Copyright (C) 2014 Cadence Design Systems Inc.
+ * Copyright (C) 2014 - 2016 Cadence Design Systems Inc.
  *
  * Chris Zankel	<chris@zankel.net>
  * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
@@ -25,284 +25,43 @@
 #include <linux/mman.h>
 #include <linux/nodemask.h>
 #include <linux/mm.h>
+#include <linux/of_fdt.h>
 
 #include <asm/bootparam.h>
 #include <asm/page.h>
 #include <asm/sections.h>
 #include <asm/sysmem.h>
 
-struct sysmem_info sysmem __initdata;
-
-static void __init sysmem_dump(void)
-{
-	unsigned i;
-
-	pr_debug("Sysmem:\n");
-	for (i = 0; i < sysmem.nr_banks; ++i)
-		pr_debug("  0x%08lx - 0x%08lx (%ldK)\n",
-			 sysmem.bank[i].start, sysmem.bank[i].end,
-			 (sysmem.bank[i].end - sysmem.bank[i].start) >> 10);
-}
-
-/*
- * Find bank with maximal .start such that bank.start <= start
- */
-static inline struct meminfo * __init find_bank(unsigned long start)
-{
-	unsigned i;
-	struct meminfo *it = NULL;
-
-	for (i = 0; i < sysmem.nr_banks; ++i)
-		if (sysmem.bank[i].start <= start)
-			it = sysmem.bank + i;
-		else
-			break;
-	return it;
-}
-
-/*
- * Move all memory banks starting at 'from' to a new place at 'to',
- * adjust nr_banks accordingly.
- * Both 'from' and 'to' must be inside the sysmem.bank.
- *
- * Returns: 0 (success), -ENOMEM (not enough space in the sysmem.bank).
- */
-static int __init move_banks(struct meminfo *to, struct meminfo *from)
-{
-	unsigned n = sysmem.nr_banks - (from - sysmem.bank);
-
-	if (to > from && to - from + sysmem.nr_banks > SYSMEM_BANKS_MAX)
-		return -ENOMEM;
-	if (to != from)
-		memmove(to, from, n * sizeof(struct meminfo));
-	sysmem.nr_banks += to - from;
-	return 0;
-}
-
-/*
- * Add new bank to sysmem. Resulting sysmem is the union of bytes of the
- * original sysmem and the new bank.
- *
- * Returns: 0 (success), < 0 (error)
- */
-int __init add_sysmem_bank(unsigned long start, unsigned long end)
-{
-	unsigned i;
-	struct meminfo *it = NULL;
-	unsigned long sz;
-	unsigned long bank_sz = 0;
-
-	if (start == end ||
-	    (start < end) != (PAGE_ALIGN(start) < (end & PAGE_MASK))) {
-		pr_warn("Ignoring small memory bank 0x%08lx size: %ld bytes\n",
-			start, end - start);
-		return -EINVAL;
-	}
-
-	start = PAGE_ALIGN(start);
-	end &= PAGE_MASK;
-	sz = end - start;
-
-	it = find_bank(start);
-
-	if (it)
-		bank_sz = it->end - it->start;
-
-	if (it && bank_sz >= start - it->start) {
-		if (end - it->start > bank_sz)
-			it->end = end;
-		else
-			return 0;
-	} else {
-		if (!it)
-			it = sysmem.bank;
-		else
-			++it;
-
-		if (it - sysmem.bank < sysmem.nr_banks &&
-		    it->start - start <= sz) {
-			it->start = start;
-			if (it->end - it->start < sz)
-				it->end = end;
-			else
-				return 0;
-		} else {
-			if (move_banks(it + 1, it) < 0) {
-				pr_warn("Ignoring memory bank 0x%08lx size %ld bytes\n",
-					start, end - start);
-				return -EINVAL;
-			}
-			it->start = start;
-			it->end = end;
-			return 0;
-		}
-	}
-	sz = it->end - it->start;
-	for (i = it + 1 - sysmem.bank; i < sysmem.nr_banks; ++i)
-		if (sysmem.bank[i].start - it->start <= sz) {
-			if (sz < sysmem.bank[i].end - it->start)
-				it->end = sysmem.bank[i].end;
-		} else {
-			break;
-		}
-
-	move_banks(it + 1, sysmem.bank + i);
-	return 0;
-}
-
-/*
- * mem_reserve(start, end, must_exist)
- *
- * Reserve some memory from the memory pool.
- * If must_exist is set and a part of the region being reserved does not exist
- * memory map is not altered.
- *
- * Parameters:
- *  start	Start of region,
- *  end		End of region,
- *  must_exist	Must exist in memory pool.
- *
- * Returns:
- *  0 (success)
- *  < 0 (error)
- */
-
-int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
-{
-	struct meminfo *it;
-	struct meminfo *rm = NULL;
-	unsigned long sz;
-	unsigned long bank_sz = 0;
-
-	start = start & PAGE_MASK;
-	end = PAGE_ALIGN(end);
-	sz = end - start;
-	if (!sz)
-		return -EINVAL;
-
-	it = find_bank(start);
-
-	if (it)
-		bank_sz = it->end - it->start;
-
-	if ((!it || end - it->start > bank_sz) && must_exist) {
-		pr_warn("mem_reserve: [0x%0lx, 0x%0lx) not in any region!\n",
-			start, end);
-		return -EINVAL;
-	}
-
-	if (it && start - it->start <= bank_sz) {
-		if (start == it->start) {
-			if (end - it->start < bank_sz) {
-				it->start = end;
-				return 0;
-			} else {
-				rm = it;
-			}
-		} else {
-			it->end = start;
-			if (end - it->start < bank_sz)
-				return add_sysmem_bank(end,
-						       it->start + bank_sz);
-			++it;
-		}
-	}
-
-	if (!it)
-		it = sysmem.bank;
-
-	for (; it < sysmem.bank + sysmem.nr_banks; ++it) {
-		if (it->end - start <= sz) {
-			if (!rm)
-				rm = it;
-		} else {
-			if (it->start - start < sz)
-				it->start = end;
-			break;
-		}
-	}
-
-	if (rm)
-		move_banks(rm, it);
-
-	return 0;
-}
-
-
 /*
  * Initialize the bootmem system and give it all low memory we have available.
  */
 
 void __init bootmem_init(void)
 {
-	unsigned long pfn;
-	unsigned long bootmap_start, bootmap_size;
-	int i;
-
-	/* Reserve all memory below PLATFORM_DEFAULT_MEM_START, as memory
+	/* Reserve all memory below PHYS_OFFSET, as memory
 	 * accounting doesn't work for pages below that address.
 	 *
-	 * If PLATFORM_DEFAULT_MEM_START is zero reserve page at address 0:
+	 * If PHYS_OFFSET is zero reserve page at address 0:
 	 * successful allocations should never return NULL.
 	 */
-	if (PLATFORM_DEFAULT_MEM_START)
-		mem_reserve(0, PLATFORM_DEFAULT_MEM_START, 0);
+	if (PHYS_OFFSET)
+		memblock_reserve(0, PHYS_OFFSET);
 	else
-		mem_reserve(0, 1, 0);
+		memblock_reserve(0, 1);
 
-	sysmem_dump();
-	max_low_pfn = max_pfn = 0;
-	min_low_pfn = ~0;
-
-	for (i=0; i < sysmem.nr_banks; i++) {
-		pfn = PAGE_ALIGN(sysmem.bank[i].start) >> PAGE_SHIFT;
-		if (pfn < min_low_pfn)
-			min_low_pfn = pfn;
-		pfn = PAGE_ALIGN(sysmem.bank[i].end - 1) >> PAGE_SHIFT;
-		if (pfn > max_pfn)
-			max_pfn = pfn;
-	}
+	early_init_fdt_scan_reserved_mem();
 
-	if (min_low_pfn > max_pfn)
+	if (!memblock_phys_mem_size())
 		panic("No memory found!\n");
 
-	max_low_pfn = max_pfn < MAX_MEM_PFN >> PAGE_SHIFT ?
-		max_pfn : MAX_MEM_PFN >> PAGE_SHIFT;
+	min_low_pfn = PFN_UP(memblock_start_of_DRAM());
+	min_low_pfn = max(min_low_pfn, PFN_UP(PHYS_OFFSET));
+	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
+	max_low_pfn = min(max_pfn, MAX_LOW_PFN);
 
-	/* Find an area to use for the bootmem bitmap. */
-
-	bootmap_size = bootmem_bootmap_pages(max_low_pfn - min_low_pfn);
-	bootmap_size <<= PAGE_SHIFT;
-	bootmap_start = ~0;
-
-	for (i=0; i<sysmem.nr_banks; i++)
-		if (sysmem.bank[i].end - sysmem.bank[i].start >= bootmap_size) {
-			bootmap_start = sysmem.bank[i].start;
-			break;
-		}
-
-	if (bootmap_start == ~0UL)
-		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
-
-	/* Reserve the bootmem bitmap area */
-
-	mem_reserve(bootmap_start, bootmap_start + bootmap_size, 1);
-	bootmap_size = init_bootmem_node(NODE_DATA(0),
-					 bootmap_start >> PAGE_SHIFT,
-					 min_low_pfn,
-					 max_low_pfn);
-
-	/* Add all remaining memory pieces into the bootmem map */
-
-	for (i = 0; i < sysmem.nr_banks; i++) {
-		if (sysmem.bank[i].start >> PAGE_SHIFT < max_low_pfn) {
-			unsigned long end = min(max_low_pfn << PAGE_SHIFT,
-						sysmem.bank[i].end);
-			free_bootmem(sysmem.bank[i].start,
-				     end - sysmem.bank[i].start);
-		}
-	}
+	memblock_set_current_limit(PFN_PHYS(max_low_pfn));
 
+	memblock_dump_all();
 }
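
The pfn clamping relies on PFN_UP/PFN_DOWN rounding byte addresses to page
frame numbers. A quick sketch assuming 4KB pages:

    /* PFN_UP(x) == (x + 4095) >> 12, PFN_DOWN(x) == x >> 12 */
    PFN_UP(0x10000001)   /* -> 0x10001: start of DRAM rounded up */
    PFN_DOWN(0x2fffffff) /* -> 0x2ffff: end of DRAM rounded down */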
 
 
@@ -344,7 +103,7 @@ void __init mem_init(void)
 		"    fixmap  : 0x%08lx - 0x%08lx  (%5lu kB)\n"
 #endif
 #ifdef CONFIG_MMU
-		"    vmalloc : 0x%08x - 0x%08x  (%5u MB)\n"
+		"    vmalloc : 0x%08lx - 0x%08lx  (%5lu MB)\n"
 #endif
 		"    lowmem  : 0x%08lx - 0x%08lx  (%5lu MB)\n",
 #ifdef CONFIG_HIGHMEM
@@ -395,16 +154,16 @@ static void __init parse_memmap_one(char *p)
 	switch (*p) {
 	case '@':
 		start_at = memparse(p + 1, &p);
-		add_sysmem_bank(start_at, start_at + mem_size);
+		memblock_add(start_at, mem_size);
 		break;
 
 	case '$':
 		start_at = memparse(p + 1, &p);
-		mem_reserve(start_at, start_at + mem_size, 0);
+		memblock_reserve(start_at, mem_size);
 		break;
 
 	case 0:
-		mem_reserve(mem_size, 0, 0);
+		memblock_reserve(mem_size, -mem_size);
 		break;
 
 	default:
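
For reference, the three memmap= forms handled above map directly onto
memblock calls (hypothetical example values): memmap=64M@0x10000000 adds a
64MB region at 0x10000000, memmap=1M$0x3f000000 reserves 1MB at 0x3f000000,
and a bare memmap=128M reserves everything above 128MB, capping usable
memory.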