
Merge branch 'fixes' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:

 - omit EFI memory map sorting, which was recently introduced but caused
   problems with the decompressor due to additional sections being
   emitted.

 - avoid unaligned load fault-generating instructions in the
   decompressor by switching to a private unaligned implementation.

 - add a symbol to the decompressor to help debug non-boot situations
   (ld's documentation is extremely poor on how "." works, and ld
   doesn't seem to follow its own documentation!)

 - pass endianness info to sparse

* 'fixes' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: add debug ".edata_real" symbol
  ARM: 8716/1: pass endianness info to sparse
  efi/libstub: arm: omit sorting of the UEFI memory map
  ARM: 8715/1: add a private asm/unaligned.h
Linus Torvalds, 7 years ago
commit 2d6349944d

+ 2 - 0
arch/arm/Makefile

@@ -44,10 +44,12 @@ endif
 
 ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
 KBUILD_CPPFLAGS	+= -mbig-endian
+CHECKFLAGS	+= -D__ARMEB__
 AS		+= -EB
 LD		+= -EB
 else
 KBUILD_CPPFLAGS	+= -mlittle-endian
+CHECKFLAGS	+= -D__ARMEL__
 AS		+= -EL
 LD		+= -EL
 endif
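
gcc derives __ARMEB__/__ARMEL__ from -mbig-endian/-mlittle-endian on its
own; the CHECKFLAGS additions make the same information available to
sparse explicitly, so endian-conditional code is checked for the
endianness actually being built. A minimal, hypothetical snippet of the
kind of code this affects (cpu_to_net32 is an illustration, not part of
the patch):

    /* Hypothetical helper: which branch is compiled depends on the
     * endianness macros that gcc defines implicitly and that sparse
     * now receives via CHECKFLAGS.
     */
    #include <stdint.h>

    static inline uint32_t cpu_to_net32(uint32_t x)
    {
    #if defined(__ARMEB__)
            return x;                       /* already big-endian */
    #elif defined(__ARMEL__)
            return __builtin_bswap32(x);    /* swap to network byte order */
    #else
    #error "ARM endianness macro not defined"
    #endif
    }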

+ 9 - 0
arch/arm/boot/compressed/vmlinux.lds.S

@@ -85,6 +85,15 @@ SECTIONS
 
   _edata = .;
 
+  /*
+   * The image_end section appears after any additional loadable sections
+   * that the linker may decide to insert in the binary image.  Having
+   * this symbol allows further debug in the near future.
+   */
+  .image_end (NOLOAD) : {
+    _edata_real = .;
+  }
+
   _magic_sig = ZIMAGE_MAGIC(0x016f2818);
   _magic_start = ZIMAGE_MAGIC(_start);
   _magic_end = ZIMAGE_MAGIC(_edata);
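
A sketch of the sort of debugging the new symbol enables (check_image_end
is hypothetical and not part of this commit; it assumes the decompressor's
existing error() helper): _edata marks the end of the sections placed
explicitly by this script, while _edata_real lands after anything the
linker appends on its own, so any gap between the two reveals unexpected
trailing loadable sections.

    /* Hypothetical check inside the decompressor: both symbols are
     * provided by vmlinux.lds.S above.
     */
    extern char _edata[], _edata_real[];

    static void check_image_end(void)
    {
            /* Any gap means the linker emitted loadable sections that the
             * size reported via _magic_end does not cover.
             */
            if (_edata_real != _edata)
                    error("sections emitted after _edata");
    }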

+ 0 - 1
arch/arm/include/asm/Kbuild

@@ -20,7 +20,6 @@ generic-y += simd.h
 generic-y += sizes.h
 generic-y += timex.h
 generic-y += trace_clock.h
-generic-y += unaligned.h
 
 generated-y += mach-types.h
 generated-y += unistd-nr.h

+ 27 - 0
arch/arm/include/asm/unaligned.h

@@ -0,0 +1,27 @@
+#ifndef __ASM_ARM_UNALIGNED_H
+#define __ASM_ARM_UNALIGNED_H
+
+/*
+ * We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+,
+ * but we don't want to use linux/unaligned/access_ok.h since that can lead
+ * to traps on unaligned stm/ldm or strd/ldrd.
+ */
+#include <asm/byteorder.h>
+
+#if defined(__LITTLE_ENDIAN)
+# include <linux/unaligned/le_struct.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned	__get_unaligned_le
+# define put_unaligned	__put_unaligned_le
+#elif defined(__BIG_ENDIAN)
+# include <linux/unaligned/be_struct.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned	__get_unaligned_be
+# define put_unaligned	__put_unaligned_be
+#else
+# error need to define endianess
+#endif
+
+#endif /* __ASM_ARM_UNALIGNED_H */
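
Callers keep the usual kernel accessors; only the implementation behind
them changes. A small hypothetical example of the intended use
(packet_len is an illustration): the struct-based and byteshift helpers
included above make the compiler emit accesses that are safe at any
alignment, instead of ldm/ldrd/strd sequences that can trap.

    /* Hypothetical caller: read a 32-bit little-endian length field that
     * may sit at any byte offset inside a packet buffer.
     */
    #include <linux/types.h>
    #include <asm/unaligned.h>

    static u32 packet_len(const u8 *buf, unsigned int offset)
    {
            /* Never compiled to ldm/ldrd, so it cannot fault even when
             * buf + offset is not 4-byte aligned.
             */
            return get_unaligned_le32(buf + offset);
    }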

+ 3 - 3
drivers/firmware/efi/libstub/Makefile

@@ -34,13 +34,14 @@ lib-y				:= efi-stub-helper.o gop.o secureboot.o
 lib-$(CONFIG_RESET_ATTACK_MITIGATION) += tpm.o
 
 # include the stub's generic dependencies from lib/ when building for ARM/arm64
-arm-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c sort.c
+arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
+arm-deps-$(CONFIG_ARM64) += sort.c
 
 $(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
 	$(call if_changed_rule,cc_o_c)
 
 lib-$(CONFIG_EFI_ARMSTUB)	+= arm-stub.o fdt.o string.o random.o \
-				   $(patsubst %.c,lib-%.o,$(arm-deps))
+				   $(patsubst %.c,lib-%.o,$(arm-deps-y))
 
 lib-$(CONFIG_ARM)		+= arm32-stub.o
 lib-$(CONFIG_ARM64)		+= arm64-stub.o
@@ -91,5 +92,4 @@ quiet_cmd_stubcopy = STUBCPY $@
 # explicitly by the decompressor linker script.
 #
 STUBCOPY_FLAGS-$(CONFIG_ARM)	+= --rename-section .data=.data.efistub
-STUBCOPY_RM-$(CONFIG_ARM)	+= -R ___ksymtab+sort -R ___kcrctab+sort
 STUBCOPY_RELOC-$(CONFIG_ARM)	:= R_ARM_ABS
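
sort.c can be dropped from the 32-bit object list, and the
___ksymtab/___kcrctab stripping workaround with it, because the only call
site in arm-stub.c is now guarded by IS_ENABLED(CONFIG_ARM64) (next hunk).
That guard is a compile-time constant, so on ARM the branch and its
reference to sort() are eliminated before linking. A simplified,
standalone sketch of the mechanism behind IS_ENABLED() from
include/linux/kconfig.h (the real macro also handles =m options):

    /* Simplified sketch: enabled Kconfig options are #defined to 1 in the
     * generated autoconf.h; undefined options make IS_ENABLED() expand to
     * 0, so "if (IS_ENABLED(...))" is constant-folded and dead code is
     * removed.
     */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define IS_ENABLED(option) ___is_defined(option)

    #define CONFIG_ARM64 1          /* pretend this is an arm64 build */

    int map_needs_sorting(void)
    {
            /* Expands to 1 here; to 0 if CONFIG_ARM64 were left undefined,
             * letting the compiler discard the branch and its callees.
             */
            return IS_ENABLED(CONFIG_ARM64);
    }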

+ 5 - 2
drivers/firmware/efi/libstub/arm-stub.c

@@ -350,7 +350,9 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
 	 * The easiest way to find adjacent regions is to sort the memory map
 	 * before traversing it.
 	 */
-	sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL);
+	if (IS_ENABLED(CONFIG_ARM64))
+		sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc,
+		     NULL);
 
 	for (l = 0; l < map_size; l += desc_size, prev = in) {
 		u64 paddr, size;
@@ -367,7 +369,8 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
 		 * a 4k page size kernel to kexec a 64k page size kernel and
 		 * vice versa.
 		 */
-		if (!regions_are_adjacent(prev, in) ||
+		if ((IS_ENABLED(CONFIG_ARM64) &&
+		     !regions_are_adjacent(prev, in)) ||
 		    !regions_have_compatible_memory_type_attrs(prev, in)) {
 
 			paddr = round_down(in->phys_addr, SZ_64K);