Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 - Wire up compat_sys_execveat for compat (AArch32) tasks
 - Revert 421520ba9829, as this breaks our side of the boot protocol

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: partially revert "ARM: 8167/1: extend the reserved memory for initrd to be page aligned"
  arm64: compat: wire up compat_sys_execveat
Linus Torvalds, 10 years ago
commit fa818dc488
3 changed files with 4 additions and 8 deletions
  1. + 1 - 1	arch/arm64/include/asm/unistd.h
  2. + 2 - 0	arch/arm64/include/asm/unistd32.h
  3. + 1 - 7	arch/arm64/mm/init.c

+ 1 - 1
arch/arm64/include/asm/unistd.h

@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls		(__ARM_NR_COMPAT_BASE+5)
 
-#define __NR_compat_syscalls		387
+#define __NR_compat_syscalls		388
 #endif
 
 #define __ARCH_WANT_SYS_CLONE

+ 2 - 0
arch/arm64/include/asm/unistd32.h

@@ -795,3 +795,5 @@ __SYSCALL(__NR_getrandom, sys_getrandom)
 __SYSCALL(__NR_memfd_create, sys_memfd_create)
 #define __NR_bpf 386
 __SYSCALL(__NR_bpf, sys_bpf)
+#define __NR_execveat 387
+__SYSCALL(__NR_execveat, compat_sys_execveat)
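
With the table entry above, a 32-bit (AArch32) task running on an arm64 kernel can reach execveat() via compat syscall number 387. The following is a minimal userspace sketch, not part of the commit: it assumes it is built as a 32-bit binary whose libc headers may not yet define __NR_execveat, and uses /bin/true purely as an illustrative target.

/* Hypothetical example: invoke execveat() by raw syscall number from an
 * AArch32 binary.  387 is the compat number wired up by this commit
 * (it matches the native 32-bit ARM number). */
#include <fcntl.h>      /* AT_FDCWD */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_execveat
#define __NR_execveat 387
#endif

int main(void)
{
	char *argv[] = { "true", NULL };
	char *envp[] = { NULL };

	/* execveat(dirfd, pathname, argv, envp, flags): replace this process
	 * with /bin/true, resolved relative to AT_FDCWD. */
	syscall(__NR_execveat, AT_FDCWD, "/bin/true", argv, envp, 0);

	/* Only reached if the syscall failed (e.g. ENOSYS on older kernels). */
	perror("execveat");
	return 1;
}

Before this change the compat table ended at __NR_bpf (386), so the same call from a 32-bit task would typically fail with ENOSYS.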

+ 1 - 7
arch/arm64/mm/init.c

@@ -335,14 +335,8 @@ static int keep_initrd;
 
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	if (!keep_initrd) {
-		if (start == initrd_start)
-			start = round_down(start, PAGE_SIZE);
-		if (end == initrd_end)
-			end = round_up(end, PAGE_SIZE);
-
+	if (!keep_initrd)
 		free_reserved_area((void *)start, (void *)end, 0, "initrd");
-	}
 }
 
 static int __init keepinitrd_setup(char *__unused)