
Merge tag 'pr-20141220-x86-vdso' of git://git.kernel.org/pub/scm/linux/kernel/git/luto/linux into x86/urgent

Pull a VDSO fix from Andy Lutomirski:

  "One vdso fix for a longstanding ASLR bug that's been in the news lately.

   The vdso base address has always been randomized, and I don't think there's
   anything particularly wrong with the range over which it's randomized,
   but the implementation seems to have been buggy since the very beginning.

   This fixes the implementation to remove a large bias that caused a small
   fraction of possible vdso load addresses to be vastly more likely than
   the rest of the possible addresses."

Signed-off-by: Ingo Molnar <mingo@kernel.org>
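
To make the reported bias concrete, here is a user-space sketch, not part of
the patch, that mimics the old clamping logic. The start and len values are
illustrative, the constants mirror x86-64 (4 KiB pages, 2 MiB PMDs, 512 PTEs
per PMD), rand() stands in for the kernel's get_random_int(), and the
TASK_SIZE_MAX clamp is omitted for brevity:

	/*
	 * Illustrative user-space simulation of the OLD vdso_addr()
	 * offset logic; not kernel code.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PMD_SIZE	(1UL << 21)
	#define PMD_MASK	(~(PMD_SIZE - 1))
	#define PTRS_PER_PTE	512

	int main(void)
	{
		unsigned long start = 0x7ffd12345000UL; /* illustrative, page-aligned stack top */
		unsigned long len = 2 * PAGE_SIZE;       /* illustrative vdso size */
		unsigned long end, clamped = 0, trials = 1000000;

		/* Old computation (TASK_SIZE_MAX clamp omitted for brevity). */
		end = (start + PMD_SIZE - 1) & PMD_MASK;
		end -= len;

		for (unsigned long i = 0; i < trials; i++) {
			unsigned offset = rand() & (PTRS_PER_PTE - 1);
			unsigned long addr = start + ((unsigned long)offset << PAGE_SHIFT);
			if (addr >= end) {
				addr = end;	/* every overshoot collapses onto 'end' */
				clamped++;
			}
		}
		printf("P(addr == end) ~= %.1f%%\n", 100.0 * clamped / trials);
		return 0;
	}

With these illustrative numbers, only 185 of the 512 possible page offsets
land below end, so the remaining 327 (roughly 64% of runs) all collapse onto
the single address end; that one address is vastly more likely than any
other, which is exactly the bias described above.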
Ingo Molnar
commit fbe1bf1406
1 changed file with 29 additions and 16 deletions

+ 29 - 16
arch/x86/vdso/vma.c

@@ -41,12 +41,17 @@ void __init init_vdso_image(const struct vdso_image *image)
 
 struct linux_binprm;
 
-/* Put the vdso above the (randomized) stack with another randomized offset.
-   This way there is no hole in the middle of address space.
-   To save memory make sure it is still in the same PTE as the stack top.
-   This doesn't give that many random bits.
-
-   Only used for the 64-bit and x32 vdsos. */
+/*
+ * Put the vdso above the (randomized) stack with another randomized
+ * offset.  This way there is no hole in the middle of address space.
+ * To save memory make sure it is still in the same PTE as the stack
+ * top.  This doesn't give that many random bits.
+ *
+ * Note that this algorithm is imperfect: the distribution of the vdso
+ * start address within a PMD is biased toward the end.
+ *
+ * Only used for the 64-bit and x32 vdsos.
+ */
 static unsigned long vdso_addr(unsigned long start, unsigned len)
 {
 #ifdef CONFIG_X86_32
@@ -54,22 +59,30 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 #else
 	unsigned long addr, end;
 	unsigned offset;
-	end = (start + PMD_SIZE - 1) & PMD_MASK;
+
+	/*
+	 * Round up the start address.  It can start out unaligned as a result
+	 * of stack start randomization.
+	 */
+	start = PAGE_ALIGN(start);
+
+	/* Round the lowest possible end address up to a PMD boundary. */
+	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
 	if (end >= TASK_SIZE_MAX)
 		end = TASK_SIZE_MAX;
 	end -= len;
-	/* This loses some more bits than a modulo, but is cheaper */
-	offset = get_random_int() & (PTRS_PER_PTE - 1);
-	addr = start + (offset << PAGE_SHIFT);
-	if (addr >= end)
-		addr = end;
+
+	if (end > start) {
+		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+		addr = start + (offset << PAGE_SHIFT);
+	} else {
+		addr = start;
+	}
 
 	/*
-	 * page-align it here so that get_unmapped_area doesn't
-	 * align it wrongfully again to the next page. addr can come in 4K
-	 * unaligned here as a result of stack start randomization.
+	 * Forcibly align the final address in case we have a hardware
+	 * issue that requires alignment for performance reasons.
 	 */
-	addr = PAGE_ALIGN(addr);
 	addr = align_vdso_addr(addr);
 
 	return addr;
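
For comparison, here is the same user-space sketch updated to the new
computation, which draws the page offset uniformly over the exact number of
valid pages instead of masking and clamping. Again rand() stands in for
get_random_int(), a manual round-up stands in for PAGE_ALIGN(), and the
TASK_SIZE_MAX clamp and the final align_vdso_addr() step are omitted:

	/*
	 * Illustrative user-space simulation of the NEW vdso_addr()
	 * offset logic; not kernel code.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PMD_SIZE	(1UL << 21)
	#define PMD_MASK	(~(PMD_SIZE - 1))

	static unsigned long new_vdso_addr(unsigned long start, unsigned long len)
	{
		unsigned long end, addr;
		unsigned offset;

		/* Round up the start address; stack randomization can leave it unaligned. */
		start = (start + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);

		/* Round the lowest possible end address up to a PMD boundary. */
		end = (start + len + PMD_SIZE - 1) & PMD_MASK;
		end -= len;

		if (end > start) {
			/* Uniform over every valid page offset, first and last included. */
			offset = rand() % (((end - start) >> PAGE_SHIFT) + 1);
			addr = start + ((unsigned long)offset << PAGE_SHIFT);
		} else {
			addr = start;
		}
		return addr;
	}

	int main(void)
	{
		unsigned long lo = ~0UL, hi = 0;

		for (int i = 0; i < 1000000; i++) {
			unsigned long a = new_vdso_addr(0x7ffd12345678UL, 2 * PAGE_SIZE);
			if (a < lo) lo = a;
			if (a > hi) hi = a;
		}
		printf("addresses span %#lx..%#lx, no single preferred address\n", lo, hi);
		return 0;
	}

The modulo costs slightly more than the old mask-and-clamp (the removed
comment noted the mask "loses some more bits than a modulo, but is cheaper"),
but it makes every page between start and end equally likely, which is the
point of the fix.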