/* mmap.c */
  1. /*
  2. * flexible mmap layout support
  3. *
  4. * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
  5. * All Rights Reserved.
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software
  19. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  20. *
  21. *
  22. * Started by Ingo Molnar <mingo@elte.hu>
  23. */
  24. #include <linux/personality.h>
  25. #include <linux/mm.h>
  26. #include <linux/random.h>
  27. #include <linux/sched/signal.h>
  28. #include <linux/sched/mm.h>
  29. #include <linux/elf-randomize.h>
  30. #include <linux/security.h>
  31. #include <linux/mman.h>
/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~128 MB hole on 32bit applications.
 *
 * On 64bit applications we randomise the stack by 1GB so we need to
 * space our mmap start address by a further 1GB, otherwise there is a
 * chance the mmap area will end up closer to the stack than our ulimit
 * requires.
 */
#define MIN_GAP32	(128*1024*1024)
#define MIN_GAP64	((128 + 1024)*1024*1024UL)
#define MIN_GAP		((is_32bit_task()) ? MIN_GAP32 : MIN_GAP64)
/* Never let the stack gap consume more than 5/6 of the address space. */
#define MAX_GAP		(TASK_SIZE/6*5)
  46. static inline int mmap_is_legacy(void)
  47. {
  48. if (current->personality & ADDR_COMPAT_LAYOUT)
  49. return 1;
  50. if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
  51. return 1;
  52. return sysctl_legacy_va_layout;
  53. }
  54. unsigned long arch_mmap_rnd(void)
  55. {
  56. unsigned long rnd;
  57. /* 8MB for 32bit, 1GB for 64bit */
  58. if (is_32bit_task())
  59. rnd = get_random_long() % (1<<(23-PAGE_SHIFT));
  60. else
  61. rnd = get_random_long() % (1UL<<(30-PAGE_SHIFT));
  62. return rnd << PAGE_SHIFT;
  63. }
  64. static inline unsigned long mmap_base(unsigned long rnd)
  65. {
  66. unsigned long gap = rlimit(RLIMIT_STACK);
  67. if (gap < MIN_GAP)
  68. gap = MIN_GAP;
  69. else if (gap > MAX_GAP)
  70. gap = MAX_GAP;
  71. return PAGE_ALIGN(TASK_SIZE - gap - rnd);
  72. }
  73. #ifdef CONFIG_PPC_RADIX_MMU
/*
 * Same function as the generic code, used only for radix, because we
 * don't need to overload the generic one. But we will have to
 * duplicate it, because hash selects HAVE_ARCH_UNMAPPED_AREA.
 */
  79. static unsigned long
  80. radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
  81. unsigned long len, unsigned long pgoff,
  82. unsigned long flags)
  83. {
  84. struct mm_struct *mm = current->mm;
  85. struct vm_area_struct *vma;
  86. struct vm_unmapped_area_info info;
  87. if (len > TASK_SIZE - mmap_min_addr)
  88. return -ENOMEM;
  89. if (flags & MAP_FIXED)
  90. return addr;
  91. if (addr) {
  92. addr = PAGE_ALIGN(addr);
  93. vma = find_vma(mm, addr);
  94. if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
  95. (!vma || addr + len <= vma->vm_start))
  96. return addr;
  97. }
  98. info.flags = 0;
  99. info.length = len;
  100. info.low_limit = mm->mmap_base;
  101. info.high_limit = TASK_SIZE;
  102. info.align_mask = 0;
  103. return vm_unmapped_area(&info);
  104. }
  105. static unsigned long
  106. radix__arch_get_unmapped_area_topdown(struct file *filp,
  107. const unsigned long addr0,
  108. const unsigned long len,
  109. const unsigned long pgoff,
  110. const unsigned long flags)
  111. {
  112. struct vm_area_struct *vma;
  113. struct mm_struct *mm = current->mm;
  114. unsigned long addr = addr0;
  115. struct vm_unmapped_area_info info;
  116. /* requested length too big for entire address space */
  117. if (len > TASK_SIZE - mmap_min_addr)
  118. return -ENOMEM;
  119. if (flags & MAP_FIXED)
  120. return addr;
  121. /* requesting a specific address */
  122. if (addr) {
  123. addr = PAGE_ALIGN(addr);
  124. vma = find_vma(mm, addr);
  125. if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
  126. (!vma || addr + len <= vma->vm_start))
  127. return addr;
  128. }
  129. info.flags = VM_UNMAPPED_AREA_TOPDOWN;
  130. info.length = len;
  131. info.low_limit = max(PAGE_SIZE, mmap_min_addr);
  132. info.high_limit = mm->mmap_base;
  133. info.align_mask = 0;
  134. addr = vm_unmapped_area(&info);
  135. /*
  136. * A failed mmap() very likely causes application failure,
  137. * so fall back to the bottom-up function here. This scenario
  138. * can happen with large stack limits and large mmap()
  139. * allocations.
  140. */
  141. if (addr & ~PAGE_MASK) {
  142. VM_BUG_ON(addr != -ENOMEM);
  143. info.flags = 0;
  144. info.low_limit = TASK_UNMAPPED_BASE;
  145. info.high_limit = TASK_SIZE;
  146. addr = vm_unmapped_area(&info);
  147. }
  148. return addr;
  149. }
  150. static void radix__arch_pick_mmap_layout(struct mm_struct *mm,
  151. unsigned long random_factor)
  152. {
  153. if (mmap_is_legacy()) {
  154. mm->mmap_base = TASK_UNMAPPED_BASE;
  155. mm->get_unmapped_area = radix__arch_get_unmapped_area;
  156. } else {
  157. mm->mmap_base = mmap_base(random_factor);
  158. mm->get_unmapped_area = radix__arch_get_unmapped_area_topdown;
  159. }
  160. }
  161. #else
  162. /* dummy */
  163. extern void radix__arch_pick_mmap_layout(struct mm_struct *mm,
  164. unsigned long random_factor);
  165. #endif
  166. /*
  167. * This function, called very early during the creation of a new
  168. * process VM image, sets up which VM layout function to use:
  169. */
  170. void arch_pick_mmap_layout(struct mm_struct *mm)
  171. {
  172. unsigned long random_factor = 0UL;
  173. if (current->flags & PF_RANDOMIZE)
  174. random_factor = arch_mmap_rnd();
  175. if (radix_enabled())
  176. return radix__arch_pick_mmap_layout(mm, random_factor);
  177. /*
  178. * Fall back to the standard layout if the personality
  179. * bit is set, or if the expected stack growth is unlimited:
  180. */
  181. if (mmap_is_legacy()) {
  182. mm->mmap_base = TASK_UNMAPPED_BASE;
  183. mm->get_unmapped_area = arch_get_unmapped_area;
  184. } else {
  185. mm->mmap_base = mmap_base(random_factor);
  186. mm->get_unmapped_area = arch_get_unmapped_area_topdown;
  187. }
  188. }