mmap.c

/*
 * flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/elf-randomize.h>
#include <linux/security.h>
#include <linux/mman.h>

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~128 MB hole for 32-bit applications.
 *
 * For 64-bit applications the stack is randomised by up to 1 GB, so the
 * mmap base must be spaced a further 1 GB below it; otherwise there is a
 * chance the mmap area will end up closer to the stack than our ulimit
 * requires.
 */
#define MIN_GAP32	(128*1024*1024)
#define MIN_GAP64	((128 + 1024)*1024*1024UL)
#define MIN_GAP		((is_32bit_task()) ? MIN_GAP32 : MIN_GAP64)
#define MAX_GAP		(TASK_SIZE/6*5)
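
/*
 * Worked example of the gap bounds (the concrete TASK_SIZE value below is
 * illustrative, not fixed by this file): MIN_GAP64 = (128 + 1024) MB =
 * 1152 MB, i.e. the 128 MB baseline hole plus headroom for up to 1 GB of
 * stack randomisation.  If TASK_SIZE were 128 TB, MAX_GAP = TASK_SIZE/6*5
 * would cap the stack gap at ~106.7 TB, keeping at least a sixth of the
 * address space available to the mmap region no matter how large
 * RLIMIT_STACK is.
 */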

static inline int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}
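
/*
 * Rationale (our reading of the checks above): ADDR_COMPAT_LAYOUT and the
 * legacy_va_layout sysctl are explicit requests for the old bottom-up
 * layout, and RLIM_INFINITY is treated the same way because an unlimited
 * stack leaves no bounded gap from which to derive a top-down mmap base.
 */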

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = get_random_long() % (1<<(23-PAGE_SHIFT));
	else
		rnd = get_random_long() % (1UL<<(30-PAGE_SHIFT));

	return rnd << PAGE_SHIFT;
}
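
/*
 * Example of the arithmetic above, assuming PAGE_SHIFT = 16 (64K pages)
 * purely for illustration: a 64-bit task draws rnd from
 * [0, 1UL << (30 - 16)) = [0, 16384) pages, and rnd << PAGE_SHIFT turns
 * that into a page-aligned byte offset in [0, 1 GB).  The 32-bit case
 * works the same way and yields an offset in [0, 8 MB).
 */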

static inline unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(DEFAULT_MAP_WINDOW - gap - rnd);
}
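
/*
 * A worked example (the 8 MB stack limit is a common default used only for
 * illustration): with RLIMIT_STACK = 8 MB on a 64-bit task, gap < MIN_GAP64,
 * so gap is clamped up to 1152 MB and the top-down base becomes
 * PAGE_ALIGN(DEFAULT_MAP_WINDOW - 1152 MB - rnd).  The mmap region thus
 * starts at least MIN_GAP below the top of the map window regardless of
 * how small the stack limit is.
 */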

#ifdef CONFIG_PPC_RADIX_MMU
/*
 * Same as the generic code, used only for radix: we wouldn't need to
 * override the generic version, but we have to duplicate it here because
 * hash selects HAVE_ARCH_UNMAPPED_AREA.
 */
static unsigned long
radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
			      unsigned long len, unsigned long pgoff,
			      unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;

	if (unlikely(addr > mm->context.addr_limit &&
		     mm->context.addr_limit != TASK_SIZE))
		mm->context.addr_limit = TASK_SIZE;

	if (len > mm->task_size - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.align_mask = 0;

	if (unlikely(addr > DEFAULT_MAP_WINDOW))
		info.high_limit = mm->context.addr_limit;
	else
		info.high_limit = DEFAULT_MAP_WINDOW;

	return vm_unmapped_area(&info);
}
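
/*
 * Note on the high_limit choice above: to the best of our understanding,
 * DEFAULT_MAP_WINDOW is the boundary of the traditional user address space
 * on this platform.  A hint address above it is treated as an explicit
 * opt-in to the extended address space, so the search window grows to
 * mm->context.addr_limit; hints at or below it keep allocations inside
 * the default window.
 */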

static unsigned long
radix__arch_get_unmapped_area_topdown(struct file *filp,
				      const unsigned long addr0,
				      const unsigned long len,
				      const unsigned long pgoff,
				      const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	if (unlikely(addr > mm->context.addr_limit &&
		     mm->context.addr_limit != TASK_SIZE))
		mm->context.addr_limit = TASK_SIZE;

	/* requested length too big for entire address space */
	if (len > mm->task_size - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	info.align_mask = 0;

	if (addr > DEFAULT_MAP_WINDOW)
		info.high_limit += mm->context.addr_limit - DEFAULT_MAP_WINDOW;

	addr = vm_unmapped_area(&info);
	/* a page-aligned result is a valid address; otherwise it is -ENOMEM */
	if (!(addr & ~PAGE_MASK))
		return addr;
	VM_BUG_ON(addr != -ENOMEM);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return radix__arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}

static void radix__arch_pick_mmap_layout(struct mm_struct *mm,
					 unsigned long random_factor)
{
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = radix__arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = radix__arch_get_unmapped_area_topdown;
	}
}
#else
/* dummy */
extern void radix__arch_pick_mmap_layout(struct mm_struct *mm,
					 unsigned long random_factor);
#endif

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (radix_enabled())
		return radix__arch_pick_mmap_layout(mm, random_factor);
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
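
/*
 * Call-flow sketch (a reading aid, not code in this file): exec() sets up
 * the new mm and calls arch_pick_mmap_layout(), which stores either the
 * bottom-up (legacy) or top-down allocator in mm->get_unmapped_area.
 * Later mmap() calls funnel through that hook to pick an unmapped range
 * (MAP_FIXED requests short-circuit inside the hook), so the layout
 * decision made here is per-process and fixed for the life of the mm.
 */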