/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 * written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/compiler.h>
#include <linux/elf-randomize.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
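
/*
 * Minimum alignment for shared mappings.  PAGE_SIZE - 1 suits caches
 * that cannot alias; cache setup code raises the mask on CPUs whose
 * virtually indexed caches can alias.
 */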
unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)
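
/*
 * Use the legacy bottom-up layout if the task requested it via
 * ADDR_COMPAT_LAYOUT, if the stack is unlimited (no sensible gap can
 * be reserved below it), or if the legacy_va_layout sysctl is set.
 */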
static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}
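
/*
 * Compute the top-down mmap base: TASK_SIZE minus a stack gap clamped
 * to [MIN_GAP, MAX_GAP], lowered further by the ASLR offset rnd.
 */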
static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
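
/*
 * Round addr up to the aliasing boundary, then add the cache colour
 * of the file offset so the mapping aliases cleanly with other
 * mappings of the same pages.
 */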
#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

enum mmap_allocation_direction {UP, DOWN};

static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}
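
	/*
	 * File-backed and shared mappings may be mapped by other users
	 * at other virtual addresses, so they must be cache-coloured.
	 */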
	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
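
	/*
	 * Let the generic allocator search for a free range; align_mask
	 * and align_offset make it return addresses of the right colour.
	 */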
	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	info.flags = 0;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this but sched.h declares the function as
 * extern so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}
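
/*
 * ASLR entropy for the mmap base: mmap_rnd_bits random bits, or
 * mmap_rnd_compat_bits for 32-bit tasks, shifted into page units.
 */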
unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_COMPAT
	if (TASK_IS_32BIT_ADDR)
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_COMPAT */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}
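
/*
 * Chosen at exec time: bottom-up from TASK_UNMAPPED_BASE for legacy
 * tasks, otherwise top-down from a randomized base below the stack.
 */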
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = get_random_long();

	rnd = rnd << PAGE_SHIFT;
	/* 8MB for 32bit, 256MB for 64bit */
	if (TASK_IS_32BIT_ADDR)
		rnd = rnd & 0x7ffffful;
	else
		rnd = rnd & 0xffffffful;

	return rnd;
}
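
/*
 * Move the heap start up by a random amount; if the addition wraps,
 * fall back to the unrandomized brk.
 */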
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}
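
/*
 * A kernel virtual address is valid if it translates to a physical
 * page frame that the memory map actually covers.
 */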
int __virt_addr_valid(const volatile void *kaddr)
{
	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);