slice.c

/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>

static DEFINE_SPINLOCK(slice_convert_lock);

#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, const struct slice_mask *mask)
{
	if (!_slice_debug)
		return;
	pr_devel("%s low_slice: %*pbl\n", label,
		 (int)SLICE_NUM_LOW, &mask->low_slices);
	pr_devel("%s high_slice: %*pbl\n", label,
		 (int)SLICE_NUM_HIGH, mask->high_slices);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)

#else

static void slice_print_mask(const char *label, const struct slice_mask *mask) {}
#define slice_dbg(fmt...)

#endif

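/*
 * Build the slice mask covering [start, start + len): set the bits of all
 * low and high slices that the range overlaps.
 */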
static void slice_range_to_mask(unsigned long start, unsigned long len,
				struct slice_mask *ret)
{
	unsigned long end = start + len - 1;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	if (start < SLICE_LOW_TOP) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(start));
	}

	if ((start + len) > SLICE_LOW_TOP) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;

		bitmap_set(ret->high_slices, start_index, count);
	}
}

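/*
 * Return non-zero when [addr, addr + len) lies below the mm's address-space
 * limit and does not intersect any existing VMA (stack guard gap included).
 */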
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm->context.slb_addr_limit - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vm_start_gap(vma));
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

#ifdef CONFIG_PPC64
	/* Hack, so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = SLICE_LOW_TOP;
#endif

	return !slice_area_is_free(mm, start, end - start);
}

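/*
 * Build the mask of slices that contain no VMAs at all; high slices are
 * only scanned up to high_limit.
 */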
static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
				unsigned long high_limit)
{
	unsigned long i;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret->low_slices |= 1u << i;

	if (high_limit <= SLICE_LOW_TOP)
		return;

	for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
		if (!slice_high_has_vma(mm, i))
			__set_bit(i, ret->high_slices);
}

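/*
 * Return the mm's cached slice mask for the given page size. The set of
 * page sizes that have a cached mask is platform specific; an unsupported
 * size is a bug.
 */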
#ifdef CONFIG_PPC_BOOK3S_64
static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize)
{
#ifdef CONFIG_PPC_64K_PAGES
	if (psize == MMU_PAGE_64K)
		return &mm->context.mask_64k;
#endif
	if (psize == MMU_PAGE_4K)
		return &mm->context.mask_4k;
#ifdef CONFIG_HUGETLB_PAGE
	if (psize == MMU_PAGE_16M)
		return &mm->context.mask_16m;
	if (psize == MMU_PAGE_16G)
		return &mm->context.mask_16g;
#endif
	BUG();
}
#elif defined(CONFIG_PPC_8xx)
static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize)
{
	if (psize == mmu_virtual_psize)
		return &mm->context.mask_base_psize;
#ifdef CONFIG_HUGETLB_PAGE
	if (psize == MMU_PAGE_512K)
		return &mm->context.mask_512k;
	if (psize == MMU_PAGE_8M)
		return &mm->context.mask_8m;
#endif
	BUG();
}
#else
#error "Must define the slice masks for page sizes supported by the platform"
#endif

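/*
 * Return true if every slice covered by [start, start + len) is marked as
 * available in the given mask.
 */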
static bool slice_check_range_fits(struct mm_struct *mm,
				   const struct slice_mask *available,
				   unsigned long start, unsigned long len)
{
	unsigned long end = start + len - 1;
	u64 low_slices = 0;

	if (start < SLICE_LOW_TOP) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
				- (1u << GET_LOW_SLICE_INDEX(start));
	}
	if ((low_slices & available->low_slices) != low_slices)
		return false;

	if (SLICE_NUM_HIGH && ((start + len) > SLICE_LOW_TOP)) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
		unsigned long i;

		for (i = start_index; i < start_index + count; i++) {
			if (!test_bit(i, available->high_slices))
				return false;
		}
	}

	return true;
}

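/*
 * Run on each CPU (via on_each_cpu()) after a slice map change: if the
 * local CPU is running the affected mm, recopy the context to the PACA and
 * flush/rebolt the SLB. The body is only built for CONFIG_PPC64.
 */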
static void slice_flush_segments(void *parm)
{
#ifdef CONFIG_PPC64
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	copy_mm_to_paca(current->active_mm);

	local_irq_save(flags);
	slb_flush_and_rebolt();
	local_irq_restore(flags);
#endif
}

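/*
 * Switch every slice selected in @mask to page size @psize: update the
 * packed per-slice psize arrays and the cached per-size masks under
 * slice_convert_lock, then flush any coprocessor SLBs.
 */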
static void slice_convert(struct mm_struct *mm,
				const struct slice_mask *mask, int psize)
{
	int index, mask_index;
	/* Write the new slice psize bits */
	unsigned char *hpsizes, *lpsizes;
	struct slice_mask *psize_mask, *old_mask;
	unsigned long i, flags;
	int old_psize;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	psize_mask = slice_mask_for_size(mm, psize);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++) {
		if (!(mask->low_slices & (1u << i)))
			continue;

		mask_index = i & 0x1;
		index = i >> 1;

		/* Update the slice_mask */
		old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf;
		old_mask = slice_mask_for_size(mm, old_psize);
		old_mask->low_slices &= ~(1u << i);
		psize_mask->low_slices |= 1u << i;

		/* Update the sizes array */
		lpsizes[index] = (lpsizes[index] & ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
		if (!test_bit(i, mask->high_slices))
			continue;

		mask_index = i & 0x1;
		index = i >> 1;

		/* Update the slice_mask */
		old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf;
		old_mask = slice_mask_for_size(mm, old_psize);
		__clear_bit(i, old_mask->high_slices);
		__set_bit(i, psize_mask->high_slices);

		/* Update the sizes array */
		hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  (unsigned long)mm->context.low_slices_psize,
		  (unsigned long)mm->context.high_slices_psize);

	spin_unlock_irqrestore(&slice_convert_lock, flags);

	copro_flush_all_slbs(mm);
}

/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on 'end' parameter);
 * return boolean indicating if the slice is marked as available in the
 * 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
				 const struct slice_mask *available,
				 int end, unsigned long *boundary_addr)
{
	unsigned long slice;

	if (addr < SLICE_LOW_TOP) {
		slice = GET_LOW_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
		return !!(available->low_slices & (1u << slice));
	} else {
		slice = GET_HIGH_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) ?
			((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
		return !!test_bit(slice, available->high_slices);
	}
}

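/*
 * Bottom-up search: starting at TASK_UNMAPPED_BASE, merge consecutive
 * available slices into one candidate range and let vm_unmapped_area()
 * find a fit inside it.
 */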
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long len,
					      const struct slice_mask *available,
					      int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, next_end;
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = TASK_UNMAPPED_BASE;
	/*
	 * Search up to the allowed max address for this mmap request.
	 */
	while (addr < high_limit) {
		info.low_limit = addr;
		if (!slice_scan_available(addr, available, 1, &addr))
			continue;

 next_slice:
		/*
		 * At this point [info.low_limit; addr) covers
		 * available slices only and ends at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the next available slice.
		 */
		if (addr >= high_limit)
			addr = high_limit;
		else if (slice_scan_available(addr, available, 1, &next_end)) {
			addr = next_end;
			goto next_slice;
		}
		info.high_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	return -ENOMEM;
}

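/*
 * Top-down search: starting from mmap_base (shifted up when the request
 * may go above DEFAULT_MAP_WINDOW), merge consecutive available slices
 * into candidate ranges for vm_unmapped_area(), falling back to the
 * bottom-up search on failure.
 */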
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     const struct slice_mask *available,
					     int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, prev;
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = mm->mmap_base;
	/*
	 * If we are trying to allocate above DEFAULT_MAP_WINDOW,
	 * add the difference to mmap_base. Only do this for requests
	 * whose high_limit is above DEFAULT_MAP_WINDOW.
	 */
	if (high_limit > DEFAULT_MAP_WINDOW)
		addr += mm->context.slb_addr_limit - DEFAULT_MAP_WINDOW;

	while (addr > PAGE_SIZE) {
		info.high_limit = addr;
		if (!slice_scan_available(addr - 1, available, 0, &addr))
			continue;

 prev_slice:
		/*
		 * At this point [addr; info.high_limit) covers
		 * available slices only and starts at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the previous available slice.
		 */
		if (addr < PAGE_SIZE)
			addr = PAGE_SIZE;
		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
			addr = prev;
			goto prev_slice;
		}
		info.low_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return slice_find_area_bottomup(mm, len, available, psize, high_limit);
}

static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     const struct slice_mask *mask, int psize,
				     int topdown, unsigned long high_limit)
{
	if (topdown)
		return slice_find_area_topdown(mm, len, mask, psize, high_limit);
	else
		return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
}

static inline void slice_copy_mask(struct slice_mask *dst,
					const struct slice_mask *src)
{
	dst->low_slices = src->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_copy(dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_or_mask(struct slice_mask *dst,
					const struct slice_mask *src1,
					const struct slice_mask *src2)
{
	dst->low_slices = src1->low_slices | src2->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_or(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_andnot_mask(struct slice_mask *dst,
					const struct slice_mask *src1,
					const struct slice_mask *src2)
{
	dst->low_slices = src1->low_slices & ~src2->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_andnot(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif

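/*
 * Core allocator: find room for a mapping of @len bytes with page size
 * @psize, preferring slices that already use that size and converting free
 * slices when needed. Handles MAP_FIXED, address hints and the extended
 * address space above DEFAULT_MAP_WINDOW.
 */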
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown)
{
	struct slice_mask good_mask;
	struct slice_mask potential_mask;
	const struct slice_mask *maskp;
	const struct slice_mask *compat_maskp = NULL;
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long page_size = 1UL << pshift;
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;
	unsigned long high_limit;

	high_limit = DEFAULT_MAP_WINDOW;
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	if (len > high_limit)
		return -ENOMEM;
	if (len & (page_size - 1))
		return -EINVAL;
	if (fixed) {
		if (addr & (page_size - 1))
			return -EINVAL;
		if (addr > high_limit - len)
			return -ENOMEM;
	}

	if (high_limit > mm->context.slb_addr_limit) {
		/*
		 * Increasing the slb_addr_limit does not require
		 * slice mask cache to be recalculated because it should
		 * be already initialised beyond the old address limit.
		 */
		mm->context.slb_addr_limit = high_limit;

		on_each_cpu(slice_flush_segments, mm, 1);
	}

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);
	BUG_ON(mm->context.slb_addr_limit == 0);
	VM_BUG_ON(radix_enabled());

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
		  addr, len, flags, topdown);

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = _ALIGN_UP(addr, page_size);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > high_limit - len ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	maskp = slice_mask_for_size(mm, psize);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */

	/*
	 * If we support combo pages, we can allow 64k pages in 4k slices
	 * The mask copies could be avoided in most cases here if we had
	 * a pointer to good mask for the next code to use.
	 */
	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
		compat_maskp = slice_mask_for_size(mm, MMU_PAGE_4K);
		if (fixed)
			slice_or_mask(&good_mask, maskp, compat_maskp);
		else
			slice_copy_mask(&good_mask, maskp);
	} else {
		slice_copy_mask(&good_mask, maskp);
	}

	slice_print_mask(" good_mask", &good_mask);
	if (compat_maskp)
		slice_print_mask(" compat_mask", compat_maskp);

	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_range_fits(mm, &good_mask, addr, len)) {
			slice_dbg(" fits good !\n");
			newaddr = addr;
			goto return_addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, we don't have to setup,
			 * we thus return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}
	/*
	 * We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	slice_mask_for_free(mm, &potential_mask, high_limit);
	slice_or_mask(&potential_mask, &potential_mask, &good_mask);
	slice_print_mask(" potential", &potential_mask);

	if (addr != 0 || fixed) {
		if (slice_check_range_fits(mm, &potential_mask, addr, len)) {
			slice_dbg(" fits potential !\n");
			newaddr = addr;
			goto convert;
		}
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	newaddr = slice_find_area(mm, len, &potential_mask,
				  psize, topdown, high_limit);

#ifdef CONFIG_PPC_64K_PAGES
	if (newaddr == -ENOMEM && psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		slice_or_mask(&potential_mask, &potential_mask, compat_maskp);
		newaddr = slice_find_area(mm, len, &potential_mask,
					  psize, topdown, high_limit);
	}
#endif

	if (newaddr == -ENOMEM)
		return -ENOMEM;

	slice_range_to_mask(newaddr, len, &potential_mask);
	slice_dbg(" found potential area at 0x%lx\n", newaddr);
	slice_print_mask(" mask", &potential_mask);

 convert:
	/*
	 * Try to allocate the context before we do slice convert
	 * so that we handle the context allocation failure gracefully.
	 */
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}

	slice_andnot_mask(&potential_mask, &potential_mask, &good_mask);
	if (compat_maskp && !fixed)
		slice_andnot_mask(&potential_mask, &potential_mask, compat_maskp);
	if (potential_mask.low_slices ||
		(SLICE_NUM_HIGH &&
		 !bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) {
		slice_convert(mm, &potential_mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return newaddr;

return_addr:
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}
	return newaddr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

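/*
 * Generic mmap entry points: both route straight into the slice allocator
 * using the mm's default page size.
 */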
unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       current->mm->context.user_psize, 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       current->mm->context.user_psize, 1);
}

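/*
 * Return the page size index of the slice containing @addr. Each byte of
 * the psize arrays packs two 4-bit slice entries.
 */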
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	unsigned char *psizes;
	int index, mask_index;

	VM_BUG_ON(radix_enabled());

	if (addr < SLICE_LOW_TOP) {
		psizes = mm->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
	} else {
		psizes = mm->context.high_slices_psize;
		index = GET_HIGH_SLICE_INDEX(addr);
	}
	mask_index = index & 0x1;
	return (psizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

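/*
 * Reset slice state at exec time: restore the default address-space limit,
 * set every slice to the default page size and seed the slice mask cache
 * to match.
 */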
void slice_init_new_context_exec(struct mm_struct *mm)
{
	unsigned char *hpsizes, *lpsizes;
	struct slice_mask *mask;
	unsigned int psize = mmu_virtual_psize;

	slice_dbg("slice_init_new_context_exec(mm=%p)\n", mm);

	/*
	 * In the case of exec, use the default limit. In the
	 * case of fork it is just inherited from the mm being
	 * duplicated.
	 */
#ifdef CONFIG_PPC64
	mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
#else
	mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW;
#endif

	mm->context.user_psize = psize;

	/*
	 * Set all slice psizes to the default.
	 */
	lpsizes = mm->context.low_slices_psize;
	memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1);

	hpsizes = mm->context.high_slices_psize;
	memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1);

	/*
	 * Slice mask cache starts zeroed, fill the default size cache.
	 */
	mask = slice_mask_for_size(mm, psize);
	mask->low_slices = ~0UL;
	if (SLICE_NUM_HIGH)
		bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
}

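/*
 * Convert all slices covering [start, start + len) to the given page size.
 */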
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask;

	VM_BUG_ON(radix_enabled());

	slice_range_to_mask(start, len, &mask);
	slice_convert(mm, &mask, psize);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, MAP_FIXED mappings cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is ok
 * for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
				 unsigned long len)
{
	const struct slice_mask *maskp;
	unsigned int psize = mm->context.user_psize;

	VM_BUG_ON(radix_enabled());

	maskp = slice_mask_for_size(mm, psize);
#ifdef CONFIG_PPC_64K_PAGES
	/* We need to account for 4k slices too */
	if (psize == MMU_PAGE_64K) {
		const struct slice_mask *compat_maskp;
		struct slice_mask available;

		compat_maskp = slice_mask_for_size(mm, MMU_PAGE_4K);
		slice_or_mask(&available, maskp, compat_maskp);
		return !slice_check_range_fits(mm, &available, addr, len);
	}
#endif

	return !slice_check_range_fits(mm, maskp, addr, len);
}
#endif