/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    Module name: htab.c
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#undef DEBUG
#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/memblock.h>
#include <linux/context_tracking.h>
#include <linux/libfdt.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <linux/uaccess.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/copro.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>
#include <asm/fadump.h>
#include <asm/firmware.h>
#include <asm/tm.h>
#include <asm/trace.h>
#include <asm/ps3.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define KB (1024)
#define MB (1024*KB)
#define GB (1024L*MB)
/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 */
static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
EXPORT_SYMBOL_GPL(mmu_psize_defs);

u8 hpte_page_sizes[1 << LP_BITS];
EXPORT_SYMBOL_GPL(hpte_page_sizes);

struct hash_pte *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
EXPORT_SYMBOL_GPL(htab_hash_mask);
int mmu_linear_psize = MMU_PAGE_4K;
EXPORT_SYMBOL_GPL(mmu_linear_psize);
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int mmu_vmemmap_psize = MMU_PAGE_4K;
#endif
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
EXPORT_SYMBOL_GPL(mmu_kernel_ssize);
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
u16 mmu_slb_size = 64;
EXPORT_SYMBOL_GPL(mmu_slb_size);
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock);
#endif /* CONFIG_DEBUG_PAGEALLOC */
struct mmu_hash_ops mmu_hash_ops;
EXPORT_SYMBOL(mmu_hash_ops);
/* These are definitions of page size arrays to be used when none
 * is provided by the firmware.
 */

/* Pre-POWER4 CPUs (4k pages only)
 */
static struct mmu_psize_def mmu_psize_defaults_old[] = {
        [MMU_PAGE_4K] = {
                .shift  = 12,
                .sllp   = 0,
                .penc   = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},
                .avpnm  = 0,
                .tlbiel = 0,
        },
};

/* POWER4, GPUL, POWER5
 *
 * Support for 16Mb large pages
 */
static struct mmu_psize_def mmu_psize_defaults_gp[] = {
        [MMU_PAGE_4K] = {
                .shift  = 12,
                .sllp   = 0,
                .penc   = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},
                .avpnm  = 0,
                .tlbiel = 1,
        },
        [MMU_PAGE_16M] = {
                .shift  = 24,
                .sllp   = SLB_VSID_L,
                .penc   = {[0 ... MMU_PAGE_16M - 1] = -1, [MMU_PAGE_16M] = 0,
                           [MMU_PAGE_16M + 1 ... MMU_PAGE_COUNT - 1] = -1 },
                .avpnm  = 0x1UL,
                .tlbiel = 0,
        },
};
/*
 * 'R' and 'C' update notes:
 *  - Under pHyp or KVM, the updatepp path will not set C, thus it *will*
 *    create writeable HPTEs without C set, because the hcall H_PROTECT
 *    that we use in that case will not update C
 *  - The above is however not a problem, because we also don't do that
 *    fancy "no flush" variant of eviction and we use H_REMOVE which will
 *    do the right thing and thus we don't have the race I described earlier
 *
 *  - Under bare metal, we do have the race, so we need R and C set
 *  - We make sure R is always set and never lost
 *  - C is _PAGE_DIRTY, and *should* always be set for a writeable mapping
 */
unsigned long htab_convert_pte_flags(unsigned long pteflags)
{
        unsigned long rflags = 0;

        /* _PAGE_EXEC -> NOEXEC */
        if ((pteflags & _PAGE_EXEC) == 0)
                rflags |= HPTE_R_N;
        /*
         * PPP bits:
         * Linux uses slb key 0 for kernel and 1 for user.
         * kernel RW areas are mapped with PPP=0b000
         * User area is mapped with PPP=0b010 for read/write
         * or PPP=0b011 for read-only (including writeable but clean pages).
         */
        if (pteflags & _PAGE_PRIVILEGED) {
                /*
                 * Kernel read only mapped with ppp bits 0b110
                 */
                if (!(pteflags & _PAGE_WRITE)) {
                        if (mmu_has_feature(MMU_FTR_KERNEL_RO))
                                rflags |= (HPTE_R_PP0 | 0x2);
                        else
                                rflags |= 0x3;
                }
        } else {
                if (pteflags & _PAGE_RWX)
                        rflags |= 0x2;
                if (!((pteflags & _PAGE_WRITE) && (pteflags & _PAGE_DIRTY)))
                        rflags |= 0x1;
        }
        /*
         * We can't allow hardware to update hpte bits. Hence always
         * set 'R' bit and set 'C' if it is a write fault
         */
        rflags |= HPTE_R_R;

        if (pteflags & _PAGE_DIRTY)
                rflags |= HPTE_R_C;
        /*
         * Add in WIG bits
         */
        if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT)
                rflags |= HPTE_R_I;
        else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
                rflags |= (HPTE_R_I | HPTE_R_G);
        else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
                rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M);
        else
                /*
                 * Add memory coherence if cache inhibited is not set
                 */
                rflags |= HPTE_R_M;

        return rflags;
}
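
/*
 * Illustrative example (not part of the logic above): a normal, dirty,
 * writable user page (_PAGE_RWX | _PAGE_WRITE | _PAGE_DIRTY, with the
 * _PAGE_CACHE_CTL bits clear for an ordinary cacheable mapping) takes the
 * else-branch of the PPP selection, so rflags gets 0x2 (PPP=0b010) and the
 * 0x1 read-only bit is skipped because the page is both writable and dirty.
 * No WIG case matches, so HPTE_R_M is added, and the fixed policy adds
 * HPTE_R_R plus HPTE_R_C for the dirty page, giving
 * rflags == 0x2 | HPTE_R_R | HPTE_R_C | HPTE_R_M.
 */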
int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
                      unsigned long pstart, unsigned long prot,
                      int psize, int ssize)
{
        unsigned long vaddr, paddr;
        unsigned int step, shift;
        int ret = 0;

        shift = mmu_psize_defs[psize].shift;
        step = 1 << shift;

        prot = htab_convert_pte_flags(prot);

        DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n",
            vstart, vend, pstart, prot, psize, ssize);

        for (vaddr = vstart, paddr = pstart; vaddr < vend;
             vaddr += step, paddr += step) {
                unsigned long hash, hpteg;
                unsigned long vsid = get_kernel_vsid(vaddr, ssize);
                unsigned long vpn = hpt_vpn(vaddr, vsid, ssize);
                unsigned long tprot = prot;

                /*
                 * If we hit a bad address return error.
                 */
                if (!vsid)
                        return -1;
                /* Make kernel text executable */
                if (overlaps_kernel_text(vaddr, vaddr + step))
                        tprot &= ~HPTE_R_N;

                /* Make kvm guest trampolines executable */
                if (overlaps_kvm_tmp(vaddr, vaddr + step))
                        tprot &= ~HPTE_R_N;

                /*
                 * If relocatable, check if it overlaps interrupt vectors that
                 * are copied down to real 0. For relocatable kernel
                 * (e.g. kdump case) we copy interrupt vectors down to real
                 * address 0. Mark that region as executable. This is
                 * because on p8 system with relocation on exception feature
                 * enabled, exceptions are raised with MMU (IR=DR=1) ON. Hence
                 * in order to execute the interrupt handlers in virtual
                 * mode the vector region needs to be marked as executable.
                 */
                if ((PHYSICAL_START > MEMORY_START) &&
                    overlaps_interrupt_vector_text(vaddr, vaddr + step))
                        tprot &= ~HPTE_R_N;

                hash = hpt_hash(vpn, shift, ssize);
                hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

                BUG_ON(!mmu_hash_ops.hpte_insert);
                ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
                                               HPTE_V_BOLTED, psize, psize,
                                               ssize);

                if (ret < 0)
                        break;

#ifdef CONFIG_DEBUG_PAGEALLOC
                if (debug_pagealloc_enabled() &&
                    (paddr >> PAGE_SHIFT) < linear_map_hash_count)
                        linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
#endif /* CONFIG_DEBUG_PAGEALLOC */
        }
        return ret < 0 ? ret : 0;
}
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
                        int psize, int ssize)
{
        unsigned long vaddr;
        unsigned int step, shift;
        int rc;
        int ret = 0;

        shift = mmu_psize_defs[psize].shift;
        step = 1 << shift;

        if (!mmu_hash_ops.hpte_removebolted)
                return -ENODEV;

        for (vaddr = vstart; vaddr < vend; vaddr += step) {
                rc = mmu_hash_ops.hpte_removebolted(vaddr, psize, ssize);
                if (rc == -ENOENT) {
                        ret = -ENOENT;
                        continue;
                }
                if (rc < 0)
                        return rc;
        }

        return ret;
}
static bool disable_1tb_segments = false;

static int __init parse_disable_1tb_segments(char *p)
{
        disable_1tb_segments = true;
        return 0;
}
early_param("disable_1tb_segments", parse_disable_1tb_segments);

static int __init htab_dt_scan_seg_sizes(unsigned long node,
                                         const char *uname, int depth,
                                         void *data)
{
        const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        const __be32 *prop;
        int size = 0;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", &size);
        if (prop == NULL)
                return 0;
        for (; size >= 4; size -= 4, ++prop) {
                if (be32_to_cpu(prop[0]) == 40) {
                        DBG("1T segment support detected\n");

                        if (disable_1tb_segments) {
                                DBG("1T segments disabled by command line\n");
                                break;
                        }

                        cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT;
                        return 1;
                }
        }
        cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
        return 0;
}
static int __init get_idx_from_shift(unsigned int shift)
{
        int idx = -1;

        switch (shift) {
        case 0xc:
                idx = MMU_PAGE_4K;
                break;
        case 0x10:
                idx = MMU_PAGE_64K;
                break;
        case 0x14:
                idx = MMU_PAGE_1M;
                break;
        case 0x18:
                idx = MMU_PAGE_16M;
                break;
        case 0x22:
                idx = MMU_PAGE_16G;
                break;
        }
        return idx;
}
static int __init htab_dt_scan_page_sizes(unsigned long node,
                                          const char *uname, int depth,
                                          void *data)
{
        const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        const __be32 *prop;
        int size = 0;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = of_get_flat_dt_prop(node, "ibm,segment-page-sizes", &size);
        if (!prop)
                return 0;

        pr_info("Page sizes from device-tree:\n");
        size /= 4;
        cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);
        while (size > 0) {
                unsigned int base_shift = be32_to_cpu(prop[0]);
                unsigned int slbenc = be32_to_cpu(prop[1]);
                unsigned int lpnum = be32_to_cpu(prop[2]);
                struct mmu_psize_def *def;
                int idx, base_idx;

                size -= 3; prop += 3;
                base_idx = get_idx_from_shift(base_shift);
                if (base_idx < 0) {
                        /* skip the pte encoding also */
                        prop += lpnum * 2; size -= lpnum * 2;
                        continue;
                }
                def = &mmu_psize_defs[base_idx];
                if (base_idx == MMU_PAGE_16M)
                        cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE;

                def->shift = base_shift;
                if (base_shift <= 23)
                        def->avpnm = 0;
                else
                        def->avpnm = (1 << (base_shift - 23)) - 1;
                def->sllp = slbenc;
                /*
                 * We don't know for sure what's up with tlbiel, so
                 * for now we only set it for 4K and 64K pages
                 */
                if (base_idx == MMU_PAGE_4K || base_idx == MMU_PAGE_64K)
                        def->tlbiel = 1;
                else
                        def->tlbiel = 0;

                while (size > 0 && lpnum) {
                        unsigned int shift = be32_to_cpu(prop[0]);
                        int penc = be32_to_cpu(prop[1]);

                        prop += 2; size -= 2;
                        lpnum--;

                        idx = get_idx_from_shift(shift);
                        if (idx < 0)
                                continue;

                        if (penc == -1)
                                pr_err("Invalid penc for base_shift=%d "
                                       "shift=%d\n", base_shift, shift);

                        def->penc[idx] = penc;
                        pr_info("base_shift=%d: shift=%d, sllp=0x%04lx,"
                                " avpnm=0x%08lx, tlbiel=%d, penc=%d\n",
                                base_shift, shift, def->sllp,
                                def->avpnm, def->tlbiel, def->penc[idx]);
                }
        }

        return 1;
}
#ifdef CONFIG_HUGETLB_PAGE
/* Scan for 16G memory blocks that have been set aside for huge pages
 * and reserve those blocks for 16G huge pages.
 */
static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
                                               const char *uname, int depth,
                                               void *data)
{
        const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        const __be64 *addr_prop;
        const __be32 *page_count_prop;
        unsigned int expected_pages;
        unsigned long phys_addr;
        unsigned long block_size;

        /* We are scanning "memory" nodes only */
        if (type == NULL || strcmp(type, "memory") != 0)
                return 0;

        /* This property is the log base 2 of the number of virtual pages that
         * will represent this memory block. */
        page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
        if (page_count_prop == NULL)
                return 0;
        expected_pages = (1 << be32_to_cpu(page_count_prop[0]));
        addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
        if (addr_prop == NULL)
                return 0;
        phys_addr = be64_to_cpu(addr_prop[0]);
        block_size = be64_to_cpu(addr_prop[1]);
        if (block_size != (16 * GB))
                return 0;
        printk(KERN_INFO "Huge page(16GB) memory: "
               "addr = 0x%lX size = 0x%lX pages = %d\n",
               phys_addr, block_size, expected_pages);
        if (phys_addr + (16 * GB) <= memblock_end_of_DRAM()) {
                memblock_reserve(phys_addr, block_size * expected_pages);
                add_gpage(phys_addr, block_size, expected_pages);
        }
        return 0;
}
#endif /* CONFIG_HUGETLB_PAGE */
static void mmu_psize_set_default_penc(void)
{
        int bpsize, apsize;

        for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++)
                for (apsize = 0; apsize < MMU_PAGE_COUNT; apsize++)
                        mmu_psize_defs[bpsize].penc[apsize] = -1;
}
#ifdef CONFIG_PPC_64K_PAGES

static bool might_have_hea(void)
{
        /*
         * The HEA ethernet adapter requires awareness of the
         * GX bus. Without that awareness we can easily assume
         * we will never see an HEA ethernet device.
         */
#ifdef CONFIG_IBMEBUS
        return !cpu_has_feature(CPU_FTR_ARCH_207S) &&
               firmware_has_feature(FW_FEATURE_SPLPAR);
#else
        return false;
#endif
}

#endif /* #ifdef CONFIG_PPC_64K_PAGES */
static void __init htab_scan_page_sizes(void)
{
        int rc;

        /* set the invalid penc to -1 */
        mmu_psize_set_default_penc();

        /* Default to 4K pages only */
        memcpy(mmu_psize_defs, mmu_psize_defaults_old,
               sizeof(mmu_psize_defaults_old));

        /*
         * Try to find the available page sizes in the device-tree
         */
        rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
        if (rc == 0 && early_mmu_has_feature(MMU_FTR_16M_PAGE)) {
                /*
                 * Nothing in the device-tree, but the CPU supports 16M pages,
                 * so let's fall back on a known size list for 16M capable CPUs.
                 */
                memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
                       sizeof(mmu_psize_defaults_gp));
        }

#ifdef CONFIG_HUGETLB_PAGE
        /* Reserve 16G huge page memory sections for huge pages */
        of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
#endif /* CONFIG_HUGETLB_PAGE */
}
/*
 * Fill in the hpte_page_sizes[] array.
 * We go through the mmu_psize_defs[] array looking for all the
 * supported base/actual page size combinations.  Each combination
 * has a unique pagesize encoding (penc) value in the low bits of
 * the LP field of the HPTE.  For actual page sizes less than 1MB,
 * some of the upper LP bits are used for RPN bits, meaning that
 * we need to fill in several entries in hpte_page_sizes[].
 *
 * In diagrammatic form, with r = RPN bits and z = page size bits:
 *        PTE LP     actual page size
 *    rrrr rrrz         >=8KB
 *    rrrr rrzz         >=16KB
 *    rrrr rzzz         >=32KB
 *    rrrr zzzz         >=64KB
 *    ...
 *
 * The zzzz bits are implementation-specific but are chosen so that
 * no encoding for a larger page size uses the same value in its
 * low-order N bits as the encoding for the 2^(12+N) byte page size
 * (if it exists).
 */
static void init_hpte_page_sizes(void)
{
        long int ap, bp;
        long int shift, penc;

        for (bp = 0; bp < MMU_PAGE_COUNT; ++bp) {
                if (!mmu_psize_defs[bp].shift)
                        continue;       /* not a supported page size */
                for (ap = bp; ap < MMU_PAGE_COUNT; ++ap) {
                        penc = mmu_psize_defs[bp].penc[ap];
                        if (penc == -1)
                                continue;
                        shift = mmu_psize_defs[ap].shift - LP_SHIFT;
                        if (shift <= 0)
                                continue;       /* should never happen */
                        /*
                         * For page sizes less than 1MB, this loop
                         * replicates the entry for all possible values
                         * of the rrrr bits.
                         */
                        while (penc < (1 << LP_BITS)) {
                                hpte_page_sizes[penc] = (ap << 4) | bp;
                                penc += 1 << shift;
                        }
                }
        }
}
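
/*
 * Worked example (illustrative, assuming LP_SHIFT = 12 and LP_BITS = 8):
 * for a 64K actual page size, shift = 16 - 12 = 4, so an encoding penc
 * repeats every 1 << 4 = 16 entries.  With penc = 1 the replication loop
 * fills hpte_page_sizes[1], [17], [33], ..., [241], covering every
 * possible value of the rrrr RPN bits above the zzzz size bits.
 */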
static void __init htab_init_page_sizes(void)
{
        init_hpte_page_sizes();

        if (!debug_pagealloc_enabled()) {
                /*
                 * Pick a size for the linear mapping. Currently, we only
                 * support 16M, 1M and 4K which is the default
                 */
                if (mmu_psize_defs[MMU_PAGE_16M].shift)
                        mmu_linear_psize = MMU_PAGE_16M;
                else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                        mmu_linear_psize = MMU_PAGE_1M;
        }

#ifdef CONFIG_PPC_64K_PAGES
        /*
         * Pick a size for the ordinary pages. Default is 4K, we support
         * 64K for user mappings and vmalloc if supported by the processor.
         * We only use 64k for ioremap if the processor
         * (and firmware) support cache-inhibited large pages.
         * If not, we use 4k and set mmu_ci_restrictions so that
         * hash_page knows to switch processes that use cache-inhibited
         * mappings to 4k pages.
         */
        if (mmu_psize_defs[MMU_PAGE_64K].shift) {
                mmu_virtual_psize = MMU_PAGE_64K;
                mmu_vmalloc_psize = MMU_PAGE_64K;
                if (mmu_linear_psize == MMU_PAGE_4K)
                        mmu_linear_psize = MMU_PAGE_64K;
                if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) {
                        /*
                         * When running on pSeries using 64k pages for ioremap
                         * would stop us accessing the HEA ethernet. So if we
                         * have the chance of ever seeing one, stay at 4k.
                         */
                        if (!might_have_hea())
                                mmu_io_psize = MMU_PAGE_64K;
                } else
                        mmu_ci_restrictions = 1;
        }
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
        /* We try to use 16M pages for vmemmap if that is supported
         * and we have at least 1G of RAM at boot
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift &&
            memblock_phys_mem_size() >= 0x40000000)
                mmu_vmemmap_psize = MMU_PAGE_16M;
        else if (mmu_psize_defs[MMU_PAGE_64K].shift)
                mmu_vmemmap_psize = MMU_PAGE_64K;
        else
                mmu_vmemmap_psize = MMU_PAGE_4K;
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

        printk(KERN_DEBUG "Page orders: linear mapping = %d, "
               "virtual = %d, io = %d"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
               ", vmemmap = %d"
#endif
               "\n",
               mmu_psize_defs[mmu_linear_psize].shift,
               mmu_psize_defs[mmu_virtual_psize].shift,
               mmu_psize_defs[mmu_io_psize].shift
#ifdef CONFIG_SPARSEMEM_VMEMMAP
               , mmu_psize_defs[mmu_vmemmap_psize].shift
#endif
               );
}
static int __init htab_dt_scan_pftsize(unsigned long node,
                                       const char *uname, int depth,
                                       void *data)
{
        const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        const __be32 *prop;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
        if (prop != NULL) {
                /* pft_size[0] is the NUMA CEC cookie */
                ppc64_pft_size = be32_to_cpu(prop[1]);
                return 1;
        }
        return 0;
}
unsigned htab_shift_for_mem_size(unsigned long mem_size)
{
        unsigned memshift = __ilog2(mem_size);
        unsigned pshift = mmu_psize_defs[mmu_virtual_psize].shift;
        unsigned pteg_shift;

        /* round mem_size up to next power of 2 */
        if ((1UL << memshift) < mem_size)
                memshift += 1;

        /* aim for 2 pages / pteg */
        pteg_shift = memshift - (pshift + 1);

        /*
         * 2^11 PTEGs of 128 bytes each, ie. 2^18 bytes is the minimum htab
         * size permitted by the architecture.
         */
        return max(pteg_shift + 7, 18U);
}
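
/*
 * Worked example (illustrative): with 1GB of RAM and 4K pages,
 * memshift = 30 and pshift = 12, so pteg_shift = 30 - 13 = 17,
 * i.e. 2^17 PTEGs for 2^18 pages (2 pages per PTEG).  Each PTEG is
 * 2^7 = 128 bytes, so the result is max(17 + 7, 18) = 24: a 16MB
 * hash table.
 */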
static unsigned long __init htab_get_table_size(void)
{
        /* If hash size isn't already provided by the platform, we try to
         * retrieve it from the device-tree. If it's not there either, we
         * calculate it now based on the total RAM size
         */
        if (ppc64_pft_size == 0)
                of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
        if (ppc64_pft_size)
                return 1UL << ppc64_pft_size;

        return 1UL << htab_shift_for_mem_size(memblock_phys_mem_size());
}
#ifdef CONFIG_MEMORY_HOTPLUG
int hash__create_section_mapping(unsigned long start, unsigned long end)
{
        int rc = htab_bolt_mapping(start, end, __pa(start),
                                   pgprot_val(PAGE_KERNEL), mmu_linear_psize,
                                   mmu_kernel_ssize);

        if (rc < 0) {
                int rc2 = htab_remove_mapping(start, end, mmu_linear_psize,
                                              mmu_kernel_ssize);
                BUG_ON(rc2 && (rc2 != -ENOENT));
        }
        return rc;
}

int hash__remove_section_mapping(unsigned long start, unsigned long end)
{
        int rc = htab_remove_mapping(start, end, mmu_linear_psize,
                                     mmu_kernel_ssize);
        WARN_ON(rc < 0);
        return rc;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
static void update_hid_for_hash(void)
{
        unsigned long hid0;
        unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */

        asm volatile("ptesync": : :"memory");
        /* prs = 0, ric = 2, rs = 0, r = 1 is = 3 */
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(0), "i"(0), "i"(2), "r"(0) : "memory");
        asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
        /*
         * now switch the HID
         */
        hid0 = mfspr(SPRN_HID0);
        hid0 &= ~HID0_POWER9_RADIX;
        mtspr(SPRN_HID0, hid0);
        asm volatile("isync": : :"memory");

        /* Wait for it to happen */
        while ((mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
                cpu_relax();
}
static void __init hash_init_partition_table(phys_addr_t hash_table,
                                             unsigned long htab_size)
{
        mmu_partition_table_init();

        /*
         * PS field (VRMA page size) is not used for LPID 0, hence set to 0.
         * For now, UPRT is 0 and we have no segment table.
         */
        htab_size = __ilog2(htab_size) - 18;
        mmu_partition_table_set_entry(0, hash_table | htab_size, 0);
        pr_info("Partition table %p\n", partition_tb);
        if (cpu_has_feature(CPU_FTR_POWER9_DD1))
                update_hid_for_hash();
}
static void __init htab_initialize(void)
{
        unsigned long table;
        unsigned long pteg_count;
        unsigned long prot;
        unsigned long base = 0, size = 0;
        struct memblock_region *reg;

        DBG(" -> htab_initialize()\n");

        if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
                mmu_kernel_ssize = MMU_SEGSIZE_1T;
                mmu_highuser_ssize = MMU_SEGSIZE_1T;
                printk(KERN_INFO "Using 1TB segments\n");
        }

        /*
         * Calculate the required size of the htab.  We want the number of
         * PTEGs to equal one half the number of real pages.
         */
        htab_size_bytes = htab_get_table_size();
        pteg_count = htab_size_bytes >> 7;

        htab_hash_mask = pteg_count - 1;

        if (firmware_has_feature(FW_FEATURE_LPAR) ||
            firmware_has_feature(FW_FEATURE_PS3_LV1)) {
                /* Using a hypervisor which owns the htab */
                htab_address = NULL;
                _SDR1 = 0;
#ifdef CONFIG_FA_DUMP
                /*
                 * If firmware assisted dump is active firmware preserves
                 * the contents of htab along with entire partition memory.
                 * Clear the htab if firmware assisted dump is active so
                 * that we don't end up using old mappings.
                 */
                if (is_fadump_active() && mmu_hash_ops.hpte_clear_all)
                        mmu_hash_ops.hpte_clear_all();
#endif
        } else {
                unsigned long limit = MEMBLOCK_ALLOC_ANYWHERE;

#ifdef CONFIG_PPC_CELL
                /*
                 * Cell may require the hash table down low when using the
                 * Axon IOMMU in order to fit the dynamic region over it, see
                 * comments in cell/iommu.c
                 */
                if (fdt_subnode_offset(initial_boot_params, 0, "axon") > 0) {
                        limit = 0x80000000;
                        pr_info("Hash table forced below 2G for Axon IOMMU\n");
                }
#endif /* CONFIG_PPC_CELL */

                table = memblock_alloc_base(htab_size_bytes, htab_size_bytes,
                                            limit);

                DBG("Hash table allocated at %lx, size: %lx\n", table,
                    htab_size_bytes);

                htab_address = __va(table);

                /* htab absolute addr + encoded htabsize */
                _SDR1 = table + __ilog2(htab_size_bytes) - 18;

                /* Initialize the HPT with no entries */
                memset((void *)table, 0, htab_size_bytes);

                if (!cpu_has_feature(CPU_FTR_ARCH_300))
                        /* Set SDR1 */
                        mtspr(SPRN_SDR1, _SDR1);
                else
                        hash_init_partition_table(table, htab_size_bytes);
        }

        prot = pgprot_val(PAGE_KERNEL);

#ifdef CONFIG_DEBUG_PAGEALLOC
        if (debug_pagealloc_enabled()) {
                linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
                linear_map_hash_slots = __va(memblock_alloc_base(
                                linear_map_hash_count, 1, ppc64_rma_size));
                memset(linear_map_hash_slots, 0, linear_map_hash_count);
        }
#endif /* CONFIG_DEBUG_PAGEALLOC */

        /* On U3 based machines, we need to reserve the DART area and
         * _NOT_ map it to avoid cache paradoxes as it's remapped non
         * cacheable later on
         */

        /* Create the bolted linear mapping in the hash table */
        for_each_memblock(memory, reg) {
                base = (unsigned long)__va(reg->base);
                size = reg->size;

                DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
                    base, size, prot);

                BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
                                         prot, mmu_linear_psize,
                                         mmu_kernel_ssize));
        }
        memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

        /*
         * If we have a memory_limit and we've allocated TCEs then we need to
         * explicitly map the TCE area at the top of RAM. We also cope with the
         * case that the TCEs start below memory_limit.
         * tce_alloc_start/end are 16MB aligned so the mapping should work
         * for either 4K or 16MB pages.
         */
        if (tce_alloc_start) {
                tce_alloc_start = (unsigned long)__va(tce_alloc_start);
                tce_alloc_end = (unsigned long)__va(tce_alloc_end);

                if (base + size >= tce_alloc_start)
                        tce_alloc_start = base + size + 1;

                BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
                                         __pa(tce_alloc_start), prot,
                                         mmu_linear_psize, mmu_kernel_ssize));
        }

        DBG(" <- htab_initialize()\n");
}
#undef KB
#undef MB

void __init hash__early_init_devtree(void)
{
        /* Initialize segment sizes */
        of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);

        /* Initialize page sizes */
        htab_scan_page_sizes();
}
void __init hash__early_init_mmu(void)
{
        htab_init_page_sizes();

        /*
         * initialize page table size
         */
        __pte_frag_nr = H_PTE_FRAG_NR;
        __pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;

        __pte_index_size = H_PTE_INDEX_SIZE;
        __pmd_index_size = H_PMD_INDEX_SIZE;
        __pud_index_size = H_PUD_INDEX_SIZE;
        __pgd_index_size = H_PGD_INDEX_SIZE;
        __pmd_cache_index = H_PMD_CACHE_INDEX;
        __pte_table_size = H_PTE_TABLE_SIZE;
        __pmd_table_size = H_PMD_TABLE_SIZE;
        __pud_table_size = H_PUD_TABLE_SIZE;
        __pgd_table_size = H_PGD_TABLE_SIZE;
        /*
         * 4k uses the hugepd format, so for hash set these to
         * zero
         */
        __pmd_val_bits = 0;
        __pud_val_bits = 0;
        __pgd_val_bits = 0;

        __kernel_virt_start = H_KERN_VIRT_START;
        __kernel_virt_size = H_KERN_VIRT_SIZE;
        __vmalloc_start = H_VMALLOC_START;
        __vmalloc_end = H_VMALLOC_END;
        vmemmap = (struct page *)H_VMEMMAP_BASE;
        ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
        pci_io_base = ISA_IO_BASE;
#endif

        /* Select appropriate backend */
        if (firmware_has_feature(FW_FEATURE_PS3_LV1))
                ps3_early_mm_init();
        else if (firmware_has_feature(FW_FEATURE_LPAR))
                hpte_init_pseries();
        else if (IS_ENABLED(CONFIG_PPC_NATIVE))
                hpte_init_native();

        if (!mmu_hash_ops.hpte_insert)
                panic("hash__early_init_mmu: No MMU hash ops defined!\n");

        /* Initialize the MMU Hash table and create the linear mapping
         * of memory. Has to be done before SLB initialization as this is
         * currently where the page size encoding is obtained.
         */
        htab_initialize();

        pr_info("Initializing hash mmu with SLB\n");
        /* Initialize SLB management */
        slb_initialize();
}
#ifdef CONFIG_SMP
void hash__early_init_mmu_secondary(void)
{
        /* Initialize hash table for that CPU */
        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
                if (cpu_has_feature(CPU_FTR_POWER9_DD1))
                        update_hid_for_hash();

                if (!cpu_has_feature(CPU_FTR_ARCH_300))
                        mtspr(SPRN_SDR1, _SDR1);
                else
                        mtspr(SPRN_PTCR,
                              __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
        }
        /* Initialize SLB */
        slb_initialize();
}
#endif /* CONFIG_SMP */
/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
        struct page *page;

        if (!pfn_valid(pte_pfn(pte)))
                return pp;

        page = pte_page(pte);

        /* page is dirty */
        if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
                if (trap == 0x400) {
                        flush_dcache_icache_page(page);
                        set_bit(PG_arch_1, &page->flags);
                } else
                        pp |= HPTE_R_N;
        }
        return pp;
}
#ifdef CONFIG_PPC_MM_SLICES
static unsigned int get_paca_psize(unsigned long addr)
{
        u64 lpsizes;
        unsigned char *hpsizes;
        unsigned long index, mask_index;

        if (addr < SLICE_LOW_TOP) {
                lpsizes = get_paca()->mm_ctx_low_slices_psize;
                index = GET_LOW_SLICE_INDEX(addr);
                return (lpsizes >> (index * 4)) & 0xF;
        }
        hpsizes = get_paca()->mm_ctx_high_slices_psize;
        index = GET_HIGH_SLICE_INDEX(addr);
        mask_index = index & 0x1;
        return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xF;
}

#else
unsigned int get_paca_psize(unsigned long addr)
{
        return get_paca()->mm_ctx_user_psize;
}
#endif
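
/*
 * Layout note (illustrative): each slice's page size is a 4-bit nibble.
 * Low slices are packed two per byte inside the mm_ctx_low_slices_psize
 * u64, so slice index 5 lives in bits 20..23; high slices are packed the
 * same way in the mm_ctx_high_slices_psize byte array, with the low bit
 * of the index selecting the low or high nibble of hpsizes[index >> 1].
 */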
/*
 * Demote a segment to using 4k pages.
 * For now this makes the whole process use 4k pages.
 */
#ifdef CONFIG_PPC_64K_PAGES
void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
        if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
                return;
        slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
        copro_flush_all_slbs(mm);
        if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) {
                copy_mm_to_paca(&mm->context);
                slb_flush_and_rebolt();
        }
}
#endif /* CONFIG_PPC_64K_PAGES */
#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * This looks up a 2-bit protection code for a 4k subpage of a 64k page.
 * Userspace sets the subpage permissions using the subpage_prot system call.
 *
 * Result is 0: full permissions, _PAGE_RW: read-only,
 * _PAGE_RWX: no access.
 */
static int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
        struct subpage_prot_table *spt = &mm->context.spt;
        u32 spp = 0;
        u32 **sbpm, *sbpp;

        if (ea >= spt->maxaddr)
                return 0;
        if (ea < 0x100000000UL) {
                /* addresses below 4GB use spt->low_prot */
                sbpm = spt->low_prot;
        } else {
                sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
                if (!sbpm)
                        return 0;
        }
        sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
        if (!sbpp)
                return 0;
        spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];

        /* extract 2-bit bitfield for this 4k subpage */
        spp >>= 30 - 2 * ((ea >> 12) & 0xf);

        /*
         * 0 -> full permission
         * 1 -> Read only
         * 2 -> no access.
         * We return the flags that need to be cleared.
         */
        spp = ((spp & 2) ? _PAGE_RWX : 0) | ((spp & 1) ? _PAGE_WRITE : 0);
        return spp;
}

#else /* CONFIG_PPC_SUBPAGE_PROT */
static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
        return 0;
}
#endif
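
/*
 * Worked example (illustrative): for subpage index 3 of a 64k page
 * ((ea >> 12) & 0xf == 3), the shift above is 30 - 2 * 3 = 24, so that
 * subpage's 2-bit code lands in the low bits after the shift: subpage 0
 * occupies the top two bits of the 32-bit word and each following
 * subpage the next two bits down.
 */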
void hash_failure_debug(unsigned long ea, unsigned long access,
                        unsigned long vsid, unsigned long trap,
                        int ssize, int psize, int lpsize, unsigned long pte)
{
        if (!printk_ratelimit())
                return;
        pr_info("mm: Hashing failure ! EA=0x%lx access=0x%lx current=%s\n",
                ea, access, current->comm);
        pr_info("    trap=0x%lx vsid=0x%lx ssize=%d base psize=%d psize %d pte=0x%lx\n",
                trap, vsid, ssize, psize, lpsize, pte);
}
static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
                             int psize, bool user_region)
{
        if (user_region) {
                if (psize != get_paca_psize(ea)) {
                        copy_mm_to_paca(&mm->context);
                        slb_flush_and_rebolt();
                }
        } else if (get_paca()->vmalloc_sllp !=
                   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
                get_paca()->vmalloc_sllp =
                        mmu_psize_defs[mmu_vmalloc_psize].sllp;
                slb_vmalloc_update();
        }
}
/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 * -2 - access not permitted by subpage protection mechanism
 */
int hash_page_mm(struct mm_struct *mm, unsigned long ea,
                 unsigned long access, unsigned long trap,
                 unsigned long flags)
{
        bool is_thp;
        enum ctx_state prev_state = exception_enter();
        pgd_t *pgdir;
        unsigned long vsid;
        pte_t *ptep;
        unsigned hugeshift;
        const struct cpumask *tmp;
        int rc, user_region = 0;
        int psize, ssize;

        DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
                ea, access, trap);
        trace_hash_fault(ea, access, trap);

        /* Get region & vsid */
        switch (REGION_ID(ea)) {
        case USER_REGION_ID:
                user_region = 1;
                if (!mm) {
                        DBG_LOW(" user region with no mm !\n");
                        rc = 1;
                        goto bail;
                }
                psize = get_slice_psize(mm, ea);
                ssize = user_segment_size(ea);
                vsid = get_vsid(mm->context.id, ea, ssize);
                break;
        case VMALLOC_REGION_ID:
                vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
                if (ea < VMALLOC_END)
                        psize = mmu_vmalloc_psize;
                else
                        psize = mmu_io_psize;
                ssize = mmu_kernel_ssize;
                break;
        default:
                /* Not a valid range
                 * Send the problem up to do_page_fault
                 */
                rc = 1;
                goto bail;
        }
        DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

        /* Bad address. */
        if (!vsid) {
                DBG_LOW("Bad address!\n");
                rc = 1;
                goto bail;
        }
        /* Get pgdir */
        pgdir = mm->pgd;
        if (pgdir == NULL) {
                rc = 1;
                goto bail;
        }

        /* Check CPU locality */
        tmp = cpumask_of(smp_processor_id());
        if (user_region && cpumask_equal(mm_cpumask(mm), tmp))
                flags |= HPTE_LOCAL_UPDATE;

#ifndef CONFIG_PPC_64K_PAGES
        /* If we use 4K pages and our psize is not 4K, then we might
         * be hitting a special driver mapping, and need to align the
         * address before we fetch the PTE.
         *
         * It could also be a hugepage mapping, in which case this is
         * not necessary, but it's not harmful, either.
         */
        if (psize != MMU_PAGE_4K)
                ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
#endif /* CONFIG_PPC_64K_PAGES */

        /* Get PTE and page size from page tables */
        ptep = __find_linux_pte_or_hugepte(pgdir, ea, &is_thp, &hugeshift);
        if (ptep == NULL || !pte_present(*ptep)) {
                DBG_LOW(" no PTE !\n");
                rc = 1;
                goto bail;
        }

        /* Add _PAGE_PRESENT to the required access perm */
        access |= _PAGE_PRESENT;

        /* Pre-check access permissions (will be re-checked atomically
         * in __hash_page_XX but this pre-check is a fast path)
         */
        if (!check_pte_access(access, pte_val(*ptep))) {
                DBG_LOW(" no access !\n");
                rc = 1;
                goto bail;
        }

        if (hugeshift) {
                if (is_thp)
                        rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep,
                                             trap, flags, ssize, psize);
#ifdef CONFIG_HUGETLB_PAGE
                else
                        rc = __hash_page_huge(ea, access, vsid, ptep, trap,
                                              flags, ssize, hugeshift, psize);
#else
                else {
                        /*
                         * If we have a hugeshift, and this is not transhuge,
                         * then with hugetlb disabled something is really wrong.
                         */
                        rc = 1;
                        WARN_ON(1);
                }
#endif
                if (current->mm == mm)
                        check_paca_psize(ea, mm, psize, user_region);

                goto bail;
        }
#ifndef CONFIG_PPC_64K_PAGES
        DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
        DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
                pte_val(*(ptep + PTRS_PER_PTE)));
#endif
        /* Do actual hashing */
#ifdef CONFIG_PPC_64K_PAGES
        /* If H_PAGE_4K_PFN is set, make sure this is a 4k segment */
        if ((pte_val(*ptep) & H_PAGE_4K_PFN) && psize == MMU_PAGE_64K) {
                demote_segment_4k(mm, ea);
                psize = MMU_PAGE_4K;
        }

        /* If this PTE is non-cacheable and we have restrictions on
         * using non cacheable large pages, then we switch to 4k
         */
        if (mmu_ci_restrictions && psize == MMU_PAGE_64K && pte_ci(*ptep)) {
                if (user_region) {
                        demote_segment_4k(mm, ea);
                        psize = MMU_PAGE_4K;
                } else if (ea < VMALLOC_END) {
                        /*
                         * some driver did a non-cacheable mapping
                         * in vmalloc space, so switch vmalloc
                         * to 4k pages
                         */
                        printk(KERN_ALERT "Reducing vmalloc segment "
                               "to 4kB pages because of "
                               "non-cacheable mapping\n");
                        psize = mmu_vmalloc_psize = MMU_PAGE_4K;
                        copro_flush_all_slbs(mm);
                }
        }
#endif /* CONFIG_PPC_64K_PAGES */

        if (current->mm == mm)
                check_paca_psize(ea, mm, psize, user_region);

#ifdef CONFIG_PPC_64K_PAGES
        if (psize == MMU_PAGE_64K)
                rc = __hash_page_64K(ea, access, vsid, ptep, trap,
                                     flags, ssize);
        else
#endif /* CONFIG_PPC_64K_PAGES */
        {
                int spp = subpage_protection(mm, ea);
                if (access & spp)
                        rc = -2;
                else
                        rc = __hash_page_4K(ea, access, vsid, ptep, trap,
                                            flags, ssize, spp);
        }

        /* Dump some info in case of hash insertion failure; such failures
         * should never happen, so it is really useful to know if/when they do
         */
        if (rc == -1)
                hash_failure_debug(ea, access, vsid, trap, ssize, psize,
                                   psize, pte_val(*ptep));
#ifndef CONFIG_PPC_64K_PAGES
        DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
        DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
                pte_val(*(ptep + PTRS_PER_PTE)));
#endif
        DBG_LOW(" -> rc=%d\n", rc);

bail:
        exception_exit(prev_state);
        return rc;
}
EXPORT_SYMBOL_GPL(hash_page_mm);
int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
              unsigned long dsisr)
{
        unsigned long flags = 0;
        struct mm_struct *mm = current->mm;

        if (REGION_ID(ea) == VMALLOC_REGION_ID)
                mm = &init_mm;

        if (dsisr & DSISR_NOHPTE)
                flags |= HPTE_NOHPTE_UPDATE;

        return hash_page_mm(mm, ea, access, trap, flags);
}
EXPORT_SYMBOL_GPL(hash_page);
int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap,
                unsigned long dsisr)
{
        unsigned long access = _PAGE_PRESENT | _PAGE_READ;
        unsigned long flags = 0;
        struct mm_struct *mm = current->mm;

        if (REGION_ID(ea) == VMALLOC_REGION_ID)
                mm = &init_mm;

        if (dsisr & DSISR_NOHPTE)
                flags |= HPTE_NOHPTE_UPDATE;

        if (dsisr & DSISR_ISSTORE)
                access |= _PAGE_WRITE;
        /*
         * We set _PAGE_PRIVILEGED only when
         * kernel mode accesses kernel space.
         *
         * _PAGE_PRIVILEGED is NOT set
         * 1) when kernel mode accesses user space
         * 2) when user space accesses kernel space.
         */
        access |= _PAGE_PRIVILEGED;
        if ((msr & MSR_PR) || (REGION_ID(ea) == USER_REGION_ID))
                access &= ~_PAGE_PRIVILEGED;

        if (trap == 0x400)
                access |= _PAGE_EXEC;

        return hash_page_mm(mm, ea, access, trap, flags);
}
#ifdef CONFIG_PPC_MM_SLICES
static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
{
        int psize = get_slice_psize(mm, ea);

        /* We only prefault standard pages for now */
        if (unlikely(psize != mm->context.user_psize))
                return false;

        /*
         * Don't prefault if subpage protection is enabled for the EA.
         */
        if (unlikely((psize == MMU_PAGE_4K) && subpage_protection(mm, ea)))
                return false;

        return true;
}
#else
static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
{
        return true;
}
#endif
void hash_preload(struct mm_struct *mm, unsigned long ea,
                  unsigned long access, unsigned long trap)
{
        int hugepage_shift;
        unsigned long vsid;
        pgd_t *pgdir;
        pte_t *ptep;
        unsigned long flags;
        int rc, ssize, update_flags = 0;

        BUG_ON(REGION_ID(ea) != USER_REGION_ID);

        if (!should_hash_preload(mm, ea))
                return;

        DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
                " trap=%lx\n", mm, mm->pgd, ea, access, trap);

        /* Get Linux PTE if available */
        pgdir = mm->pgd;
        if (pgdir == NULL)
                return;

        /* Get VSID */
        ssize = user_segment_size(ea);
        vsid = get_vsid(mm->context.id, ea, ssize);
        if (!vsid)
                return;
        /*
         * Hash doesn't like irqs. Walking the linux page table with irqs
         * disabled saves us from holding multiple locks.
         */
        local_irq_save(flags);

        /*
         * THP pages use update_mmu_cache_pmd. We don't do
         * hash preload there. Hence we can ignore THP here
         */
        ptep = find_linux_pte_or_hugepte(pgdir, ea, NULL, &hugepage_shift);
        if (!ptep)
                goto out_exit;

        WARN_ON(hugepage_shift);
#ifdef CONFIG_PPC_64K_PAGES
        /* If either H_PAGE_4K_PFN or cache inhibited is set (and we are on
         * a 64K kernel), then we don't preload, hash_page() will take
         * care of it once we actually try to access the page.
         * That way we don't have to duplicate all of the logic for segment
         * page size demotion here
         */
        if ((pte_val(*ptep) & H_PAGE_4K_PFN) || pte_ci(*ptep))
                goto out_exit;
#endif /* CONFIG_PPC_64K_PAGES */

        /* Is that local to this CPU ? */
        if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
                update_flags |= HPTE_LOCAL_UPDATE;

        /* Hash it in */
#ifdef CONFIG_PPC_64K_PAGES
        if (mm->context.user_psize == MMU_PAGE_64K)
                rc = __hash_page_64K(ea, access, vsid, ptep, trap,
                                     update_flags, ssize);
        else
#endif /* CONFIG_PPC_64K_PAGES */
                rc = __hash_page_4K(ea, access, vsid, ptep, trap, update_flags,
                                    ssize, subpage_protection(mm, ea));

        /* Dump some info in case of hash insertion failure; such failures
         * should never happen, so it is really useful to know if/when they do
         */
        if (rc == -1)
                hash_failure_debug(ea, access, vsid, trap, ssize,
                                   mm->context.user_psize,
                                   mm->context.user_psize,
                                   pte_val(*ptep));
out_exit:
        local_irq_restore(flags);
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void tm_flush_hash_page(int local)
{
        /*
         * Transactions are not aborted by tlbiel, only tlbie. Without an
         * abort, syncing a page back to a block device w/ PIO could pick up
         * transactional data (bad!), so we force an abort here. Before the
         * sync the page will be made read-only, which will flush_hash_page.
         * BIG ISSUE here: if the kernel uses a page from userspace without
         * unmapping it first, it may see the speculated version.
         */
        if (local && cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
            MSR_TM_ACTIVE(current->thread.regs->msr)) {
                tm_enable();
                tm_abort(TM_CAUSE_TLBI);
        }
}
#else
static inline void tm_flush_hash_page(int local)
{
}
#endif
/* WARNING: This is called from hash_low_64.S, if you change this prototype,
 *          do not forget to update the assembly call site !
 */
void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
                     unsigned long flags)
{
        unsigned long hash, index, shift, hidx, slot;
        int local = flags & HPTE_LOCAL_UPDATE;

        DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn);
        pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
                hash = hpt_hash(vpn, shift, ssize);
                hidx = __rpte_to_hidx(pte, index);
                if (hidx & _PTEIDX_SECONDARY)
                        hash = ~hash;
                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += hidx & _PTEIDX_GROUP_IX;
                DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
                /*
                 * We use the same base page size and actual psize, because
                 * we don't use these functions for hugepage
                 */
                mmu_hash_ops.hpte_invalidate(slot, vpn, psize, psize,
                                             ssize, local);
        } pte_iterate_hashed_end();

        tm_flush_hash_page(local);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
                         pmd_t *pmdp, unsigned int psize, int ssize,
                         unsigned long flags)
{
        int i, max_hpte_count, valid;
        unsigned long s_addr;
        unsigned char *hpte_slot_array;
        unsigned long hidx, shift, vpn, hash, slot;
        int local = flags & HPTE_LOCAL_UPDATE;

        s_addr = addr & HPAGE_PMD_MASK;
        hpte_slot_array = get_hpte_slot_array(pmdp);
        /*
         * If we try to do a huge PTE update after a withdraw is done,
         * we will find the below NULL. This happens when we do
         * split_huge_page_pmd
         */
        if (!hpte_slot_array)
                return;

        if (mmu_hash_ops.hugepage_invalidate) {
                mmu_hash_ops.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
                                                 psize, ssize, local);
                goto tm_abort;
        }
        /*
         * No bulk hpte removal support, invalidate each entry
         */
        shift = mmu_psize_defs[psize].shift;
        max_hpte_count = HPAGE_PMD_SIZE >> shift;
        for (i = 0; i < max_hpte_count; i++) {
                /*
                 * 8 bits per hpte entry:
                 * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
                 */
                valid = hpte_valid(hpte_slot_array, i);
                if (!valid)
                        continue;
                hidx = hpte_hash_index(hpte_slot_array, i);

                /* get the vpn */
                addr = s_addr + (i * (1ul << shift));
                vpn = hpt_vpn(addr, vsid, ssize);
                hash = hpt_hash(vpn, shift, ssize);
                if (hidx & _PTEIDX_SECONDARY)
                        hash = ~hash;

                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += hidx & _PTEIDX_GROUP_IX;
                mmu_hash_ops.hpte_invalidate(slot, vpn, psize,
                                             MMU_PAGE_16M, ssize, local);
        }
tm_abort:
        tm_flush_hash_page(local);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
void flush_hash_range(unsigned long number, int local)
{
        if (mmu_hash_ops.flush_hash_range)
                mmu_hash_ops.flush_hash_range(number, local);
        else {
                int i;
                struct ppc64_tlb_batch *batch =
                        this_cpu_ptr(&ppc64_tlb_batch);

                for (i = 0; i < number; i++)
                        flush_hash_page(batch->vpn[i], batch->pte[i],
                                        batch->psize, batch->ssize, local);
        }
}
/*
 * low_hash_fault is called when the low-level hash code fails
 * to insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
{
        enum ctx_state prev_state = exception_enter();

        if (user_mode(regs)) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
                if (rc == -2)
                        _exception(SIGSEGV, regs, SEGV_ACCERR, address);
                else
#endif
                        _exception(SIGBUS, regs, BUS_ADRERR, address);
        } else
                bad_page_fault(regs, address, SIGBUS);

        exception_exit(prev_state);
}
long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
                           unsigned long pa, unsigned long rflags,
                           unsigned long vflags, int psize, int ssize)
{
        unsigned long hpte_group;
        long slot;

repeat:
        hpte_group = ((hash & htab_hash_mask) *
                      HPTES_PER_GROUP) & ~0x7UL;

        /* Insert into the hash table, primary slot */
        slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, vflags,
                                        psize, psize, ssize);

        /* Primary is full, try the secondary */
        if (unlikely(slot == -1)) {
                hpte_group = ((~hash & htab_hash_mask) *
                              HPTES_PER_GROUP) & ~0x7UL;
                slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags,
                                                vflags | HPTE_V_SECONDARY,
                                                psize, psize, ssize);
                if (slot == -1) {
                        if (mftb() & 0x1)
                                hpte_group = ((hash & htab_hash_mask) *
                                              HPTES_PER_GROUP) & ~0x7UL;

                        mmu_hash_ops.hpte_remove(hpte_group);
                        goto repeat;
                }
        }

        return slot;
}
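
/*
 * Note on the retry path above: when both the primary and the secondary
 * group are full, a victim group is picked pseudo-randomly (the low bit
 * of the timebase selects the primary group; otherwise hpte_group still
 * points at the secondary), one entry is evicted via hpte_remove(), and
 * the insertion is retried from the top.
 */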
#ifdef CONFIG_DEBUG_PAGEALLOC
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
        unsigned long hash;
        unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
        unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
        unsigned long mode = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));
        long ret;

        hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);

        /* Don't create HPTE entries for bad address */
        if (!vsid)
                return;

        ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
                                    HPTE_V_BOLTED,
                                    mmu_linear_psize, mmu_kernel_ssize);

        BUG_ON(ret < 0);
        spin_lock(&linear_map_hash_lock);
        BUG_ON(linear_map_hash_slots[lmi] & 0x80);
        linear_map_hash_slots[lmi] = ret | 0x80;
        spin_unlock(&linear_map_hash_lock);
}

static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
        unsigned long hash, hidx, slot;
        unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
        unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);

        hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
        spin_lock(&linear_map_hash_lock);
        BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
        hidx = linear_map_hash_slots[lmi] & 0x7f;
        linear_map_hash_slots[lmi] = 0;
        spin_unlock(&linear_map_hash_lock);
        if (hidx & _PTEIDX_SECONDARY)
                hash = ~hash;
        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
        slot += hidx & _PTEIDX_GROUP_IX;
        mmu_hash_ops.hpte_invalidate(slot, vpn, mmu_linear_psize,
                                     mmu_linear_psize,
                                     mmu_kernel_ssize, 0);
}

void __kernel_map_pages(struct page *page, int numpages, int enable)
{
        unsigned long flags, vaddr, lmi;
        int i;

        local_irq_save(flags);
        for (i = 0; i < numpages; i++, page++) {
                vaddr = (unsigned long)page_address(page);
                lmi = __pa(vaddr) >> PAGE_SHIFT;
                if (lmi >= linear_map_hash_count)
                        continue;
                if (enable)
                        kernel_map_linear_page(vaddr, lmi);
                else
                        kernel_unmap_linear_page(vaddr, lmi);
        }
        local_irq_restore(flags);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */
void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                      phys_addr_t first_memblock_size)
{
        /* We don't currently support the first MEMBLOCK not mapping 0
         * physical on those processors
         */
        BUG_ON(first_memblock_base != 0);

        /* On LPAR systems, the first entry is our RMA region,
         * non-LPAR 64-bit hash MMU systems don't have a limitation
         * on real mode access, but using the first entry works well
         * enough. We also clamp it to 1G to avoid some funky things
         * such as RTAS bugs etc...
         */
        ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

        /* Finally limit subsequent allocations */
        memblock_set_current_limit(ppc64_rma_size);
}