hash_native_64.c

/*
 * native hashtable management.
 *
 * SMP scalability work:
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#include <misc/cxl-base.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif
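
/*
 * The lock bit is one of the software-use bits in the HPTE's first
 * doubleword (HPTE_V_LOCK). The HPTE is stored big-endian, while the
 * bitops below operate on a native-endian unsigned long, so on
 * little-endian the same physical bit sits at index 56+3 after the
 * byte swap.
 */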
#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif

DEFINE_RAW_SPINLOCK(native_tlbie_lock);
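
/*
 * Build the virtual address argument and issue a global tlbie for a
 * single page. psize is the base page size of the mapping, apsize the
 * actual page size, ssize the segment size. Callers provide the ptesync
 * before and the eieio; tlbsync; ptesync after (see tlbie() and
 * native_hpte_clear() below).
 */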
static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/*
	 * We need 14 to 65 bits of va for a tlbie of 4K page
	 * With vpn we ignore the lower VPN_SHIFT bits already.
	 * And top two bits are already ignored because we can
	 * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = get_sllp_encoding(apsize);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va. (0...65) and we need
		 * 58..64 bits of va.
		 */
		va |= (vpn & 0xfe); /* AVAL */
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
}
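
/*
 * Local form of the above: tlbiel only invalidates the TLB of the
 * executing CPU, so it needs no global serialisation.
 */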
static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64 bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after(52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = get_sllp_encoding(apsize);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,0", %1)
			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va. (0...65) and we need
		 * 58..64 bits of va.
		 */
		va |= (vpn & 0xfe);
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,1", %1)
			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
}
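
/*
 * Invalidate one page translation, choosing between the local (tlbiel)
 * and global (tlbie) forms. The local form is only used when the caller
 * asked for it, the CPU supports tlbiel for this page size, and no cxl
 * contexts are active. CPUs without MMU_FTR_LOCKLESS_TLBIE must
 * serialise global tlbies via native_tlbie_lock.
 */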
static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}
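
/*
 * Lock an HPTE by atomically setting HPTE_LOCK_BIT in its first
 * doubleword, spinning with plain reads (test-and-test-and-set) while
 * the bit is held elsewhere to avoid hammering the cacheline.
 */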
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		while (test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}
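
/*
 * Find a free slot in the given PTEG and install a new HPTE there.
 * Returns the slot number within the group, with bit 3 set if the group
 * is the secondary one, or -1 if all eight slots are valid.
 */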
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			       unsigned long pa, unsigned long rflags,
			       unsigned long vflags, int psize, int apsize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
		hpte_v = hpte_old_to_new_v(hpte_v);
	}

	hptep->r = cpu_to_be64(hpte_r);
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = cpu_to_be64(hpte_v);

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
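
/*
 * Evict one non-bolted entry from a full PTEG, starting the search at a
 * pseudo-random slot. Note that this does no TLB invalidation; callers
 * rely on the old translation remaining "valid" (see the comment in
 * native_hpte_updatepp()).
 */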
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = be64_to_cpu(hptep->v);

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}
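
/*
 * Update the protection bits of an existing HPTE, if it still matches
 * the expected AVPN. Returns 0 on a hit and -1 on a miss; in either
 * case the TLB entry is invalidated unless this is a nohpte fault.
 */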
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int bpsize,
				 int apsize, int ssize, unsigned long flags)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0, local = 0;

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	hpte_v = be64_to_cpu(hptep->v);
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
	 * random entry from it. When we do that we don't invalidate the TLB
	 * (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		native_lock_hpte(hptep);
		/* recheck with locks held */
		hpte_v = be64_to_cpu(hptep->v);
		if (cpu_has_feature(CPU_FTR_ARCH_300))
			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
		if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
			     !(hpte_v & HPTE_V_VALID))) {
			ret = -1;
		} else {
			DBG_LOW(" -> hit\n");
			/* Update the HPTE */
			hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
						~(HPTE_R_PPP | HPTE_R_N)) |
					       (newpp & (HPTE_R_PPP | HPTE_R_N |
							 HPTE_R_C)));
		}
		native_unlock_hpte(hptep);
	}

	if (flags & HPTE_LOCAL_UPDATE)
		local = 1;
	/*
	 * Ensure it is out of the tlb too if it is not a nohpte fault
	 */
	if (!(flags & HPTE_NOHPTE_UPDATE))
		tlbie(vpn, bpsize, apsize, ssize, local);

	return ret;
}
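
/*
 * Look up the slot of a bolted HPTE by its vpn. Only the primary group
 * is searched, since bolted mappings are never placed in the secondary
 * one. Returns the global slot number, or -1 if not found.
 */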
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = be64_to_cpu(hptep->v);
		if (cpu_has_feature(CPU_FTR_ARCH_300))
			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
				~(HPTE_R_PPP | HPTE_R_N)) |
			       (newpp & (HPTE_R_PPP | HPTE_R_N)));
	/*
	 * Ensure it is out of the tlb too. A bolted entry's base and
	 * actual page size will be the same.
	 */
	tlbie(vpn, psize, psize, ssize, 0);
}
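
/*
 * Invalidate a single HPTE and flush the corresponding TLB entry. The
 * whole sequence runs with interrupts disabled for the duration of the
 * lock/invalidate sequence.
 */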
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int bpsize, int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	native_lock_hpte(hptep);
	hpte_v = be64_to_cpu(hptep->v);
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));

	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
	 * random entry from it. When we do that we don't invalidate the TLB
	 * (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
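/*
 * Invalidate every HPTE backing a hugepage PMD. hpte_slot_array records,
 * per sub-page, whether an HPTE was inserted and in which slot, so only
 * valid entries are torn down. The code assumes a 16M actual page size.
 */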
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	int i;
	struct hash_pte *hptep;
	int actual_psize = MMU_PAGE_16M;
	unsigned int max_hpte_count, valid;
	unsigned long flags, s_addr = addr;
	unsigned long hpte_v, want_v, shift;
	unsigned long hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	local_irq_save(flags);
	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		hptep = htab_address + slot;
		want_v = hpte_encode_avpn(vpn, psize, ssize);
		native_lock_hpte(hptep);
		hpte_v = be64_to_cpu(hptep->v);
		if (cpu_has_feature(CPU_FTR_ARCH_300))
			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));

		/* Even if we miss, we need to invalidate the TLB */
		if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
			native_unlock_hpte(hptep);
		else
			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->v = 0;
		/*
		 * We need to do a tlb invalidate for all the addresses; the
		 * tlbie instruction compares the entry's VA in the TLB with
		 * the VA specified here
		 */
		tlbie(vpn, psize, actual_psize, ssize, local);
	}
	local_irq_restore(flags);
}
#else
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif
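
/*
 * Reverse-engineer the vpn, page sizes and segment size from a raw HPTE
 * plus its slot number, by undoing the hash function. Used by
 * native_hpte_clear(), which has nothing but the HPT contents to go on.
 */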
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = be64_to_cpu(hpte->v);
	unsigned long hpte_r = be64_to_cpu(hpte->r);
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte_v = hpte_new_to_old_v(hpte_v, hpte_r);
		hpte_r = hpte_new_to_old_r(hpte_r);
	}
	if (!(hpte_v & HPTE_V_LARGE)) {
		size   = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		size   = hpte_page_sizes[lp] & 0xf;
		a_size = hpte_page_sizes[lp] >> 4;
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid    = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid    = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize  = size;
	*apsize = a_size;
}

/*
 * clear all mappings on kexec. All cpus are in real mode (or they will
 * be when they isi), and we are the only one left. We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * This must be called with interrupts disabled.
 *
 * Taking the native_tlbie_lock is unsafe here due to the possibility of
 * lockdep being on. On pre-POWER5 hardware, not taking the lock could
 * cause deadlock. On POWER5 and newer, not taking the lock is fine. This
 * only gets called during boot before secondary CPUs have come up and
 * during crashdump, and all bets are off anyway.
 *
 * TODO: add batching support when enabled. remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running, right? and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = be64_to_cpu(hptep->v);

		/*
		 * Call __tlbie() here rather than tlbie() since we can't take the
		 * native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			__tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
}

/*
 * Batched hash table flush, we batch the tlbie's to avoid taking/releasing
 * the lock all the time
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				hpte_v = hpte_new_to_old_v(hpte_v,
						be64_to_cpu(hptep->r));
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}
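
	/*
	 * Now flush the TLB entries: use the local tlbiel form if the
	 * whole batch is local and the page size supports it, otherwise
	 * fall back to global tlbie, taking native_tlbie_lock when the
	 * CPU requires serialisation.
	 */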
	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
	    mmu_psize_defs[psize].tlbiel && local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}
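
/*
 * Fill in the second doubleword of this partition's table entry with the
 * process table base, encoding and size (ISA 3.00 / POWER9). Only hooked
 * up when CPU_FTR_ARCH_300 is set, see hpte_init_native() below.
 */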
static int native_register_proc_table(unsigned long base, unsigned long page_size,
				      unsigned long table_size)
{
	unsigned long patb1 = base << 25; /* VSID */

	patb1 |= (page_size << 5);  /* sllp */
	patb1 |= table_size;

	partition_tb->patb1 = cpu_to_be64(patb1);
	return 0;
}

void __init hpte_init_native(void)
{
	mmu_hash_ops.hpte_invalidate = native_hpte_invalidate;
	mmu_hash_ops.hpte_updatepp = native_hpte_updatepp;
	mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp;
	mmu_hash_ops.hpte_insert = native_hpte_insert;
	mmu_hash_ops.hpte_remove = native_hpte_remove;
	mmu_hash_ops.hpte_clear_all = native_hpte_clear;
	mmu_hash_ops.flush_hash_range = native_flush_hash_range;
	mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate;

	if (cpu_has_feature(CPU_FTR_ARCH_300))
		register_process_table = native_register_proc_table;
}