pgtable.c

/*
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
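
/*
 * Invalidate a PTE and flush the TLB synchronously. If only the local
 * CPU has the mm attached and local TLB clearing is available, a local
 * IPTE is enough; otherwise the invalidation is broadcast to all CPUs.
 */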
static inline pte_t ptep_flush_direct(struct mm_struct *mm,
                                      unsigned long addr, pte_t *ptep)
{
        pte_t old;

        old = *ptep;
        if (unlikely(pte_val(old) & _PAGE_INVALID))
                return old;
        atomic_inc(&mm->context.flush_count);
        if (MACHINE_HAS_TLB_LC &&
            cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
                __ptep_ipte_local(addr, ptep);
        else
                __ptep_ipte(addr, ptep);
        atomic_dec(&mm->context.flush_count);
        return old;
}
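
/*
 * Invalidate a PTE but defer the TLB flush when possible: if the mm is
 * attached to the local CPU only, just mark the PTE invalid and record
 * a pending flush in mm->context.flush_mm; otherwise fall back to an
 * immediate broadcast IPTE.
 */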
static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
                                    unsigned long addr, pte_t *ptep)
{
        pte_t old;

        old = *ptep;
        if (unlikely(pte_val(old) & _PAGE_INVALID))
                return old;
        atomic_inc(&mm->context.flush_count);
        if (cpumask_equal(&mm->context.cpu_attach_mask,
                          cpumask_of(smp_processor_id()))) {
                pte_val(*ptep) |= _PAGE_INVALID;
                mm->context.flush_mm = 1;
        } else
                __ptep_ipte(addr, ptep);
        atomic_dec(&mm->context.flush_count);
        return old;
}
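
/*
 * Each PTE with guest extensions is shadowed by a PGSTE located
 * PTRS_PER_PTE entries behind it. The PCL bit (0x0080 in the high
 * halfword) of the PGSTE acts as a per-entry lock, acquired below
 * with a compare-and-swap loop.
 */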
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
        unsigned long new = 0;
#ifdef CONFIG_PGSTE
        unsigned long old;

        asm(
                "       lg      %0,%2\n"
                "0:     lgr     %1,%0\n"
                "       nihh    %0,0xff7f\n"    /* clear PCL bit in old */
                "       oihh    %1,0x0080\n"    /* set PCL bit in new */
                "       csg     %0,%1,%2\n"
                "       jl      0b\n"
                : "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
                : "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
        return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
        asm(
                "       nihh    %1,0xff7f\n"    /* clear PCL bit */
                "       stg     %1,%0\n"
                : "=Q" (ptep[PTRS_PER_PTE])
                : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
                : "cc", "memory");
#endif
}

static inline pgste_t pgste_get(pte_t *ptep)
{
        unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
        pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
        return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
        *(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}
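
/*
 * Fold the hardware storage key of a mapped page back into its PGSTE:
 * the changed/referenced bits become the guest GC/GR bits, and the
 * access-control bits plus the fetch-protection bit are copied into
 * the PGSTE key field.
 */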
static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
                                       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
        unsigned long address, bits, skey;

        if (!mm_use_skey(mm) || pte_val(pte) & _PAGE_INVALID)
                return pgste;
        address = pte_val(pte) & PAGE_MASK;
        skey = (unsigned long) page_get_storage_key(address);
        bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
        /* Transfer page changed & referenced bit to guest bits in pgste */
        pgste_val(pgste) |= bits << 48;         /* GR bit & GC bit */
        /* Copy page access key and fetch protection bit to pgste */
        pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
        pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
        return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
                                 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
        unsigned long address;
        unsigned long nkey;

        if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
                return;
        VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
        address = pte_val(entry) & PAGE_MASK;
        /*
         * Set page access key and fetch protection bit from pgste.
         * The guest C/R information is still in the PGSTE, set real
         * key C/R to 0.
         */
        nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
        nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
        page_set_storage_key(address, nkey, 0);
#endif
}
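
/*
 * Install a PTE and keep the PGSTE dirty state consistent. On machines
 * without enhanced suppression-on-protection the dirty bit is forced on
 * for writable PTEs; any PTE that allows write access is flagged
 * user-dirty (PGSTE_UC_BIT) so test_and_clear_guest_dirty() sees it.
 */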
static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
        if ((pte_val(entry) & _PAGE_PRESENT) &&
            (pte_val(entry) & _PAGE_WRITE) &&
            !(pte_val(entry) & _PAGE_INVALID)) {
                if (!MACHINE_HAS_ESOP) {
                        /*
                         * Without enhanced suppression-on-protection force
                         * the dirty bit on for all writable ptes.
                         */
                        pte_val(entry) |= _PAGE_DIRTY;
                        pte_val(entry) &= ~_PAGE_PROTECT;
                }
                if (!(pte_val(entry) & _PAGE_PROTECT))
                        /* This pte allows write access, set user-dirty */
                        pgste_val(pgste) |= PGSTE_UC_BIT;
        }
#endif
        *ptep = entry;
        return pgste;
}

static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
                                        unsigned long addr,
                                        pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
        if (pgste_val(pgste) & PGSTE_IN_BIT) {
                pgste_val(pgste) &= ~PGSTE_IN_BIT;
                ptep_notify(mm, addr, ptep);
        }
#endif
        return pgste;
}
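
/*
 * Common bracket for PTE exchanges: ptep_xchg_start() takes the PGSTE
 * lock and runs the invalidation notifier; ptep_xchg_commit()
 * propagates storage keys, marks unused guest pages, installs the new
 * PTE and drops the lock.
 */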
static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
                                      unsigned long addr, pte_t *ptep)
{
        pgste_t pgste = __pgste(0);

        if (mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
                pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
        }
        return pgste;
}

static inline void ptep_xchg_commit(struct mm_struct *mm,
                                    unsigned long addr, pte_t *ptep,
                                    pgste_t pgste, pte_t old, pte_t new)
{
        if (mm_has_pgste(mm)) {
                if (pte_val(old) & _PAGE_INVALID)
                        pgste_set_key(ptep, pgste, new, mm);
                if (pte_val(new) & _PAGE_INVALID) {
                        pgste = pgste_update_all(old, pgste, mm);
                        if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
                            _PGSTE_GPS_USAGE_UNUSED)
                                pte_val(old) |= _PAGE_UNUSED;
                }
                pgste = pgste_set_pte(ptep, pgste, new);
                pgste_set_unlock(ptep, pgste);
        } else {
                *ptep = new;
        }
}

pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
                       pte_t *ptep, pte_t new)
{
        pgste_t pgste;
        pte_t old;

        preempt_disable();
        pgste = ptep_xchg_start(mm, addr, ptep);
        old = ptep_flush_direct(mm, addr, ptep);
        ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
        preempt_enable();
        return old;
}
EXPORT_SYMBOL(ptep_xchg_direct);

pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t new)
{
        pgste_t pgste;
        pte_t old;

        preempt_disable();
        pgste = ptep_xchg_start(mm, addr, ptep);
        old = ptep_flush_lazy(mm, addr, ptep);
        ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
        preempt_enable();
        return old;
}
EXPORT_SYMBOL(ptep_xchg_lazy);
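
/*
 * Bracket for protection changes: ptep_modify_prot_start() invalidates
 * the PTE lazily and leaves the PGSTE locked with the storage key
 * folded in; ptep_modify_prot_commit() installs the new PTE and drops
 * the lock. Preemption stays disabled between the two calls.
 */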
pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        pgste_t pgste;
        pte_t old;

        preempt_disable();
        pgste = ptep_xchg_start(mm, addr, ptep);
        old = ptep_flush_lazy(mm, addr, ptep);
        if (mm_has_pgste(mm)) {
                pgste = pgste_update_all(old, pgste, mm);
                pgste_set(ptep, pgste);
        }
        return old;
}
EXPORT_SYMBOL(ptep_modify_prot_start);

void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep, pte_t pte)
{
        pgste_t pgste;

        if (mm_has_pgste(mm)) {
                pgste = pgste_get(ptep);
                pgste_set_key(ptep, pgste, pte, mm);
                pgste = pgste_set_pte(ptep, pgste, pte);
                pgste_set_unlock(ptep, pgste);
        } else {
                *ptep = pte;
        }
        preempt_enable();
}
EXPORT_SYMBOL(ptep_modify_prot_commit);
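
/*
 * Segment-table (PMD) counterparts of the PTE flush helpers above.
 * IDTE is used where the machine provides it, with CSP as the
 * fallback on older hardware.
 */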
static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
                                      unsigned long addr, pmd_t *pmdp)
{
        pmd_t old;

        old = *pmdp;
        if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
                return old;
        if (!MACHINE_HAS_IDTE) {
                __pmdp_csp(pmdp);
                return old;
        }
        atomic_inc(&mm->context.flush_count);
        if (MACHINE_HAS_TLB_LC &&
            cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
                __pmdp_idte_local(addr, pmdp);
        else
                __pmdp_idte(addr, pmdp);
        atomic_dec(&mm->context.flush_count);
        return old;
}

static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
                                    unsigned long addr, pmd_t *pmdp)
{
        pmd_t old;

        old = *pmdp;
        if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
                return old;
        atomic_inc(&mm->context.flush_count);
        if (cpumask_equal(&mm->context.cpu_attach_mask,
                          cpumask_of(smp_processor_id()))) {
                pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
                mm->context.flush_mm = 1;
        } else if (MACHINE_HAS_IDTE)
                __pmdp_idte(addr, pmdp);
        else
                __pmdp_csp(pmdp);
        atomic_dec(&mm->context.flush_count);
        return old;
}

pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
                       pmd_t *pmdp, pmd_t new)
{
        pmd_t old;

        preempt_disable();
        old = pmdp_flush_direct(mm, addr, pmdp);
        *pmdp = new;
        preempt_enable();
        return old;
}
EXPORT_SYMBOL(pmdp_xchg_direct);

pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
                     pmd_t *pmdp, pmd_t new)
{
        pmd_t old;

        preempt_disable();
        old = pmdp_flush_lazy(mm, addr, pmdp);
        *pmdp = new;
        preempt_enable();
        return old;
}
EXPORT_SYMBOL(pmdp_xchg_lazy);
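
/*
 * For transparent huge pages a preallocated page table is parked in a
 * list hanging off the PMD so it can be reinstated if the huge mapping
 * is ever split again. Deposit and withdraw work in FIFO order.
 */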
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
{
        struct list_head *lh = (struct list_head *) pgtable;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        if (!pmd_huge_pte(mm, pmdp))
                INIT_LIST_HEAD(lh);
        else
                list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
        pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        struct list_head *lh;
        pgtable_t pgtable;
        pte_t *ptep;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        pgtable = pmd_huge_pte(mm, pmdp);
        lh = (struct list_head *) pgtable;
        if (list_empty(lh))
                pmd_huge_pte(mm, pmdp) = NULL;
        else {
                pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
                list_del(lh);
        }
        ptep = (pte_t *) pgtable;
        pte_val(*ptep) = _PAGE_INVALID;
        ptep++;
        pte_val(*ptep) = _PAGE_INVALID;
        return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
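
/*
 * The rest of this file implements the PGSTE-based interface used for
 * guest handling, e.g. by KVM: storage key emulation, dirty tracking
 * and zapping of unused guest pages.
 */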
#ifdef CONFIG_PGSTE
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry)
{
        pgste_t pgste;

        /* the mm_has_pgste() check is done in set_pte_at() */
        preempt_disable();
        pgste = pgste_get_lock(ptep);
        pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
        pgste_set_key(ptep, pgste, entry, mm);
        pgste = pgste_set_pte(ptep, pgste, entry);
        pgste_set_unlock(ptep, pgste);
        preempt_enable();
}

void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pgste_t pgste;

        preempt_disable();
        pgste = pgste_get_lock(ptep);
        pgste_val(pgste) |= PGSTE_IN_BIT;
        pgste_set_unlock(ptep, pgste);
        preempt_enable();
}

static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
{
        if (!non_swap_entry(entry))
                dec_mm_counter(mm, MM_SWAPENTS);
        else if (is_migration_entry(entry)) {
                struct page *page = migration_entry_to_page(entry);

                dec_mm_counter(mm, mm_counter(page));
        }
        free_swap_and_cache(entry);
}
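
/*
 * Zap a page the guest has marked unused or logically zero: release
 * the backing swap entry and clear the PTE. A reset request instead
 * clears the usage state recorded in the PGSTE.
 */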
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, int reset)
{
        unsigned long pgstev;
        pgste_t pgste;
        pte_t pte;

        /* Zap unused and logically-zero pages */
        preempt_disable();
        pgste = pgste_get_lock(ptep);
        pgstev = pgste_val(pgste);
        pte = *ptep;
        if (!reset && pte_swap(pte) &&
            ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
             (pgstev & _PGSTE_GPS_ZERO))) {
                ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
                pte_clear(mm, addr, ptep);
        }
        if (reset)
                pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
        pgste_set_unlock(ptep, pgste);
        preempt_enable();
}

void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        unsigned long ptev;
        pgste_t pgste;

        /* Clear storage key */
        preempt_disable();
        pgste = pgste_get_lock(ptep);
        pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
                              PGSTE_GR_BIT | PGSTE_GC_BIT);
        ptev = pte_val(*ptep);
        if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
                page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
        pgste_set_unlock(ptep, pgste);
        preempt_enable();
}

/*
 * Test and reset if a guest page is dirty
 */
bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
{
        spinlock_t *ptl;
        pgste_t pgste;
        pte_t *ptep;
        pte_t pte;
        bool dirty;

        ptep = get_locked_pte(mm, addr, &ptl);
        if (unlikely(!ptep))
                return false;

        pgste = pgste_get_lock(ptep);
        dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
        pgste_val(pgste) &= ~PGSTE_UC_BIT;
        pte = *ptep;
        if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
                pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
                __ptep_ipte(addr, ptep);
                if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
                        pte_val(pte) |= _PAGE_PROTECT;
                else
                        pte_val(pte) |= _PAGE_INVALID;
                *ptep = pte;
        }
        pgste_set_unlock(ptep, pgste);

        spin_unlock(ptl);
        return dirty;
}
EXPORT_SYMBOL_GPL(test_and_clear_guest_dirty);
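
/*
 * Set the guest view of the storage key for a page. The key is kept in
 * the PGSTE; for mapped pages the real storage key is updated as well.
 * Any key change marks the page user-dirty.
 */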
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                          unsigned char key, bool nq)
{
        unsigned long keyul;
        spinlock_t *ptl;
        pgste_t old, new;
        pte_t *ptep;

        down_read(&mm->mmap_sem);
        ptep = get_locked_pte(mm, addr, &ptl);
        if (unlikely(!ptep)) {
                up_read(&mm->mmap_sem);
                return -EFAULT;
        }

        new = old = pgste_get_lock(ptep);
        pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
                            PGSTE_ACC_BITS | PGSTE_FP_BIT);
        keyul = (unsigned long) key;
        pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
        pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
        if (!(pte_val(*ptep) & _PAGE_INVALID)) {
                unsigned long address, bits, skey;

                address = pte_val(*ptep) & PAGE_MASK;
                skey = (unsigned long) page_get_storage_key(address);
                bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
                skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
                /* Set storage key ACC and FP */
                page_set_storage_key(address, skey, !nq);
                /* Merge host changed & referenced into pgste */
                pgste_val(new) |= bits << 52;
        }
        /* changing the guest storage key is considered a change of the page */
        if ((pgste_val(new) ^ pgste_val(old)) &
            (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
                pgste_val(new) |= PGSTE_UC_BIT;

        pgste_set_unlock(ptep, new);
        pte_unmap_unlock(ptep, ptl);
        up_read(&mm->mmap_sem);
        return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);
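
/*
 * Read the guest view of the storage key. For unmapped pages the key
 * is reconstructed from the PGSTE alone. Note that the -EFAULT error
 * value is truncated by the unsigned char return type.
 */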
unsigned char get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
{
        unsigned char key;
        spinlock_t *ptl;
        pgste_t pgste;
        pte_t *ptep;

        down_read(&mm->mmap_sem);
        ptep = get_locked_pte(mm, addr, &ptl);
        if (unlikely(!ptep)) {
                up_read(&mm->mmap_sem);
                return -EFAULT;
        }

        pgste = pgste_get_lock(ptep);
        if (pte_val(*ptep) & _PAGE_INVALID) {
                key  = (pgste_val(pgste) & PGSTE_ACC_BITS) >> 56;
                key |= (pgste_val(pgste) & PGSTE_FP_BIT) >> 56;
                key |= (pgste_val(pgste) & PGSTE_GR_BIT) >> 48;
                key |= (pgste_val(pgste) & PGSTE_GC_BIT) >> 48;
        } else {
                key = page_get_storage_key(pte_val(*ptep) & PAGE_MASK);

                /* Reflect guest's logical view, not physical */
                if (pgste_val(pgste) & PGSTE_GR_BIT)
                        key |= _PAGE_REFERENCED;
                if (pgste_val(pgste) & PGSTE_GC_BIT)
                        key |= _PAGE_CHANGED;
        }
        pgste_set_unlock(ptep, pgste);
        pte_unmap_unlock(ptep, ptl);
        up_read(&mm->mmap_sem);
        return key;
}
EXPORT_SYMBOL(get_guest_storage_key);
#endif