
/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <misc/cxl-base.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include "mmu_decl.h"
#include <trace/events/thp.h>
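
/*
 * Set up during MMU initialisation; used to register the process table
 * (base address, page size and table size) with the hardware or
 * hypervisor.
 */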
int (*register_process_table)(unsigned long base, unsigned long page_size,
			      unsigned long tbl_size);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in
 * the page fault path when we don't hit any of the major fault cases,
 * i.e. a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic
 * code will have handled those for us; we additionally deal with missing
 * execute permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		__ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp),
					pmd_pte(entry), address);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
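
/*
 * Test and clear the accessed (young) bit in a huge PMD, returning
 * whether it was set.
 */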
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * set a new huge pmd. We should not be called for updating
 * an existing pmd entry. That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(&mm->page_table_lock);
	WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

static void do_nothing(void *unused)
{
}

/*
 * Serialize against find_current_mm_pte, which does a lock-less lookup
 * in the page tables with local interrupts disabled. For huge pages it
 * casts pmd_t to pte_t. Since the format of pte_t is different from that
 * of pmd_t, we want to prevent a transition from a pmd pointing to a page
 * table to a pmd pointing to a huge page (and back) while interrupts are
 * disabled. The pmd is cleared and possibly replaced with a page table
 * pointer in different code paths, so make sure we wait for any parallel
 * find_current_mm_pte to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepage pmd to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	unsigned long old_pmd;

	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	/*
	 * This ensures that generic code that relies on IRQ disabling
	 * to prevent a parallel THP split works as expected.
	 */
	serialize_against_pte_lookup(vma->vm_mm);
	return __pmd(old_pmd);
}
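
/* OR the protection bits in pgprot into the given pmd value. */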
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}
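
/*
 * Build a huge-page PMD for the given page frame number with the given
 * protection bits.
 */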
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}
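
/* As pfn_pmd(), but takes a struct page rather than a pfn. */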
pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}
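
/*
 * Change the protection bits of a huge PMD, keeping only the bits in
 * _HPAGE_CHG_MASK from the old entry.
 */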
pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page
 * tables. We use it to preload an HPTE into the hash table corresponding
 * to the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	return;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();
}

#ifdef CONFIG_MEMORY_HOTPLUG
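/*
 * Create the linear mapping for a newly hot-plugged memory section,
 * using the radix or hash implementation as appropriate.
 */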
int create_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end);

	return hash__create_section_mapping(start, end);
}
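
/*
 * Tear down the linear mapping for a memory section that is being
 * hot-removed, again dispatching to the radix or hash implementation.
 */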
int remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */