/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
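/*
 * Pre-registration of user memory for VFIO/SPAPR TCE IOMMU use:
 * mm_iommu_get() registers and pins a range of user pages so that
 * real-mode (no-fault) code can later translate userspace addresses
 * to host physical addresses via mm_iommu_ua_to_hpa*();
 * mm_iommu_put() drops the registration.
 */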
struct mm_iommu_table_group_mem_t;

extern int isolate_lru_page(struct page *page);	/* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
		struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
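/*
 * Book3S 64 runs with either the hash or the radix MMU: switching
 * context under radix programs the new mm's PID, while under hash
 * the SLB is reloaded for the new mm.
 */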
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}
extern int hash__alloc_context_id(void);
extern void hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
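/*
 * A single context id covers MAX_EA_BITS_PER_CONTEXT bits of effective
 * address space; addresses beyond that are backed by extra ids stored
 * in mm->context.extended_id[], indexed by the high EA bits as below.
 */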
static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	int context_id;

	int index = ea >> MAX_EA_BITS_PER_CONTEXT;

	context_id = hash__alloc_context_id();
	if (context_id < 0)
		return context_id;

	VM_WARN_ON(mm->context.extended_id[index]);
	mm->context.extended_id[index] = context_id;
	return context_id;
}
static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	int context_id;

	context_id = get_ea_context(&mm->context, ea);
	if (!context_id)
		return true;
	return false;
}
#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);

static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	/* non-Book3S-64 platforms should never get here */
	WARN_ON(1);
	return -ENOMEM;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	return false;
}
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
#else
static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
#endif
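/* Coprocessor (ACOP/ICSWX) state management for the mm */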
extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);
#ifdef CONFIG_PPC_BOOK3S_64
static inline void inc_mm_active_cpus(struct mm_struct *mm)
{
	atomic_inc(&mm->context.active_cpus);
}

static inline void dec_mm_active_cpus(struct mm_struct *mm)
{
	atomic_dec(&mm->context.active_cpus);
}

static inline void mm_context_add_copro(struct mm_struct *mm)
{
	/*
	 * If any copro is in use, increment the active CPU count
	 * in order to force TLB invalidations to be global, so that
	 * they propagate to the Nest MMU.
	 */
	if (atomic_inc_return(&mm->context.copros) == 1)
		inc_mm_active_cpus(mm);
}

static inline void mm_context_remove_copro(struct mm_struct *mm)
{
	int c;

	c = atomic_dec_if_positive(&mm->context.copros);

	/* Detect imbalance between add and remove */
	WARN_ON(c < 0);

	/*
	 * Need to broadcast a global flush of the full mm before
	 * decrementing active_cpus count, as the next TLBI may be
	 * local and the nMMU and/or PSL need to be cleaned up.
	 * Should be rare enough so that it's acceptable.
	 *
	 * Skip on hash, as we don't know how to do the proper flush
	 * for the time being. Invalidations will remain global if
	 * used on hash.
	 */
	if (c == 0 && radix_enabled()) {
		flush_all_mm(mm);
		dec_mm_active_cpus(mm);
	}
}
#else
static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
#endif
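/*
 * switch_mm_irqs_off() does the real work; switch_mm() is a wrapper
 * that may be called with interrupts enabled.
 */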
extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
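/* Advertise to generic code that the arch supplies switch_mm_irqs_off() */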
#define switch_mm_irqs_off switch_mm_irqs_off

#define deactivate_mm(tsk, mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	switch_mm(prev, next, current);
}
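/*
 * Illustrative sketch (not part of this header): exec's mm install
 * path ends up doing roughly
 *
 *	tsk->mm = new_mm;
 *	tsk->active_mm = new_mm;
 *	activate_mm(old_active_mm, new_mm);
 *
 * so the new image runs with its own mappings visible.
 */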
/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

static inline int arch_dup_mmap(struct mm_struct *oldmm,
				struct mm_struct *mm)
{
	return 0;
}

#ifndef CONFIG_PPC_BOOK3S_64
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
#else
extern void arch_exit_mmap(struct mm_struct *mm);
#endif
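/*
 * Forget the VDSO base address if the VDSO range gets unmapped, so
 * that a stale value is not used afterwards, e.g. for the signal
 * trampoline.
 */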
static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
		mm->context.vdso_base = 0;
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}
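/*
 * Memory protection keys: with CONFIG_PPC_MEM_KEYS the access checks
 * honour the VMA's pkey; otherwise the stubs below permit everything.
 */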
#ifdef CONFIG_PPC_MEM_KEYS
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
			       bool execute, bool foreign);
#else /* CONFIG_PPC_MEM_KEYS */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

#define pkey_mm_init(mm)
#define thread_pkey_regs_save(thread)
#define thread_pkey_regs_restore(new_thread, old_thread)
#define thread_pkey_regs_init(thread)

static inline int vma_pkey(struct vm_area_struct *vma)
{
	return 0;
}

static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
{
	return 0x0UL;
}
#endif /* CONFIG_PPC_MEM_KEYS */

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */