/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>
/*
 * Most of the context management is out of line.
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
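
/*
 * SPAPR TCE IOMMU memory preregistration. A userspace region is pinned
 * once up front (mm_iommu_get) and looked up again at TCE map time
 * (mm_iommu_lookup / mm_iommu_ua_to_hpa, with _rm variants for real
 * mode), so DMA setup paths can translate a userspace address to a host
 * physical address without faulting. mm_iommu_mapped_inc/dec track live
 * TCE mappings into a region so it cannot be unregistered while in use.
 * See arch/powerpc/mm/mmu_context_iommu.c for the implementation.
 */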
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern int isolate_lru_page(struct page *page);	/* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
		struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif
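
/*
 * Illustrative lifecycle for the helpers above (a sketch only; locals
 * and error handling are elided, not taken from a real caller):
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	unsigned long hpa;
 *
 *	mm_iommu_get(mm, ua, entries, &mem);	pin the region once
 *	mem = mm_iommu_lookup(mm, ua, size);	find it at TCE map time
 *	mm_iommu_ua_to_hpa(mem, ua, &hpa);	translate ua for the table
 *	mm_iommu_put(mm, mem);			drop the pin on teardown
 */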

extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
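
/*
 * On Book3S 64 the context switch dispatches on the MMU model: radix
 * points the hardware at the next mm's context (PID), while hash
 * switches the SLB over to the next mm.
 */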
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}

extern int hash__alloc_context_id(void);
extern void hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
#endif
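
/*
 * Workaround for a POWER9 radix prefetch issue when a CPU transitions
 * between a KVM guest and the host; compiled out unless HV KVM on radix
 * is possible.
 */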
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
#else
static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
#endif

extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);
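
/*
 * active_cpus approximates how many CPUs (plus, via the copro hooks
 * below, coprocessor/nMMU contexts) may hold translations for an mm;
 * the TLB flush code uses it to decide whether an invalidation can
 * stay local or must be broadcast.
 */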
#ifdef CONFIG_PPC_BOOK3S_64
static inline void inc_mm_active_cpus(struct mm_struct *mm)
{
	atomic_inc(&mm->context.active_cpus);
}

static inline void dec_mm_active_cpus(struct mm_struct *mm)
{
	atomic_dec(&mm->context.active_cpus);
}

static inline void mm_context_add_copro(struct mm_struct *mm)
{
	/*
	 * On hash, this should only be called once over the lifetime of
	 * the context, as we can't decrement the active cpus count
	 * and flush properly for the time being.
	 */
	inc_mm_active_cpus(mm);
}

static inline void mm_context_remove_copro(struct mm_struct *mm)
{
	/*
	 * Need to broadcast a global flush of the full mm before
	 * decrementing active_cpus count, as the next TLBI may be
	 * local and the nMMU and/or PSL need to be cleaned up.
	 * This should be rare enough that the cost is acceptable.
	 *
	 * Skip on hash, as we don't know how to do the proper flush
	 * for the time being. Invalidations will remain global if
	 * used on hash.
	 */
	if (radix_enabled()) {
		flush_all_mm(mm);
		dec_mm_active_cpus(mm);
	}
}
#else
static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
#endif

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
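
/*
 * switch_mm() is the interrupt-safe wrapper around switch_mm_irqs_off();
 * the scheduler calls switch_mm_irqs_off() directly, with interrupts
 * already disabled.
 */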
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
#define switch_mm_irqs_off switch_mm_irqs_off

#define deactivate_mm(tsk, mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	switch_mm(prev, next, current);
}

/*
 * We don't currently use enter_lazy_tlb() for anything, beyond clearing
 * the cached PGD on 64-bit Book3E.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of the current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

static inline int arch_dup_mmap(struct mm_struct *oldmm,
				struct mm_struct *mm)
{
	return 0;
}

#ifndef CONFIG_PPC_BOOK3S_64
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
#else
extern void arch_exit_mmap(struct mm_struct *mm);
#endif
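
/*
 * If something (CRIU, for instance) unmaps the VDSO, forget its base
 * address so we don't later redirect a signal trampoline through a
 * stale mapping.
 */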
static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
		mm->context.vdso_base = 0;
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
					     bool write, bool execute,
					     bool foreign)
{
	/* by default, allow everything */
	return true;
}
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */