mmu_context.h

#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>
/*
 * Most of the context management is out of line
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern int isolate_lru_page(struct page *page);	/* from internal.h */
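/*
 * The mm_iommu_* helpers below manage regions of userspace memory that
 * have been preregistered for DMA through the sPAPR TCE IOMMU (e.g. by
 * VFIO).  Preregistering pins the pages once so that later TCE map/unmap
 * operations can translate a userspace address (ua) to a host physical
 * address (hpa) without faulting; mm_iommu_mapped_inc()/_dec() count how
 * many TCEs currently reference a region so that it is not released
 * while still mapped.
 */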
extern bool mm_iommu_preregistered(void);
extern long mm_iommu_get(unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(mm_context_t *ctx);
extern void mm_iommu_cleanup(mm_context_t *ctx);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
		unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
		unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);
#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
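/*
 * On Book3S 64 the switch method depends on the MMU model selected at
 * boot: under the radix MMU, radix__switch_mmu_context() updates the
 * hardware PID register for the new mm; under the hash MMU the user
 * context lives in the SLB, so switch_slb() flushes and reloads it.
 */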
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}
extern int __init_new_context(void);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
#endif
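/*
 * icswx coprocessor support (CONFIG_PPC_ICSWX): use_cop()/drop_cop()
 * maintain the mm's Available Coprocessor (ACOP) mask, and switch_cop()
 * loads that mask into the ACOP register on a context switch so only
 * coprocessor types the mm has acquired remain usable.
 */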
extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);
/*
 * switch_mm is the entry point called from the architecture-independent
 * code in kernel/sched/core.c
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	/* Mark this context as having been used on the new CPU */
	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next)))
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));

	/* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
	tsk->thread.pgdir = next->pgd;
#endif /* CONFIG_PPC32 */

	/* 64-bit Book3E keeps track of the current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = next->pgd;
#endif

	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

#ifdef CONFIG_PPC_ICSWX
	/* Switch coprocessor context only if prev or next uses a coprocessor */
	if (prev->context.acop || next->context.acop)
		switch_cop(next);
#endif /* CONFIG_PPC_ICSWX */

	/*
	 * We must stop all AltiVec streams before changing the HW
	 * context
	 */
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */

	/*
	 * The actual HW switching method differs between the various
	 * sub-architectures. Out of line for now.
	 */
	switch_mmu_context(prev, next, tsk);
}
#define deactivate_mm(tsk,mm)	do { } while (0)
/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm(prev, next, current);
	local_irq_restore(flags);
}
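/*
 * Note: activate_mm() is reached from the exec path (exec_mmap()) with
 * interrupts still enabled, whereas the scheduler calls switch_mm()
 * with interrupts already off, hence the local_irq_save() above.
 */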
/*
 * enter_lazy_tlb() is called when the scheduler switches to a kernel
 * thread, which borrows the previous task's mm (lazy TLB mode).  We
 * don't currently use it for anything beyond the Book3E bookkeeping
 * below.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of the current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
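/*
 * If the VDSO gets unmapped, forget its base address so that nothing
 * (e.g. the signal trampoline logic) keeps pointing into the hole.
 */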
static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
		mm->context.vdso_base = 0;
}
static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}
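/*
 * The *_access_permitted() hooks exist for architectures with memory
 * protection keys (such as x86 pkeys); powerpc implements no extra
 * checks here, so access is always permitted.
 */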
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
					     bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

static inline bool arch_pte_access_permitted(pte_t pte, bool write)
{
	/* by default, allow everything */
	return true;
}
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */