mmu_context.h 2.1 KB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef __PARISC_MMU_CONTEXT_H
  3. #define __PARISC_MMU_CONTEXT_H
  4. #include <linux/mm.h>
  5. #include <linux/sched.h>
  6. #include <linux/atomic.h>
  7. #include <asm/pgalloc.h>
  8. #include <asm/pgtable.h>
  9. #include <asm-generic/mm_hooks.h>
/*
 * Hook called when a kernel thread starts running with a borrowed
 * address space ("lazy TLB" mode).  PA-RISC needs no per-CPU state
 * update for this transition, so this is intentionally a no-op.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
/* On PA-RISC we actually have enough contexts to justify an allocator
 * for them. prumpf */
  15. extern unsigned long alloc_sid(void);
  16. extern void free_sid(unsigned long);
/*
 * Set up the architecture-specific context for a newly created mm:
 * allocate a space id (sid) for it.
 *
 * At this point the mm must have exactly one user (it was just created
 * by fork/exec), which the BUG_ON() asserts.  Always returns 0;
 * NOTE(review): presumably alloc_sid() cannot fail here (or returns a
 * sentinel sid) — confirm against its definition before relying on that.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_users) != 1);

	mm->context = alloc_sid();
	return 0;
}
/*
 * Tear down the architecture-specific context of a dying mm: return its
 * space id to the allocator and clear the stale value so the context
 * reads as "unallocated" (activate_mm() treats 0 as "no sid yet").
 */
static inline void
destroy_context(struct mm_struct *mm)
{
	free_sid(mm->context);
	mm->context = 0;
}
/*
 * Convert a space id into the corresponding protection-id value that
 * load_context() writes into control register 8.
 *
 * NOTE(review): the shift direction/amount depends on SPACEID_SHIFT
 * (defined elsewhere); the net effect appears to be aligning the sid
 * with the protection-id bit layout — confirm against the PA-RISC
 * architecture manual and the SPACEID_SHIFT definition.
 */
static inline unsigned long __space_to_prot(mm_context_t context)
{
#if SPACEID_SHIFT == 0
	return context << 1;
#else
	return context >> (SPACEID_SHIFT - 1);
#endif
}
/*
 * Install @context on the current CPU: write the space id into space
 * register 3 (mtsp) and the derived protection id into control
 * register 8 (mtctl).  Callers must ensure this cannot race with an
 * interrupt-driven mm switch (see switch_mm()).
 */
static inline void load_context(mm_context_t context)
{
	mtsp(context, 3);
	mtctl(__space_to_prot(context), 8);
}
/*
 * Switch the CPU to @next's address space with interrupts already
 * disabled: point control register 25 at the physical address of the
 * new page directory, then load the new space/protection ids.
 * Skips all hardware writes when the mm does not actually change.
 */
static inline void switch_mm_irqs_off(struct mm_struct *prev,
		struct mm_struct *next, struct task_struct *tsk)
{
	if (prev != next) {
		mtctl(__pa(next->pgd), 25);
		load_context(next->context);
	}
}
  51. static inline void switch_mm(struct mm_struct *prev,
  52. struct mm_struct *next, struct task_struct *tsk)
  53. {
  54. unsigned long flags;
  55. if (prev == next)
  56. return;
  57. local_irq_save(flags);
  58. switch_mm_irqs_off(prev, next, tsk);
  59. local_irq_restore(flags);
  60. }
  61. #define switch_mm_irqs_off switch_mm_irqs_off
  62. #define deactivate_mm(tsk,mm) do { } while (0)
/*
 * Make @next the active address space for the current task.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * Activate_mm is our one chance to allocate a space id
	 * for a new mm created in the exec path. There's also
	 * some lazy tlb stuff, which is currently dead code, but
	 * we only allocate a space id if one hasn't been allocated
	 * already, so we should be OK.
	 */
	BUG_ON(next == &init_mm); /* Should never happen */

	/* context == 0 means "no sid allocated yet" (see destroy_context). */
	if (next->context == 0)
	    next->context = alloc_sid();

	switch_mm(prev,next,current);
}
  77. #endif