mmu_context.h

/*
 * Switch an MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2013 Tensilica Inc.
 */

#ifndef _XTENSA_MMU_CONTEXT_H
#define _XTENSA_MMU_CONTEXT_H

#ifndef CONFIG_MMU
#include <asm/nommu_context.h>
#else

#include <linux/stringify.h>
#include <linux/sched.h>

#include <asm/vectors.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>
#include <asm-generic/percpu.h>

#if (XCHAL_HAVE_TLBS != 1)
# error "Linux must have an MMU!"
#endif

DECLARE_PER_CPU(unsigned long, asid_cache);
#define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)

/*
 * NO_CONTEXT is the invalid ASID value that we don't ever assign to
 * any user or kernel context.  We use the reserved values in the
 * ASID_INSERT macro below.
 *
 * 0 invalid
 * 1 kernel
 * 2 reserved
 * 3 reserved
 * 4...255 available
 */

#define NO_CONTEXT      0
#define ASID_USER_FIRST 4
#define ASID_MASK       ((1 << XCHAL_MMU_ASID_BITS) - 1)
#define ASID_INSERT(x)  (0x03020001 | (((x) & ASID_MASK) << 8))
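
/*
 * A worked example of the macro above (assuming the usual Xtensa RASID
 * layout, where byte n of the register holds the ASID for ring n):
 * ASID_INSERT(5) == 0x03020501, i.e. ring 0 runs with the kernel
 * ASID (1), the user ASID 5 goes into the ring-1 byte, and rings 2
 * and 3 keep the reserved values 2 and 3.
 */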

#ifdef CONFIG_MMU
void init_mmu(void);
#else
static inline void init_mmu(void) { }
#endif

/*
 * Program the RASID special register; the isync makes sure the new
 * ASIDs are in effect before any subsequent memory access.
 */
static inline void set_rasid_register (unsigned long val)
{
        __asm__ __volatile__ (" wsr %0, rasid\n\t"
                              " isync\n" : : "a" (val));
}

static inline unsigned long get_rasid_register (void)
{
        unsigned long tmp;
        __asm__ __volatile__ (" rsr %0, rasid\n\t" : "=a" (tmp));
        return tmp;
}

static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
        unsigned long asid = cpu_asid_cache(cpu);

        if ((++asid & ASID_MASK) == 0) {
                /*
                 * Start new asid cycle; continue counting with next
                 * incarnation bits; skipping over 0, 1, 2, 3.
                 */
                local_flush_tlb_all();
                asid += ASID_USER_FIRST;
        }
        cpu_asid_cache(cpu) = asid;
        mm->context.asid[cpu] = asid;
        mm->context.cpu = cpu;
}
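
/*
 * Sketch of one wrap-around, assuming 8-bit ASIDs: the per-cpu counter
 * runs 0x104, 0x105, ..., 0x1ff; the increment to 0x200 makes the low
 * byte zero, so the whole TLB is flushed and the counter skips to
 * 0x204, the first user ASID of the next incarnation.  The bits above
 * ASID_MASK thus act as a generation number, which is what
 * get_mmu_context() below compares.
 */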

static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
        /*
         * Check if our ASID is of an older version and thus invalid.
         */

        if (mm) {
                unsigned long asid = mm->context.asid[cpu];

                if (asid == NO_CONTEXT ||
                    ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))
                        get_new_mmu_context(mm, cpu);
        }
}
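
/*
 * The XOR test compares only the generation bits.  Continuing the
 * example above: an mm that last got ASID 0x1f0 while the per-cpu
 * cache has advanced to 0x204 gives (0x1f0 ^ 0x204) & ~0xff == 0x300,
 * which is non-zero, so the mm is handed a fresh ASID.
 */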

static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
{
        get_mmu_context(mm, cpu);
        set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
        invalidate_page_directory();
}

/*
 * Initialize the context related info for a new mm_struct
 * instance.  Valid cpu values are 0..(NR_CPUS-1), so initializing
 * to -1 says the process has never run on any core.
 */

static inline int init_new_context(struct task_struct *tsk,
                struct mm_struct *mm)
{
        int cpu;
        for_each_possible_cpu(cpu) {
                mm->context.asid[cpu] = NO_CONTEXT;
        }
        mm->context.cpu = -1;
        return 0;
}
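
/*
 * Note that context.asid[] is tracked per cpu: each core runs its own
 * generation counter, so the same mm can hold a current ASID on one
 * core and a stale one (or NO_CONTEXT) on another at the same time.
 */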

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned int cpu = smp_processor_id();
        int migrated = next->context.cpu != cpu;

        /* Flush the icache if we migrated to a new core. */
        if (migrated) {
                __invalidate_icache_all();
                next->context.cpu = cpu;
        }
        if (migrated || prev != next)
                activate_context(next, cpu);
}
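
/*
 * If the incoming mm is already live on this cpu (prev == next and no
 * migration), its ASID is still programmed in RASID and activation is
 * skipped entirely.
 */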
#define activate_mm(prev, next) switch_mm((prev), (next), NULL)
#define deactivate_mm(tsk, mm)  do { } while (0)

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
        invalidate_page_directory();
}

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
        /* Nothing to do. */
}

#endif /* CONFIG_MMU */
#endif /* _XTENSA_MMU_CONTEXT_H */