  1. /*
  2. * Switch an MMU context.
  3. *
  4. * This file is subject to the terms and conditions of the GNU General Public
  5. * License. See the file "COPYING" in the main directory of this archive
  6. * for more details.
  7. *
  8. * Copyright (C) 2001 - 2013 Tensilica Inc.
  9. */
  10. #ifndef _XTENSA_MMU_CONTEXT_H
  11. #define _XTENSA_MMU_CONTEXT_H
  12. #ifndef CONFIG_MMU
  13. #include <asm/nommu_context.h>
  14. #else
  15. #include <linux/stringify.h>
  16. #include <linux/sched.h>
  17. #include <linux/mm_types.h>
  18. #include <asm/vectors.h>
  19. #include <asm/pgtable.h>
  20. #include <asm/cacheflush.h>
  21. #include <asm/tlbflush.h>
  22. #include <asm-generic/mm_hooks.h>
  23. #include <asm-generic/percpu.h>
  24. #if (XCHAL_HAVE_TLBS != 1)
  25. # error "Linux must have an MMU!"
  26. #endif
/*
 * Per-CPU ASID allocation counter: holds the most recently assigned
 * value, with the low ASID_MASK bits being the ASID number and the
 * upper bits acting as a generation ("incarnation") count.
 */
DECLARE_PER_CPU(unsigned long, asid_cache);
#define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)

/*
 * NO_CONTEXT is the invalid ASID value that we don't ever assign to
 * any user or kernel context. We use the reserved values in the
 * ASID_INSERT macro below.
 *
 * 0 invalid
 * 1 kernel
 * 2 reserved
 * 3 reserved
 * 4...255 available
 */

#define NO_CONTEXT	0
#define ASID_USER_FIRST	4
#define ASID_MASK	((1 << XCHAL_MMU_ASID_BITS) - 1)
/*
 * Build a RASID register image, one ASID byte per ring:
 * ring 0 = kernel ASID 1, rings 1 = user ASID (x), rings 2/3 = the
 * reserved ASIDs 2 and 3 (hence 0x03020001 with x inserted at byte 1).
 */
#define ASID_INSERT(x)	(0x03020001 | (((x) & ASID_MASK) << 8))

void init_mmu(void);
/*
 * Write @val (a full four-ring ASID image, see ASID_INSERT) into the
 * RASID special register.  The isync ensures the new ASIDs take
 * effect before any subsequent translated memory access.
 */
static inline void set_rasid_register (unsigned long val)
{
	__asm__ __volatile__ (" wsr %0, rasid\n\t"
			      " isync\n" : : "a" (val));
}
/* Read the current value of the RASID special register. */
static inline unsigned long get_rasid_register (void)
{
	unsigned long tmp;
	__asm__ __volatile__ (" rsr %0, rasid\n\t" : "=a" (tmp));
	return tmp;
}
/*
 * Assign a fresh ASID to @mm for @cpu by bumping the per-CPU counter.
 * When the low ASID_MASK bits wrap to zero the ASID space for this
 * generation is exhausted: flush the whole local TLB (so stale
 * entries tagged with recycled ASIDs cannot survive) and resume
 * numbering at ASID_USER_FIRST, skipping the reserved values 0-3.
 * The untouched upper counter bits serve as the generation number
 * that get_mmu_context() compares against.
 */
static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long asid = cpu_asid_cache(cpu);
	if ((++asid & ASID_MASK) == 0) {
		/*
		 * Start new asid cycle; continue counting with next
		 * incarnation bits; skipping over 0, 1, 2, 3.
		 */
		local_flush_tlb_all();
		asid += ASID_USER_FIRST;
	}
	/* Publish the new ASID only after any required flush above. */
	cpu_asid_cache(cpu) = asid;
	mm->context.asid[cpu] = asid;
	mm->context.cpu = cpu;
}
  71. static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
  72. {
  73. /*
  74. * Check if our ASID is of an older version and thus invalid.
  75. */
  76. if (mm) {
  77. unsigned long asid = mm->context.asid[cpu];
  78. if (asid == NO_CONTEXT ||
  79. ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))
  80. get_new_mmu_context(mm, cpu);
  81. }
  82. }
/*
 * Make @mm the live address space on @cpu: validate (or allocate) its
 * ASID, program that ASID into RASID, and call
 * invalidate_page_directory() so translations for the new context are
 * re-fetched.  The call order is the contract here.
 */
static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
{
	get_mmu_context(mm, cpu);
	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
	invalidate_page_directory();
}
  89. /*
  90. * Initialize the context related info for a new mm_struct
  91. * instance. Valid cpu values are 0..(NR_CPUS-1), so initializing
  92. * to -1 says the process has never run on any core.
  93. */
  94. static inline int init_new_context(struct task_struct *tsk,
  95. struct mm_struct *mm)
  96. {
  97. int cpu;
  98. for_each_possible_cpu(cpu) {
  99. mm->context.asid[cpu] = NO_CONTEXT;
  100. }
  101. mm->context.cpu = -1;
  102. return 0;
  103. }
  104. static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
  105. struct task_struct *tsk)
  106. {
  107. unsigned int cpu = smp_processor_id();
  108. int migrated = next->context.cpu != cpu;
  109. /* Flush the icache if we migrated to a new core. */
  110. if (migrated) {
  111. __invalidate_icache_all();
  112. next->context.cpu = cpu;
  113. }
  114. if (migrated || prev != next)
  115. activate_context(next, cpu);
  116. }
/* Activating an mm is just a switch with no originating task. */
#define activate_mm(prev, next)	switch_mm((prev), (next), NULL)
/* No per-task teardown is needed when an mm is deactivated. */
#define deactivate_mm(tsk, mm)	do { } while (0)
/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 *
 * NOTE(review): only invalidates the page directory; per-CPU
 * mm->context.asid[] slots are left as-is — presumably stale ASIDs
 * are handled by the generation check in get_mmu_context(). Confirm.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	invalidate_page_directory();
}
/* Lazy-TLB mode needs no special handling on this architecture. */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do. */
}
  131. #endif /* CONFIG_MMU */
  132. #endif /* _XTENSA_MMU_CONTEXT_H */