mmu_context.h

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_MMU_CONTEXT_H
#define __ASM_CSKY_MMU_CONTEXT_H

#include <asm-generic/mm_hooks.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <linux/errno.h>
#include <linux/sched.h>

#include <abi/ckmmu.h>
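
/*
 * Convert the kernel-virtual pgd pointer into the form the hardware
 * expects before handing it to setup_pgd(): clear bit 31 to strip the
 * kernel segment offset, rebase by PHYS_OFFSET to get a physical
 * address, and set bit 0 (presumably a hardware valid/enable flag).
 * The bool selects whether the kernel (1) or user (0) page table base
 * is being installed.
 */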
static inline void tlbmiss_handler_setup_pgd(unsigned long pgd, bool kernel)
{
	pgd &= ~(1<<31);
	pgd += PHYS_OFFSET;
	pgd |= 1;
	setup_pgd(pgd, kernel);
}

#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
	tlbmiss_handler_setup_pgd((unsigned long)pgd, 0)

#define TLBMISS_HANDLER_SETUP_PGD_KERNEL(pgd) \
	tlbmiss_handler_setup_pgd((unsigned long)pgd, 1)
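
/*
 * Inverse of the conversion above: recover the kernel-virtual pgd
 * pointer from the physical value (plus flag bit) the hardware holds.
 */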
static inline unsigned long tlb_get_pgd(void)
{
	return ((get_pgd()|(1<<31)) - PHYS_OFFSET) & ~1;
}

#define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & ASID_MASK)
#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)

#define ASID_FIRST_VERSION	(1 << CONFIG_CPU_ASID_BITS)
#define ASID_INC		0x1
#define ASID_MASK		(ASID_FIRST_VERSION - 1)
#define ASID_VERSION_MASK	~ASID_MASK
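
/*
 * Layout sketch (values depend on CONFIG_CPU_ASID_BITS; with 8-bit
 * hardware ASIDs, for example): ASID_FIRST_VERSION = 0x100 and
 * ASID_MASK = 0xff, so bits [7:0] of cpu_context() hold the hardware
 * ASID written to entryhi and bits [31:8] hold the software version.
 */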

#define destroy_context(mm)		do {} while (0)
#define enter_lazy_tlb(mm, tsk)		do {} while (0)
#define deactivate_mm(tsk, mm)		do {} while (0)
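
/*
 * The three hooks above can be no-ops here: an mm's context is just
 * the per-CPU ASID values, which need no teardown, and there is no
 * lazy-TLB state to track.
 */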

/*
 * All upper bits unused by the hardware are treated as a software
 * ASID extension: a version number that is bumped each time the
 * hardware ASID space wraps around.
 */
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += ASID_INC;
	if (!(asid & ASID_MASK)) {
		flush_tlb_all();	/* start new asid cycle */
		if (!asid)		/* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}
	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	for_each_online_cpu(i)
		cpu_context(i, mm) = 0;

	return 0;
}
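
/*
 * Run on every context switch.  If next's ASID on this CPU was
 * allocated under an older version (i.e. a flush_tlb_all() has
 * happened since), grab a fresh one; otherwise the TLB entries tagged
 * with it are still valid and no flush is needed.
 */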
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;

	local_irq_save(flags);

	/* Check if our ASID is of an older version and thus invalid */
	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
		get_new_mmu_context(next, cpu);

	write_mmu_entryhi(cpu_asid(cpu, next));
	TLBMISS_HANDLER_SETUP_PGD(next->pgd);

	/*
	 * Mark current->active_mm as not "active" anymore.
	 * We don't want to mislead possible IPI tlb flush routines.
	 */
	cpumask_clear_cpu(cpu, mm_cpumask(prev));
	cpumask_set_cpu(cpu, mm_cpumask(next));

	local_irq_restore(flags);
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;
	int cpu = smp_processor_id();

	local_irq_save(flags);

	/* Unconditionally get a new ASID. */
	get_new_mmu_context(next, cpu);

	write_mmu_entryhi(cpu_asid(cpu, next));
	TLBMISS_HANDLER_SETUP_PGD(next->pgd);

	/* mark mmu ownership change */
	cpumask_clear_cpu(cpu, mm_cpumask(prev));
	cpumask_set_cpu(cpu, mm_cpumask(next));

	local_irq_restore(flags);
}

/*
 * If mm is currently active_mm, we can't really drop it. Instead,
 * we will get a new one for it.
 */
static inline void
drop_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
		get_new_mmu_context(mm, cpu);
		write_mmu_entryhi(cpu_asid(cpu, mm));
	} else {
		/* will get a new context next time */
		cpu_context(cpu, mm) = 0;
	}

	local_irq_restore(flags);
}

#endif /* __ASM_CSKY_MMU_CONTEXT_H */