/*
 * Common implementation of switch_mm_irqs_off
 *
 * Copyright IBM Corp. 2017
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
  16. #if defined(CONFIG_PPC32)
  17. static inline void switch_mm_pgdir(struct task_struct *tsk,
  18. struct mm_struct *mm)
  19. {
  20. /* 32-bit keeps track of the current PGDIR in the thread struct */
  21. tsk->thread.pgdir = mm->pgd;
  22. }
  23. #elif defined(CONFIG_PPC_BOOK3E_64)
  24. static inline void switch_mm_pgdir(struct task_struct *tsk,
  25. struct mm_struct *mm)
  26. {
  27. /* 64-bit Book3E keeps track of current PGD in the PACA */
  28. get_paca()->pgd = mm->pgd;
  29. }
  30. #else
  31. static inline void switch_mm_pgdir(struct task_struct *tsk,
  32. struct mm_struct *mm) { }
  33. #endif
  34. void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
  35. struct task_struct *tsk)
  36. {
  37. bool new_on_cpu = false;
  38. /* Mark this context has been used on the new CPU */
  39. if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
  40. cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
  41. inc_mm_active_cpus(next);
  42. /*
  43. * This full barrier orders the store to the cpumask above vs
  44. * a subsequent operation which allows this CPU to begin loading
  45. * translations for next.
  46. *
  47. * When using the radix MMU that operation is the load of the
  48. * MMU context id, which is then moved to SPRN_PID.
  49. *
  50. * For the hash MMU it is either the first load from slb_cache
  51. * in switch_slb(), and/or the store of paca->mm_ctx_id in
  52. * copy_mm_to_paca().
  53. *
  54. * On the read side the barrier is in pte_xchg(), which orders
  55. * the store to the PTE vs the load of mm_cpumask.
  56. *
  57. * This full barrier is needed by membarrier when switching
  58. * between processes after store to rq->curr, before user-space
  59. * memory accesses.
  60. */
  61. smp_mb();
  62. new_on_cpu = true;
  63. }
  64. /* Some subarchs need to track the PGD elsewhere */
  65. switch_mm_pgdir(tsk, next);
  66. /* Nothing else to do if we aren't actually switching */
  67. if (prev == next)
  68. return;
  69. /*
  70. * We must stop all altivec streams before changing the HW
  71. * context
  72. */
  73. if (cpu_has_feature(CPU_FTR_ALTIVEC))
  74. asm volatile ("dssall");
  75. if (new_on_cpu)
  76. radix_kvm_prefetch_workaround(next);
  77. else
  78. membarrier_arch_switch_mm(prev, next, tsk);
  79. /*
  80. * The actual HW switching method differs between the various
  81. * sub architectures. Out of line for now
  82. */
  83. switch_mmu_context(prev, next, tsk);
  84. }