  1. /* mmu-context.c: MMU context allocation and management
  2. *
  3. * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #include <linux/sched.h>
  12. #include <linux/sched/mm.h>
  13. #include <linux/sched/task.h>
  14. #include <linux/mm.h>
  15. #include <asm/tlbflush.h>
  16. #define NR_CXN 4096
  17. static unsigned long cxn_bitmap[NR_CXN / (sizeof(unsigned long) * 8)];
  18. static LIST_HEAD(cxn_owners_lru);
  19. static DEFINE_SPINLOCK(cxn_owners_lock);
  20. int __nongpreldata cxn_pinned = -1;
  21. /*****************************************************************************/
  22. /*
  23. * initialise a new context
  24. */
  25. int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
  26. {
  27. memset(&mm->context, 0, sizeof(mm->context));
  28. INIT_LIST_HEAD(&mm->context.id_link);
  29. mm->context.itlb_cached_pge = 0xffffffffUL;
  30. mm->context.dtlb_cached_pge = 0xffffffffUL;
  31. return 0;
  32. } /* end init_new_context() */
/*****************************************************************************/
/*
 * make sure a kernel MMU context has a CPU context number
 * - call with cxn_owners_lock held
 * - returns the context's CXN; if it didn't have one, allocate a free number
 *   from cxn_bitmap, or failing that, steal the least-recently-used
 *   stealable one
 */
static unsigned get_cxn(mm_context_t *ctx)
{
	struct list_head *_p;
	mm_context_t *p;
	unsigned cxn;

	if (!list_empty(&ctx->id_link)) {
		/* already owns a CXN - just move to the hot end of the LRU
		 * so it won't be an early steal victim */
		list_move_tail(&ctx->id_link, &cxn_owners_lru);
	}
	else {
		/* find the first unallocated context number
		 * - 0 is reserved for the kernel
		 */
		cxn = find_next_zero_bit(cxn_bitmap, NR_CXN, 1);
		if (cxn < NR_CXN) {
			set_bit(cxn, cxn_bitmap);
		}
		else {
			/* none remaining - need to steal someone else's cxn
			 * - scan from the cold end of the LRU, skipping
			 *   contexts that are currently loaded on a CPU
			 *   (id_busy) and the pinned one */
			p = NULL;
			list_for_each(_p, &cxn_owners_lru) {
				p = list_entry(_p, mm_context_t, id_link);
				if (!p->id_busy && p->id != cxn_pinned)
					break;
			}

			/* with NR_CXN numbers and at most one pinned, a
			 * victim must exist; if not, internal state is
			 * corrupt */
			BUG_ON(_p == &cxn_owners_lru);

			/* take the victim's number; flush its (now stale)
			 * TLB entries before the number is reused */
			cxn = p->id;
			p->id = 0;
			list_del_init(&p->id_link);
			__flush_tlb_mm(cxn);
		}

		ctx->id = cxn;
		list_add_tail(&ctx->id_link, &cxn_owners_lru);
	}

	return ctx->id;
} /* end get_cxn() */
/*****************************************************************************/
/*
 * restore the current TLB miss handler mapped page tables into the MMU
 * context and set up a mapping for the page directory
 * - saves the outgoing context's TLB-handler state from the FRV registers
 *   (scr0/scr1 hold the cached ITLB/DTLB PGE values, dampr4/dampr5 the
 *   ITLB/DTLB PTD mappings), then loads the incoming context's copies
 * - runs with the outgoing context still current; callers handle preemption
 */
void change_mm_context(mm_context_t *old, mm_context_t *ctx, pgd_t *pgd)
{
	unsigned long _pgd;

	/* physical address of the new page directory, for ttbr/dampr3 */
	_pgd = virt_to_phys(pgd);

	/* save the state of the outgoing MMU context */
	old->id_busy = 0;	/* its CXN may now be stolen by get_cxn() */

	asm volatile("movsg scr0,%0" : "=r"(old->itlb_cached_pge));
	asm volatile("movsg dampr4,%0" : "=r"(old->itlb_ptd_mapping));
	asm volatile("movsg scr1,%0" : "=r"(old->dtlb_cached_pge));
	asm volatile("movsg dampr5,%0" : "=r"(old->dtlb_ptd_mapping));

	/* select an MMU context number */
	spin_lock(&cxn_owners_lock);
	get_cxn(ctx);
	ctx->id_busy = 1;	/* now in use on this CPU - not stealable */
	spin_unlock(&cxn_owners_lock);

	asm volatile("movgs %0,cxnr" : : "r"(ctx->id));

	/* restore the state of the incoming MMU context */
	asm volatile("movgs %0,scr0" : : "r"(ctx->itlb_cached_pge));
	asm volatile("movgs %0,dampr4" : : "r"(ctx->itlb_ptd_mapping));
	asm volatile("movgs %0,scr1" : : "r"(ctx->dtlb_cached_pge));
	asm volatile("movgs %0,dampr5" : : "r"(ctx->dtlb_ptd_mapping));

	/* map the PGD into uncached virtual memory */
	asm volatile("movgs %0,ttbr" : : "r"(_pgd));
	asm volatile("movgs %0,dampr3"
		     :: "r"(_pgd | xAMPRx_L | xAMPRx_M | xAMPRx_SS_16Kb |
			    xAMPRx_S | xAMPRx_C | xAMPRx_V));

} /* end change_mm_context() */
  105. /*****************************************************************************/
  106. /*
  107. * finished with an MMU context number
  108. */
  109. void destroy_context(struct mm_struct *mm)
  110. {
  111. mm_context_t *ctx = &mm->context;
  112. spin_lock(&cxn_owners_lock);
  113. if (!list_empty(&ctx->id_link)) {
  114. if (ctx->id == cxn_pinned)
  115. cxn_pinned = -1;
  116. list_del_init(&ctx->id_link);
  117. clear_bit(ctx->id, cxn_bitmap);
  118. __flush_tlb_mm(ctx->id);
  119. ctx->id = 0;
  120. }
  121. spin_unlock(&cxn_owners_lock);
  122. } /* end destroy_context() */
  123. /*****************************************************************************/
  124. /*
  125. * display the MMU context currently a process is currently using
  126. */
  127. #ifdef CONFIG_PROC_FS
  128. char *proc_pid_status_frv_cxnr(struct mm_struct *mm, char *buffer)
  129. {
  130. spin_lock(&cxn_owners_lock);
  131. buffer += sprintf(buffer, "CXNR: %u\n", mm->context.id);
  132. spin_unlock(&cxn_owners_lock);
  133. return buffer;
  134. } /* end proc_pid_status_frv_cxnr() */
  135. #endif
  136. /*****************************************************************************/
  137. /*
  138. * (un)pin a process's mm_struct's MMU context ID
  139. */
  140. int cxn_pin_by_pid(pid_t pid)
  141. {
  142. struct task_struct *tsk;
  143. struct mm_struct *mm = NULL;
  144. int ret;
  145. /* unpin if pid is zero */
  146. if (pid == 0) {
  147. cxn_pinned = -1;
  148. return 0;
  149. }
  150. ret = -ESRCH;
  151. /* get a handle on the mm_struct */
  152. read_lock(&tasklist_lock);
  153. tsk = find_task_by_vpid(pid);
  154. if (tsk) {
  155. ret = -EINVAL;
  156. task_lock(tsk);
  157. if (tsk->mm) {
  158. mm = tsk->mm;
  159. mmget(mm);
  160. ret = 0;
  161. }
  162. task_unlock(tsk);
  163. }
  164. read_unlock(&tasklist_lock);
  165. if (ret < 0)
  166. return ret;
  167. /* make sure it has a CXN and pin it */
  168. spin_lock(&cxn_owners_lock);
  169. cxn_pinned = get_cxn(&mm->context);
  170. spin_unlock(&cxn_owners_lock);
  171. mmput(mm);
  172. return 0;
  173. } /* end cxn_pin_by_pid() */