vmacache.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Davidlohr Bueso.
 */
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/mm.h>
#include <linux/vmacache.h>

/*
 * Flush vma caches for threads that share a given mm.
 *
 * The operation is safe because the caller holds the mmap_sem
 * exclusively and other threads accessing the vma cache will
 * have mmap_sem held at least for read, so no extra locking
 * is required to maintain the vma cache.
 */
void vmacache_flush_all(struct mm_struct *mm)
{
	struct task_struct *g, *p;

	count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);

	/*
	 * Single threaded tasks need not iterate the entire
	 * list of processes. We can also avoid the flushing
	 * since the mm's seqnum was increased and we don't
	 * have to worry about other threads' seqnums. Current's
	 * flush will occur upon the next lookup.
	 */
	if (atomic_read(&mm->mm_users) == 1)
		return;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		/*
		 * Only flush the vmacache pointers as the
		 * mm seqnum is already set and curr's will
		 * be set upon invalidation when the next
		 * lookup is done.
		 */
		if (mm == p->mm)
			vmacache_flush(p);
	}
	rcu_read_unlock();
}
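
/*
 * For context, a sketch of the expected caller, paraphrased from
 * include/linux/vmacache.h of the same era (assumed; not part of this
 * file): every vma-layout change bumps the mm-wide sequence number,
 * and only on the rare 32-bit overflow does the expensive full flush
 * above actually run.
 *
 *	static inline void vmacache_invalidate(struct mm_struct *mm)
 *	{
 *		mm->vmacache_seqnum++;
 *
 *		// deal with overflows
 *		if (unlikely(mm->vmacache_seqnum == 0))
 *			vmacache_flush_all(mm);
 *	}
 */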

/*
 * This task may be accessing a foreign mm via (for example)
 * get_user_pages()->find_vma(). The vmacache is task-local and this
 * task's vmacache pertains to a different mm (ie, its own). There is
 * nothing we can do here.
 *
 * Also handle the case where a kernel thread has adopted this mm via use_mm().
 * That kernel thread's vmacache is not applicable to this mm.
 */
static inline bool vmacache_valid_mm(struct mm_struct *mm)
{
	return current->mm == mm && !(current->flags & PF_KTHREAD);
}
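
/*
 * Illustrative scenario (a sketch, not part of this file): a task that
 * pins pages in another process's address space, e.g. via
 * get_user_pages_remote(), eventually reaches find_vma(foreign_mm, addr)
 * while current->mm still points at its own mm. vmacache_valid_mm()
 * then returns false, so the lookup falls through to the rbtree walk
 * without reading or polluting current's cache. The PF_KTHREAD test
 * likewise covers kernel threads that borrowed an mm through use_mm().
 */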

void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
{
	if (vmacache_valid_mm(newvma->vm_mm))
		current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma;
}
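
/*
 * For reference, a sketch of the hash as the corresponding headers
 * define it in this era (assumed; not part of this file): the cache is
 * a tiny direct-mapped array indexed by the low bits of the page number.
 *
 *	#define VMACACHE_BITS	2
 *	#define VMACACHE_SIZE	(1U << VMACACHE_BITS)
 *	#define VMACACHE_MASK	(VMACACHE_SIZE - 1)
 *	#define VMACACHE_HASH(addr) ((addr >> PAGE_SHIFT) & VMACACHE_MASK)
 *
 * Addresses one page apart thus land in different slots, so sequential
 * access patterns can keep several hot vmas cached at once.
 */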

static bool vmacache_valid(struct mm_struct *mm)
{
	struct task_struct *curr;

	if (!vmacache_valid_mm(mm))
		return false;

	curr = current;
	if (mm->vmacache_seqnum != curr->vmacache.seqnum) {
		/*
		 * First attempt will always be invalid, initialize
		 * the new cache for this task here.
		 */
		curr->vmacache.seqnum = mm->vmacache_seqnum;
		vmacache_flush(curr);
		return false;
	}
	return true;
}
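
/*
 * vmacache_flush() itself is trivial; a sketch of the helper as
 * include/linux/vmacache.h defines it (assumed; shown for context only):
 *
 *	static inline void vmacache_flush(struct task_struct *tsk)
 *	{
 *		memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
 *	}
 */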

struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
{
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[i];

		if (!vma)
			continue;
		if (WARN_ON_ONCE(vma->vm_mm != mm))
			break;
		if (vma->vm_start <= addr && vma->vm_end > addr) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
	}

	return NULL;
}
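
/*
 * Typical usage, paraphrased from find_vma() in mm/mmap.c of the same
 * era (assumed; not part of this file): the cache is consulted before
 * the rbtree walk and refilled on a miss.
 *
 *	struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 *	{
 *		struct vm_area_struct *vma;
 *
 *		vma = vmacache_find(mm, addr);
 *		if (likely(vma))
 *			return vma;
 *
 *		vma = ... walk mm->mm_rb ...;
 *		if (vma)
 *			vmacache_update(addr, vma);
 *		return vma;
 *	}
 */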

#ifndef CONFIG_MMU
struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[i];

		if (vma && vma->vm_start == start && vma->vm_end == end) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
	}

	return NULL;
}
#endif
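
/*
 * Context for the !CONFIG_MMU variant (an assumption about the nommu
 * callers, not stated in this file): mm/nommu.c looks mappings up by
 * their exact [start, end) bounds rather than by a contained address,
 * hence the exact-match comparison above instead of the containment
 * check used by vmacache_find().
 */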