vmacache.c

/*
 * Copyright (C) 2014 Davidlohr Bueso.
 */
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
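
/*
 * Editorial note (not in the original file): this implements a small
 * per-task cache of recently used VMAs. Each task keeps VMACACHE_SIZE
 * vm_area_struct pointers plus a sequence number used to detect when the
 * cached pointers have gone stale; lookups consult the cache before
 * falling back to the mm's VMA tree.
 */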

/*
 * Flush vma caches for threads that share a given mm.
 *
 * The operation is safe because the caller holds the mmap_sem
 * exclusively and other threads accessing the vma cache will
 * have mmap_sem held at least for read, so no extra locking
 * is required to maintain the vma cache.
 */
void vmacache_flush_all(struct mm_struct *mm)
{
	struct task_struct *g, *p;

	count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);

	/*
	 * Single threaded tasks need not iterate the entire list of
	 * processes. The flush can be avoided as well, since the mm's
	 * seqnum was already increased and there are no other threads
	 * whose seqnum we need to worry about. Current's flush will
	 * occur upon the next lookup.
	 */
	if (atomic_read(&mm->mm_users) == 1)
		return;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		/*
		 * Only flush the vmacache pointers as the
		 * mm seqnum is already set and curr's will
		 * be set upon invalidation when the next
		 * lookup is done.
		 */
		if (mm == p->mm)
			vmacache_flush(p);
	}
	rcu_read_unlock();
}

/*
 * This task may be accessing a foreign mm via (for example)
 * get_user_pages()->find_vma(). The vmacache is task-local and this
 * task's vmacache pertains to a different mm (i.e., its own). There is
 * nothing we can do here.
 *
 * Also handle the case where a kernel thread has adopted this mm via use_mm().
 * That kernel thread's vmacache is not applicable to this mm.
 */
static inline bool vmacache_valid_mm(struct mm_struct *mm)
{
	return current->mm == mm && !(current->flags & PF_KTHREAD);
}
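
/*
 * Editorial note: remember @newvma as the most recent mapping for @addr in
 * the current task's cache. The slot is chosen by VMACACHE_HASH(addr),
 * a simple hash of the address's page number, so neighbouring pages tend
 * to land in the same slot. No locking is needed because the cache belongs
 * to the current task only.
 */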
void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
{
	if (vmacache_valid_mm(newvma->vm_mm))
		current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma;
}
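
/*
 * Editorial note: check whether the current task's cache can be trusted
 * for @mm. The mm must be the task's own, and the task's cached seqnum
 * must match the mm's. A mismatch means the mm's VMAs have changed since
 * the cache was last populated, so the stale entries are wiped and the
 * lookup is treated as a miss.
 */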
static bool vmacache_valid(struct mm_struct *mm)
{
	struct task_struct *curr;

	if (!vmacache_valid_mm(mm))
		return false;

	curr = current;
	if (mm->vmacache_seqnum != curr->vmacache.seqnum) {
		/*
		 * First attempt will always be invalid, initialize
		 * the new cache for this task here.
		 */
		curr->vmacache.seqnum = mm->vmacache_seqnum;
		vmacache_flush(curr);
		return false;
	}
	return true;
}
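
/*
 * Editorial note: look up the VMA covering @addr in the current task's
 * cache, returning NULL on a miss. The typical caller pattern (find_vma()
 * in mm/mmap.c, shown here only as a rough sketch) is:
 *
 *	vma = vmacache_find(mm, addr);
 *	if (!vma) {
 *		vma = <walk the mm's VMA tree for addr>;
 *		if (vma)
 *			vmacache_update(addr, vma);
 *	}
 */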
struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
{
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[i];

		if (!vma)
			continue;
		if (WARN_ON_ONCE(vma->vm_mm != mm))
			break;
		if (vma->vm_start <= addr && vma->vm_end > addr) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
	}

	return NULL;
}

#ifndef CONFIG_MMU
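/*
 * Editorial note: on nommu kernels, callers look VMAs up by their exact
 * [start, end) range rather than by a contained address, so a matching
 * exact-range cache lookup is provided here.
 */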
struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[i];

		if (vma && vma->vm_start == start && vma->vm_end == end) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
	}

	return NULL;
}
#endif