  1. /*
  2. * linux/mm/mincore.c
  3. *
  4. * Copyright (C) 1994-1999 Linus Torvalds
  5. */
  6. /*
  7. * The mincore() system call.
  8. */
  9. #include <linux/slab.h>
  10. #include <linux/pagemap.h>
  11. #include <linux/mm.h>
  12. #include <linux/mman.h>
  13. #include <linux/syscalls.h>
  14. #include <asm/uaccess.h>
  15. #include <asm/pgtable.h>
  16. /*
  17. * Later we can get more picky about what "in core" means precisely.
  18. * For now, simply check to see if the page is in the page cache,
  19. * and is up to date; i.e. that no page-in operation would be required
  20. * at this time if an application were to map and access this page.
  21. */
  22. static unsigned char mincore_page(struct vm_area_struct * vma,
  23. unsigned long pgoff)
  24. {
  25. unsigned char present = 0;
  26. struct address_space * as = vma->vm_file->f_mapping;
  27. struct page * page;
  28. page = find_get_page(as, pgoff);
  29. if (page) {
  30. present = PageUptodate(page);
  31. page_cache_release(page);
  32. }
  33. return present;
  34. }
  35. static long mincore_vma(struct vm_area_struct * vma,
  36. unsigned long start, unsigned long end, unsigned char __user * vec)
  37. {
  38. long error, i, remaining;
  39. unsigned char * tmp;
  40. error = -ENOMEM;
  41. if (!vma->vm_file)
  42. return error;
  43. start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
  44. if (end > vma->vm_end)
  45. end = vma->vm_end;
  46. end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
  47. error = -EAGAIN;
  48. tmp = (unsigned char *) __get_free_page(GFP_KERNEL);
  49. if (!tmp)
  50. return error;
  51. /* (end - start) is # of pages, and also # of bytes in "vec */
  52. remaining = (end - start),
  53. error = 0;
  54. for (i = 0; remaining > 0; remaining -= PAGE_SIZE, i++) {
  55. int j = 0;
  56. long thispiece = (remaining < PAGE_SIZE) ?
  57. remaining : PAGE_SIZE;
  58. while (j < thispiece)
  59. tmp[j++] = mincore_page(vma, start++);
  60. if (copy_to_user(vec + PAGE_SIZE * i, tmp, thispiece)) {
  61. error = -EFAULT;
  62. break;
  63. }
  64. }
  65. free_page((unsigned long) tmp);
  66. return error;
  67. }
/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
asmlinkage long sys_mincore(unsigned long start, size_t len,
	unsigned char __user * vec)
{
	int index = 0;		/* byte offset into vec; one byte per page */
	unsigned long end, limit;
	struct vm_area_struct * vma;
	size_t max;
	int unmapped_error = 0;	/* deferred -ENOMEM for holes in the range */
	long error;

	/* check the arguments */
	if (start & ~PAGE_CACHE_MASK)
		goto einval;

	/* Reject addresses below the first user pgd or at/above TASK_SIZE. */
	if (start < FIRST_USER_PGD_NR * PGDIR_SIZE)
		goto enomem;

	limit = TASK_SIZE;
	if (start >= limit)
		goto enomem;

	if (!len)
		return 0;

	max = limit - start;
	len = PAGE_CACHE_ALIGN(len);
	/* !len catches wraparound from the alignment above */
	if (len > max || !len)
		goto enomem;

	end = start + len;

	/* check the output buffer whilst holding the lock */
	error = -EFAULT;
	down_read(&current->mm->mmap_sem);

	/* vec receives one byte per page, hence len >> PAGE_SHIFT bytes. */
	if (!access_ok(VERIFY_WRITE, vec, len >> PAGE_SHIFT))
		goto out;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 */
	error = 0;

	vma = find_vma(current->mm, start);
	while (vma) {
		/* Here start < vma->vm_end. */
		if (start < vma->vm_start) {
			/* Hole before this vma: note it, continue the walk. */
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
		}

		/* Here vma->vm_start <= start < vma->vm_end. */
		if (end <= vma->vm_end) {
			/* Final vma of the query: finish and report. */
			if (start < end) {
				error = mincore_vma(vma, start, end,
					&vec[index]);
				if (error)
					goto out;
			}
			error = unmapped_error;
			goto out;
		}

		/* Here vma->vm_start <= start < vma->vm_end < end. */
		error = mincore_vma(vma, start, vma->vm_end, &vec[index]);
		if (error)
			goto out;
		/* Advance the output index past this vma's pages. */
		index += (vma->vm_end - start) >> PAGE_CACHE_SHIFT;
		start = vma->vm_end;
		vma = vma->vm_next;
	}

	/* we found a hole in the area queried if we arrive here */
	error = -ENOMEM;

out:
	up_read(&current->mm->mmap_sem);
	return error;

einval:
	return -EINVAL;
enomem:
	return -ENOMEM;
}