/*
 *  arch/s390/lib/uaccess_pt.c
 *
 *  User access functions based on page table walks.
 *
 *    Copyright IBM Corp. 2006
 *    Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"
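
/*
 * Resolve a fault on a user address much like the regular fault
 * handler does: look up (and possibly expand) the vma, check its
 * access rights and let handle_mm_fault() bring the page in.
 * Returns 0 on success, -EFAULT if the fault cannot be resolved.
 */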
static inline int __handle_fault(struct mm_struct *mm, unsigned long address,
				 int write_access)
{
	struct vm_area_struct *vma;
	int ret = -EFAULT;

	if (in_atomic())
		return ret;
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
		if (expand_stack(vma, address))
			goto out;
	}

	if (!write_access) {
		/* page not present, check vm flags */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto out;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto out;
	}

survive:
	switch (handle_mm_fault(mm, vma, address, write_access)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto out_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}
	ret = 0;
out:
	up_read(&mm->mmap_sem);
	return ret;

out_of_memory:
	up_read(&mm->mmap_sem);
	/* init must not be killed on OOM; back off and retry the fault */
	if (is_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", current->comm);
	return ret;

out_sigbus:
	up_read(&mm->mmap_sem);
	current->thread.prot_addr = address;
	current->thread.trap_no = 0x11;
	force_sig(SIGBUS, current);
	return ret;
}
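
/*
 * Copy n bytes between kernel and user space by walking the user page
 * tables by hand under mm->page_table_lock.  Physical memory is
 * identity-mapped in the kernel on s390, so pfn << PAGE_SHIFT can be
 * used directly as a kernel address for memcpy().  On a missing or
 * write-protected pte the lock is dropped, __handle_fault() resolves
 * the fault and the copy is retried.  Returns the number of bytes
 * that could not be copied.
 */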
static inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
				    size_t n, int write_user)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, size;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	void *from, *to;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pgd = pgd_offset(mm, uaddr);
		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
			goto fault;

		pmd = pmd_offset(pgd, uaddr);
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
			goto fault;

		pte = pte_offset_map(pmd, uaddr);
		if (!pte || !pte_present(*pte) ||
		    (write_user && !pte_write(*pte)))
			goto fault;

		pfn = pte_pfn(*pte);
		if (!pfn_valid(pfn))
			goto out;

		/* copy at most up to the end of the current user page */
		offset = uaddr & (PAGE_SIZE - 1);
		size = min(n - done, PAGE_SIZE - offset);
		if (write_user) {
			to = (void *)((pfn << PAGE_SHIFT) + offset);
			from = kptr + done;
		} else {
			from = (void *)((pfn << PAGE_SHIFT) + offset);
			to = kptr + done;
		}
		memcpy(to, from, size);
		done += size;
		uaddr += size;
	} while (done < n);
out:
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(mm, uaddr, write_user))
		return n - done;
	goto retry;
}
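
/*
 * With KERNEL_DS the "user" pointer is really a kernel address, so a
 * plain memcpy() suffices.  Otherwise walk the page tables and, on a
 * partial copy, zero the uncopied tail of the destination as the
 * copy_from_user() contract requires.
 */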
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t rc;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(to, (void __kernel __force *) from, n);
		return 0;
	}
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}
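
/*
 * Same KERNEL_DS shortcut as above; copies towards user space go
 * through the page table walker with write_user set.
 */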
size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __kernel __force *) to, from, n);
		return 0;
	}
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}
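
/*
 * Usage sketch (illustrative, not part of the original file): as with
 * the other s390 uaccess backends, the return value is the number of
 * bytes left uncopied, which callers normally turn into -EFAULT.
 * fetch_user_long() is a hypothetical helper shown for illustration:
 *
 *	static int fetch_user_long(const unsigned long __user *uptr,
 *				   unsigned long *val)
 *	{
 *		if (copy_from_user_pt(sizeof(*val), uptr, val))
 *			return -EFAULT;
 *		return 0;
 *	}
 */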