View source

s390/uaccess: test if current->mm is set before walking page tables

If get_fs() == USER_DS we had better test whether current->mm is non-NULL
before walking page tables.
The page table walk code tries to lock mm->page_table_lock; however,
if mm is NULL this may crash.

Now, it is arguably incorrect to try to access userspace if current->mm
is NULL; however, we have seen this happen, and s390 would be the only
architecture to crash in such a case.
So we make the page table walk code a bit more robust and always report
a fault instead.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Heiko Carstens, 11 years ago
parent
commit
b03b467944

+ 10 - 0
arch/s390/lib/uaccess_pt.c

@@ -153,6 +153,8 @@ static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
 	unsigned long offset, done, size, kaddr;
 	unsigned long offset, done, size, kaddr;
 	void *from, *to;
 	void *from, *to;
 
 
+	if (!mm)
+		return n;
 	done = 0;
 	done = 0;
 retry:
 retry:
 	spin_lock(&mm->page_table_lock);
 	spin_lock(&mm->page_table_lock);
@@ -262,6 +264,8 @@ static size_t strnlen_user_pt(size_t count, const char __user *src)
 		return 0;
 		return 0;
 	if (segment_eq(get_fs(), KERNEL_DS))
 	if (segment_eq(get_fs(), KERNEL_DS))
 		return strnlen_kernel(count, src);
 		return strnlen_kernel(count, src);
+	if (!mm)
+		return 0;
 	done = 0;
 	done = 0;
 retry:
 retry:
 	spin_lock(&mm->page_table_lock);
 	spin_lock(&mm->page_table_lock);
@@ -323,6 +327,8 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
 
 
 	if (segment_eq(get_fs(), KERNEL_DS))
 	if (segment_eq(get_fs(), KERNEL_DS))
 		return copy_in_kernel(n, to, from);
 		return copy_in_kernel(n, to, from);
+	if (!mm)
+		return n;
 	done = 0;
 	done = 0;
 retry:
 retry:
 	spin_lock(&mm->page_table_lock);
 	spin_lock(&mm->page_table_lock);
@@ -411,6 +417,8 @@ int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
 
 
 	if (segment_eq(get_fs(), KERNEL_DS))
 	if (segment_eq(get_fs(), KERNEL_DS))
 		return __futex_atomic_op_pt(op, uaddr, oparg, old);
 		return __futex_atomic_op_pt(op, uaddr, oparg, old);
+	if (unlikely(!current->mm))
+		return -EFAULT;
 	spin_lock(&current->mm->page_table_lock);
 	spin_lock(&current->mm->page_table_lock);
 	uaddr = (u32 __force __user *)
 	uaddr = (u32 __force __user *)
 		__dat_user_addr((__force unsigned long) uaddr, 1);
 		__dat_user_addr((__force unsigned long) uaddr, 1);
@@ -448,6 +456,8 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
 
 
 	if (segment_eq(get_fs(), KERNEL_DS))
 	if (segment_eq(get_fs(), KERNEL_DS))
 		return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
 		return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
+	if (unlikely(!current->mm))
+		return -EFAULT;
 	spin_lock(&current->mm->page_table_lock);
 	spin_lock(&current->mm->page_table_lock);
 	uaddr = (u32 __force __user *)
 	uaddr = (u32 __force __user *)
 		__dat_user_addr((__force unsigned long) uaddr, 1);
 		__dat_user_addr((__force unsigned long) uaddr, 1);