@@ -599,7 +599,7 @@ extern void __put_user_unknown(void);
  * On error, the variable @x is set to zero.
  */
 #define __get_user_unaligned(x,ptr) \
-	__get_user__unalignednocheck((x),(ptr),sizeof(*(ptr)))
+	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
 
 /*
  * Yuck. We need two variants, one for 64bit operation and one
@@ -620,8 +620,8 @@ extern void __get_user_unaligned_unknown(void);
 do { \
 	switch (size) { \
 	case 1: __get_data_asm(val, "lb", ptr); break; \
-	case 2: __get_user_unaligned_asm(val, "ulh", ptr); break; \
-	case 4: __get_user_unaligned_asm(val, "ulw", ptr); break; \
+	case 2: __get_data_unaligned_asm(val, "ulh", ptr); break; \
+	case 4: __get_data_unaligned_asm(val, "ulw", ptr); break; \
 	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \
 	default: __get_user_unaligned_unknown(); break; \
 	} \
@@ -1122,9 +1122,15 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
 	__cu_to = (to); \
 	__cu_from = (from); \
 	__cu_len = (n); \
-	might_fault(); \
-	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
-					   __cu_len); \
+	if (eva_kernel_access()) { \
+		__cu_len = __invoke_copy_from_kernel(__cu_to, \
+						     __cu_from, \
+						     __cu_len); \
+	} else { \
+		might_fault(); \
+		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
+						   __cu_len); \
+	} \
 	__cu_len; \
 })
 
@@ -1229,16 +1235,28 @@ __clear_user(void __user *addr, __kernel_size_t size)
 {
 	__kernel_size_t res;
 
-	might_fault();
-	__asm__ __volatile__(
-		"move\t$4, %1\n\t"
-		"move\t$5, $0\n\t"
-		"move\t$6, %2\n\t"
-		__MODULE_JAL(__bzero)
-		"move\t%0, $6"
-		: "=r" (res)
-		: "r" (addr), "r" (size)
-		: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
+	if (eva_kernel_access()) {
+		__asm__ __volatile__(
+			"move\t$4, %1\n\t"
+			"move\t$5, $0\n\t"
+			"move\t$6, %2\n\t"
+			__MODULE_JAL(__bzero_kernel)
+			"move\t%0, $6"
+			: "=r" (res)
+			: "r" (addr), "r" (size)
+			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
+	} else {
+		might_fault();
+		__asm__ __volatile__(
+			"move\t$4, %1\n\t"
+			"move\t$5, $0\n\t"
+			"move\t$6, %2\n\t"
+			__MODULE_JAL(__bzero)
+			"move\t%0, $6"
+			: "=r" (res)
+			: "r" (addr), "r" (size)
+			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
+	}
 
 	return res;
 }
@@ -1384,7 +1402,7 @@ static inline long strlen_user(const char __user *s)
 	might_fault();
 	__asm__ __volatile__(
 		"move\t$4, %1\n\t"
-		__MODULE_JAL(__strlen_kernel_asm)
+		__MODULE_JAL(__strlen_user_asm)
 		"move\t%0, $2"
 		: "=r" (res)
 		: "r" (s)