|
@@ -78,6 +78,21 @@ extern u64 __ua_limit;
|
|
|
|
|
|
#define segment_eq(a, b) ((a).seg == (b).seg)
|
|
|
|
|
|
+/*
|
|
|
+ * eva_kernel_access() - determine whether a memory access is a kernel access
|
|
|
+ *
|
|
|
+ * Determines whether memory accesses should be performed to kernel memory
|
|
|
+ * on a system using Extended Virtual Addressing (EVA).
|
|
|
+ *
|
|
|
+ * Return: true if this is a kernel memory access on an EVA system, else false.
|
|
|
+ */
|
|
|
+static inline bool eva_kernel_access(void)
|
|
|
+{
|
|
|
+ if (!config_enabled(CONFIG_EVA))
|
|
|
+ return false;
|
|
|
+
|
|
|
+ return segment_eq(get_fs(), get_ds());
|
|
|
+}
|
|
|
|
|
|
/*
|
|
|
* Is a address valid? This does a straighforward calculation rather
|
|
@@ -281,7 +296,7 @@ do { \
|
|
|
({ \
|
|
|
int __gu_err; \
|
|
|
\
|
|
|
- if (segment_eq(get_fs(), get_ds())) { \
|
|
|
+ if (eva_kernel_access()) { \
|
|
|
__get_kernel_common((x), size, ptr); \
|
|
|
} else { \
|
|
|
__chk_user_ptr(ptr); \
|
|
@@ -297,7 +312,7 @@ do { \
|
|
|
\
|
|
|
might_fault(); \
|
|
|
if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) { \
|
|
|
- if (segment_eq(get_fs(), get_ds())) \
|
|
|
+ if (eva_kernel_access()) \
|
|
|
__get_kernel_common((x), size, __gu_ptr); \
|
|
|
else \
|
|
|
__get_user_common((x), size, __gu_ptr); \
|
|
@@ -422,7 +437,7 @@ do { \
|
|
|
int __pu_err = 0; \
|
|
|
\
|
|
|
__pu_val = (x); \
|
|
|
- if (segment_eq(get_fs(), get_ds())) { \
|
|
|
+ if (eva_kernel_access()) { \
|
|
|
__put_kernel_common(ptr, size); \
|
|
|
} else { \
|
|
|
__chk_user_ptr(ptr); \
|
|
@@ -439,7 +454,7 @@ do { \
|
|
|
\
|
|
|
might_fault(); \
|
|
|
if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \
|
|
|
- if (segment_eq(get_fs(), get_ds())) \
|
|
|
+ if (eva_kernel_access()) \
|
|
|
__put_kernel_common(__pu_addr, size); \
|
|
|
else \
|
|
|
__put_user_common(__pu_addr, size); \
|
|
@@ -833,7 +848,7 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
|
|
|
__cu_from = (from); \
|
|
|
__cu_len = (n); \
|
|
|
might_fault(); \
|
|
|
- if (segment_eq(get_fs(), get_ds())) \
|
|
|
+ if (eva_kernel_access()) \
|
|
|
__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
|
|
|
__cu_len); \
|
|
|
else \
|
|
@@ -853,7 +868,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
|
|
|
__cu_to = (to); \
|
|
|
__cu_from = (from); \
|
|
|
__cu_len = (n); \
|
|
|
- if (segment_eq(get_fs(), get_ds())) \
|
|
|
+ if (eva_kernel_access()) \
|
|
|
__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
|
|
|
__cu_len); \
|
|
|
else \
|
|
@@ -871,7 +886,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
|
|
|
__cu_to = (to); \
|
|
|
__cu_from = (from); \
|
|
|
__cu_len = (n); \
|
|
|
- if (segment_eq(get_fs(), get_ds())) \
|
|
|
+ if (eva_kernel_access()) \
|
|
|
__cu_len = __invoke_copy_from_kernel_inatomic(__cu_to, \
|
|
|
__cu_from,\
|
|
|
__cu_len);\
|
|
@@ -904,7 +919,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
|
|
|
__cu_to = (to); \
|
|
|
__cu_from = (from); \
|
|
|
__cu_len = (n); \
|
|
|
- if (segment_eq(get_fs(), get_ds())) { \
|
|
|
+ if (eva_kernel_access()) { \
|
|
|
__cu_len = __invoke_copy_to_kernel(__cu_to, \
|
|
|
__cu_from, \
|
|
|
__cu_len); \
|
|
@@ -1126,7 +1141,7 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
|
|
|
__cu_to = (to); \
|
|
|
__cu_from = (from); \
|
|
|
__cu_len = (n); \
|
|
|
- if (segment_eq(get_fs(), get_ds())) { \
|
|
|
+ if (eva_kernel_access()) { \
|
|
|
__cu_len = __invoke_copy_from_kernel(__cu_to, \
|
|
|
__cu_from, \
|
|
|
__cu_len); \
|
|
@@ -1150,7 +1165,7 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
|
|
|
__cu_to = (to); \
|
|
|
__cu_from = (from); \
|
|
|
__cu_len = (n); \
|
|
|
- if (segment_eq(get_fs(), get_ds())) { \
|
|
|
+ if (eva_kernel_access()) { \
|
|
|
__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
|
|
|
__cu_len); \
|
|
|
} else { \
|
|
@@ -1170,7 +1185,7 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
|
|
|
__cu_to = (to); \
|
|
|
__cu_from = (from); \
|
|
|
__cu_len = (n); \
|
|
|
- if (segment_eq(get_fs(), get_ds())) { \
|
|
|
+ if (eva_kernel_access()) { \
|
|
|
__cu_len = ___invoke_copy_in_kernel(__cu_to,__cu_from, \
|
|
|
__cu_len); \
|
|
|
} else { \
|
|
@@ -1250,7 +1265,7 @@ __strncpy_from_user(char *__to, const char __user *__from, long __len)
|
|
|
{
|
|
|
long res;
|
|
|
|
|
|
- if (segment_eq(get_fs(), get_ds())) {
|
|
|
+ if (eva_kernel_access()) {
|
|
|
__asm__ __volatile__(
|
|
|
"move\t$4, %1\n\t"
|
|
|
"move\t$5, %2\n\t"
|
|
@@ -1299,7 +1314,7 @@ strncpy_from_user(char *__to, const char __user *__from, long __len)
|
|
|
{
|
|
|
long res;
|
|
|
|
|
|
- if (segment_eq(get_fs(), get_ds())) {
|
|
|
+ if (eva_kernel_access()) {
|
|
|
__asm__ __volatile__(
|
|
|
"move\t$4, %1\n\t"
|
|
|
"move\t$5, %2\n\t"
|
|
@@ -1343,7 +1358,7 @@ static inline long strlen_user(const char __user *s)
|
|
|
{
|
|
|
long res;
|
|
|
|
|
|
- if (segment_eq(get_fs(), get_ds())) {
|
|
|
+ if (eva_kernel_access()) {
|
|
|
__asm__ __volatile__(
|
|
|
"move\t$4, %1\n\t"
|
|
|
__MODULE_JAL(__strlen_kernel_asm)
|
|
@@ -1370,7 +1385,7 @@ static inline long __strnlen_user(const char __user *s, long n)
|
|
|
{
|
|
|
long res;
|
|
|
|
|
|
- if (segment_eq(get_fs(), get_ds())) {
|
|
|
+ if (eva_kernel_access()) {
|
|
|
__asm__ __volatile__(
|
|
|
"move\t$4, %1\n\t"
|
|
|
"move\t$5, %2\n\t"
|
|
@@ -1411,7 +1426,7 @@ static inline long strnlen_user(const char __user *s, long n)
|
|
|
long res;
|
|
|
|
|
|
might_fault();
|
|
|
- if (segment_eq(get_fs(), get_ds())) {
|
|
|
+ if (eva_kernel_access()) {
|
|
|
__asm__ __volatile__(
|
|
|
"move\t$4, %1\n\t"
|
|
|
"move\t$5, %2\n\t"
|