@@ -241,12 +241,43 @@ static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
 #endif
 
 /* Encode and de-code a swap entry */
+#define SWP_TYPE_BITS		5
+
+#define SWP_OFFSET_FIRST_BIT	(_PAGE_BIT_PROTNONE + 1)
+
+/* We always extract/encode the offset by shifting it all the way up, and then down again */
+#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)
+
 #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
 #define __swp_type(x)			(((x).val) & 0x1f)
 #define __swp_offset(x)			((x).val >> 5)
 #define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
-#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
-#define __swp_entry_to_pte(x)		((pte_t){ { .pte_high = (x).val } })
+
+/*
+ * Normally, __swp_entry() converts from arch-independent swp_entry_t to
+ * arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the result
+ * to pte. But here we have 32bit swp_entry_t and 64bit pte, and need to use the
+ * whole 64 bits. Thus, we shift the "real" arch-dependent conversion to
+ * __swp_entry_to_pte() through the following helper macro based on 64bit
+ * __swp_entry().
+ */
+#define __swp_pteval_entry(type, offset) ((pteval_t) { \
+	(~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
+	| ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) })
+
+#define __swp_entry_to_pte(x)	((pte_t){ .pte = \
+		__swp_pteval_entry(__swp_type(x), __swp_offset(x)) })
+/*
+ * Analogically, __pte_to_swp_entry() doesn't just extract the arch-dependent
+ * swp_entry_t, but also has to convert it from 64bit to the 32bit
+ * intermediate representation, using the following macros based on 64bit
+ * __swp_type() and __swp_offset().
+ */
+#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS)))
+#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT))
+
+#define __pte_to_swp_entry(pte)	(__swp_entry(__pteval_swp_type(pte), \
+					     __pteval_swp_offset(pte)))
 
 #define gup_get_pte gup_get_pte
 /*
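
As a sanity check on the conversion chain described in the comments above, the new macros can be mirrored in a small userspace program. The sketch below is not part of the patch: the helper names, the uint64_t stand-in for pteval_t, and the value of _PAGE_BIT_PROTNONE (assumed to be 8, i.e. _PAGE_BIT_GLOBAL, per pgtable_types.h) are assumptions made only for this demo. It encodes a (type, offset) pair with the same expression as __swp_pteval_entry(), decodes it with the same shifts as __pteval_swp_type()/__pteval_swp_offset(), and asserts that the round-trip is lossless while bit 0 (the present bit) stays clear.

/*
 * Untested userspace sketch, not kernel code: plain-C mirror of the macros
 * added above, with assumed constants, used only to exercise the round-trip.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;		/* stand-in for the 64bit PAE pte value */

#define SWP_TYPE_BITS		5
#define _PAGE_BIT_PROTNONE	8	/* assumption: == _PAGE_BIT_GLOBAL */
#define SWP_OFFSET_FIRST_BIT	(_PAGE_BIT_PROTNONE + 1)
#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

/* Same expression as __swp_pteval_entry(): offset is shifted all the way up
 * and back down, complemented, with the type stored in the top 5 bits. */
static pteval_t swp_pteval_entry(unsigned long type, unsigned long offset)
{
	return (~(pteval_t)offset << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) |
	       ((pteval_t)type << (64 - SWP_TYPE_BITS));
}

/* Same expression as __pteval_swp_type() */
static unsigned long pteval_swp_type(pteval_t pte)
{
	return (unsigned long)(pte >> (64 - SWP_TYPE_BITS));
}

/* Same expression as __pteval_swp_offset() */
static unsigned long pteval_swp_offset(pteval_t pte)
{
	return (unsigned long)(~pte << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT);
}

int main(void)
{
	unsigned long type = 3, offset = 0x12345;
	pteval_t pte = swp_pteval_entry(type, offset);

	/* The type and offset survive the trip through the 64bit pte ... */
	assert(pteval_swp_type(pte) == type);
	assert(pteval_swp_offset(pte) == offset);
	/* ... and bit 0 stays clear, so the entry is never seen as present. */
	assert((pte & 1) == 0);

	printf("pte=%#llx type=%lu offset=%#lx\n",
	       (unsigned long long)pte, type, offset);
	return 0;
}

Building it with e.g. "gcc -Wall swapdemo.c && ./a.out" prints the encoded value and exits cleanly if the asserts hold, which is a quick way to see that the 32bit swp_entry_t layout (type in the low 5 bits, offset above them) and the 64bit pte layout round-trip through each other as the comments claim.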