@@ -193,9 +193,15 @@ static inline int is_module_addr(void *addr)
 #define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
 #define __HAVE_ARCH_PTE_SPECIAL
 
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
+#else
+#define _PAGE_SOFT_DIRTY 0x000
+#endif
+
 /* Set of bits not changed in pte_modify */
 #define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
-				 _PAGE_YOUNG)
+				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)
 
 /*
  * handle_pte_fault uses pte_present and pte_none to find out the pte type
@@ -285,6 +291,12 @@ static inline int is_module_addr(void *addr)
 #define _SEGMENT_ENTRY_READ	0x0002	/* SW segment read bit */
 #define _SEGMENT_ENTRY_WRITE	0x0001	/* SW segment write bit */
 
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
+#else
+#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
+#endif
+
 /*
  * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
  *				dy..R...I...wr
@@ -589,6 +601,43 @@ static inline int pmd_protnone(pmd_t pmd)
 }
 #endif
 
+static inline int pte_soft_dirty(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_SOFT_DIRTY;
+}
+#define pte_swp_soft_dirty pte_soft_dirty
+
+static inline pte_t pte_mksoft_dirty(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_SOFT_DIRTY;
+	return pte;
+}
+#define pte_swp_mksoft_dirty pte_mksoft_dirty
+
+static inline pte_t pte_clear_soft_dirty(pte_t pte)
+{
+	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
+	return pte;
+}
+#define pte_swp_clear_soft_dirty pte_clear_soft_dirty
+
+static inline int pmd_soft_dirty(pmd_t pmd)
+{
+	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
+}
+
+static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
+{
+	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
+	return pmd;
+}
+
+static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
+{
+	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
+	return pmd;
+}
+
 static inline pgste_t pgste_get_lock(pte_t *ptep)
 {
 	unsigned long new = 0;
@@ -889,7 +938,7 @@ static inline pte_t pte_mkclean(pte_t pte)
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
-	pte_val(pte) |= _PAGE_DIRTY;
+	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
 	if (pte_val(pte) & _PAGE_WRITE)
 		pte_val(pte) &= ~_PAGE_PROTECT;
 	return pte;
@@ -1340,7 +1389,8 @@ static inline pmd_t pmd_mkclean(pmd_t pmd)
 static inline pmd_t pmd_mkdirty(pmd_t pmd)
 {
 	if (pmd_large(pmd)) {
-		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY;
+		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
+				_SEGMENT_ENTRY_SOFT_DIRTY;
 		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
 			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
 	}
@@ -1371,7 +1421,8 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 	if (pmd_large(pmd)) {
 		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
 			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
-			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT;
+			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT |
+			_SEGMENT_ENTRY_SOFT_DIRTY;
 		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
 		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
 			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
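
As a quick illustration of the semantics the new helpers encode, here is a minimal user-space sketch (not kernel code). It borrows only the _PAGE_SOFT_DIRTY value (0x002) from the hunks above; the remaining bit values, the reduced pte_t, and the pte_val() macro are placeholders chosen for this example. It shows that pte_mkdirty() now latches the write into the soft-dirty bit, while pte_clear_soft_dirty() resets only the tracking bit and leaves the real dirty state untouched.

/*
 * Stand-alone mock of the soft-dirty pte helpers -- illustration only.
 * Only _PAGE_SOFT_DIRTY (0x002) comes from the patch above; the other
 * bit values are placeholders, and pte_t is reduced to unsigned long.
 */
#include <assert.h>
#include <stdio.h>

#define _PAGE_SOFT_DIRTY 0x002UL	/* SW soft dirty bit (from the patch) */
#define _PAGE_DIRTY	 0x008UL	/* placeholder value */
#define _PAGE_WRITE	 0x020UL	/* placeholder value */
#define _PAGE_PROTECT	 0x200UL	/* placeholder value */

typedef unsigned long pte_t;
#define pte_val(pte) (pte)

static inline pte_t pte_mkdirty(pte_t pte)
{
	/* Dirtying the pte also records the change for soft-dirty tracking. */
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	/* Resetting the tracking bit leaves the real dirty state alone. */
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}

int main(void)
{
	pte_t pte = _PAGE_WRITE | _PAGE_PROTECT;

	pte = pte_mkdirty(pte);
	assert(pte_soft_dirty(pte));			/* write was tracked */
	assert(!(pte_val(pte) & _PAGE_PROTECT));	/* writable pte unprotected */

	pte = pte_clear_soft_dirty(pte);
	assert(!pte_soft_dirty(pte));			/* tracking reset */
	assert(pte_val(pte) & _PAGE_DIRTY);		/* dirty state preserved */

	printf("soft-dirty helper semantics hold\n");
	return 0;
}

User space consumes these bits in the usual soft-dirty way: writing 4 to /proc/<pid>/clear_refs clears them, and bit 55 of each /proc/<pid>/pagemap entry reports them, as described in the kernel's soft-dirty documentation.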