@@ -442,6 +442,14 @@ struct mm_struct {
 	/* numa_scan_seq prevents two threads setting pte_numa */
 	int numa_scan_seq;
+#endif
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+	/*
+	 * An operation with batched TLB flushing is going on. Anything that
+	 * can move process memory needs to flush the TLB when moving a
+	 * PROT_NONE or PROT_NUMA mapped page.
+	 */
+	bool tlb_flush_pending;
 #endif
 	struct uprobes_state uprobes_state;
 };
 
@@ -459,4 +467,40 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
 	return mm->cpu_vm_mask_var;
 }
 
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+/*
+ * Memory barriers to keep this state in sync are graciously provided by
+ * the page table locks, outside of which no page table modifications happen.
+ * The barriers below prevent the compiler from re-ordering the instructions
+ * around the memory barriers that are already present in the code.
+ */
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+	barrier();
+	return mm->tlb_flush_pending;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+	mm->tlb_flush_pending = true;
+	barrier();
+}
+/* Clearing is done after a TLB flush, which also provides a barrier. */
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+	barrier();
+	mm->tlb_flush_pending = false;
+}
+#else
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+	return false;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+#endif
+
 #endif /* _LINUX_MM_TYPES_H */
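
For illustration only, here is a minimal user-space sketch of the protocol these helpers support: one thread sets tlb_flush_pending before batching "PTE" changes and clears it only after the flush, while a thread that wants to move a page checks the flag under the (mock) page table lock and flushes first if a batch is still in flight. Everything below is a hypothetical stand-in (struct mock_mm, batched_change_protection(), try_move_page(), mock_flush_tlb(), a pthread mutex in place of the page table lock), not the kernel call sites, which are outside this hunk.

/*
 * Illustrative user-space sketch, not kernel code: it only mimics the
 * ordering that tlb_flush_pending relies on.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

struct mock_mm {
	pthread_mutex_t page_table_lock;	/* stands in for the PTL */
	bool tlb_flush_pending;
};

static void set_tlb_flush_pending(struct mock_mm *mm)
{
	mm->tlb_flush_pending = true;
	barrier();	/* flag must be set before any PTE is touched */
}

static void clear_tlb_flush_pending(struct mock_mm *mm)
{
	barrier();	/* only clear after the flush has completed */
	mm->tlb_flush_pending = false;
}

static bool mm_tlb_flush_pending(struct mock_mm *mm)
{
	barrier();
	return mm->tlb_flush_pending;
}

static void mock_flush_tlb(struct mock_mm *mm)
{
	(void)mm;
	printf("TLB flushed\n");	/* placeholder for a real flush */
}

/* Writer side: batch protection changes, flush once, then clear the flag. */
static void batched_change_protection(struct mock_mm *mm)
{
	set_tlb_flush_pending(mm);
	pthread_mutex_lock(&mm->page_table_lock);
	/* ... update a range of PTEs here ... */
	pthread_mutex_unlock(&mm->page_table_lock);
	mock_flush_tlb(mm);
	clear_tlb_flush_pending(mm);
}

/* Mover side: do not trust the TLB while a batched operation is pending. */
static void try_move_page(struct mock_mm *mm)
{
	pthread_mutex_lock(&mm->page_table_lock);
	if (mm_tlb_flush_pending(mm))
		mock_flush_tlb(mm);	/* other CPUs may still see old PTEs */
	/* ... safe to move the page now ... */
	pthread_mutex_unlock(&mm->page_table_lock);
}

int main(void)
{
	struct mock_mm mm = {
		.page_table_lock = PTHREAD_MUTEX_INITIALIZER,
		.tlb_flush_pending = false,
	};

	batched_change_protection(&mm);
	try_move_page(&mm);
	return 0;
}

The point of the sketch is the ordering, which mirrors the comments in the patch: the flag is raised before the batched changes start and lowered only after the TLB flush, so a racing mover that takes the page table lock either sees the flag and flushes, or sees state that has already been flushed.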