@@ -487,14 +487,12 @@ struct mm_struct {
 	/* numa_scan_seq prevents two threads setting pte_numa */
 	int numa_scan_seq;
 #endif
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
 	/*
 	 * An operation with batched TLB flushing is going on. Anything that
 	 * can move process memory needs to flush the TLB when moving a
 	 * PROT_NONE or PROT_NUMA mapped page.
 	 */
 	atomic_t tlb_flush_pending;
-#endif
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 	/* See flush_tlb_batched_pending() */
 	bool tlb_flush_batched;
@@ -528,7 +526,6 @@ extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 extern void tlb_finish_mmu(struct mmu_gather *tlb,
 				unsigned long start, unsigned long end);
 
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
 /*
  * Memory barriers to keep this state in sync are graciously provided by
  * the page table locks, outside of which no page table modifications happen.
@@ -569,24 +566,6 @@ static inline void dec_tlb_flush_pending(struct mm_struct *mm)
 	smp_mb__before_atomic();
 	atomic_dec(&mm->tlb_flush_pending);
 }
-#else
-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
-{
-	return false;
-}
-
-static inline void init_tlb_flush_pending(struct mm_struct *mm)
-{
-}
-
-static inline void inc_tlb_flush_pending(struct mm_struct *mm)
-{
-}
-
-static inline void dec_tlb_flush_pending(struct mm_struct *mm)
-{
-}
-#endif
 
 struct vm_fault;