@@ -7,6 +7,7 @@
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
 #include <asm/special_insns.h>
+#include <asm/smp.h>
 
 static inline void __invpcid(unsigned long pcid, unsigned long addr,
 			     unsigned long type)
@@ -65,10 +66,8 @@ static inline void invpcid_flush_all_nonglobals(void)
 #endif
 
 struct tlb_state {
-#ifdef CONFIG_SMP
 	struct mm_struct *active_mm;
 	int state;
-#endif
 
 	/*
 	 * Access to this CR4 shadow and to H/W CR4 is protected by
@@ -231,77 +230,6 @@ struct flush_tlb_info {
 	unsigned long end;
 };
 
-#ifndef CONFIG_SMP
-
-/* "_up" is for UniProcessor.
- *
- * This is a helper for other header functions. *Not* intended to be called
- * directly. All global TLB flushes need to either call this, or to bump the
- * vm statistics themselves.
- */
-static inline void __flush_tlb_up(void)
-{
-	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-	__flush_tlb();
-}
-
-static inline void flush_tlb_all(void)
-{
-	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-	__flush_tlb_all();
-}
-
-static inline void local_flush_tlb(void)
-{
-	__flush_tlb_up();
-}
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
-	if (mm == current->active_mm)
-		__flush_tlb_up();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				  unsigned long addr)
-{
-	if (vma->vm_mm == current->active_mm)
-		__flush_tlb_one(addr);
-}
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	if (vma->vm_mm == current->active_mm)
-		__flush_tlb_up();
-}
-
-static inline void flush_tlb_mm_range(struct mm_struct *mm,
-	   unsigned long start, unsigned long end, unsigned long vmflag)
-{
-	if (mm == current->active_mm)
-		__flush_tlb_up();
-}
-
-static inline void native_flush_tlb_others(const struct cpumask *cpumask,
-					   const struct flush_tlb_info *info)
-{
-}
-
-static inline void reset_lazy_tlbstate(void)
-{
-}
-
-static inline void flush_tlb_kernel_range(unsigned long start,
-					  unsigned long end)
-{
-	flush_tlb_all();
-}
-
-#else /* SMP */
-
-#include <asm/smp.h>
-
 #define local_flush_tlb() __flush_tlb()
 
 #define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
@@ -339,8 +267,6 @@ static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
 
 extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
 
-#endif /* SMP */
-
 #ifndef CONFIG_PARAVIRT
 #define flush_tlb_others(mask, info) \
 	native_flush_tlb_others(mask, info)
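
With the !CONFIG_SMP branch removed, flush_tlb_mm() resolves to the
flush_tlb_mm_range() macro kept above on every configuration, UP included.
A minimal sketch of what a caller sees, assuming a hypothetical
drop_mm_mappings() helper that is not part of this patch:

	/* Hypothetical caller, shown only to illustrate the macro expansion. */
	static void drop_mm_mappings(struct mm_struct *mm)
	{
		/*
		 * Now expands to flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
		 * on both UP and SMP builds, since the UP-only inline helpers
		 * (__flush_tlb_up() and friends) no longer exist.
		 */
		flush_tlb_mm(mm);
	}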