@@ -36,9 +36,9 @@ struct hv_flush_pcpu_ex {
 /* Each gva in gva_list encodes up to 4096 pages to flush */
 #define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
 
-static struct hv_flush_pcpu __percpu *pcpu_flush;
+static struct hv_flush_pcpu __percpu **pcpu_flush;
 
-static struct hv_flush_pcpu_ex __percpu *pcpu_flush_ex;
+static struct hv_flush_pcpu_ex __percpu **pcpu_flush_ex;
 
 /*
  * Fills in gva_list starting from offset. Returns the number of items added.
@@ -109,6 +109,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
 				    const struct flush_tlb_info *info)
 {
 	int cpu, vcpu, gva_n, max_gvas;
+	struct hv_flush_pcpu **flush_pcpu;
 	struct hv_flush_pcpu *flush;
 	u64 status = U64_MAX;
 	unsigned long flags;
@@ -123,7 +124,17 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
 
 	local_irq_save(flags);
 
-	flush = this_cpu_ptr(pcpu_flush);
+	flush_pcpu = this_cpu_ptr(pcpu_flush);
+
+	if (unlikely(!*flush_pcpu))
+		*flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
+
+	flush = *flush_pcpu;
+
+	if (unlikely(!flush)) {
+		local_irq_restore(flags);
+		goto do_native;
+	}
 
 	if (info->mm) {
 		flush->address_space = virt_to_phys(info->mm->pgd);
@@ -180,6 +191,7 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
 				       const struct flush_tlb_info *info)
 {
 	int nr_bank = 0, max_gvas, gva_n;
+	struct hv_flush_pcpu_ex **flush_pcpu;
 	struct hv_flush_pcpu_ex *flush;
 	u64 status = U64_MAX;
 	unsigned long flags;
@@ -194,7 +206,17 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
 
 	local_irq_save(flags);
 
-	flush = this_cpu_ptr(pcpu_flush_ex);
+	flush_pcpu = this_cpu_ptr(pcpu_flush_ex);
+
+	if (unlikely(!*flush_pcpu))
+		*flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
+
+	flush = *flush_pcpu;
+
+	if (unlikely(!flush)) {
+		local_irq_restore(flags);
+		goto do_native;
+	}
 
 	if (info->mm) {
 		flush->address_space = virt_to_phys(info->mm->pgd);
@@ -273,7 +295,7 @@ void hyper_alloc_mmu(void)
 		return;
 
 	if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
-		pcpu_flush = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
+		pcpu_flush = alloc_percpu(struct hv_flush_pcpu *);
 	else
-		pcpu_flush_ex = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
+		pcpu_flush_ex = alloc_percpu(struct hv_flush_pcpu_ex *);
 }