KVM guest: Make pv trampoline code executable

Our PV guest patching code assembles chunks of instructions on the fly when it
encounters more complicated instructions to hijack. These instructions need
to live in a section that we don't mark as non-executable, as otherwise we
fault when jumping there.

Right now we put it into the .bss section where it automatically gets marked
as non-executable. Add a check to the NX setting function to ensure that we
leave these particular pages executable.

Signed-off-by: Alexander Graf <agraf@suse.de>
Alexander Graf committed 11 years ago
commit b18db0b808
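As a rough illustration of what the patch does, here is a minimal, self-contained user-space sketch. Every name in it (trampoline_buf, PROT_NOEXEC, covers_trampolines, the 16 MiB step size) is an illustrative stand-in rather than a kernel identifier; the real code uses kvm_tmp[] and clears the HPTE_R_N bit, as the diffs below show.

#include <stdio.h>
#include <stdint.h>

#define PROT_NOEXEC	0x4			/* illustrative no-execute flag (kernel: HPTE_R_N) */
#define TRAMP_SIZE	(1024 * 1024)		/* 1 MiB, same size as kvm_tmp[] */

static char trampoline_buf[TRAMP_SIZE];		/* stand-in for kvm_tmp[] in .bss */

/* Same shape as the overlaps_kvm_tmp() check added in sections.h:
 * true when [start, end) spans the whole trampoline buffer. */
static int covers_trampolines(uintptr_t start, uintptr_t end)
{
	return start < (uintptr_t)trampoline_buf &&
		(uintptr_t)&trampoline_buf[TRAMP_SIZE] < end;
}

int main(void)
{
	uintptr_t step = 16UL * 1024 * 1024;	/* assume a 16 MiB bolted-mapping step */
	uintptr_t base = (uintptr_t)trampoline_buf & ~(step - 1);
	uintptr_t va;

	for (va = base; va < base + 2 * step; va += step) {
		unsigned int prot = PROT_NOEXEC;	/* data mappings default to no-execute */

		if (covers_trampolines(va, va + step))
			prot &= ~PROT_NOEXEC;		/* generated code must stay executable */

		printf("step at %#lx -> prot %#x\n", (unsigned long)va, prot);
	}
	return 0;
}

With the buffer living in .bss and the bolted linear mapping using large steps, the buffer normally sits strictly inside one step, so exactly that step keeps execute permission while all other data mappings remain non-executable.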

+ 11 - 0
arch/powerpc/include/asm/sections.h

@@ -39,6 +39,17 @@ static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
 		(unsigned long)_stext < end;
 }
 
+static inline int overlaps_kvm_tmp(unsigned long start, unsigned long end)
+{
+#ifdef CONFIG_KVM_GUEST
+	extern char kvm_tmp[];
+	return start < (unsigned long)kvm_tmp &&
+		(unsigned long)&kvm_tmp[1024 * 1024] < end;
+#else
+	return 0;
+#endif
+}
+
 #undef dereference_function_descriptor
 static inline void *dereference_function_descriptor(void *ptr)
 {

+ 1 - 1
arch/powerpc/kernel/kvm.c

@@ -74,7 +74,7 @@
 #define KVM_INST_MTSRIN		0x7c0001e4
 
 static bool kvm_patching_worked = true;
-static char kvm_tmp[1024 * 1024];
+char kvm_tmp[1024 * 1024];
 static int kvm_tmp_index;
 
 static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
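A side note on the kvm.c change: dropping `static` gives kvm_tmp external linkage, which is what lets the `extern char kvm_tmp[];` declaration inside overlaps_kvm_tmp() bind to the array at link time. A trivial two-file sketch of the same pattern (file and symbol names here are made up for illustration):

/* buffer.c — definition with external linkage, i.e. no `static` */
char scratch_buf[1024 * 1024];

/* user.c — another translation unit can now refer to the same array */
#include <stdio.h>

extern char scratch_buf[];

int main(void)
{
	printf("buffer starts at %p\n", (void *)scratch_buf);
	return 0;
}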

+ 4 - 0
arch/powerpc/mm/hash_utils_64.c

@@ -207,6 +207,10 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 		if (overlaps_kernel_text(vaddr, vaddr + step))
 			tprot &= ~HPTE_R_N;
 
+		/* Make kvm guest trampolines executable */
+		if (overlaps_kvm_tmp(vaddr, vaddr + step))
+			tprot &= ~HPTE_R_N;
+
 		/*
 		 * If relocatable, check if it overlaps interrupt vectors that
 		 * are copied down to real 0. For relocatable kernel
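Placing the new check directly under the overlaps_kernel_text() test keeps the two cases symmetrical: both clear HPTE_R_N, the no-execute bit in the hash PTE, so the trampoline buffer ends up with the same execute permission as kernel text while the rest of the bolted data mapping stays non-executable.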