
s390/mm,kvm: improve detection of KVM guest faults

The identification of a guest fault currently relies on the PF_VCPU flag.
This is set in guest_enter_irqoff and cleared in guest_exit_irqoff.
Both functions are called by __vcpu_run, so the PF_VCPU flag ends up set
for quite a lot of kernel code outside of guest execution.

Replace the PF_VCPU scheme with a PIF_GUEST_FAULT bit in pt_regs and
make the program check handler code in entry.S set the bit only for
exceptions that occurred between the .Lsie_gmap and .Lsie_done labels.

Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Martin Schwidefsky, 8 years ago
commit c771320e93
3 files changed, 10 insertions, 5 deletions
  1. arch/s390/include/asm/ptrace.h (+2, -0)
  2. arch/s390/kernel/entry.S (+5, -2)
  3. arch/s390/mm/fault.c (+3, -3)

+ 2 - 0
arch/s390/include/asm/ptrace.h

@@ -13,10 +13,12 @@
 #define PIF_SYSCALL		0	/* inside a system call */
 #define PIF_PER_TRAP		1	/* deliver sigtrap on return to user */
 #define PIF_SYSCALL_RESTART	2	/* restart the current system call */
+#define PIF_GUEST_FAULT		3	/* indicates program check in sie64a */
 
 #define _PIF_SYSCALL		_BITUL(PIF_SYSCALL)
 #define _PIF_PER_TRAP		_BITUL(PIF_PER_TRAP)
 #define _PIF_SYSCALL_RESTART	_BITUL(PIF_SYSCALL_RESTART)
+#define _PIF_GUEST_FAULT	_BITUL(PIF_GUEST_FAULT)
 
 #ifndef __ASSEMBLY__
 
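The fault handling changes further down consume this bit through the pt_regs flag helpers that live in the non-assembly part of the same header. As a rough sketch of the assumed shape of those helpers (the actual definitions in arch/s390/include/asm/ptrace.h are authoritative):

/* Sketch of the pt_regs flag accessors assumed by the fault.c hunks below;
 * the real definitions live in arch/s390/include/asm/ptrace.h. */
static inline void set_pt_regs_flag(struct pt_regs *regs, int flag)
{
	regs->flags |= (1UL << flag);
}

static inline int test_pt_regs_flag(struct pt_regs *regs, int flag)
{
	return !!(regs->flags & (1UL << flag));
}

Note that entry.S cannot use these C helpers; it seeds pt_regs->flags directly, which is what the new lghi/stg sequence in the next file does.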

+ 5 - 2
arch/s390/kernel/entry.S

@@ -518,6 +518,7 @@ ENTRY(pgm_check_handler)
 	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
 	lg	%r10,__LC_LAST_BREAK
 	lg	%r12,__LC_CURRENT
+	lghi	%r11,0
 	larl	%r13,cleanup_critical
 	lmg	%r8,%r9,__LC_PGM_OLD_PSW
 	tmhh	%r8,0x0001		# test problem state bit
@@ -532,6 +533,7 @@ ENTRY(pgm_check_handler)
 	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 	larl	%r9,sie_exit			# skip forward to sie_exit
+	lghi	%r11,_PIF_GUEST_FAULT
 #endif
 0:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
 	jnz	1f			# -> enabled, can't be a double fault
@@ -549,13 +551,14 @@ ENTRY(pgm_check_handler)
 	jz	3f
 	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
 3:	stg	%r10,__THREAD_last_break(%r14)
-4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
+4:	lgr	%r13,%r11
+	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	stmg	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
 	stmg	%r8,%r9,__PT_PSW(%r11)
 	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
 	mvc	__PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
-	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+	stg	%r13,__PT_FLAGS(%r11)
 	stg	%r10,__PT_ARGS(%r11)
 	tm	__LC_PGM_ILC+3,0x80	# check for per exception
 	jz	5f
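In C-like terms the flag handling added above does the following: %r11 starts out as 0 and is loaded with _PIF_GUEST_FAULT only on the path taken when the program check hit between the .Lsie_gmap and .Lsie_done labels; the value is then parked in %r13 while %r11 is repurposed as the pt_regs pointer, and finally stored into pt_regs->flags where the old code unconditionally cleared the field. A loose equivalent, for illustration only (in_sie_critical_section is a made-up name for the label-range check; the real logic is the assembly above):

/* Illustration of the flag handling in pgm_check_handler; not real kernel
 * code.  in_sie_critical_section stands for the comparison against the
 * .Lsie_gmap/.Lsie_done range performed in the assembly. */
unsigned long flags = 0;			/* lghi	%r11,0 */

if (in_sie_critical_section)			/* fault while in SIE */
	flags = _PIF_GUEST_FAULT;		/* lghi	%r11,_PIF_GUEST_FAULT */

regs->flags = flags;				/* stg	%r13,__PT_FLAGS(%r11), was xc */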

+ 3 - 3
arch/s390/mm/fault.c

@@ -117,7 +117,7 @@ static inline int user_space_fault(struct pt_regs *regs)
 		return 1;
 	if (trans_exc_code == 2) /* secondary space -> set_fs */
 		return current->thread.mm_segment.ar4;
-	if (current->flags & PF_VCPU)
+	if (test_pt_regs_flag(regs, PIF_GUEST_FAULT))
 		return 1;
 	return 0;
 }
@@ -209,7 +209,7 @@ static void dump_fault_info(struct pt_regs *regs)
 		pr_cont("kernel ");
 	}
 #ifdef CONFIG_PGSTE
-	else if ((current->flags & PF_VCPU) && S390_lowcore.gmap) {
+	else if (test_pt_regs_flag(regs, PIF_GUEST_FAULT)) {
 		struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
 		asce = gmap->asce;
 		pr_cont("gmap ");
@@ -438,7 +438,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
 	down_read(&mm->mmap_sem);
 
 #ifdef CONFIG_PGSTE
-	gmap = (current->flags & PF_VCPU) ?
+	gmap = test_pt_regs_flag(regs, PIF_GUEST_FAULT) ?
 		(struct gmap *) S390_lowcore.gmap : NULL;
 	if (gmap) {
 		current->thread.gmap_addr = address;
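
Taken together, the program check handler now records per exception whether the fault was raised while the CPU was executing the SIE instruction, and the fault code consults that per-exception state instead of the per-task PF_VCPU flag, so host faults taken by a vcpu thread outside of SIE are no longer misclassified as guest faults. Any code that has the pt_regs of the program check can make the same distinction; a minimal, hypothetical usage sketch along the lines of the do_exception() change above:

/* Hypothetical example, not part of the patch: picking the guest address
 * space the way do_exception() now does, based purely on the per-exception
 * pt_regs state rather than on current->flags. */
struct gmap *gmap = test_pt_regs_flag(regs, PIF_GUEST_FAULT) ?
	(struct gmap *) S390_lowcore.gmap : NULL;

if (gmap)
	pr_debug("guest fault, gmap asce %lx\n", gmap->asce);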