@@ -15,12 +15,14 @@
 #include <linux/context_tracking.h>	/* exception_enter(), ... */
 #include <linux/uaccess.h>		/* faulthandler_disabled() */
 
+#include <asm/cpufeature.h>		/* boot_cpu_has, ... */
 #include <asm/traps.h>			/* dotraplinkage, ... */
 #include <asm/pgalloc.h>		/* pgd_*(), ... */
 #include <asm/kmemcheck.h>		/* kmemcheck_*(), ... */
 #include <asm/fixmap.h>			/* VSYSCALL_ADDR */
 #include <asm/vsyscall.h>		/* emulate_vsyscall */
 #include <asm/vm86.h>			/* struct vm86 */
+#include <asm/mmu_context.h>		/* vma_pkey() */
 
 #define CREATE_TRACE_POINTS
 #include <asm/trace/exceptions.h>
@@ -169,6 +171,56 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
 	return prefetch;
 }
 
+/*
+ * A protection key fault means that the PKRU value did not allow
+ * access to some PTE.  Userspace can figure out what PKRU was
+ * from the XSAVE state, and this function fills out a field in
+ * siginfo so userspace can discover which protection key was set
+ * on the PTE.
+ *
+ * If we get here, we know that the hardware signaled a PF_PK
+ * fault and that there was a VMA once we got in the fault
+ * handler.  It does *not* guarantee that the VMA we find here
+ * was the one that we faulted on.
+ *
+ *	1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4);
+ *	2. T1 : set PKRU to deny access to pkey=4, touches page
+ *	3. T1 : faults...
+ *	4. T2 : mprotect_key(foo, PAGE_SIZE, pkey=5);
+ *	5. T1 : enters fault handler, takes mmap_sem, etc...
+ *	6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
+ *	   faulted on a pte with its pkey=4.
+ */
+static void fill_sig_info_pkey(int si_code, siginfo_t *info,
+		struct vm_area_struct *vma)
+{
+	/* This is effectively an #ifdef */
+	if (!boot_cpu_has(X86_FEATURE_OSPKE))
+		return;
+
+	/* Fault not from Protection Keys: nothing to do */
+	if (si_code != SEGV_PKUERR)
+		return;
+	/*
+	 * force_sig_info_fault() is called from a number of
+	 * contexts, some of which have a VMA and some of which
+	 * do not.  The PF_PK handling happens after we have a
+	 * valid VMA, so we should never reach this without a
+	 * valid VMA.
+	 */
+	if (!vma) {
+		WARN_ONCE(1, "PKU fault with no VMA passed in");
+		info->si_pkey = 0;
+		return;
+	}
+	/*
+	 * si_pkey should be thought of as a strong hint, but not
+	 * absolutely guaranteed to be 100% accurate because of
+	 * the race explained above.
+	 */
+	info->si_pkey = vma_pkey(vma);
+}
+
 static void
 force_sig_info_fault(int si_signo, int si_code, unsigned long address,
 		     struct task_struct *tsk, struct vm_area_struct *vma,
@@ -187,6 +239,8 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
 		lsb = PAGE_SHIFT;
 	info.si_addr_lsb = lsb;
 
+	fill_sig_info_pkey(si_code, &info, vma);
+
 	force_sig_info(si_signo, &info, tsk);
 }
 
@@ -847,7 +901,15 @@ static noinline void
 bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
 		      unsigned long address, struct vm_area_struct *vma)
 {
-	__bad_area(regs, error_code, address, vma, SEGV_ACCERR);
+	/*
+	 * This OSPKE check is not strictly necessary at runtime.
+	 * But doing it this way allows compiler optimizations
+	 * if pkeys are compiled out.
+	 */
+	if (boot_cpu_has(X86_FEATURE_OSPKE) && (error_code & PF_PK))
+		__bad_area(regs, error_code, address, vma, SEGV_PKUERR);
+	else
+		__bad_area(regs, error_code, address, vma, SEGV_ACCERR);
 }
 
 static void
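
Not part of the patch: a minimal userspace sketch of how a program might
consume the new si_pkey field once this lands.  Everything below is
illustrative only; it assumes an x86 CPU with OSPKE enabled, a kernel that
carries this change plus the pkey_alloc()/pkey_mprotect() syscalls, and a
glibc new enough (>= 2.27) to provide the pkey_*() wrappers and to expose
si_pkey in siginfo_t.  As the comment in fill_sig_info_pkey() says, treat
si_pkey as a strong hint, not a guarantee.

	#define _GNU_SOURCE
	#include <signal.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	static void segv_handler(int sig, siginfo_t *si, void *ctx)
	{
		if (si->si_code == SEGV_PKUERR)
			/* si_pkey names the key on the faulting PTE (a hint) */
			printf("pkey fault at %p, pkey=%d\n",
			       si->si_addr, (int)si->si_pkey);
		_exit(0);
	}

	int main(void)
	{
		struct sigaction sa = {
			.sa_sigaction	= segv_handler,
			.sa_flags	= SA_SIGINFO,
		};
		long pagesize = sysconf(_SC_PAGESIZE);
		char *page;
		int pkey;

		sigaction(SIGSEGV, &sa, NULL);

		page = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
			    MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
		pkey = pkey_alloc(0, 0);
		/* tag the page with the new key, then deny access in PKRU */
		pkey_mprotect(page, pagesize, PROT_READ | PROT_WRITE, pkey);
		pkey_set(pkey, PKEY_DISABLE_ACCESS);

		*page = 1;	/* faults with si_code == SEGV_PKUERR */
		return 0;
	}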