@@ -11,6 +11,7 @@
 #include <asm/fpu/internal.h>
 #include <asm/fpu/signal.h>
 #include <asm/fpu/regset.h>
+#include <asm/fpu/xstate.h>
 
 #include <asm/tlbflush.h>
 
@@ -43,6 +44,13 @@ static unsigned int xstate_offsets[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
 static unsigned int xstate_sizes[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
 static unsigned int xstate_comp_offsets[sizeof(xfeatures_mask)*8];
 
+/*
+ * The kernel's XSAVE area can be in standard or compacted format;
+ * for user mode it is always in standard format. This is the user
+ * mode standard format size used for signal and ptrace frames.
+ */
+unsigned int fpu_user_xstate_size;
+
 /*
  * Clear all of the X86_FEATURE_* bits that are unavailable
  * when the CPU has no XSAVE support.
@@ -105,6 +113,27 @@ int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name)
 }
 EXPORT_SYMBOL_GPL(cpu_has_xfeatures);
 
+static int xfeature_is_supervisor(int xfeature_nr)
+{
+	/*
+	 * We currently do not support supervisor states, but if
+	 * we did, we could find out like this.
+	 *
+	 * SDM says: If state component 'i' is a user state component,
+	 * ECX[0] returns 0; if state component 'i' is a supervisor
+	 * state component, ECX[0] returns 1.
+	 */
+	u32 eax, ebx, ecx, edx;
+
+	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
+	return !!(ecx & 1);
+}
+
+static int xfeature_is_user(int xfeature_nr)
+{
+	return !xfeature_is_supervisor(xfeature_nr);
+}
+
 /*
  * When executing XSAVEOPT (or other optimized XSAVE instructions), if
  * a processor implementation detects that an FPU state component is still
@@ -171,7 +200,7 @@ void fpstate_sanitize_xstate(struct fpu *fpu)
 	 */
 	while (xfeatures) {
 		if (xfeatures & 0x1) {
-			int offset = xstate_offsets[feature_bit];
+			int offset = xstate_comp_offsets[feature_bit];
 			int size = xstate_sizes[feature_bit];
 
 			memcpy((void *)fx + offset,
@@ -192,6 +221,15 @@ void fpu__init_cpu_xstate(void)
 {
 	if (!boot_cpu_has(X86_FEATURE_XSAVE) || !xfeatures_mask)
 		return;
+	/*
+	 * Make it clear that XSAVES supervisor states are not yet
+	 * implemented, should anyone expect them to work by changing
+	 * bits in the XFEATURE_MASK_* macros and XCR0.
+	 */
+	WARN_ONCE((xfeatures_mask & XFEATURE_MASK_SUPERVISOR),
+		"x86/fpu: XSAVES supervisor states are not yet implemented.\n");
+
+	xfeatures_mask &= ~XFEATURE_MASK_SUPERVISOR;
 
 	cr4_set_bits(X86_CR4_OSXSAVE);
 	xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
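
For illustration only (not part of the patch): once CR4.OSXSAVE is set and XCR0 is programmed as above, the enabled feature mask can be read back from user space with XGETBV. A minimal sketch, assuming the _xgetbv() intrinsic from <immintrin.h> is available (recent GCC/Clang, compiled with -mxsave):

  #include <immintrin.h>
  #include <stdio.h>

  int main(void)
  {
          /* XCR register 0 is XCR0; readable once CR4.OSXSAVE is set. */
          unsigned long long xcr0 = _xgetbv(0);

          printf("XCR0 = %#llx (x87=%llu SSE=%llu AVX=%llu)\n",
                 xcr0, xcr0 & 1, (xcr0 >> 1) & 1, (xcr0 >> 2) & 1);
          return 0;
  }
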
@@ -217,13 +255,29 @@ static void __init setup_xstate_features(void)
 	/* start at the beginning of the "extended state" */
 	unsigned int last_good_offset = offsetof(struct xregs_state,
 						 extended_state_area);
+	/*
+	 * The FP and SSE xstates are legacy states. They are always
+	 * at fixed offsets in the xsave area, in either compacted
+	 * or standard form.
+	 */
+	xstate_offsets[0] = 0;
+	xstate_sizes[0] = offsetof(struct fxregs_state, xmm_space);
+	xstate_offsets[1] = xstate_sizes[0];
+	xstate_sizes[1] = FIELD_SIZEOF(struct fxregs_state, xmm_space);
 
 	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
 		if (!xfeature_enabled(i))
 			continue;
 
 		cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
-		xstate_offsets[i] = ebx;
+
+		/*
+		 * If an xfeature is a supervisor state, the offset
+		 * in EBX is invalid: we leave it as -1.
+		 */
+		if (xfeature_is_user(i))
+			xstate_offsets[i] = ebx;
+
 		xstate_sizes[i] = eax;
 		/*
 		 * In our xstate size checks, we assume that the
@@ -233,8 +287,6 @@ static void __init setup_xstate_features(void)
 		WARN_ONCE(last_good_offset > xstate_offsets[i],
 			"x86/fpu: misordered xstate at %d\n", last_good_offset);
 		last_good_offset = xstate_offsets[i];
-
-		printk(KERN_INFO "x86/fpu: xstate_offset[%d]: %4d, xstate_sizes[%d]: %4d\n", i, ebx, i, eax);
 	}
 }
 
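For illustration only (not part of the patch): the offset/size/supervisor data that setup_xstate_features() caches comes straight from CPUID leaf 0xD, and can be enumerated from user space the same way. A sketch using GCC's <cpuid.h>; the register semantics follow the SDM text quoted in the comments above:

  #include <cpuid.h>
  #include <stdio.h>

  int main(void)
  {
          unsigned int eax, ebx, ecx, edx, nr;

          for (nr = 2; nr < 63; nr++) {
                  if (!__get_cpuid_count(0xd, nr, &eax, &ebx, &ecx, &edx))
                          break;          /* CPU lacks leaf 0xd entirely */
                  if (!eax)
                          continue;       /* size 0: component not enumerated */
                  /*
                   * EAX = size, EBX = standard-format offset (user states
                   * only), ECX[0] = supervisor, ECX[1] = 64-byte aligned.
                   */
                  printf("xstate %2u: size %4u, offset %4u, %s%s\n",
                         nr, eax, ebx,
                         (ecx & 1) ? "supervisor" : "user",
                         (ecx & 2) ? ", 64-byte aligned" : "");
          }
          return 0;
  }
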
@@ -262,6 +314,33 @@ static void __init print_xstate_features(void)
 	print_xstate_feature(XFEATURE_MASK_PKRU);
 }
 
+/*
+ * This check is important because it is easy to get XSTATE_*
+ * confused with XSTATE_BIT_*.
+ */
+#define CHECK_XFEATURE(nr) do {			\
+	WARN_ON(nr < FIRST_EXTENDED_XFEATURE);	\
+	WARN_ON(nr >= XFEATURE_MAX);		\
+} while (0)
+
+/*
+ * We could cache this like xstate_size[], but we only use
+ * it here, so it would be a waste of space.
+ */
+static int xfeature_is_aligned(int xfeature_nr)
+{
+	u32 eax, ebx, ecx, edx;
+
+	CHECK_XFEATURE(xfeature_nr);
+	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
+	/*
+	 * The value returned by ECX[1] indicates the alignment
+	 * of state component 'i' when the compacted format
+	 * of the extended region of an XSAVE area is used:
+	 */
+	return !!(ecx & 2);
+}
+
 /*
  * This function sets up offsets and sizes of all extended states in
  * xsave area. This supports both standard format and compacted format
@@ -299,10 +378,29 @@ static void __init setup_xstate_comp(void)
 		else
 			xstate_comp_sizes[i] = 0;
 
-		if (i > FIRST_EXTENDED_XFEATURE)
+		if (i > FIRST_EXTENDED_XFEATURE) {
 			xstate_comp_offsets[i] = xstate_comp_offsets[i-1]
 					+ xstate_comp_sizes[i-1];
 
+			if (xfeature_is_aligned(i))
+				xstate_comp_offsets[i] =
+					ALIGN(xstate_comp_offsets[i], 64);
+		}
+	}
+}
+
+/*
+ * Print out xstate component offsets and sizes
+ */
+static void __init print_xstate_offset_size(void)
+{
+	int i;
+
+	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
+		if (!xfeature_enabled(i))
+			continue;
+		pr_info("x86/fpu: xstate_offset[%d]: %4d, xstate_sizes[%d]: %4d\n",
+			 i, xstate_comp_offsets[i], i, xstate_sizes[i]);
 	}
 }
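
For illustration only (not part of the patch): the compacted-format layout built by setup_xstate_comp() is just a running sum with optional 64-byte alignment. A stand-alone sketch with hypothetical sizes and alignment flags, standing in for what CPUID leaf 0xD would report:

  #include <stdio.h>

  #define NR_XSTATES      9
  #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

  /* Hypothetical example data; FP/SSE live in the fixed 512-byte
   * legacy area and the 64-byte xstate header follows it. */
  static const unsigned int xsize[NR_XSTATES] = {
          160, 256,               /* FP, SSE (legacy area)        */
          256, 64, 64,            /* AVX, BNDREGS, BNDCSR         */
          64, 512, 1024,          /* opmask, ZMM_Hi256, Hi16_ZMM  */
          8,                      /* PKRU                         */
  };
  static const unsigned int xalign[NR_XSTATES] = { 0, 0, 0, 0, 0, 0, 1, 0, 0 };

  int main(void)
  {
          unsigned int off[NR_XSTATES];
          int i;

          off[2] = 512 + 64;      /* first extended state: legacy + header */
          for (i = 3; i < NR_XSTATES; i++) {
                  off[i] = off[i - 1] + xsize[i - 1];
                  if (xalign[i])  /* mirrors the ALIGN(..., 64) added above */
                          off[i] = ALIGN_UP(off[i], 64);
          }
          for (i = 2; i < NR_XSTATES; i++)
                  printf("xstate %d: compacted offset %4u, size %4u\n",
                         i, off[i], xsize[i]);
          return 0;
  }
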
@@ -322,13 +420,11 @@ static void __init setup_init_fpu_buf(void)
 	setup_xstate_features();
 	print_xstate_features();
 
-	if (boot_cpu_has(X86_FEATURE_XSAVES)) {
+	if (boot_cpu_has(X86_FEATURE_XSAVES))
 		init_fpstate.xsave.header.xcomp_bv = (u64)1 << 63 | xfeatures_mask;
-		init_fpstate.xsave.header.xfeatures = xfeatures_mask;
-	}
 
 	/*
-	 * Init all the features state with header_bv being 0x0
+	 * Init all the features state with header.xfeatures being 0x0
 	 */
 	copy_kernel_to_xregs_booting(&init_fpstate.xsave);
@@ -339,58 +435,19 @@ static void __init setup_init_fpu_buf(void)
 	copy_xregs_to_kernel_booting(&init_fpstate.xsave);
 }
 
-static int xfeature_is_supervisor(int xfeature_nr)
-{
-	/*
-	 * We currently do not support supervisor states, but if
-	 * we did, we could find out like this.
-	 *
-	 * SDM says: If state component i is a user state component,
-	 * ECX[0] return 0; if state component i is a supervisor
-	 * state component, ECX[0] returns 1.
-	 *
-	u32 eax, ebx, ecx, edx;
-	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx;
-	return !!(ecx & 1);
-	*/
-	return 0;
-}
-/*
-static int xfeature_is_user(int xfeature_nr)
-{
-	return !xfeature_is_supervisor(xfeature_nr);
-}
-*/
-
-/*
- * This check is important because it is easy to get XSTATE_*
- * confused with XSTATE_BIT_*.
- */
-#define CHECK_XFEATURE(nr) do {			\
-	WARN_ON(nr < FIRST_EXTENDED_XFEATURE);	\
-	WARN_ON(nr >= XFEATURE_MAX);		\
-} while (0)
-
-/*
- * We could cache this like xstate_size[], but we only use
- * it here, so it would be a waste of space.
- */
-static int xfeature_is_aligned(int xfeature_nr)
+static int xfeature_uncompacted_offset(int xfeature_nr)
 {
 	u32 eax, ebx, ecx, edx;
 
-	CHECK_XFEATURE(xfeature_nr);
-	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
 	/*
-	 * The value returned by ECX[1] indicates the alignment
-	 * of state component i when the compacted format
-	 * of the extended region of an XSAVE area is used
+	 * Only XSAVES supports supervisor states and it uses compacted
+	 * format. Checking a supervisor state's uncompacted offset is
+	 * an error.
 	 */
-	return !!(ecx & 2);
-}
-
-static int xfeature_uncompacted_offset(int xfeature_nr)
-{
-	u32 eax, ebx, ecx, edx;
+	if (XFEATURE_MASK_SUPERVISOR & (1 << xfeature_nr)) {
+		WARN_ONCE(1, "No fixed offset for xstate %d\n", xfeature_nr);
+		return -1;
+	}
 
 	CHECK_XFEATURE(xfeature_nr);
 	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
@@ -415,7 +472,7 @@ static int xfeature_size(int xfeature_nr)
  * that it is obvious which aspect of 'XSAVES' is being handled
  * by the calling code.
  */
-static int using_compacted_format(void)
+int using_compacted_format(void)
 {
 	return boot_cpu_has(X86_FEATURE_XSAVES);
 }
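
A side note on the compacted-format marker (illustrative, not part of the patch): when using_compacted_format() is true, the XSAVE header's xcomp_bv has bit 63 set, as in the setup_init_fpu_buf() hunk above ((u64)1 << 63 | xfeatures_mask). The kernel headers name this bit XCOMP_BV_COMPACTED_FORMAT; a tiny sketch of the encoding:

  #include <stdint.h>
  #include <stdio.h>

  #define XCOMP_BV_COMPACTED_FORMAT ((uint64_t)1 << 63)

  int main(void)
  {
          uint64_t xfeatures_mask = 0x1f; /* hypothetical enabled features */
          uint64_t xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask;

          printf("xcomp_bv = %#llx, compacted = %d\n",
                 (unsigned long long)xcomp_bv, (int)(xcomp_bv >> 63));
          return 0;
  }
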
@@ -530,11 +587,12 @@ static void do_extra_xstate_size_checks(void)
 		 */
 		paranoid_xstate_size += xfeature_size(i);
 	}
-	XSTATE_WARN_ON(paranoid_xstate_size != xstate_size);
+	XSTATE_WARN_ON(paranoid_xstate_size != fpu_kernel_xstate_size);
 }
 
+
 /*
- * Calculate total size of enabled xstates in XCR0/xfeatures_mask.
+ * Get total size of enabled xstates in XCR0/xfeatures_mask.
  *
  * Note the SDM's wording here. "sub-function 0" only enumerates
  * the size of the *user* states. If we use it to size a buffer
@@ -544,34 +602,33 @@ static void do_extra_xstate_size_checks(void)
  * Note that we do not currently set any bits on IA32_XSS so
  * 'XCR0 | IA32_XSS == XCR0' for now.
  */
-static unsigned int __init calculate_xstate_size(void)
+static unsigned int __init get_xsaves_size(void)
 {
 	unsigned int eax, ebx, ecx, edx;
-	unsigned int calculated_xstate_size;
+	/*
+	 * - CPUID function 0DH, sub-function 1:
+	 *    EBX enumerates the size (in bytes) required by
+	 *    the XSAVES instruction for an XSAVE area
+	 *    containing all the state components
+	 *    corresponding to bits currently set in
+	 *    XCR0 | IA32_XSS.
+	 */
+	cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
+	return ebx;
+}
 
-	if (!boot_cpu_has(X86_FEATURE_XSAVES)) {
-		/*
-		 * - CPUID function 0DH, sub-function 0:
-		 *    EBX enumerates the size (in bytes) required by
-		 *    the XSAVE instruction for an XSAVE area
-		 *    containing all the *user* state components
-		 *    corresponding to bits currently set in XCR0.
-		 */
-		cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
-		calculated_xstate_size = ebx;
-	} else {
-		/*
-		 * - CPUID function 0DH, sub-function 1:
-		 *    EBX enumerates the size (in bytes) required by
-		 *    the XSAVES instruction for an XSAVE area
-		 *    containing all the state components
-		 *    corresponding to bits currently set in
-		 *    XCR0 | IA32_XSS.
-		 */
-		cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
-		calculated_xstate_size = ebx;
-	}
-	return calculated_xstate_size;
+static unsigned int __init get_xsave_size(void)
+{
+	unsigned int eax, ebx, ecx, edx;
+	/*
+	 * - CPUID function 0DH, sub-function 0:
+	 *    EBX enumerates the size (in bytes) required by
+	 *    the XSAVE instruction for an XSAVE area
+	 *    containing all the *user* state components
+	 *    corresponding to bits currently set in XCR0.
+	 */
+	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
+	return ebx;
 }
 
 /*
@@ -591,7 +648,15 @@ static bool is_supported_xstate_size(unsigned int test_xstate_size)
 static int init_xstate_size(void)
 {
 	/* Recompute the context size for enabled features: */
-	unsigned int possible_xstate_size = calculate_xstate_size();
+	unsigned int possible_xstate_size;
+	unsigned int xsave_size;
+
+	xsave_size = get_xsave_size();
+
+	if (boot_cpu_has(X86_FEATURE_XSAVES))
+		possible_xstate_size = get_xsaves_size();
+	else
+		possible_xstate_size = xsave_size;
 
 	/* Ensure we have the space to store all enabled xstates: */
 	if (!is_supported_xstate_size(possible_xstate_size))
@@ -601,8 +666,13 @@ static int init_xstate_size(void)
 	 * The size is OK, we are definitely going to use xsave,
 	 * make it known to the world that we need more space.
 	 */
-	xstate_size = possible_xstate_size;
+	fpu_kernel_xstate_size = possible_xstate_size;
 	do_extra_xstate_size_checks();
+
+	/*
+	 * User space is always in standard format.
+	 */
+	fpu_user_xstate_size = xsave_size;
 	return 0;
 }
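
To make the two size sources concrete (illustration only, not part of the patch): a user-space analogue of get_xsave_size() and get_xsaves_size(), querying CPUID leaf 0xD sub-functions 0 and 1 via GCC's <cpuid.h>:

  #include <cpuid.h>
  #include <stdio.h>

  int main(void)
  {
          unsigned int eax, ebx, ecx, edx;

          /* Sub-function 0, EBX: XSAVE standard-format size for XCR0 (user) states. */
          if (!__get_cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx))
                  return 1;
          printf("XSAVE  (standard)  size: %u bytes\n", ebx);

          /* Sub-function 1, EBX: XSAVES compacted size for XCR0 | IA32_XSS. */
          if (!__get_cpuid_count(0xd, 1, &eax, &ebx, &ecx, &edx))
                  return 1;
          printf("XSAVES (compacted) size: %u bytes\n", ebx);
          return 0;
  }
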
@@ -644,8 +714,13 @@ void __init fpu__init_system_xstate(void)
 	xfeatures_mask = eax + ((u64)edx << 32);
 
 	if ((xfeatures_mask & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) {
+		/*
+		 * This indicates that something really unexpected happened
+		 * with the enumeration. Disable XSAVE and try to continue
+		 * booting without it. This is too early to BUG().
+		 */
 		pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n", xfeatures_mask);
-		BUG();
+		goto out_disable;
 	}
 
 	xfeatures_mask &= fpu__get_supported_xfeatures_mask();
@@ -653,21 +728,29 @@ void __init fpu__init_system_xstate(void)
 	/* Enable xstate instructions to be able to continue with initialization: */
 	fpu__init_cpu_xstate();
 	err = init_xstate_size();
-	if (err) {
-		/* something went wrong, boot without any XSAVE support */
-		fpu__init_disable_system_xstate();
-		return;
-	}
+	if (err)
+		goto out_disable;
+
+	/*
+	 * Update info used for ptrace frames; use standard-format size and no
+	 * supervisor xstates:
+	 */
+	update_regset_xstate_info(fpu_user_xstate_size, xfeatures_mask & ~XFEATURE_MASK_SUPERVISOR);
 
-	update_regset_xstate_info(xstate_size, xfeatures_mask);
 	fpu__init_prepare_fx_sw_frame();
 	setup_init_fpu_buf();
 	setup_xstate_comp();
+	print_xstate_offset_size();
 
 	pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
 		xfeatures_mask,
-		xstate_size,
+		fpu_kernel_xstate_size,
 		boot_cpu_has(X86_FEATURE_XSAVES) ? "compacted" : "standard");
+	return;
+
+out_disable:
+	/* something went wrong, try to boot without any XSAVE support */
+	fpu__init_disable_system_xstate();
 }
 
 /*
@@ -693,6 +776,11 @@ void *__raw_xsave_addr(struct xregs_state *xsave, int xstate_feature_mask)
 {
 	int feature_nr = fls64(xstate_feature_mask) - 1;
 
+	if (!xfeature_enabled(feature_nr)) {
+		WARN_ON_FPU(1);
+		return NULL;
+	}
+
 	return (void *)xsave + xstate_comp_offsets[feature_nr];
 }
 /*
@@ -887,16 +975,16 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
 	if (!boot_cpu_has(X86_FEATURE_OSPKE))
 		return -EINVAL;
 
-	/* Set the bits we need in PKRU */
+	/* Set the bits we need in PKRU: */
 	if (init_val & PKEY_DISABLE_ACCESS)
 		new_pkru_bits |= PKRU_AD_BIT;
 	if (init_val & PKEY_DISABLE_WRITE)
 		new_pkru_bits |= PKRU_WD_BIT;
 
-	/* Shift the bits in to the correct place in PKRU for pkey. */
+	/* Shift the bits into the correct place in PKRU for pkey: */
 	new_pkru_bits <<= pkey_shift;
 
-	/* Locate old copy of the state in the xsave buffer */
+	/* Locate old copy of the state in the xsave buffer: */
 	old_pkru_state = get_xsave_addr(xsave, XFEATURE_MASK_PKRU);
 
 	/*
@@ -909,9 +997,10 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
 	else
 		new_pkru_state.pkru = old_pkru_state->pkru;
 
-	/* mask off any old bits in place */
+	/* Mask off any old bits in place: */
 	new_pkru_state.pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift);
-	/* Set the newly-requested bits */
+
+	/* Set the newly-requested bits: */
 	new_pkru_state.pkru |= new_pkru_bits;
 
 	/*
@@ -925,8 +1014,168 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
 	 */
 	new_pkru_state.pad = 0;
 
-	fpu__xfeature_set_state(XFEATURE_MASK_PKRU, &new_pkru_state,
-			sizeof(new_pkru_state));
+	fpu__xfeature_set_state(XFEATURE_MASK_PKRU, &new_pkru_state, sizeof(new_pkru_state));
+
+	return 0;
+}
+
+/*
+ * This is similar to user_regset_copyout(), but will not add offset to
+ * the source data pointer or increment pos, count, kbuf, and ubuf.
+ */
+static inline int xstate_copyout(unsigned int pos, unsigned int count,
+				 void *kbuf, void __user *ubuf,
+				 const void *data, const int start_pos,
+				 const int end_pos)
+{
+	if ((count == 0) || (pos < start_pos))
+		return 0;
+
+	if (end_pos < 0 || pos < end_pos) {
+		unsigned int copy = (end_pos < 0 ? count : min(count, end_pos - pos));
+
+		if (kbuf) {
+			memcpy(kbuf + pos, data, copy);
+		} else {
+			if (__copy_to_user(ubuf + pos, data, copy))
+				return -EFAULT;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Convert from kernel XSAVES compacted format to standard format and copy
+ * to a ptrace buffer. It supports partial copy but pos always starts from
+ * zero. This is called from xstateregs_get() and there we check the CPU
+ * has XSAVES.
+ */
+int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
+			void __user *ubuf, struct xregs_state *xsave)
+{
+	unsigned int offset, size;
+	int ret, i;
+	struct xstate_header header;
+
+	/*
+	 * Currently copy_regset_to_user() starts from pos 0:
+	 */
+	if (unlikely(pos != 0))
+		return -EFAULT;
+
+	/*
+	 * The destination is a ptrace buffer; we put in only user xstates:
+	 */
+	memset(&header, 0, sizeof(header));
+	header.xfeatures = xsave->header.xfeatures;
+	header.xfeatures &= ~XFEATURE_MASK_SUPERVISOR;
+
+	/*
+	 * Copy xregs_state->header:
+	 */
+	offset = offsetof(struct xregs_state, header);
+	size = sizeof(header);
+
+	ret = xstate_copyout(offset, size, kbuf, ubuf, &header, 0, count);
+
+	if (ret)
+		return ret;
+
+	for (i = 0; i < XFEATURE_MAX; i++) {
+		/*
+		 * Copy only in-use xstates:
+		 */
+		if ((header.xfeatures >> i) & 1) {
+			void *src = __raw_xsave_addr(xsave, 1 << i);
+
+			offset = xstate_offsets[i];
+			size = xstate_sizes[i];
+
+			ret = xstate_copyout(offset, size, kbuf, ubuf, src, 0, count);
+
+			if (ret)
+				return ret;
+
+			if (offset + size >= count)
+				break;
+		}
+
+	}
+
+	/*
+	 * Fill xsave->i387.sw_reserved value for ptrace frame:
+	 */
+	offset = offsetof(struct fxregs_state, sw_reserved);
+	size = sizeof(xstate_fx_sw_bytes);
+
+	ret = xstate_copyout(offset, size, kbuf, ubuf, xstate_fx_sw_bytes, 0, count);
+
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * Convert from a ptrace standard-format buffer to kernel XSAVES format
+ * and copy to the target thread. This is called from xstateregs_set() and
+ * there we check the CPU has XSAVES and a whole standard-sized buffer
+ * exists.
+ */
+int copyin_to_xsaves(const void *kbuf, const void __user *ubuf,
+		     struct xregs_state *xsave)
+{
+	unsigned int offset, size;
+	int i;
+	u64 xfeatures;
+	u64 allowed_features;
+
+	offset = offsetof(struct xregs_state, header);
+	size = sizeof(xfeatures);
+
+	if (kbuf) {
+		memcpy(&xfeatures, kbuf + offset, size);
+	} else {
+		if (__copy_from_user(&xfeatures, ubuf + offset, size))
+			return -EFAULT;
+	}
+
+	/*
+	 * Reject if the user sets any disabled or supervisor features:
+	 */
+	allowed_features = xfeatures_mask & ~XFEATURE_MASK_SUPERVISOR;
+
+	if (xfeatures & ~allowed_features)
+		return -EINVAL;
+
+	for (i = 0; i < XFEATURE_MAX; i++) {
+		u64 mask = ((u64)1 << i);
+
+		if (xfeatures & mask) {
+			void *dst = __raw_xsave_addr(xsave, 1 << i);
+
+			offset = xstate_offsets[i];
+			size = xstate_sizes[i];
+
+			if (kbuf) {
+				memcpy(dst, kbuf + offset, size);
+			} else {
+				if (__copy_from_user(dst, ubuf + offset, size))
+					return -EFAULT;
+			}
+		}
+	}
+
+	/*
+	 * The state that came in from userspace was user-state only.
+	 * Mask all the user states out of 'header.xfeatures':
+	 */
+	xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR;
+
+	/*
+	 * Add back in the features that came in from userspace:
+	 */
+	xsave->header.xfeatures |= xfeatures;
+
 	return 0;
 }