|
@@ -69,6 +69,22 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
|
|
|
return atomic64_inc_return(&mm->context.tlb_gen);
|
|
|
}
|
|
|
|
|
|
+/* There are 12 bits of space for ASIDS in CR3 */
|
|
|
+#define CR3_HW_ASID_BITS 12
|
|
|
+/*
|
|
|
+ * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
|


|


|


+ * user/kernel switches. It is 0 here until PTI support is wired up.
|
|
|
+ */
|
|
|
+#define PTI_CONSUMED_ASID_BITS 0
|
|
|
+
|
|
|
+#define CR3_AVAIL_ASID_BITS (CR3_HW_ASID_BITS - PTI_CONSUMED_ASID_BITS)
|
|
|
+/*
|
|
|
+ * ASIDs are zero-based: 0..MAX_ASID_AVAILABLE are valid. -1 below to account
|


|


|


+ * for them being zero-based. Another -1 is because ASID 0 is reserved for
|


|


|


+ * use by non-PCID-aware users.
|
|
|
+ */
|
|
|
+#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_ASID_BITS) - 2)
|
|
|
+
|
|
|
/*
|
|
|
* If PCID is on, ASID-aware code paths put the ASID+1 into the PCID bits.
|
|
|
* This serves two purposes. It prevents a nasty situation in which
|
|
@@ -81,7 +97,7 @@ struct pgd_t;
|
|
|
static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
|
|
|
{
|
|
|
if (static_cpu_has(X86_FEATURE_PCID)) {
|
|
|
- VM_WARN_ON_ONCE(asid > 4094);
|
|
|
+ VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
|
|
|
return __sme_pa(pgd) | (asid + 1);
|
|
|
} else {
|
|
|
VM_WARN_ON_ONCE(asid != 0);
|
|
@@ -91,7 +107,7 @@ static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
|
|
|
|
|
|
static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
|
|
|
{
|
|
|
- VM_WARN_ON_ONCE(asid > 4094);
|
|
|
+ VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
|
|
|
return __sme_pa(pgd) | (asid + 1) | CR3_NOFLUSH;
|
|
|
}
|
|
|
|