@@ -42,16 +42,19 @@ static struct equiv_cpu_entry *equiv_cpu_table;
 
 /*
  * This points to the current valid container of microcode patches which we will
- * save from the initrd/builtin before jettisoning its contents.
+ * save from the initrd/builtin before jettisoning its contents. @mc is the
+ * microcode patch we found to match.
  */
-struct container {
-	u8 *data;
-	size_t size;
-} cont;
+struct cont_desc {
+	struct microcode_amd *mc;
+	u32 cpuid_1_eax;
+	u32 psize;
+	u8 *data;
+	size_t size;
+};
 
 static u32 ucode_new_rev;
 static u8 amd_ucode_patch[PATCH_MAX_SIZE];
-static u16 this_equiv_id;
 
 /*
  * Microcode patch container file is prepended to the initrd in cpio
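
For orientation, a sketch of the blob layout that a cont_desc describes — inferred from the parsing code in this file, not an authoritative format specification:

	/*
	 * One container in the (possibly concatenated) microcode blob:
	 *
	 *   u32 hdr[3];       // UCODE_MAGIC, UCODE_EQUIV_CPU_TABLE_TYPE,
	 *                     // and the equivalence table size in bytes
	 *   struct equiv_cpu_entry eq_table[];  // CPUID(1).EAX -> equiv ID
	 *
	 *   // ... followed by one or more patch sections:
	 *   u32 type;         // UCODE_UCODE_TYPE
	 *   u32 patch_size;   // must not exceed PATCH_MAX_SIZE
	 *   struct microcode_amd mc;  // mc.hdr.processor_rev_id is matched
	 *                             // against the CPU's equivalence ID
	 */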
@@ -60,57 +63,13 @@ static u16 this_equiv_id;
 static const char
 ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
 
-static size_t compute_container_size(u8 *data, u32 total_size)
+static u16 find_equiv_id(struct equiv_cpu_entry *equiv_table, u32 sig)
 {
-	size_t size = 0;
-	u32 *header = (u32 *)data;
-
-	if (header[0] != UCODE_MAGIC ||
-	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
-	    header[2] == 0)                            /* size */
-		return size;
-
-	size = header[2] + CONTAINER_HDR_SZ;
-	total_size -= size;
-	data += size;
-
-	while (total_size) {
-		u16 patch_size;
-
-		header = (u32 *)data;
-
-		if (header[0] != UCODE_UCODE_TYPE)
-			break;
-
-		/*
-		 * Sanity-check patch size.
-		 */
-		patch_size = header[1];
-		if (patch_size > PATCH_MAX_SIZE)
-			break;
-
-		size       += patch_size + SECTION_HDR_SIZE;
-		data       += patch_size + SECTION_HDR_SIZE;
-		total_size -= patch_size + SECTION_HDR_SIZE;
+	for (; equiv_table && equiv_table->installed_cpu; equiv_table++) {
+		if (sig == equiv_table->installed_cpu)
+			return equiv_table->equiv_cpu;
 	}
 
-	return size;
-}
-
-static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
-				unsigned int sig)
-{
-	int i = 0;
-
-	if (!equiv_cpu_table)
-		return 0;
-
-	while (equiv_cpu_table[i].installed_cpu != 0) {
-		if (sig == equiv_cpu_table[i].installed_cpu)
-			return equiv_cpu_table[i].equiv_cpu;
-
-		i++;
-	}
-
 	return 0;
 }
 
@@ -118,91 +77,109 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
  * This scans the ucode blob for the proper container as we can have multiple
  * containers glued together. Returns the equivalence ID from the equivalence
  * table or 0 if none found.
+ * Returns the amount of bytes consumed while scanning. @desc contains all the
+ * data we're going to use in later stages of the application.
  */
-static u16
-find_proper_container(u8 *ucode, size_t size, struct container *ret_cont)
+static ssize_t parse_container(u8 *ucode, ssize_t size, struct cont_desc *desc)
 {
-	struct container ret = { NULL, 0 };
-	u32 eax, ebx, ecx, edx;
 	struct equiv_cpu_entry *eq;
-	int offset, left;
-	u16 eq_id = 0;
-	u32 *header;
-	u8 *data;
+	ssize_t orig_size = size;
+	u32 *hdr = (u32 *)ucode;
+	u16 eq_id;
+	u8 *buf;
 
-	data   = ucode;
-	left   = size;
-	header = (u32 *)data;
+	/* Am I looking at an equivalence table header? */
+	if (hdr[0] != UCODE_MAGIC ||
+	    hdr[1] != UCODE_EQUIV_CPU_TABLE_TYPE ||
+	    hdr[2] == 0)
+		return CONTAINER_HDR_SZ;
 
+	buf = ucode;
 
-	/* find equiv cpu table */
-	if (header[0] != UCODE_MAGIC ||
-	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
-	    header[2] == 0)                            /* size */
-		return eq_id;
+	eq = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);
 
-	eax = 0x00000001;
-	ecx = 0;
-	native_cpuid(&eax, &ebx, &ecx, &edx);
+	/* Find the equivalence ID of our CPU in this table: */
+	eq_id = find_equiv_id(eq, desc->cpuid_1_eax);
 
-	while (left > 0) {
-		eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);
+	buf  += hdr[2] + CONTAINER_HDR_SZ;
+	size -= hdr[2] + CONTAINER_HDR_SZ;
+
+	/*
+	 * Scan through the rest of the container to find where it ends. We do
+	 * some basic sanity-checking too.
+	 */
+	while (size > 0) {
+		struct microcode_amd *mc;
+		u32 patch_size;
 
-		ret.data = data;
+		hdr = (u32 *)buf;
 
-		/* Advance past the container header */
-		offset = header[2] + CONTAINER_HDR_SZ;
-		data  += offset;
-		left  -= offset;
+		if (hdr[0] != UCODE_UCODE_TYPE)
+			break;
 
-		eq_id = find_equiv_id(eq, eax);
-		if (eq_id) {
-			ret.size = compute_container_size(ret.data, left + offset);
+		/* Sanity-check patch size. */
+		patch_size = hdr[1];
+		if (patch_size > PATCH_MAX_SIZE)
+			break;
 
-			/*
-			 * truncate how much we need to iterate over in the
-			 * ucode update loop below
-			 */
-			left = ret.size - offset;
+		/* Skip patch section header: */
+		buf  += SECTION_HDR_SIZE;
+		size -= SECTION_HDR_SIZE;
 
-			*ret_cont = ret;
-			return eq_id;
+		mc = (struct microcode_amd *)buf;
+		if (eq_id == mc->hdr.processor_rev_id) {
+			desc->psize = patch_size;
+			desc->mc = mc;
 		}
 
-		/*
-		 * support multiple container files appended together. if this
-		 * one does not have a matching equivalent cpu entry, we fast
-		 * forward to the next container file.
-		 */
-		while (left > 0) {
-			header = (u32 *)data;
-
-			if (header[0] == UCODE_MAGIC &&
-			    header[1] == UCODE_EQUIV_CPU_TABLE_TYPE)
-				break;
-
-			offset = header[1] + SECTION_HDR_SIZE;
-			data  += offset;
-			left  -= offset;
-		}
+		buf  += patch_size;
+		size -= patch_size;
+	}
 
-		/* mark where the next microcode container file starts */
-		offset = data - (u8 *)ucode;
-		ucode  = data;
+	/*
+	 * If we have found a patch (desc->mc), it means we're looking at the
+	 * container which has a patch for this CPU so return 0 to mean, @ucode
+	 * already points to the proper container. Otherwise, we return the size
+	 * we scanned so that we can advance to the next container in the
+	 * buffer.
+	 */
+	if (desc->mc) {
+		desc->data = ucode;
+		desc->size = orig_size - size;
+
+		return 0;
 	}
 
-	return eq_id;
+	return orig_size - size;
 }
 
-static int __apply_microcode_amd(struct microcode_amd *mc_amd)
+/*
+ * Scan the ucode blob for the proper container as we can have multiple
+ * containers glued together.
+ */
+static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
+{
+	ssize_t rem = size;
+
+	while (rem >= 0) {
+		ssize_t s = parse_container(ucode, rem, desc);
+		if (!s)
+			return;
+
+		ucode += s;
+		rem   -= s;
+	}
+}
+
+static int __apply_microcode_amd(struct microcode_amd *mc)
 {
 	u32 rev, dummy;
 
-	native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code);
+	native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc->hdr.data_code);
 
 	/* verify patch application was successful */
 	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-	if (rev != mc_amd->hdr.patch_id)
+	if (rev != mc->hdr.patch_id)
 		return -1;
 
 	return 0;
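
As a usage sketch (hypothetical caller; the real one is apply_microcode_early_amd() below), the two new helpers combine like this:

	struct cont_desc desc = { 0 };

	/* The CPU signature the equivalence tables are matched against. */
	desc.cpuid_1_eax = cpuid_eax(0x00000001);

	/* Walk all glued-together containers; stop at the one that matches. */
	scan_containers(ucode, size, &desc);

	if (desc.mc)
		/* desc.data/desc.size now delimit the matching container. */
		__apply_microcode_amd(desc.mc);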
@@ -217,17 +194,16 @@ static int __apply_microcode_amd(struct microcode_amd *mc_amd)
  * load_microcode_amd() to save equivalent cpu table and microcode patches in
  * kernel heap memory.
  *
- * Returns true if container found (sets @ret_cont), false otherwise.
+ * Returns true if container found (sets @desc), false otherwise.
  */
-static bool apply_microcode_early_amd(void *ucode, size_t size, bool save_patch,
-				      struct container *ret_cont)
+static bool
+apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_patch)
 {
+	struct cont_desc desc = { 0 };
 	u8 (*patch)[PATCH_MAX_SIZE];
-	u32 rev, *header, *new_rev;
-	struct container ret;
-	int offset, left;
-	u16 eq_id = 0;
-	u8 *data;
+	struct microcode_amd *mc;
+	u32 rev, dummy, *new_rev;
+	bool ret = false;
 
 #ifdef CONFIG_X86_32
 	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
@@ -237,50 +213,27 @@ static bool apply_microcode_early_amd(void *ucode, size_t size, bool save_patch,
 	patch	= &amd_ucode_patch;
 #endif
 
-	if (check_current_patch_level(&rev, true))
-		return false;
-
-	eq_id = find_proper_container(ucode, size, &ret);
-	if (!eq_id)
-		return false;
-
-	this_equiv_id = eq_id;
-	header = (u32 *)ret.data;
-
-	/* We're pointing to an equiv table, skip over it. */
-	data = ret.data +  header[2] + CONTAINER_HDR_SZ;
-	left = ret.size - (header[2] + CONTAINER_HDR_SZ);
-
-	while (left > 0) {
-		struct microcode_amd *mc;
-
-		header = (u32 *)data;
-		if (header[0] != UCODE_UCODE_TYPE || /* type */
-		    header[1] == 0)                  /* size */
-			break;
+	desc.cpuid_1_eax = cpuid_1_eax;
 
-		mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE);
+	scan_containers(ucode, size, &desc);
 
-		if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) {
+	mc = desc.mc;
+	if (!mc)
+		return ret;
 
-			if (!__apply_microcode_amd(mc)) {
-				rev = mc->hdr.patch_id;
-				*new_rev = rev;
+	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+	if (rev >= mc->hdr.patch_id)
+		return ret;
 
-				if (save_patch)
-					memcpy(patch, mc, min_t(u32, header[1], PATCH_MAX_SIZE));
-			}
-		}
+	if (!__apply_microcode_amd(mc)) {
+		*new_rev = mc->hdr.patch_id;
+		ret      = true;
 
-		offset  = header[1] + SECTION_HDR_SIZE;
-		data   += offset;
-		left   -= offset;
+		if (save_patch)
+			memcpy(patch, mc, min_t(u32, desc.psize, PATCH_MAX_SIZE));
 	}
 
-	if (ret_cont)
-		*ret_cont = ret;
-
-	return true;
+	return ret;
 }
 
 static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
@@ -298,10 +251,9 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
 #endif
 }
 
-void __init load_ucode_amd_bsp(unsigned int family)
+void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret)
 {
 	struct ucode_cpu_info *uci;
-	u32 eax, ebx, ecx, edx;
 	struct cpio_data cp;
 	const char *path;
 	bool use_pa;
@@ -316,184 +268,95 @@
 		use_pa	= false;
 	}
 
-	if (!get_builtin_microcode(&cp, family))
+	if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
 		cp = find_microcode_in_initrd(path, use_pa);
 
-	if (!(cp.data && cp.size))
-		return;
-
-	/* Get BSP's CPUID.EAX(1), needed in load_microcode_amd() */
-	eax = 1;
-	ecx = 0;
-	native_cpuid(&eax, &ebx, &ecx, &edx);
-	uci->cpu_sig.sig = eax;
+	/* Needed in load_microcode_amd() */
+	uci->cpu_sig.sig = cpuid_1_eax;
 
-	apply_microcode_early_amd(cp.data, cp.size, true, NULL);
+	*ret = cp;
 }
 
-#ifdef CONFIG_X86_32
-/*
- * On 32-bit, since AP's early load occurs before paging is turned on, we
- * cannot traverse cpu_equiv_table and microcode_cache in kernel heap memory.
- * So during cold boot, AP will apply_ucode_in_initrd() just like the BSP.
- * In save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
- * which is used upon resume from suspend.
- */
-void load_ucode_amd_ap(unsigned int family)
+void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
 {
-	struct microcode_amd *mc;
-	struct cpio_data cp;
-
-	mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
-	if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
-		__apply_microcode_amd(mc);
-		return;
-	}
-
-	if (!get_builtin_microcode(&cp, family))
-		cp = find_microcode_in_initrd((const char *)__pa_nodebug(ucode_path), true);
+	struct cpio_data cp = { };
+
+	__load_ucode_amd(cpuid_1_eax, &cp);
 	if (!(cp.data && cp.size))
 		return;
 
-	/*
-	 * This would set amd_ucode_patch above so that the following APs can
-	 * use it directly instead of going down this path again.
-	 */
-	apply_microcode_early_amd(cp.data, cp.size, true, NULL);
+	apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true);
 }
-#else
-void load_ucode_amd_ap(unsigned int family)
+
+void load_ucode_amd_ap(unsigned int cpuid_1_eax)
 {
-	struct equiv_cpu_entry *eq;
 	struct microcode_amd *mc;
-	u32 rev, eax;
-	u16 eq_id;
-
-	/* 64-bit runs with paging enabled, thus early==false. */
-	if (check_current_patch_level(&rev, false))
-		return;
-
-	/* First AP hasn't cached it yet, go through the blob. */
-	if (!cont.data) {
-		struct cpio_data cp = { NULL, 0, "" };
+	struct cpio_data cp;
+	u32 *new_rev, rev, dummy;
 
-		if (cont.size == -1)
-			return;
+	if (IS_ENABLED(CONFIG_X86_32)) {
+		mc	= (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
+		new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
+	} else {
+		mc	= (struct microcode_amd *)amd_ucode_patch;
+		new_rev = &ucode_new_rev;
+	}
 
-reget:
-		if (!get_builtin_microcode(&cp, family)) {
-#ifdef CONFIG_BLK_DEV_INITRD
-			if (!initrd_gone)
-				cp = find_cpio_data(ucode_path, (void *)initrd_start,
-						    initrd_end - initrd_start, NULL);
-#endif
-			if (!(cp.data && cp.size)) {
-				/*
-				 * Mark it so that other APs do not scan again
-				 * for no real reason and slow down boot
-				 * needlessly.
-				 */
-				cont.size = -1;
-				return;
-			}
-		}
+	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
 
-		if (!apply_microcode_early_amd(cp.data, cp.size, false, &cont)) {
-			cont.size = -1;
+	/* Check whether we have saved a new patch already: */
+	if (*new_rev && rev < mc->hdr.patch_id) {
+		if (!__apply_microcode_amd(mc)) {
+			*new_rev = mc->hdr.patch_id;
 			return;
 		}
 	}
 
-	eax = cpuid_eax(0x00000001);
-	eq  = (struct equiv_cpu_entry *)(cont.data + CONTAINER_HDR_SZ);
-
-	eq_id = find_equiv_id(eq, eax);
-	if (!eq_id)
+	__load_ucode_amd(cpuid_1_eax, &cp);
+	if (!(cp.data && cp.size))
 		return;
 
-	if (eq_id == this_equiv_id) {
-		mc = (struct microcode_amd *)amd_ucode_patch;
-
-		if (mc && rev < mc->hdr.patch_id) {
-			if (!__apply_microcode_amd(mc))
-				ucode_new_rev = mc->hdr.patch_id;
-		}
-
-	} else {
-
-		/*
-		 * AP has a different equivalence ID than BSP, looks like
-		 * mixed-steppings silicon so go through the ucode blob anew.
-		 */
-		goto reget;
-	}
+	apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false);
 }
-#endif /* CONFIG_X86_32 */
 
 static enum ucode_state
 load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
 
-int __init save_microcode_in_initrd_amd(unsigned int fam)
+int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
 {
+	struct cont_desc desc = { 0 };
 	enum ucode_state ret;
-	int retval = 0;
-	u16 eq_id;
-
-	if (!cont.data) {
-		if (IS_ENABLED(CONFIG_X86_32) && (cont.size != -1)) {
-			struct cpio_data cp = { NULL, 0, "" };
-
-#ifdef CONFIG_BLK_DEV_INITRD
-			cp = find_cpio_data(ucode_path, (void *)initrd_start,
-					    initrd_end - initrd_start, NULL);
-#endif
+	struct cpio_data cp;
 
-			if (!(cp.data && cp.size)) {
-				cont.size = -1;
-				return -EINVAL;
-			}
+	cp = find_microcode_in_initrd(ucode_path, false);
+	if (!(cp.data && cp.size))
+		return -EINVAL;
 
-			eq_id = find_proper_container(cp.data, cp.size, &cont);
-			if (!eq_id) {
-				cont.size = -1;
-				return -EINVAL;
-			}
+	desc.cpuid_1_eax = cpuid_1_eax;
 
-		} else
-			return -EINVAL;
-	}
+	scan_containers(cp.data, cp.size, &desc);
+	if (!desc.mc)
+		return -EINVAL;
 
-	ret = load_microcode_amd(smp_processor_id(), fam, cont.data, cont.size);
+	ret = load_microcode_amd(smp_processor_id(), x86_family(cpuid_1_eax),
+				 desc.data, desc.size);
 	if (ret != UCODE_OK)
-		retval = -EINVAL;
-
-	/*
-	 * This will be freed any msec now, stash patches for the current
-	 * family and switch to patch cache for cpu hotplug, etc later.
-	 */
-	cont.data = NULL;
-	cont.size = 0;
+		return -EINVAL;
 
-	return retval;
+	return 0;
 }
 
 void reload_ucode_amd(void)
 {
 	struct microcode_amd *mc;
-	u32 rev;
-
-	/*
-	 * early==false because this is a syscore ->resume path and by
-	 * that time paging is long enabled.
-	 */
-	if (check_current_patch_level(&rev, false))
-		return;
+	u32 rev, dummy;
 
 	mc = (struct microcode_amd *)amd_ucode_patch;
 	if (!mc)
 		return;
 
+	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+
 	if (rev < mc->hdr.patch_id) {
 		if (!__apply_microcode_amd(mc)) {
 			ucode_new_rev = mc->hdr.patch_id;
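
The resulting early-load flow, summarized as a sketch:

	/*
	 *   BSP: load_ucode_amd_bsp()
	 *          -> __load_ucode_amd()            // builtin or initrd blob
	 *          -> apply_microcode_early_amd(save_patch=true)
	 *               -> scan_containers()        // find the matching patch
	 *               -> __apply_microcode_amd()  // apply it and cache it
	 *                                           // in amd_ucode_patch[]
	 *   AP:  load_ucode_amd_ap()
	 *          -> apply the cached amd_ucode_patch[] if it is newer than
	 *             the current patch level, else rescan the blob and call
	 *             apply_microcode_early_amd(save_patch=false)
	 */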
@@ -631,60 +494,13 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
 	return patch_size;
 }
 
-/*
- * Those patch levels cannot be updated to newer ones and thus should be final.
- */
-static u32 final_levels[] = {
-	0x01000098,
-	0x0100009f,
-	0x010000af,
-	0, /* T-101 terminator */
-};
-
-/*
- * Check the current patch level on this CPU.
- *
- * @rev: Use it to return the patch level. It is set to 0 in the case of
- * error.
- *
- * Returns:
- *  - true: if update should stop
- *  - false: otherwise
- */
-bool check_current_patch_level(u32 *rev, bool early)
-{
-	u32 lvl, dummy, i;
-	bool ret = false;
-	u32 *levels;
-
-	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);
-
-	if (IS_ENABLED(CONFIG_X86_32) && early)
-		levels = (u32 *)__pa_nodebug(&final_levels);
-	else
-		levels = final_levels;
-
-	for (i = 0; levels[i]; i++) {
-		if (lvl == levels[i]) {
-			lvl = 0;
-			ret = true;
-			break;
-		}
-	}
-
-	if (rev)
-		*rev = lvl;
-
-	return ret;
-}
-
 static int apply_microcode_amd(int cpu)
 {
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	struct microcode_amd *mc_amd;
 	struct ucode_cpu_info *uci;
 	struct ucode_patch *p;
-	u32 rev;
+	u32 rev, dummy;
 
 	BUG_ON(raw_smp_processor_id() != cpu);
 
@@ -697,8 +513,7 @@ static int apply_microcode_amd(int cpu)
 	mc_amd  = p->data;
 	uci->mc = p->data;
 
-	if (check_current_patch_level(&rev, false))
-		return -1;
+	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
 
 	/* need to apply patch? */
 	if (rev >= mc_amd->hdr.patch_id) {
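
With check_current_patch_level() gone, querying the current patch level reduces everywhere to a plain read of MSR_AMD64_PATCH_LEVEL; the recurring pattern in this file (with mc standing in for the candidate patch) is:

	u32 rev, dummy;

	/* The MSR holds the currently applied microcode patch level. */
	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	/* Only a strictly newer patch is worth applying. */
	if (rev >= mc->hdr.patch_id)
		return;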