@@ -517,7 +517,29 @@ static int check_online_cpus(void)
 	return -EINVAL;
 }
 
-static atomic_t late_cpus;
+static atomic_t late_cpus_in;
+static atomic_t late_cpus_out;
+
+static int __wait_for_cpus(atomic_t *t, long long timeout)
+{
+	int all_cpus = num_online_cpus();
+
+	atomic_inc(t);
+
+	while (atomic_read(t) < all_cpus) {
+		if (timeout < SPINUNIT) {
+			pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
+				all_cpus - atomic_read(t));
+			return 1;
+		}
+
+		ndelay(SPINUNIT);
+		timeout -= SPINUNIT;
+
+		touch_nmi_watchdog();
+	}
+	return 0;
+}
 
 /*
  * Returns:
@@ -527,30 +549,16 @@ static atomic_t late_cpus;
  */
 static int __reload_late(void *info)
 {
-	unsigned int timeout = NSEC_PER_SEC;
-	int all_cpus = num_online_cpus();
 	int cpu = smp_processor_id();
 	enum ucode_state err;
 	int ret = 0;
 
-	atomic_dec(&late_cpus);
-
 	/*
 	 * Wait for all CPUs to arrive. A load will not be attempted unless all
 	 * CPUs show up.
 	 * */
-	while (atomic_read(&late_cpus)) {
-		if (timeout < SPINUNIT) {
-			pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
-				atomic_read(&late_cpus));
-			return -1;
-		}
-
-		ndelay(SPINUNIT);
-		timeout -= SPINUNIT;
-
-		touch_nmi_watchdog();
-	}
+	if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
+		return -1;
 
 	spin_lock(&update_lock);
 	apply_microcode_local(&err);
@@ -558,15 +566,22 @@ static int __reload_late(void *info)
 
 	if (err > UCODE_NFOUND) {
 		pr_warn("Error reloading microcode on CPU %d\n", cpu);
-		ret = -1;
-	} else if (err == UCODE_UPDATED) {
+		return -1;
+	/* siblings return UCODE_OK because their engine got updated already */
+	} else if (err == UCODE_UPDATED || err == UCODE_OK) {
 		ret = 1;
+	} else {
+		return ret;
 	}
 
-	atomic_inc(&late_cpus);
-
-	while (atomic_read(&late_cpus) != all_cpus)
-		cpu_relax();
+	/*
+	 * Increase the wait timeout to a safe value here since we're
+	 * serializing the microcode update and that could take a while on a
+	 * large number of CPUs. And that is fine as the *actual* timeout will
+	 * be determined by the last CPU finished updating and thus cut short.
+	 */
+	if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC * num_online_cpus()))
+		panic("Timeout during microcode update!\n");
 
 	return ret;
 }
@@ -579,12 +594,11 @@ static int microcode_reload_late(void)
 {
 	int ret;
 
-	atomic_set(&late_cpus, num_online_cpus());
+	atomic_set(&late_cpus_in, 0);
+	atomic_set(&late_cpus_out, 0);
 
 	ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
-	if (ret < 0)
-		return ret;
-	else if (ret > 0)
+	if (ret > 0)
 		microcode_check();
 
 	return ret;
@@ -607,7 +621,7 @@ static ssize_t reload_store(struct device *dev,
 		return size;
 
 	tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
-	if (tmp_ret != UCODE_OK)
+	if (tmp_ret != UCODE_NEW)
 		return size;
 
 	get_online_cpus();
@@ -691,10 +705,8 @@ static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
 	if (system_state != SYSTEM_RUNNING)
 		return UCODE_NFOUND;
 
-	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
-						     refresh_fw);
-
-	if (ustate == UCODE_OK) {
+	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, refresh_fw);
+	if (ustate == UCODE_NEW) {
 		pr_debug("CPU%d updated upon init\n", cpu);
 		apply_microcode_on_target(cpu);
 	}