@@ -2,6 +2,7 @@
 #include <linux/export.h>
 #include <linux/preempt.h>
 #include <linux/smp.h>
+#include <linux/completion.h>
 #include <asm/msr.h>

 static void __rdmsr_on_cpu(void *info)
@@ -143,13 +144,19 @@ void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
 }
 EXPORT_SYMBOL(wrmsr_on_cpus);

+struct msr_info_completion {
+	struct msr_info		msr;
+	struct completion	done;
+};
+
 /* These "safe" variants are slower and should be used when the target MSR
    may not actually exist. */
 static void __rdmsr_safe_on_cpu(void *info)
 {
-	struct msr_info *rv = info;
+	struct msr_info_completion *rv = info;

-	rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
+	rv->msr.err = rdmsr_safe(rv->msr.msr_no, &rv->msr.reg.l, &rv->msr.reg.h);
+	complete(&rv->done);
 }

 static void __wrmsr_safe_on_cpu(void *info)
@@ -161,17 +168,26 @@ static void __wrmsr_safe_on_cpu(void *info)

 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 {
+	struct msr_info_completion rv;
+	call_single_data_t csd = {
+		.func	= __rdmsr_safe_on_cpu,
+		.info	= &rv,
+	};
 	int err;
-	struct msr_info rv;

 	memset(&rv, 0, sizeof(rv));
+	init_completion(&rv.done);
+	rv.msr.msr_no = msr_no;

-	rv.msr_no = msr_no;
-	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
-	*l = rv.reg.l;
-	*h = rv.reg.h;
+	err = smp_call_function_single_async(cpu, &csd);
+	if (!err) {
+		wait_for_completion(&rv.done);
+		err = rv.msr.err;
+	}
+	*l = rv.msr.reg.l;
+	*h = rv.msr.reg.h;

-	return err ? err : rv.err;
+	return err;
 }
 EXPORT_SYMBOL(rdmsr_safe_on_cpu);
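
Illustration (not part of the patch): a minimal sketch of how a caller might use
rdmsr_safe_on_cpu() after this change; MSR_IA32_TSC and CPU 1 are arbitrary
example choices, not values taken from the patch.

	u32 lo, hi;
	int err;

	/* Read an MSR on a remote CPU, tolerating the case where the read faults. */
	err = rdmsr_safe_on_cpu(1, MSR_IA32_TSC, &lo, &hi);
	if (err)
		pr_warn("rdmsr_safe_on_cpu() failed: %d\n", err);
	else
		pr_info("MSR value: 0x%08x%08x\n", hi, lo);

With the completion-based path above, the calling thread sleeps in
wait_for_completion() while the remote CPU services the request, instead of
busy-waiting inside a synchronous smp_call_function_single().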