@@ -1,3 +1,4 @@
+#include <linux/percpu.h>
 #include <linux/slab.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu_ops.h>
@@ -89,6 +90,13 @@ int cpu_suspend(unsigned long arg)
 	if (ret == 0) {
 		cpu_switch_mm(mm->pgd, mm);
 		flush_tlb_all();
+
+		/*
+		 * Restore per-cpu offset before any kernel
+		 * subsystem relying on it has a chance to run.
+		 */
+		set_my_cpu_offset(per_cpu_offset(cpu));
+
 		/*
 		 * Restore HW breakpoint registers to sane values
 		 * before debug exceptions are possibly reenabled