@@ -53,15 +53,13 @@ extern unsigned long get_switch_to_pc(void);
  * Kernel threads can check to see if they need to migrate their
  * stack whenever they return from a context switch; for user
  * threads, we defer until they are returning to user-space.
+ * We defer homecache migration until the runqueue lock is released.
  */
-#define finish_arch_switch(prev) do {                                   \
-	if (unlikely((prev)->state == TASK_DEAD))                       \
-		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |     \
-			((prev)->pid << _SIM_CONTROL_OPERATOR_BITS));   \
+#define finish_arch_post_lock_switch() do {                             \
 	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH |           \
 		(current->pid << _SIM_CONTROL_OPERATOR_BITS));          \
 	if (current->mm == NULL && !kstack_hash &&                      \
-	    current_thread_info()->homecache_cpu != smp_processor_id()) \
+	    current_thread_info()->homecache_cpu != raw_smp_processor_id()) \
 		homecache_migrate_kthread();                            \
 } while (0)
 