@@ -21,6 +21,10 @@ DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
 DEF_NATIVE(, mov32, "mov %edi, %eax");
 DEF_NATIVE(, mov64, "mov %rdi, %rax");
 
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCK)
+DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
+#endif
+
 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
 {
 	return paravirt_patch_insns(insnbuf, len,
@@ -33,6 +37,8 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
 			    start__mov64, end__mov64);
 }
 
+extern bool pv_is_native_spin_unlock(void);
+
 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		      unsigned long addr, unsigned len)
 {
@@ -59,14 +65,22 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		PATCH_SITE(pv_cpu_ops, clts);
 		PATCH_SITE(pv_mmu_ops, flush_tlb_single);
 		PATCH_SITE(pv_cpu_ops, wbinvd);
-
-	patch_site:
-		ret = paravirt_patch_insns(ibuf, len, start, end);
-		break;
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCK)
+		case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
+			if (pv_is_native_spin_unlock()) {
+				start = start_pv_lock_ops_queued_spin_unlock;
+				end   = end_pv_lock_ops_queued_spin_unlock;
+				goto patch_site;
+			}
+#endif
 
 	default:
 		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
 		break;
+
+patch_site:
+		ret = paravirt_patch_insns(ibuf, len, start, end);
+		break;
 	}
 #undef PATCH_SITE
 	return ret;
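
For reference, DEF_NATIVE comes from arch/x86/include/asm/paravirt_types.h: it declares start_*/end_* marker symbols and emits the given native instruction sequence between them, so native_patch() can copy those bytes over the indirect paravirt call site. A rough sketch of the macro as it looked in kernels of this era (paraphrased, not part of this hunk):

	#define DEF_NATIVE(ops, name, code)					\
		extern const char start_##ops##_##name[], end_##ops##_##name[]; \
		asm(NATIVE_LABEL("start_", ops, name) code			\
		    NATIVE_LABEL("end_", ops, name))

The new DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)") therefore captures the native unlock body, a single byte store of zero to the lock word pointed to by %rdi, as the patch template. Note also why patch_site: moves below default:: when pv_is_native_spin_unlock() returns false (i.e. a hypervisor has overridden queued_spin_unlock), the new case falls through into the default path and is handled by paravirt_patch_default() rather than being patched with the native store.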
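
pv_is_native_spin_unlock() is only declared here; it is defined elsewhere in the same series, in arch/x86/kernel/paravirt-spinlocks.c, where it checks whether the pv_lock_ops slot still points at the native callee-save unlock thunk. A sketch of its shape, under the assumption that it matches the upstream implementation:

	/* Native unlock wrapped as a callee-save paravirt function. */
	__visible void __native_queued_spin_unlock(struct qspinlock *lock)
	{
		native_queued_spin_unlock(lock);
	}
	PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);

	/* True if no hypervisor has replaced the unlock op. */
	bool pv_is_native_spin_unlock(void)
	{
		return pv_lock_ops.queued_spin_unlock.func ==
			__raw_callee_save___native_queued_spin_unlock;
	}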