alternative.c
#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)
static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif
#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG fmt, ##args);				\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)
/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
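/*
 * Illustration (sketch, not part of the original source): indexing one
 * of the tables above by the desired length yields a pointer to a NOP
 * of exactly that length, e.g.
 *
 *	const unsigned char *nop3 = intel_nops[3];	// 3-byte NOP
 *
 * where intel_nops[3] == intelnops + 1 + 2, i.e. it points just past
 * the 1- and 2-byte NOPs that precede it in the flat byte array.
 */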
#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif
void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;

	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 > 0xf) {
			ideal_nops = p6_nops;
			return;
		}

		/* fall through */

	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}
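/*
 * Example encodings (quoted from memory; verify against <asm/nops.h>):
 * the 5-byte "P6" NOP is a single instruction,
 *
 *	0f 1f 44 00 00		nopl 0x0(%rax,%rax,1)
 *
 * while the K8 flavour builds 5 bytes from operand-size-prefixed short
 * NOPs, e.g. 66 66 90 66 90. Both are one "slot" to the decoder on the
 * CPUs they are chosen for, which is the point of the selection above.
 */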
/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
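/*
 * Illustration (sketch, not part of the original source): padding a
 * hole larger than ASM_NOP_MAX is done in chunks, longest NOP first.
 * With ASM_NOP_MAX == 8, an 11-byte hole becomes an 8-byte NOP
 * followed by a 3-byte NOP:
 *
 *	u8 buf[11];
 *	add_nops(buf, sizeof(buf));	// 8-byte NOP + 3-byte NOP
 */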
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void *text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Are we looking at a near JMP with a 1- or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}

static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insnbuf + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip  = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;

	insnbuf[0] = 0xeb;
	insnbuf[1] = (s8)n_dspl;

	add_nops(insnbuf + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insnbuf[0] = 0xe9;
	*(s32 *)&insnbuf[1] = n_dspl;

	repl_len = 5;

done:
	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}
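/*
 * Illustration (sketch): the displacement in the replacement JMP was
 * computed relative to the replacement's own location in the
 * alternatives section. Once the JMP is copied to the patch site, the
 * same target must be reached from orig_insn instead, hence
 * n_dspl = tgt_rip - orig_insn. If that (minus the 2-byte insn size)
 * fits in a signed byte, the 5-byte "e9 rel32" is shrunk to the
 * 2-byte "eb rel8" and the remaining 3 bytes are NOP-padded.
 */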
static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
{
	unsigned long flags;

	if (instr[0] != 0x90)
		return;

	local_irq_save(flags);
	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
	sync_core();
	local_irq_restore(flags);

	DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
		   instr, a->instrlen - a->padlen, a->padlen);
}
/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 */
void __init_or_module apply_alternatives(struct alt_instr *start,
					 struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("alt table %p -> %p", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insnbuf_sz = 0;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
		if (!boot_cpu_has(a->cpuid)) {
			if (a->padlen > 1)
				optimize_nops(a, instr);

			continue;
		}

		DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, a->instrlen,
			replacement, a->replacementlen, a->padlen);

		DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);

		memcpy(insnbuf, replacement, a->replacementlen);
		insnbuf_sz = a->replacementlen;

		/* 0xe8 is a relative CALL; fix the offset. */
		if (*insnbuf == 0xe8 && a->replacementlen == 5) {
			*(s32 *)(insnbuf + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insnbuf + 1),
				(unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insnbuf);

		if (a->instrlen > a->replacementlen) {
			add_nops(insnbuf + a->replacementlen,
				 a->instrlen - a->replacementlen);
			insnbuf_sz += a->instrlen - a->replacementlen;
		}
		DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);

		text_poke_early(instr, insnbuf, insnbuf_sz);
	}
}
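/*
 * Illustration (sketch, not part of this file): the entries walked
 * above are typically emitted by the ALTERNATIVE() asm macro from
 * <asm/alternative.h>, which places the original instruction in .text
 * and a struct alt_instr (relative instr_offset/repl_offset, feature
 * bit, lengths) in .altinstructions, roughly:
 *
 *	asm volatile (ALTERNATIVE("call old_func", "call new_func",
 *				  X86_FEATURE_XYZ));
 *
 * (X86_FEATURE_XYZ and the functions are made-up names.)
 */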
#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
	mutex_unlock(&text_mutex);
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
	mutex_unlock(&text_mutex);
}
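/*
 * Illustration (sketch): the byte being flipped comes from the
 * LOCK_PREFIX macro, which emits the prefix and records its address in
 * the .smp_locks section so it can be found here. The same atomic op
 * then reads, e.g.:
 *
 *	f0 ...		lock; addl ...	(SMP: real bus lock)
 *	3e ...		ds;   addl ...	(UP: harmless override)
 */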
struct smp_alt_module {
	/* the module owning these LOCK prefixes (NULL for the core kernel) */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static bool uniproc_patched = false;	/* protected by smp_alt */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&smp_alt);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (!smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&smp_alt);
}
void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&smp_alt);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&smp_alt);
}

void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&smp_alt);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&smp_alt);
}

/* Return 1 if the address range is reserved for SMP alternatives. */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */
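/*
 * Illustration (sketch of the pv_init_ops.patch contract as used
 * above; the exact signature lives in <asm/paravirt_types.h>): the
 * callback rewrites up to p->len bytes in insnbuf and returns how many
 * bytes it produced; anything shorter gets NOP-padded by the caller.
 * A trivial "leave the native code alone" patcher would be:
 *
 *	unsigned example_patch(u8 type, u16 clobbers, void *insnbuf,
 *			       unsigned long addr, unsigned len)
 *	{
 *		return len;	// keep the original instructions
 *	}
 */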
void __init alternative_instructions(void)
{
	/*
	 * The patching is not fully atomic, so try to avoid local
	 * interruptions that might execute the to-be-patched code.
	 * Other CPUs are not running.
	 */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems due to code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other cpus not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	restart_nmi();
	alternatives_patched = 1;
}
/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI
 * or MCE handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
				       size_t len)
{
	unsigned long flags;

	local_irq_save(flags);
	memcpy(addr, opcode, len);
	sync_core();
	local_irq_restore(flags);

	/*
	 * Could also do a CLFLUSH here to speed up CPU recovery; but
	 * that causes hangs on some VIA CPUs.
	 */
	return addr;
}
/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be
 * aligned in a way that permits an atomic write. It also makes sure we fit
 * on a single page.
 *
 * Note: Must be called under text_mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/*
	 * Could also do a CLFLUSH here to speed up CPU recovery; but
	 * that causes hangs on some VIA CPUs.
	 */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}
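/*
 * Illustration (sketch): two fixmap slots are needed because a poke
 * may straddle a page boundary. E.g. a 5-byte write starting 2 bytes
 * before the end of a page covers 2 bytes in pages[0] and 3 bytes in
 * pages[1]; since FIX_TEXT_POKE1 maps directly after FIX_TEXT_POKE0,
 * the single memcpy above crosses the boundary transparently.
 */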
static void do_sync_core(void *info)
{
	sync_core();
}

static bool bp_patching_in_progress;
static void *bp_int3_handler, *bp_int3_addr;

int poke_int3_handler(struct pt_regs *regs)
{
	/* bp_patching_in_progress */
	smp_rmb();

	if (likely(!bp_patching_in_progress))
		return 0;

	if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
		return 0;

	/* set up the specified breakpoint handler */
	regs->ip = (unsigned long) bp_int3_handler;

	return 1;
}
/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr:	address to patch
 * @opcode:	opcode of new instruction
 * @len:	length to copy
 * @handler:	address to jump to when the temporary breakpoint is hit
 *
 * Modify multi-byte instruction by using int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using int3 breakpoint.
 *
 * The way it is done:
 *	- add an int3 trap to the address that will be patched
 *	- sync cores
 *	- update all but the first byte of the patched range
 *	- sync cores
 *	- replace the first byte (int3) by the first byte of
 *	  replacing opcode
 *	- sync cores
 *
 * Note: must be called under text_mutex.
 */
void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
	unsigned char int3 = 0xcc;

	bp_int3_handler = handler;
	bp_int3_addr = (u8 *)addr + sizeof(int3);
	bp_patching_in_progress = true;
	/*
	 * Corresponding read barrier in int3 notifier for making sure the
	 * in_progress flag is correctly ordered wrt. patching.
	 */
	smp_wmb();

	text_poke(addr, &int3, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	if (len - sizeof(int3) > 0) {
		/* patch all but the first byte */
		text_poke((char *)addr + sizeof(int3),
			  (const char *) opcode + sizeof(int3),
			  len - sizeof(int3));
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		on_each_cpu(do_sync_core, NULL, 1);
	}

	/* patch the first byte */
	text_poke(addr, opcode, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	bp_patching_in_progress = false;
	smp_wmb();

	return addr;
}
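/*
 * Usage sketch (hypothetical call site, not part of this file):
 * replacing a 5-byte NOP with a CALL while other CPUs may be executing
 * through it. @handler is where a CPU that hits the transient int3
 * resumes; for a site with no side effects, simply the next
 * instruction:
 *
 *	u8 call_insn[5] = { 0xe8, 0, 0, 0, 0 };	// rel32 filled in by caller
 *
 *	mutex_lock(&text_mutex);
 *	text_poke_bp(site, call_insn, sizeof(call_insn), site + 5);
 *	mutex_unlock(&text_mutex);
 */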