feature-fixups.c 19 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654
  1. /*
  2. * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
  3. *
  4. * Modifications for ppc64:
  5. * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
  6. *
  7. * Copyright 2008 Michael Ellerman, IBM Corporation.
  8. *
  9. * This program is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU General Public License
  11. * as published by the Free Software Foundation; either version
  12. * 2 of the License, or (at your option) any later version.
  13. */
  14. #include <linux/types.h>
  15. #include <linux/jump_label.h>
  16. #include <linux/kernel.h>
  17. #include <linux/string.h>
  18. #include <linux/init.h>
  19. #include <linux/sched/mm.h>
  20. #include <asm/cputable.h>
  21. #include <asm/code-patching.h>
  22. #include <asm/page.h>
  23. #include <asm/sections.h>
  24. #include <asm/setup.h>
  25. #include <asm/security_features.h>
  26. #include <asm/firmware.h>
/*
 * One entry in a feature-fixup table.  All offsets are stored relative to
 * the address of the entry itself (see calc_addr()), so the tables are
 * position independent — presumably emitted by the FTR_SECTION assembler
 * macros; confirm against the .S side.
 */
struct fixup_entry {
	unsigned long	mask;		/* feature bits this entry tests */
	unsigned long	value;		/* required value of (features & mask) */
	long		start_off;	/* offset to start of code to patch */
	long		end_off;	/* offset past end of code to patch */
	long		alt_start_off;	/* offset to start of alternative code */
	long		alt_end_off;	/* offset past end of alternative code */
};
  35. static unsigned int *calc_addr(struct fixup_entry *fcur, long offset)
  36. {
  37. /*
  38. * We store the offset to the code as a negative offset from
  39. * the start of the alt_entry, to support the VDSO. This
  40. * routine converts that back into an actual address.
  41. */
  42. return (unsigned int *)((unsigned long)fcur + offset);
  43. }
/*
 * Copy one instruction from the alternative section (src) into the code
 * being patched (dest).  A relative branch whose target lies outside the
 * alternative section must be re-encoded so it still reaches the same
 * target from its new location.  Note target == alt_end (one past the
 * end) counts as "inside" and is left untranslated — the selftests rely
 * on branch-to-end working this way.
 *
 * Returns 0 on success, 1 if a branch could not be translated.
 */
static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
				 unsigned int *alt_start, unsigned int *alt_end)
{
	unsigned int instr;

	instr = *src;

	if (instr_is_relative_branch(*src)) {
		unsigned int *target = (unsigned int *)branch_target(src);

		/* Branch within the section doesn't need translating */
		if (target < alt_start || target > alt_end) {
			/* translate_branch() returns 0 if it cannot encode it */
			instr = translate_branch(dest, src);
			if (!instr)
				return 1;
		}
	}

	raw_patch_instruction(dest, instr);

	return 0;
}
/*
 * Apply one fixup entry against the given feature-bit value.  If the
 * features match (value & mask) == value, the code is left alone.
 * Otherwise the section is overwritten with the alternative instructions
 * and any leftover space is NOP-padded.
 *
 * Returns 0 on success, 1 on failure (alternative larger than the target
 * section, or an untranslatable branch).
 */
static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
{
	unsigned int *start, *end, *alt_start, *alt_end, *src, *dest;

	start = calc_addr(fcur, fcur->start_off);
	end = calc_addr(fcur, fcur->end_off);
	alt_start = calc_addr(fcur, fcur->alt_start_off);
	alt_end = calc_addr(fcur, fcur->alt_end_off);

	/* The alternative must fit in the space it replaces */
	if ((alt_end - alt_start) > (end - start))
		return 1;

	/* Features match: keep the original code */
	if ((value & fcur->mask) == fcur->value)
		return 0;

	src = alt_start;
	dest = start;

	/* Copy (and branch-fix) the alternative instructions in */
	for (; src < alt_end; src++, dest++) {
		if (patch_alt_instruction(src, dest, alt_start, alt_end))
			return 1;
	}

	/* NOP out whatever remains of the original section */
	for (; dest < end; dest++)
		raw_patch_instruction(dest, PPC_INST_NOP);

	return 0;
}
  82. void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
  83. {
  84. struct fixup_entry *fcur, *fend;
  85. fcur = fixup_start;
  86. fend = fixup_end;
  87. for (; fcur < fend; fcur++) {
  88. if (patch_feature_section(value, fcur)) {
  89. WARN_ON(1);
  90. printk("Unable to patch feature section at %p - %p" \
  91. " with %p - %p\n",
  92. calc_addr(fcur, fcur->start_off),
  93. calc_addr(fcur, fcur->end_off),
  94. calc_addr(fcur, fcur->alt_start_off),
  95. calc_addr(fcur, fcur->alt_end_off));
  96. }
  97. }
  98. }
  99. #ifdef CONFIG_PPC_BOOK3S_64
  100. void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
  101. {
  102. unsigned int instrs[3], *dest;
  103. long *start, *end;
  104. int i;
  105. start = PTRRELOC(&__start___stf_entry_barrier_fixup),
  106. end = PTRRELOC(&__stop___stf_entry_barrier_fixup);
  107. instrs[0] = 0x60000000; /* nop */
  108. instrs[1] = 0x60000000; /* nop */
  109. instrs[2] = 0x60000000; /* nop */
  110. i = 0;
  111. if (types & STF_BARRIER_FALLBACK) {
  112. instrs[i++] = 0x7d4802a6; /* mflr r10 */
  113. instrs[i++] = 0x60000000; /* branch patched below */
  114. instrs[i++] = 0x7d4803a6; /* mtlr r10 */
  115. } else if (types & STF_BARRIER_EIEIO) {
  116. instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
  117. } else if (types & STF_BARRIER_SYNC_ORI) {
  118. instrs[i++] = 0x7c0004ac; /* hwsync */
  119. instrs[i++] = 0xe94d0000; /* ld r10,0(r13) */
  120. instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
  121. }
  122. for (i = 0; start < end; start++, i++) {
  123. dest = (void *)start + *start;
  124. pr_devel("patching dest %lx\n", (unsigned long)dest);
  125. patch_instruction(dest, instrs[0]);
  126. if (types & STF_BARRIER_FALLBACK)
  127. patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback,
  128. BRANCH_SET_LINK);
  129. else
  130. patch_instruction(dest + 1, instrs[1]);
  131. patch_instruction(dest + 2, instrs[2]);
  132. }
  133. printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
  134. (types == STF_BARRIER_NONE) ? "no" :
  135. (types == STF_BARRIER_FALLBACK) ? "fallback" :
  136. (types == STF_BARRIER_EIEIO) ? "eieio" :
  137. (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
  138. : "unknown");
  139. }
  140. void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
  141. {
  142. unsigned int instrs[6], *dest;
  143. long *start, *end;
  144. int i;
  145. start = PTRRELOC(&__start___stf_exit_barrier_fixup),
  146. end = PTRRELOC(&__stop___stf_exit_barrier_fixup);
  147. instrs[0] = 0x60000000; /* nop */
  148. instrs[1] = 0x60000000; /* nop */
  149. instrs[2] = 0x60000000; /* nop */
  150. instrs[3] = 0x60000000; /* nop */
  151. instrs[4] = 0x60000000; /* nop */
  152. instrs[5] = 0x60000000; /* nop */
  153. i = 0;
  154. if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
  155. if (cpu_has_feature(CPU_FTR_HVMODE)) {
  156. instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
  157. instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
  158. } else {
  159. instrs[i++] = 0x7db243a6; /* mtsprg 2,r13 */
  160. instrs[i++] = 0x7db142a6; /* mfsprg r13,1 */
  161. }
  162. instrs[i++] = 0x7c0004ac; /* hwsync */
  163. instrs[i++] = 0xe9ad0000; /* ld r13,0(r13) */
  164. instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
  165. if (cpu_has_feature(CPU_FTR_HVMODE)) {
  166. instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
  167. } else {
  168. instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
  169. }
  170. } else if (types & STF_BARRIER_EIEIO) {
  171. instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
  172. }
  173. for (i = 0; start < end; start++, i++) {
  174. dest = (void *)start + *start;
  175. pr_devel("patching dest %lx\n", (unsigned long)dest);
  176. patch_instruction(dest, instrs[0]);
  177. patch_instruction(dest + 1, instrs[1]);
  178. patch_instruction(dest + 2, instrs[2]);
  179. patch_instruction(dest + 3, instrs[3]);
  180. patch_instruction(dest + 4, instrs[4]);
  181. patch_instruction(dest + 5, instrs[5]);
  182. }
  183. printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
  184. (types == STF_BARRIER_NONE) ? "no" :
  185. (types == STF_BARRIER_FALLBACK) ? "fallback" :
  186. (types == STF_BARRIER_EIEIO) ? "eieio" :
  187. (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
  188. : "unknown");
  189. }
/* Apply the chosen stf barrier flavour to both entry and exit sites. */
void do_stf_barrier_fixups(enum stf_barrier_type types)
{
	do_stf_entry_barrier_fixups(types);
	do_stf_exit_barrier_fixups(types);
}
  195. void do_rfi_flush_fixups(enum l1d_flush_type types)
  196. {
  197. unsigned int instrs[3], *dest;
  198. long *start, *end;
  199. int i;
  200. start = PTRRELOC(&__start___rfi_flush_fixup),
  201. end = PTRRELOC(&__stop___rfi_flush_fixup);
  202. instrs[0] = 0x60000000; /* nop */
  203. instrs[1] = 0x60000000; /* nop */
  204. instrs[2] = 0x60000000; /* nop */
  205. if (types & L1D_FLUSH_FALLBACK)
  206. /* b .+16 to fallback flush */
  207. instrs[0] = 0x48000010;
  208. i = 0;
  209. if (types & L1D_FLUSH_ORI) {
  210. instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
  211. instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
  212. }
  213. if (types & L1D_FLUSH_MTTRIG)
  214. instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
  215. for (i = 0; start < end; start++, i++) {
  216. dest = (void *)start + *start;
  217. pr_devel("patching dest %lx\n", (unsigned long)dest);
  218. patch_instruction(dest, instrs[0]);
  219. patch_instruction(dest + 1, instrs[1]);
  220. patch_instruction(dest + 2, instrs[2]);
  221. }
  222. printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i,
  223. (types == L1D_FLUSH_NONE) ? "no" :
  224. (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
  225. (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG)
  226. ? "ori+mttrig type"
  227. : "ori type" :
  228. (types & L1D_FLUSH_MTTRIG) ? "mttrig type"
  229. : "unknown");
  230. }
  231. void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
  232. {
  233. unsigned int instr, *dest;
  234. long *start, *end;
  235. int i;
  236. start = fixup_start;
  237. end = fixup_end;
  238. instr = 0x60000000; /* nop */
  239. if (enable) {
  240. pr_info("barrier-nospec: using ORI speculation barrier\n");
  241. instr = 0x63ff0000; /* ori 31,31,0 speculation barrier */
  242. }
  243. for (i = 0; start < end; start++, i++) {
  244. dest = (void *)start + *start;
  245. pr_devel("patching dest %lx\n", (unsigned long)dest);
  246. patch_instruction(dest, instr);
  247. }
  248. printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
  249. }
  250. void do_barrier_nospec_fixups(bool enable)
  251. {
  252. void *start, *end;
  253. start = PTRRELOC(&__start___barrier_nospec_fixup),
  254. end = PTRRELOC(&__stop___barrier_nospec_fixup);
  255. do_barrier_nospec_fixups_range(enable, start, end);
  256. }
  257. #endif /* CONFIG_PPC_BOOK3S_64 */
  258. void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
  259. {
  260. long *start, *end;
  261. unsigned int *dest;
  262. if (!(value & CPU_FTR_LWSYNC))
  263. return ;
  264. start = fixup_start;
  265. end = fixup_end;
  266. for (; start < end; start++) {
  267. dest = (void *)start + *start;
  268. raw_patch_instruction(dest, PPC_INST_LWSYNC);
  269. }
  270. }
/*
 * For a relocatable 64-bit kernel running away from address zero
 * (PHYSICAL_START != 0), copy the code between _stext and
 * __end_interrupts from the running image (KERNELBASE + PHYSICAL_START)
 * down to KERNELBASE — NOTE(review): presumably so the fixed low-memory
 * exception vectors match the patched kernel; confirm against head_64.S.
 */
static void do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
	int *src, *dest;
	unsigned long length;

	/* Running at the linked address: nothing to copy */
	if (PHYSICAL_START == 0)
		return;

	src = (int *)(KERNELBASE + PHYSICAL_START);
	dest = (int *)KERNELBASE;
	length = (__end_interrupts - _stext) / sizeof(int);

	/* Copy one instruction word at a time via the patching helper */
	while (length--) {
		raw_patch_instruction(dest, *src);
		src++;
		dest++;
	}
#endif
}
/*
 * Feature values snapshotted by apply_feature_fixups(), so that
 * check_features() can detect anyone changing feature bits after the
 * kernel text has already been patched.
 */
static unsigned long __initdata saved_cpu_features;
static unsigned int __initdata saved_mmu_features;
#ifdef CONFIG_PPC64
static unsigned long __initdata saved_firmware_features;
#endif
/*
 * Patch the kernel text for the CPU, MMU and firmware features detected
 * at boot.  Runs early enough that globals may not yet be addressable at
 * their linked address, hence the PTRRELOC() on every global access.
 */
void __init apply_feature_fixups(void)
{
	struct cpu_spec *spec = PTRRELOC(*PTRRELOC(&cur_cpu_spec));

	/* Snapshot the features so check_features() can spot late changes */
	*PTRRELOC(&saved_cpu_features) = spec->cpu_features;
	*PTRRELOC(&saved_mmu_features) = spec->mmu_features;

	/*
	 * Apply the CPU-specific and firmware specific fixups to kernel text
	 * (nop out sections not relevant to this CPU or this firmware).
	 */
	do_feature_fixups(spec->cpu_features,
			  PTRRELOC(&__start___ftr_fixup),
			  PTRRELOC(&__stop___ftr_fixup));

	do_feature_fixups(spec->mmu_features,
			  PTRRELOC(&__start___mmu_ftr_fixup),
			  PTRRELOC(&__stop___mmu_ftr_fixup));

	do_lwsync_fixups(spec->cpu_features,
			 PTRRELOC(&__start___lwsync_fixup),
			 PTRRELOC(&__stop___lwsync_fixup));

#ifdef CONFIG_PPC64
	saved_firmware_features = powerpc_firmware_features;
	do_feature_fixups(powerpc_firmware_features,
			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
#endif

	do_final_fixups();
}
/* Switch the feature checks over to static keys once jump labels work. */
void __init setup_feature_keys(void)
{
	/*
	 * Initialise jump label. This causes all the cpu/mmu_has_feature()
	 * checks to take on their correct polarity based on the current set of
	 * CPU/MMU features.
	 */
	jump_label_init();
	cpu_feature_keys_init();
	mmu_feature_keys_init();
}
/*
 * Late sanity check: warn if any feature bits changed after
 * apply_feature_fixups() ran, since the patched text would then no
 * longer match the advertised features.
 */
static int __init check_features(void)
{
	WARN(saved_cpu_features != cur_cpu_spec->cpu_features,
	     "CPU features changed after feature patching!\n");
	WARN(saved_mmu_features != cur_cpu_spec->mmu_features,
	     "MMU features changed after feature patching!\n");
#ifdef CONFIG_PPC64
	WARN(saved_firmware_features != powerpc_firmware_features,
	     "Firmware features changed after feature patching!\n");
#endif

	return 0;
}
late_initcall(check_features);
  342. #ifdef CONFIG_FTR_FIXUP_SELFTEST
  343. #define check(x) \
  344. if (!(x)) printk("feature-fixups: test failed at line %d\n", __LINE__);
/* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */
/* Scratch fixup entry re-filled by each selftest below (tests run serially) */
static struct fixup_entry fixup;
  347. static long calc_offset(struct fixup_entry *entry, unsigned int *p)
  348. {
  349. return (unsigned long)p - (unsigned long)entry;
  350. }
/*
 * Selftest: a section with no alternative (alt offsets 0) is left alone
 * when the feature value matches, and fully NOPed out when it doesn't.
 */
static void test_basic_patching(void)
{
	/* Asm-defined test fixture: code, pristine copy, expected result */
	extern unsigned int ftr_fixup_test1[];
	extern unsigned int end_ftr_fixup_test1[];
	extern unsigned int ftr_fixup_test1_orig[];
	extern unsigned int ftr_fixup_test1_expected[];
	int size = 4 * (end_ftr_fixup_test1 - ftr_fixup_test1);

	fixup.value = fixup.mask = 8;
	/* Patch only the middle instruction of the fixture */
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test1 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test1 + 2);
	fixup.alt_start_off = fixup.alt_end_off = 0;

	/* Sanity check */
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(8, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test1, ftr_fixup_test1_orig, size);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);
	patch_feature_section(~8, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);
}
/*
 * Selftest: on feature mismatch the section is replaced with a
 * one-instruction alternative taken from a separate fixture.
 */
static void test_alternative_patching(void)
{
	/* Asm-defined fixture: code, pristine copy, alternative, expected */
	extern unsigned int ftr_fixup_test2[];
	extern unsigned int end_ftr_fixup_test2[];
	extern unsigned int ftr_fixup_test2_orig[];
	extern unsigned int ftr_fixup_test2_alt[];
	extern unsigned int ftr_fixup_test2_expected[];
	int size = 4 * (end_ftr_fixup_test2 - ftr_fixup_test2);

	fixup.value = fixup.mask = 0xF;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test2 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test2 + 2);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test2_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test2_alt + 1);

	/* Sanity check */
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(0xF, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test2, ftr_fixup_test2_orig, size);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);
	patch_feature_section(~0xF, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);
}
/*
 * Selftest: an alternative larger than the section it would replace must
 * be rejected (return 1) without touching the code, regardless of
 * whether the feature value matches.
 */
static void test_alternative_case_too_big(void)
{
	/* Fixture: 1-instruction section, 2-instruction alternative */
	extern unsigned int ftr_fixup_test3[];
	extern unsigned int end_ftr_fixup_test3[];
	extern unsigned int ftr_fixup_test3_orig[];
	extern unsigned int ftr_fixup_test3_alt[];
	int size = 4 * (end_ftr_fixup_test3 - ftr_fixup_test3);

	fixup.value = fixup.mask = 0xC;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test3 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test3 + 2);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test3_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test3_alt + 2);

	/* Sanity check */
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);

	/* Expect nothing to be patched, and the error returned to us */
	check(patch_feature_section(0xF, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(0, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(~0xF, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
}
/*
 * Selftest: an alternative smaller than the section works — the
 * remainder of the section must be NOP-padded.  Also exercises a
 * feature flag in the top byte of unsigned long to catch truncation.
 */
static void test_alternative_case_too_small(void)
{
	/* Fixture: 4-instruction section, 2-instruction alternative */
	extern unsigned int ftr_fixup_test4[];
	extern unsigned int end_ftr_fixup_test4[];
	extern unsigned int ftr_fixup_test4_orig[];
	extern unsigned int ftr_fixup_test4_alt[];
	extern unsigned int ftr_fixup_test4_expected[];
	int size = 4 * (end_ftr_fixup_test4 - ftr_fixup_test4);
	unsigned long flag;

	/* Check a high-bit flag */
	flag = 1UL << ((sizeof(unsigned long) - 1) * 8);
	fixup.value = fixup.mask = flag;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test4 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test4 + 5);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test4_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test4_alt + 2);

	/* Sanity check */
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(flag, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test4, ftr_fixup_test4_orig, size);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);
	patch_feature_section(~flag, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);
}
/*
 * Selftest: alternatives containing relative branches were already
 * applied at boot; verify the patched text matches the expected image.
 */
static void test_alternative_case_with_branch(void)
{
	extern unsigned int ftr_fixup_test5[];
	extern unsigned int end_ftr_fixup_test5[];
	extern unsigned int ftr_fixup_test5_expected[];
	int size = 4 * (end_ftr_fixup_test5 - ftr_fixup_test5);

	check(memcmp(ftr_fixup_test5, ftr_fixup_test5_expected, size) == 0);
}
/*
 * Selftest: as above, but the alternative branches outside its own
 * section, so the branch must have been re-encoded (translated) when the
 * boot-time fixup copied it into place.
 */
static void test_alternative_case_with_external_branch(void)
{
	extern unsigned int ftr_fixup_test6[];
	extern unsigned int end_ftr_fixup_test6[];
	extern unsigned int ftr_fixup_test6_expected[];
	int size = 4 * (end_ftr_fixup_test6 - ftr_fixup_test6);

	check(memcmp(ftr_fixup_test6, ftr_fixup_test6_expected, size) == 0);
}
/*
 * Selftest: a branch to exactly the end of the alternative section
 * counts as internal (see patch_alt_instruction()) and must be copied
 * without translation.
 */
static void test_alternative_case_with_branch_to_end(void)
{
	extern unsigned int ftr_fixup_test7[];
	extern unsigned int end_ftr_fixup_test7[];
	extern unsigned int ftr_fixup_test7_expected[];
	int size = 4 * (end_ftr_fixup_test7 - ftr_fixup_test7);

	check(memcmp(ftr_fixup_test7, ftr_fixup_test7_expected, size) == 0);
}
/*
 * Selftest: verify the CPU FTR_SECTION assembler macros produced the
 * expected post-fixup text (the fixups ran at boot).
 */
static void test_cpu_macros(void)
{
	extern u8 ftr_fixup_test_FTR_macros[];
	extern u8 ftr_fixup_test_FTR_macros_expected[];
	unsigned long size = ftr_fixup_test_FTR_macros_expected -
			     ftr_fixup_test_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(ftr_fixup_test_FTR_macros,
		     ftr_fixup_test_FTR_macros_expected, size) == 0);
}
/*
 * Selftest: same as test_cpu_macros() but for the firmware-feature
 * macros, which only exist on 64-bit.
 */
static void test_fw_macros(void)
{
#ifdef CONFIG_PPC64
	extern u8 ftr_fixup_test_FW_FTR_macros[];
	extern u8 ftr_fixup_test_FW_FTR_macros_expected[];
	unsigned long size = ftr_fixup_test_FW_FTR_macros_expected -
			     ftr_fixup_test_FW_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(ftr_fixup_test_FW_FTR_macros,
		     ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
#endif
}
/*
 * Selftest: verify do_lwsync_fixups() left either the lwsync or the
 * plain sync form in place, depending on CPU_FTR_LWSYNC.
 */
static void test_lwsync_macros(void)
{
	extern u8 lwsync_fixup_test[];
	extern u8 end_lwsync_fixup_test[];
	extern u8 lwsync_fixup_test_expected_LWSYNC[];
	extern u8 lwsync_fixup_test_expected_SYNC[];
	unsigned long size = end_lwsync_fixup_test -
			     lwsync_fixup_test;

	/* The fixups have already been done for us during boot */
	if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
		check(memcmp(lwsync_fixup_test,
			     lwsync_fixup_test_expected_LWSYNC, size) == 0);
	} else {
		check(memcmp(lwsync_fixup_test,
			     lwsync_fixup_test_expected_SYNC, size) == 0);
	}
}
/* Run all feature-fixup selftests; failures are printk'd by check(). */
static int __init test_feature_fixups(void)
{
	printk(KERN_DEBUG "Running feature fixup self-tests ...\n");

	test_basic_patching();
	test_alternative_patching();
	test_alternative_case_too_big();
	test_alternative_case_too_small();
	test_alternative_case_with_branch();
	test_alternative_case_with_external_branch();
	test_alternative_case_with_branch_to_end();
	test_cpu_macros();
	test_fw_macros();
	test_lwsync_macros();

	return 0;
}
late_initcall(test_feature_fixups);
  534. #endif /* CONFIG_FTR_FIXUP_SELFTEST */