mem_encrypt.c

/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define DISABLE_BRANCH_PROFILING

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/mem_encrypt.h>

#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/bootparam.h>
#include <asm/set_memory.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/cmdline.h>

static char sme_cmdline_arg[] __initdata = "mem_encrypt";
static char sme_cmdline_on[]  __initdata = "on";
static char sme_cmdline_off[] __initdata = "off";

/*
 * Since SME related variables are set early in the boot process they must
 * reside in the .data section so as not to be zeroed out when the .bss
 * section is later cleared.
 */
u64 sme_me_mask __section(.data) = 0;
EXPORT_SYMBOL_GPL(sme_me_mask);
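
/*
 * Illustrative sketch (not part of the original file): sme_me_mask carries
 * the encryption bit ("C-bit") that is OR'd into pagetable entries. The
 * helpers in <linux/mem_encrypt.h> are assumed to reduce to roughly
 *
 *   __sme_set(x)   ((x) | sme_me_mask)
 *   __sme_clr(x)   ((x) & ~sme_me_mask)
 *
 * so, for example, a mask of 1UL << 47 (a typical C-bit position reported
 * by CPUID on first-generation EPYC parts) marks a 2MB PMD mapping as
 * encrypted when set in its flags and as decrypted when cleared.
 */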

/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);

/*
 * This routine does not change the underlying encryption setting of the
 * page(s) that map this memory. It assumes that eventually the memory is
 * meant to be accessed as either encrypted or decrypted but the contents
 * are currently not in the desired state.
 *
 * This routine follows the steps outlined in the AMD64 Architecture
 * Programmer's Manual Volume 2, Section 7.10.8 Encrypt-in-Place.
 */
static void __init __sme_early_enc_dec(resource_size_t paddr,
                                       unsigned long size, bool enc)
{
        void *src, *dst;
        size_t len;

        if (!sme_me_mask)
                return;

        local_flush_tlb();
        wbinvd();

        /*
         * There are a limited number of early mapping slots, so map (at most)
         * one page at a time.
         */
        while (size) {
                len = min_t(size_t, sizeof(sme_early_buffer), size);

                /*
                 * Create mappings for the current and desired format of
                 * the memory. Use a write-protected mapping for the source.
                 */
                src = enc ? early_memremap_decrypted_wp(paddr, len) :
                            early_memremap_encrypted_wp(paddr, len);

                dst = enc ? early_memremap_encrypted(paddr, len) :
                            early_memremap_decrypted(paddr, len);

                /*
                 * If a mapping can't be obtained to perform the operation,
                 * then eventual access of that area in the desired mode
                 * will cause a crash.
                 */
                BUG_ON(!src || !dst);

                /*
                 * Use a temporary buffer, of cache-line multiple size, to
                 * avoid data corruption as documented in the APM.
                 */
                memcpy(sme_early_buffer, src, len);
                memcpy(dst, sme_early_buffer, len);

                early_memunmap(dst, len);
                early_memunmap(src, len);

                paddr += len;
                size -= len;
        }
}

void __init sme_early_encrypt(resource_size_t paddr, unsigned long size)
{
        __sme_early_enc_dec(paddr, size, true);
}

void __init sme_early_decrypt(resource_size_t paddr, unsigned long size)
{
        __sme_early_enc_dec(paddr, size, false);
}
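
/*
 * Behavior sketch of __sme_early_enc_dec() (derived from the loop above):
 * encrypting a three-page region runs as three independent iterations, each
 * of which maps one page twice (write-protected in its current format,
 * writable in its desired format), bounces the contents through the
 * page-sized sme_early_buffer, and unmaps both temporary mappings before
 * advancing to the next page.
 */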

static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
                                             bool map)
{
        unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
        pmdval_t pmd_flags, pmd;

        /* Use early_pmd_flags but remove the encryption mask */
        pmd_flags = __sme_clr(early_pmd_flags);

        do {
                pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
                __early_make_pgtable((unsigned long)vaddr, pmd);

                vaddr += PMD_SIZE;
                paddr += PMD_SIZE;
                size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
        } while (size);

        __native_flush_tlb();
}

void __init sme_unmap_bootdata(char *real_mode_data)
{
        struct boot_params *boot_data;
        unsigned long cmdline_paddr;

        if (!sme_active())
                return;

        /* Get the command line address before unmapping the real_mode_data */
        boot_data = (struct boot_params *)real_mode_data;
        cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

        __sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), false);

        if (!cmdline_paddr)
                return;

        __sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, false);
}

void __init sme_map_bootdata(char *real_mode_data)
{
        struct boot_params *boot_data;
        unsigned long cmdline_paddr;

        if (!sme_active())
                return;

        __sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);

        /* Get the command line address after mapping the real_mode_data */
        boot_data = (struct boot_params *)real_mode_data;
        cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

        if (!cmdline_paddr)
                return;

        __sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
}
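
/*
 * Worked example (illustrative): the boot command line address is split
 * across two boot_params fields, so with hdr.cmd_line_ptr == 0x0009d000 and
 * ext_cmd_line_ptr == 0x1 the functions above reconstruct
 *
 *   cmdline_paddr = 0x0009d000 | ((u64)0x1 << 32) = 0x10009d000
 *
 * and then map or unmap COMMAND_LINE_SIZE bytes at that physical address.
 */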

void __init sme_early_init(void)
{
        unsigned int i;

        if (!sme_me_mask)
                return;

        early_pmd_flags = __sme_set(early_pmd_flags);

        __supported_pte_mask = __sme_set(__supported_pte_mask);

        /* Update the protection map with memory encryption mask */
        for (i = 0; i < ARRAY_SIZE(protection_map); i++)
                protection_map[i] = pgprot_encrypted(protection_map[i]);
}
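
/*
 * Illustrative note (assumes the usual definition of pgprot_encrypted() as
 * OR-ing sme_me_mask into a pgprot value): after the loop above every entry
 * of protection_map carries the C-bit, so user mappings created through
 * vm_get_page_prot() default to encrypted memory.
 */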

/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void)
{
        if (!sme_me_mask)
                return;

        /* Call into SWIOTLB to update the SWIOTLB DMA buffers */
        swiotlb_update_mem_attributes();

        pr_info("AMD Secure Memory Encryption (SME) active\n");
}

void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
{
        WARN(PAGE_ALIGN(size) != size,
             "size is not page-aligned (%#lx)\n", size);

        /* Make the SWIOTLB buffer area decrypted */
        set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT);
}
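
/*
 * Background note (hedged, not from this file): with SME active, DMA to and
 * from devices that cannot address encrypted memory is bounced through
 * SWIOTLB, so the SWIOTLB buffer itself must be shared with devices in the
 * clear. set_memory_decrypted() is expected to clear the C-bit from the
 * kernel mapping of that range and flush caches/TLBs accordingly.
 */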

static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start,
                                 unsigned long end)
{
        unsigned long pgd_start, pgd_end, pgd_size;
        pgd_t *pgd_p;

        pgd_start = start & PGDIR_MASK;
        pgd_end = end & PGDIR_MASK;

        pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1);
        pgd_size *= sizeof(pgd_t);

        pgd_p = pgd_base + pgd_index(start);

        memset(pgd_p, 0, pgd_size);
}

#define PGD_FLAGS       _KERNPG_TABLE_NOENC
#define P4D_FLAGS       _KERNPG_TABLE_NOENC
#define PUD_FLAGS       _KERNPG_TABLE_NOENC
#define PMD_FLAGS       (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)

static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
                                     unsigned long vaddr, pmdval_t pmd_val)
{
        pgd_t *pgd_p;
        p4d_t *p4d_p;
        pud_t *pud_p;
        pmd_t *pmd_p;

        pgd_p = pgd_base + pgd_index(vaddr);
        if (native_pgd_val(*pgd_p)) {
                if (IS_ENABLED(CONFIG_X86_5LEVEL))
                        p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK);
                else
                        pud_p = (pud_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK);
        } else {
                pgd_t pgd;

                if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
                        p4d_p = pgtable_area;
                        memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
                        pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D;

                        pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS);
                } else {
                        pud_p = pgtable_area;
                        memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
                        pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;

                        pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS);
                }
                native_set_pgd(pgd_p, pgd);
        }

        if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
                p4d_p += p4d_index(vaddr);
                if (native_p4d_val(*p4d_p)) {
                        pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK);
                } else {
                        p4d_t p4d;

                        pud_p = pgtable_area;
                        memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
                        pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;

                        p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS);
                        native_set_p4d(p4d_p, p4d);
                }
        }

        pud_p += pud_index(vaddr);
        if (native_pud_val(*pud_p)) {
                if (native_pud_val(*pud_p) & _PAGE_PSE)
                        goto out;

                pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK);
        } else {
                pud_t pud;

                pmd_p = pgtable_area;
                memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
                pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD;

                pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS);
                native_set_pud(pud_p, pud);
        }

        pmd_p += pmd_index(vaddr);
        if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE))
                native_set_pmd(pmd_p, native_make_pmd(pmd_val));

out:
        return pgtable_area;
}
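
/*
 * Usage sketch (illustrative): sme_populate_pgd() acts as a bump allocator
 * over pgtable_area. A typical call looks like
 *
 *   pgtable_area = sme_populate_pgd(pgd, pgtable_area, vaddr,
 *                                   paddr + PMD_FLAGS);
 *
 * where any P4D/PUD/PMD pages that had to be created are carved out of
 * pgtable_area and the advanced pointer is returned so the next call keeps
 * allocating behind them.
 */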

static unsigned long __init sme_pgtable_calc(unsigned long len)
{
        unsigned long p4d_size, pud_size, pmd_size;
        unsigned long total;

        /*
         * Perform a relatively simplistic calculation of the pagetable
         * entries that are needed. Those mappings will be covered by 2MB
         * PMD entries so we can conservatively calculate the required
         * number of P4D, PUD and PMD structures needed to perform the
         * mappings. Incrementing the count for each covers the case where
         * the addresses cross entries.
         */
        if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
                p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
                p4d_size *= sizeof(p4d_t) * PTRS_PER_P4D;
                pud_size = (ALIGN(len, P4D_SIZE) / P4D_SIZE) + 1;
                pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
        } else {
                p4d_size = 0;
                pud_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
                pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
        }
        pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1;
        pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;

        total = p4d_size + pud_size + pmd_size;

        /*
         * Now calculate the added pagetable structures needed to populate
         * the new pagetables.
         */
        if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
                p4d_size = ALIGN(total, PGDIR_SIZE) / PGDIR_SIZE;
                p4d_size *= sizeof(p4d_t) * PTRS_PER_P4D;
                pud_size = ALIGN(total, P4D_SIZE) / P4D_SIZE;
                pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
        } else {
                p4d_size = 0;
                pud_size = ALIGN(total, PGDIR_SIZE) / PGDIR_SIZE;
                pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
        }
        pmd_size = ALIGN(total, PUD_SIZE) / PUD_SIZE;
        pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;

        total += p4d_size + pud_size + pmd_size;

        return total;
}
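
/*
 * Worked example (illustrative, assuming CONFIG_X86_5LEVEL is not set): for
 * len = 64MB the first pass computes
 *
 *   pud_size = (ALIGN(64MB, PGDIR_SIZE) / PGDIR_SIZE + 1) * 512 * 8 = 8KB
 *   pmd_size = (ALIGN(64MB, PUD_SIZE)  / PUD_SIZE  + 1) * 512 * 8 = 8KB
 *   total    = 16KB
 *
 * and the second pass adds the structures needed to map those 16KB of new
 * pagetables themselves (4KB of PUDs + 4KB of PMDs), giving a conservative
 * total of 24KB.
 */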

void __init sme_encrypt_kernel(void)
{
        unsigned long workarea_start, workarea_end, workarea_len;
        unsigned long execute_start, execute_end, execute_len;
        unsigned long kernel_start, kernel_end, kernel_len;
        unsigned long pgtable_area_len;
        unsigned long paddr, pmd_flags;
        unsigned long decrypted_base;
        void *pgtable_area;
        pgd_t *pgd;

        if (!sme_active())
                return;

        /*
         * Prepare for encrypting the kernel by building new pagetables with
         * the necessary attributes needed to encrypt the kernel in place.
         *
         * One range of virtual addresses will map the memory occupied
         * by the kernel as encrypted.
         *
         * Another range of virtual addresses will map the memory occupied
         * by the kernel as decrypted and write-protected.
         *
         * The use of the write-protect attribute will prevent any of the
         * memory from being cached.
         */

        /* Physical addresses give us the identity mapped virtual addresses */
        kernel_start = __pa_symbol(_text);
        kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
        kernel_len = kernel_end - kernel_start;

        /* Set the encryption workarea to be immediately after the kernel */
        workarea_start = kernel_end;

        /*
         * Calculate the number of workarea bytes needed:
         *   executable encryption area size:
         *     stack page (PAGE_SIZE)
         *     encryption routine page (PAGE_SIZE)
         *     intermediate copy buffer (PMD_PAGE_SIZE)
         *   pagetable structures for the encryption of the kernel
         *   pagetable structures for workarea (in case not currently mapped)
         */
        execute_start = workarea_start;
        execute_end = execute_start + (PAGE_SIZE * 2) + PMD_PAGE_SIZE;
        execute_len = execute_end - execute_start;

        /*
         * One PGD for both encrypted and decrypted mappings and a set of
         * PUDs and PMDs for each of the encrypted and decrypted mappings.
         */
        pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
        pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;

        /* PUDs and PMDs needed in the current pagetables for the workarea */
        pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);

        /*
         * The total workarea includes the executable encryption area and
         * the pagetable area.
         */
        workarea_len = execute_len + pgtable_area_len;
        workarea_end = workarea_start + workarea_len;
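
        /*
         * Illustrative layout of the workarea computed above (sketch, sizes
         * not to scale):
         *
         *   workarea_start == execute_start:
         *       stack page + encryption routine page + PMD_PAGE_SIZE
         *       intermediate copy buffer (execute_len bytes in total)
         *   execute_end:
         *       start of the pagetable area, pgtable_area_len bytes handed
         *       out by sme_populate_pgd() below as a bump allocator
         *   workarea_end:
         *       first byte past the workarea
         */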

        /*
         * Set the address to the start of where newly created pagetable
         * structures (PGDs, PUDs and PMDs) will be allocated. New pagetable
         * structures are created when the workarea is added to the current
         * pagetables and when the new encrypted and decrypted kernel
         * mappings are populated.
         */
        pgtable_area = (void *)execute_end;

        /*
         * Make sure the current pagetable structure has entries for
         * addressing the workarea.
         */
        pgd = (pgd_t *)native_read_cr3_pa();
        paddr = workarea_start;
        while (paddr < workarea_end) {
                pgtable_area = sme_populate_pgd(pgd, pgtable_area,
                                                paddr,
                                                paddr + PMD_FLAGS);

                paddr += PMD_PAGE_SIZE;
        }

        /* Flush the TLB - no globals so cr3 is enough */
        native_write_cr3(__native_read_cr3());

        /*
         * A new pagetable structure is being built to allow for the kernel
         * to be encrypted. It starts with an empty PGD that will then be
         * populated with new PUDs and PMDs as the encrypted and decrypted
         * kernel mappings are created.
         */
        pgd = pgtable_area;
        memset(pgd, 0, sizeof(*pgd) * PTRS_PER_PGD);
        pgtable_area += sizeof(*pgd) * PTRS_PER_PGD;

        /* Add encrypted kernel (identity) mappings */
        pmd_flags = PMD_FLAGS | _PAGE_ENC;
        paddr = kernel_start;
        while (paddr < kernel_end) {
                pgtable_area = sme_populate_pgd(pgd, pgtable_area,
                                                paddr,
                                                paddr + pmd_flags);

                paddr += PMD_PAGE_SIZE;
        }

        /*
         * A different PGD index/entry must be used to get different
         * pagetable entries for the decrypted mapping. Choose the next
         * PGD index and convert it to a virtual address to be used as
         * the base of the mapping.
         */
        decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
        decrypted_base <<= PGDIR_SHIFT;
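
        /*
         * Worked example (illustrative): if workarea_end lies in PGD entry 0,
         * decrypted_base becomes 1 << PGDIR_SHIFT (512GB with 4-level paging),
         * so the decrypted, write-protected alias of physical address P is
         * mapped at virtual address P + 512GB while the encrypted identity
         * mapping of P remains at virtual address P.
         */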

        /* Add decrypted, write-protected kernel (non-identity) mappings */
        pmd_flags = (PMD_FLAGS & ~_PAGE_CACHE_MASK) | (_PAGE_PAT | _PAGE_PWT);
        paddr = kernel_start;
        while (paddr < kernel_end) {
                pgtable_area = sme_populate_pgd(pgd, pgtable_area,
                                                paddr + decrypted_base,
                                                paddr + pmd_flags);

                paddr += PMD_PAGE_SIZE;
        }

        /* Add decrypted workarea mappings to both kernel mappings */
        paddr = workarea_start;
        while (paddr < workarea_end) {
                pgtable_area = sme_populate_pgd(pgd, pgtable_area,
                                                paddr,
                                                paddr + PMD_FLAGS);

                pgtable_area = sme_populate_pgd(pgd, pgtable_area,
                                                paddr + decrypted_base,
                                                paddr + PMD_FLAGS);

                paddr += PMD_PAGE_SIZE;
        }

        /* Perform the encryption */
        sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
                            kernel_len, workarea_start, (unsigned long)pgd);

        /*
         * At this point we are running encrypted. Remove the mappings for
         * the decrypted areas - all that is needed for this is to remove
         * the PGD entry/entries.
         */
        sme_clear_pgd(pgd, kernel_start + decrypted_base,
                      kernel_end + decrypted_base);
        sme_clear_pgd(pgd, workarea_start + decrypted_base,
                      workarea_end + decrypted_base);

        /* Flush the TLB - no globals so cr3 is enough */
        native_write_cr3(__native_read_cr3());
}
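
/*
 * Hedged note (sme_encrypt_execute() is implemented outside this file): it
 * is expected to switch to the temporary pagetables built above, execute
 * from the workarea, and copy the kernel through the intermediate buffer
 * using the decrypted (write-protected) source mapping and the encrypted
 * destination mapping, so that execution returns here with the kernel
 * image encrypted in place.
 */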

void __init __nostackprotector sme_enable(struct boot_params *bp)
{
        const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
        unsigned int eax, ebx, ecx, edx;
        bool active_by_default;
        unsigned long me_mask;
        char buffer[16];
        u64 msr;

        /* Check for the SME support leaf */
        eax = 0x80000000;
        ecx = 0;
        native_cpuid(&eax, &ebx, &ecx, &edx);
        if (eax < 0x8000001f)
                return;

        /*
         * Check for the SME feature:
         *   CPUID Fn8000_001F[EAX] - Bit 0
         *     Secure Memory Encryption support
         *   CPUID Fn8000_001F[EBX] - Bits 5:0
         *     Pagetable bit position used to indicate encryption
         */
        eax = 0x8000001f;
        ecx = 0;
        native_cpuid(&eax, &ebx, &ecx, &edx);
        if (!(eax & 1))
                return;

        me_mask = 1UL << (ebx & 0x3f);

        /* Check if SME is enabled */
        msr = __rdmsr(MSR_K8_SYSCFG);
        if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
                return;
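
        /*
         * Worked example (illustrative): if CPUID Fn8000_001F[EBX] bits 5:0
         * report bit position 47, then
         *
         *   me_mask = 1UL << (47 & 0x3f) = 0x0000800000000000
         *
         * i.e. physical address bit 47 is the encryption (C) bit, and it is
         * only honored because the MSR_K8_SYSCFG_MEM_ENCRYPT check above
         * confirmed that memory encryption is enabled in hardware.
         */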

        /*
         * Fixups have not been applied to phys_base yet and we're running
         * identity mapped, so we must obtain the address of the SME command
         * line argument data using rip-relative addressing.
         */
        asm ("lea sme_cmdline_arg(%%rip), %0"
             : "=r" (cmdline_arg)
             : "p" (sme_cmdline_arg));
        asm ("lea sme_cmdline_on(%%rip), %0"
             : "=r" (cmdline_on)
             : "p" (sme_cmdline_on));
        asm ("lea sme_cmdline_off(%%rip), %0"
             : "=r" (cmdline_off)
             : "p" (sme_cmdline_off));

        if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
                active_by_default = true;
        else
                active_by_default = false;

        cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
                                     ((u64)bp->ext_cmd_line_ptr << 32));

        cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer));

        if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
                sme_me_mask = me_mask;
        else if (!strncmp(buffer, cmdline_off, sizeof(buffer)))
                sme_me_mask = 0;
        else
                sme_me_mask = active_by_default ? me_mask : 0;
}
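
/*
 * Usage sketch (illustrative): with CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT
 * disabled, booting with "mem_encrypt=on" sets sme_me_mask and activates SME
 * while "mem_encrypt=off" (or omitting the option) leaves it disabled; with
 * the config option enabled the default flips and "mem_encrypt=off" becomes
 * the way to opt out.
 */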