mem_encrypt_identity.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568
  1. /*
  2. * AMD Memory Encryption Support
  3. *
  4. * Copyright (C) 2016 Advanced Micro Devices, Inc.
  5. *
  6. * Author: Tom Lendacky <thomas.lendacky@amd.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. */
  12. #define DISABLE_BRANCH_PROFILING
  13. /*
  14. * Since we're dealing with identity mappings, physical and virtual
  15. * addresses are the same, so override these defines which are ultimately
  16. * used by the headers in misc.h.
  17. */
  18. #define __pa(x) ((unsigned long)(x))
  19. #define __va(x) ((void *)((unsigned long)(x)))
  20. /*
  21. * Special hack: we have to be careful, because no indirections are
  22. * allowed here, and paravirt_ops is a kind of one. As it will only run in
  23. * baremetal anyway, we just keep it from happening. (This list needs to
  24. * be extended when new paravirt and debugging variants are added.)
  25. */
  26. #undef CONFIG_PARAVIRT
  27. #undef CONFIG_PARAVIRT_XXL
  28. #undef CONFIG_PARAVIRT_SPINLOCKS
  29. #include <linux/kernel.h>
  30. #include <linux/mm.h>
  31. #include <linux/mem_encrypt.h>
  32. #include <asm/setup.h>
  33. #include <asm/sections.h>
  34. #include <asm/cmdline.h>
  35. #include "mm_internal.h"
  36. #define PGD_FLAGS _KERNPG_TABLE_NOENC
  37. #define P4D_FLAGS _KERNPG_TABLE_NOENC
  38. #define PUD_FLAGS _KERNPG_TABLE_NOENC
  39. #define PMD_FLAGS _KERNPG_TABLE_NOENC
  40. #define PMD_FLAGS_LARGE (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
  41. #define PMD_FLAGS_DEC PMD_FLAGS_LARGE
  42. #define PMD_FLAGS_DEC_WP ((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
  43. (_PAGE_PAT | _PAGE_PWT))
  44. #define PMD_FLAGS_ENC (PMD_FLAGS_LARGE | _PAGE_ENC)
  45. #define PTE_FLAGS (__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)
  46. #define PTE_FLAGS_DEC PTE_FLAGS
  47. #define PTE_FLAGS_DEC_WP ((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
  48. (_PAGE_PAT | _PAGE_PWT))
  49. #define PTE_FLAGS_ENC (PTE_FLAGS | _PAGE_ENC)
  50. struct sme_populate_pgd_data {
  51. void *pgtable_area;
  52. pgd_t *pgd;
  53. pmdval_t pmd_flags;
  54. pteval_t pte_flags;
  55. unsigned long paddr;
  56. unsigned long vaddr;
  57. unsigned long vaddr_end;
  58. };
  59. static char sme_cmdline_arg[] __initdata = "mem_encrypt";
  60. static char sme_cmdline_on[] __initdata = "on";
  61. static char sme_cmdline_off[] __initdata = "off";
  62. static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
  63. {
  64. unsigned long pgd_start, pgd_end, pgd_size;
  65. pgd_t *pgd_p;
  66. pgd_start = ppd->vaddr & PGDIR_MASK;
  67. pgd_end = ppd->vaddr_end & PGDIR_MASK;
  68. pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t);
  69. pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
  70. memset(pgd_p, 0, pgd_size);
  71. }
/*
 * Walk the pagetable rooted at ppd->pgd for ppd->vaddr, allocating any
 * missing P4D/PUD/PMD levels from the bump allocator at ppd->pgtable_area.
 *
 * Returns the PUD entry covering ppd->vaddr, or NULL if that entry is
 * already a large (1GB) mapping and cannot be descended into.
 *
 * NOTE(review): newly allocated tables are taken from ppd->pgtable_area
 * without bounds checking — the caller (sme_encrypt_kernel) is presumed to
 * have sized the area via sme_pgtable_calc().
 */
static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = ppd->pgd + pgd_index(ppd->vaddr);
	if (pgd_none(*pgd)) {
		/* Allocate and clear a P4D page, then hook it into the PGD */
		p4d = ppd->pgtable_area;
		memset(p4d, 0, sizeof(*p4d) * PTRS_PER_P4D);
		ppd->pgtable_area += sizeof(*p4d) * PTRS_PER_P4D;
		set_pgd(pgd, __pgd(PGD_FLAGS | __pa(p4d)));
	}

	p4d = p4d_offset(pgd, ppd->vaddr);
	if (p4d_none(*p4d)) {
		/* Allocate and clear a PUD page, then hook it into the P4D */
		pud = ppd->pgtable_area;
		memset(pud, 0, sizeof(*pud) * PTRS_PER_PUD);
		ppd->pgtable_area += sizeof(*pud) * PTRS_PER_PUD;
		set_p4d(p4d, __p4d(P4D_FLAGS | __pa(pud)));
	}

	pud = pud_offset(p4d, ppd->vaddr);
	if (pud_none(*pud)) {
		/* Allocate and clear a PMD page, then hook it into the PUD */
		pmd = ppd->pgtable_area;
		memset(pmd, 0, sizeof(*pmd) * PTRS_PER_PMD);
		ppd->pgtable_area += sizeof(*pmd) * PTRS_PER_PMD;
		set_pud(pud, __pud(PUD_FLAGS | __pa(pmd)));
	}

	/* A 1GB large page here means there is no PMD level to return */
	if (pud_large(*pud))
		return NULL;

	return pud;
}
  103. static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
  104. {
  105. pud_t *pud;
  106. pmd_t *pmd;
  107. pud = sme_prepare_pgd(ppd);
  108. if (!pud)
  109. return;
  110. pmd = pmd_offset(pud, ppd->vaddr);
  111. if (pmd_large(*pmd))
  112. return;
  113. set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
  114. }
  115. static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
  116. {
  117. pud_t *pud;
  118. pmd_t *pmd;
  119. pte_t *pte;
  120. pud = sme_prepare_pgd(ppd);
  121. if (!pud)
  122. return;
  123. pmd = pmd_offset(pud, ppd->vaddr);
  124. if (pmd_none(*pmd)) {
  125. pte = ppd->pgtable_area;
  126. memset(pte, 0, sizeof(pte) * PTRS_PER_PTE);
  127. ppd->pgtable_area += sizeof(pte) * PTRS_PER_PTE;
  128. set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
  129. }
  130. if (pmd_large(*pmd))
  131. return;
  132. pte = pte_offset_map(pmd, ppd->vaddr);
  133. if (pte_none(*pte))
  134. set_pte(pte, __pte(ppd->paddr | ppd->pte_flags));
  135. }
  136. static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
  137. {
  138. while (ppd->vaddr < ppd->vaddr_end) {
  139. sme_populate_pgd_large(ppd);
  140. ppd->vaddr += PMD_PAGE_SIZE;
  141. ppd->paddr += PMD_PAGE_SIZE;
  142. }
  143. }
  144. static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
  145. {
  146. while (ppd->vaddr < ppd->vaddr_end) {
  147. sme_populate_pgd(ppd);
  148. ppd->vaddr += PAGE_SIZE;
  149. ppd->paddr += PAGE_SIZE;
  150. }
  151. }
/*
 * Map the range described by ppd using the given flags, preferring 2MB
 * PMD pages and falling back to 4KB PTE pages for the unaligned head
 * and tail of the range.  ppd->vaddr/paddr are consumed by the helpers,
 * so the true end is saved and restored between phases.
 */
static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
				   pmdval_t pmd_flags, pteval_t pte_flags)
{
	unsigned long vaddr_end;

	ppd->pmd_flags = pmd_flags;
	ppd->pte_flags = pte_flags;

	/* Save original end value since we modify the struct value */
	vaddr_end = ppd->vaddr_end;

	/* If start is not 2MB aligned, create PTE entries */
	ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE);
	__sme_map_range_pte(ppd);

	/* Create PMD entries */
	ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK;
	__sme_map_range_pmd(ppd);

	/* If end is not 2MB aligned, create PTE entries */
	ppd->vaddr_end = vaddr_end;
	__sme_map_range_pte(ppd);
}
/* Map a range as encrypted (C-bit set in the PMD/PTE flags). */
static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
}
/* Map a range as decrypted (no C-bit in the PMD/PTE flags). */
static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
}
/* Map a range as decrypted and write-protected (PAT|PWT, no C-bit). */
static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
}
/*
 * Conservatively compute the number of bytes of pagetable structures
 * needed to map 'len' bytes, including the tables needed to map the
 * tables themselves.  Returns a byte count (not a page count).
 */
static unsigned long __init sme_pgtable_calc(unsigned long len)
{
	unsigned long entries = 0, tables = 0;

	/*
	 * Perform a relatively simplistic calculation of the pagetable
	 * entries that are needed. Those mappings will be covered mostly
	 * by 2MB PMD entries so we can conservatively calculate the required
	 * number of P4D, PUD and PMD structures needed to perform the
	 * mappings.  For mappings that are not 2MB aligned, PTE mappings
	 * would be needed for the start and end portion of the address range
	 * that fall outside of the 2MB alignment.  This results in, at most,
	 * two extra pages to hold PTE entries for each range that is mapped.
	 * Incrementing the count for each covers the case where the addresses
	 * cross entries.
	 */

	/* PGDIR_SIZE is equal to P4D_SIZE on 4-level machine. */
	if (PTRS_PER_P4D > 1)
		entries += (DIV_ROUND_UP(len, PGDIR_SIZE) + 1) * sizeof(p4d_t) * PTRS_PER_P4D;
	entries += (DIV_ROUND_UP(len, P4D_SIZE) + 1) * sizeof(pud_t) * PTRS_PER_PUD;
	entries += (DIV_ROUND_UP(len, PUD_SIZE) + 1) * sizeof(pmd_t) * PTRS_PER_PMD;
	/* Two PTE pages for the possibly unaligned head and tail of the range */
	entries += 2 * sizeof(pte_t) * PTRS_PER_PTE;

	/*
	 * Now calculate the added pagetable structures needed to populate
	 * the new pagetables.
	 */
	if (PTRS_PER_P4D > 1)
		tables += DIV_ROUND_UP(entries, PGDIR_SIZE) * sizeof(p4d_t) * PTRS_PER_P4D;
	tables += DIV_ROUND_UP(entries, P4D_SIZE) * sizeof(pud_t) * PTRS_PER_PUD;
	tables += DIV_ROUND_UP(entries, PUD_SIZE) * sizeof(pmd_t) * PTRS_PER_PMD;

	return entries + tables;
}
/*
 * Encrypt the kernel (and initrd, if present) in place.
 *
 * Builds a temporary pagetable with two views of the kernel/initrd: an
 * encrypted identity mapping and a decrypted, write-protected mapping at
 * a shifted virtual base.  sme_encrypt_execute() (assembly, defined
 * elsewhere) then copies data through a workarea from the decrypted view
 * into the encrypted view.  Runs very early, before fixups, so physical
 * addresses double as virtual ones (see the __pa/__va overrides above).
 */
void __init sme_encrypt_kernel(struct boot_params *bp)
{
	unsigned long workarea_start, workarea_end, workarea_len;
	unsigned long execute_start, execute_end, execute_len;
	unsigned long kernel_start, kernel_end, kernel_len;
	unsigned long initrd_start, initrd_end, initrd_len;
	struct sme_populate_pgd_data ppd;
	unsigned long pgtable_area_len;
	unsigned long decrypted_base;

	if (!sme_active())
		return;

	/*
	 * Prepare for encrypting the kernel and initrd by building new
	 * pagetables with the necessary attributes needed to encrypt the
	 * kernel in place.
	 *
	 * One range of virtual addresses will map the memory occupied
	 * by the kernel and initrd as encrypted.
	 *
	 * Another range of virtual addresses will map the memory occupied
	 * by the kernel and initrd as decrypted and write-protected.
	 *
	 * The use of write-protect attribute will prevent any of the
	 * memory from being cached.
	 */

	/* Physical addresses gives us the identity mapped virtual addresses */
	kernel_start = __pa_symbol(_text);
	kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
	kernel_len = kernel_end - kernel_start;

	initrd_start = 0;
	initrd_end = 0;
	initrd_len = 0;
#ifdef CONFIG_BLK_DEV_INITRD
	/* 64-bit initrd size/address come split across two boot_params fields */
	initrd_len = (unsigned long)bp->hdr.ramdisk_size |
		     ((unsigned long)bp->ext_ramdisk_size << 32);
	if (initrd_len) {
		initrd_start = (unsigned long)bp->hdr.ramdisk_image |
			       ((unsigned long)bp->ext_ramdisk_image << 32);
		initrd_end = PAGE_ALIGN(initrd_start + initrd_len);
		initrd_len = initrd_end - initrd_start;
	}
#endif

	/* Set the encryption workarea to be immediately after the kernel */
	workarea_start = kernel_end;

	/*
	 * Calculate required number of workarea bytes needed:
	 *   executable encryption area size:
	 *     stack page (PAGE_SIZE)
	 *     encryption routine page (PAGE_SIZE)
	 *     intermediate copy buffer (PMD_PAGE_SIZE)
	 *   pagetable structures for the encryption of the kernel
	 *   pagetable structures for workarea (in case not currently mapped)
	 */
	execute_start = workarea_start;
	execute_end = execute_start + (PAGE_SIZE * 2) + PMD_PAGE_SIZE;
	execute_len = execute_end - execute_start;

	/*
	 * One PGD for both encrypted and decrypted mappings and a set of
	 * PUDs and PMDs for each of the encrypted and decrypted mappings.
	 */
	pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
	pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
	if (initrd_len)
		pgtable_area_len += sme_pgtable_calc(initrd_len) * 2;

	/* PUDs and PMDs needed in the current pagetables for the workarea */
	pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);

	/*
	 * The total workarea includes the executable encryption area and
	 * the pagetable area.  The start of the workarea is already 2MB
	 * aligned, align the end of the workarea on a 2MB boundary so that
	 * we don't try to create/allocate PTE entries from the workarea
	 * before it is mapped.
	 */
	workarea_len = execute_len + pgtable_area_len;
	workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE);

	/*
	 * Set the address to the start of where newly created pagetable
	 * structures (PGDs, PUDs and PMDs) will be allocated.  New pagetable
	 * structures are created when the workarea is added to the current
	 * pagetables and when the new encrypted and decrypted kernel
	 * mappings are populated.
	 */
	ppd.pgtable_area = (void *)execute_end;

	/*
	 * Make sure the current pagetable structure has entries for
	 * addressing the workarea.
	 */
	ppd.pgd = (pgd_t *)native_read_cr3_pa();
	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start;
	ppd.vaddr_end = workarea_end;
	sme_map_range_decrypted(&ppd);

	/* Flush the TLB - no globals so cr3 is enough */
	native_write_cr3(__native_read_cr3());

	/*
	 * A new pagetable structure is being built to allow for the kernel
	 * and initrd to be encrypted.  It starts with an empty PGD that will
	 * then be populated with new PUDs and PMDs as the encrypted and
	 * decrypted kernel mappings are created.
	 */
	ppd.pgd = ppd.pgtable_area;
	memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
	ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;

	/*
	 * A different PGD index/entry must be used to get different
	 * pagetable entries for the decrypted mapping.  Choose the next
	 * PGD index and convert it to a virtual address to be used as
	 * the base of the mapping.
	 */
	decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
	if (initrd_len) {
		unsigned long check_base;

		/* The initrd may end above the workarea - take the larger slot */
		check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);
		decrypted_base = max(decrypted_base, check_base);
	}
	decrypted_base <<= PGDIR_SHIFT;

	/* Add encrypted kernel (identity) mappings */
	ppd.paddr = kernel_start;
	ppd.vaddr = kernel_start;
	ppd.vaddr_end = kernel_end;
	sme_map_range_encrypted(&ppd);

	/* Add decrypted, write-protected kernel (non-identity) mappings */
	ppd.paddr = kernel_start;
	ppd.vaddr = kernel_start + decrypted_base;
	ppd.vaddr_end = kernel_end + decrypted_base;
	sme_map_range_decrypted_wp(&ppd);

	if (initrd_len) {
		/* Add encrypted initrd (identity) mappings */
		ppd.paddr = initrd_start;
		ppd.vaddr = initrd_start;
		ppd.vaddr_end = initrd_end;
		sme_map_range_encrypted(&ppd);
		/*
		 * Add decrypted, write-protected initrd (non-identity) mappings
		 */
		ppd.paddr = initrd_start;
		ppd.vaddr = initrd_start + decrypted_base;
		ppd.vaddr_end = initrd_end + decrypted_base;
		sme_map_range_decrypted_wp(&ppd);
	}

	/* Add decrypted workarea mappings to both kernel mappings */
	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start;
	ppd.vaddr_end = workarea_end;
	sme_map_range_decrypted(&ppd);

	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start + decrypted_base;
	ppd.vaddr_end = workarea_end + decrypted_base;
	sme_map_range_decrypted(&ppd);

	/* Perform the encryption */
	sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
			    kernel_len, workarea_start, (unsigned long)ppd.pgd);

	if (initrd_len)
		sme_encrypt_execute(initrd_start, initrd_start + decrypted_base,
				    initrd_len, workarea_start,
				    (unsigned long)ppd.pgd);

	/*
	 * At this point we are running encrypted.  Remove the mappings for
	 * the decrypted areas - all that is needed for this is to remove
	 * the PGD entry/entries.
	 */
	ppd.vaddr = kernel_start + decrypted_base;
	ppd.vaddr_end = kernel_end + decrypted_base;
	sme_clear_pgd(&ppd);

	if (initrd_len) {
		ppd.vaddr = initrd_start + decrypted_base;
		ppd.vaddr_end = initrd_end + decrypted_base;
		sme_clear_pgd(&ppd);
	}

	ppd.vaddr = workarea_start + decrypted_base;
	ppd.vaddr_end = workarea_end + decrypted_base;
	sme_clear_pgd(&ppd);

	/* Flush the TLB - no globals so cr3 is enough */
	native_write_cr3(__native_read_cr3());
}
/*
 * Probe for SME/SEV support and set sme_me_mask / sev_enabled accordingly.
 *
 * Uses CPUID Fn8000_001F to detect support and the encryption bit
 * position, then checks the SYSCFG MSR (SME, bare metal) or the SEV MSR
 * (SEV, under a hypervisor).  For SME only, the "mem_encrypt=on/off"
 * command-line option can override the Kconfig default.  Runs identity
 * mapped before fixups, hence the rip-relative addressing below.
 */
void __init sme_enable(struct boot_params *bp)
{
	const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
	unsigned int eax, ebx, ecx, edx;
	unsigned long feature_mask;
	bool active_by_default;
	unsigned long me_mask;
	char buffer[16];
	u64 msr;

	/* Check for the SME/SEV support leaf */
	eax = 0x80000000;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (eax < 0x8000001f)
		return;

#define AMD_SME_BIT	BIT(0)
#define AMD_SEV_BIT	BIT(1)
	/*
	 * Set the feature mask (SME or SEV) based on whether we are
	 * running under a hypervisor (CPUID Fn0000_0001 ECX bit 31).
	 */
	eax = 1;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	feature_mask = (ecx & BIT(31)) ? AMD_SEV_BIT : AMD_SME_BIT;

	/*
	 * Check for the SME/SEV feature:
	 *   CPUID Fn8000_001F[EAX]
	 *   - Bit 0 - Secure Memory Encryption support
	 *   - Bit 1 - Secure Encrypted Virtualization support
	 *   CPUID Fn8000_001F[EBX]
	 *   - Bits 5:0 - Pagetable bit position used to indicate encryption
	 */
	eax = 0x8000001f;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (!(eax & feature_mask))
		return;

	me_mask = 1UL << (ebx & 0x3f);

	/* Check if memory encryption is enabled */
	if (feature_mask == AMD_SME_BIT) {
		/* For SME, check the SYSCFG MSR */
		msr = __rdmsr(MSR_K8_SYSCFG);
		if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
			return;
	} else {
		/* For SEV, check the SEV MSR */
		msr = __rdmsr(MSR_AMD64_SEV);
		if (!(msr & MSR_AMD64_SEV_ENABLED))
			return;

		/* SEV state cannot be controlled by a command line option */
		sme_me_mask = me_mask;
		sev_enabled = true;
		physical_mask &= ~sme_me_mask;
		return;
	}

	/*
	 * Fixups have not been applied to phys_base yet and we're running
	 * identity mapped, so we must obtain the address to the SME command
	 * line argument data using rip-relative addressing.
	 */
	asm ("lea sme_cmdline_arg(%%rip), %0"
	     : "=r" (cmdline_arg)
	     : "p" (sme_cmdline_arg));
	asm ("lea sme_cmdline_on(%%rip), %0"
	     : "=r" (cmdline_on)
	     : "p" (sme_cmdline_on));
	asm ("lea sme_cmdline_off(%%rip), %0"
	     : "=r" (cmdline_off)
	     : "p" (sme_cmdline_off));

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
		active_by_default = true;
	else
		active_by_default = false;

	/* The 64-bit command-line pointer is split across two fields */
	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
				     ((u64)bp->ext_cmd_line_ptr << 32));

	cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer));

	if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
		sme_me_mask = me_mask;
	else if (!strncmp(buffer, cmdline_off, sizeof(buffer)))
		sme_me_mask = 0;
	else
		sme_me_mask = active_by_default ? me_mask : 0;

	physical_mask &= ~sme_me_mask;
}