/*
 * Hibernate support specific for ARM64
 *
 * Derived from work on ARM hibernation support by:
 *
 * Ubuntu project, hibernation support for mach-dove
 * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
 * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
 * https://lkml.org/lkml/2010/6/18/4
 * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
 * https://patchwork.kernel.org/patch/96442/
 *
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * License terms: GNU General Public License (GPL) version 2
 */
#define pr_fmt(x) "hibernate: " x

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/utsname.h>
#include <linux/version.h>

#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/irqflags.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

/*
 * Hibernate core relies on this value being 0 on resume, and marks it
 * __nosavedata assuming it will keep the resume kernel's '0' value. This
 * doesn't happen with KASLR.
 *
 * defined as "__visible int in_suspend __nosavedata" in
 * kernel/power/hibernate.c
 */
extern int in_suspend;

/* Find a symbol's alias in the linear map */
#define LMADDR(x)	phys_to_virt(virt_to_phys(x))
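/*
 * i.e. LMADDR() round-trips x through its physical address: virt_to_phys()
 * accepts a kernel-image symbol, and phys_to_virt() returns the linear-map
 * alias of the same physical page.
 */
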
/* Do we need to reset el2? */
#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())
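/*
 * EL2 only needs resetting when we booted at EL2 but run the kernel at
 * EL1 (non-VHE): the hyp-stub's vectors must be re-installed on resume.
 * With VHE, or when booted at EL1, there is no EL2 state to restore.
 */
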
/* temporary el2 vectors in the __hibernate_exit_text section. */
extern char hibernate_el2_vectors[];

/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
extern char __hyp_stub_vectors[];

/*
 * The logical cpu number we should resume on, initialised to a non-cpu
 * number.
 */
static int sleep_cpu = -EINVAL;

/*
 * Values that may not change over hibernate/resume. We put the build number
 * and date in here so that we guarantee not to resume with a different
 * kernel.
 */
struct arch_hibernate_hdr_invariants {
	char		uts_version[__NEW_UTS_LEN + 1];
};

/* These values need to be known across a hibernate/restore. */
static struct arch_hibernate_hdr {
	struct arch_hibernate_hdr_invariants invariants;

	/* These are needed to find the relocated kernel if built with kaslr */
	phys_addr_t	ttbr1_el1;
	void		(*reenter_kernel)(void);

	/*
	 * We need to know where the __hyp_stub_vectors are after restore to
	 * re-configure el2.
	 */
	phys_addr_t	__hyp_stub_vectors;

	u64		sleep_cpu_mpidr;
} resume_hdr;
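/*
 * resume_hdr is filled in from the image header by
 * arch_hibernation_header_restore(), and consumed by swsusp_arch_resume()
 * once the image pages have been loaded.
 */
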
static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
{
	memset(i, 0, sizeof(*i));
	memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
}
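/*
 * init_utsname()->version carries the build number and build timestamp
 * (the "#1 SMP ..." string), so any rebuild of the kernel invalidates
 * images from earlier builds.
 */
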
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin);
	unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1);

	return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
}
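/*
 * The hibernate core calls pfn_is_nosave() for each pfn it is about to
 * save; pages between __nosave_begin and __nosave_end are left out of
 * the image so the resume kernel's copies are not overwritten.
 */
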
void notrace save_processor_state(void)
{
	WARN_ON(num_online_cpus() != 1);
}

void notrace restore_processor_state(void)
{
}

int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct arch_hibernate_hdr *hdr = addr;

	if (max_size < sizeof(*hdr))
		return -EOVERFLOW;

	arch_hdr_invariants(&hdr->invariants);
	hdr->ttbr1_el1 = virt_to_phys(swapper_pg_dir);
	hdr->reenter_kernel = _cpu_resume;

	/* We can't use __hyp_get_vectors() because kvm may still be loaded */
	if (el2_reset_needed())
		hdr->__hyp_stub_vectors = virt_to_phys(__hyp_stub_vectors);
	else
		hdr->__hyp_stub_vectors = 0;

	/* Save the mpidr of the cpu we called cpu_suspend() on... */
	if (sleep_cpu < 0) {
		pr_err("Failing to hibernate on an unknown CPU.\n");
		return -ENODEV;
	}
	hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu);
	pr_info("Hibernating on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_save);

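/*
 * Runs in the boot kernel while it reads the image header: reject images
 * produced by a different build, and ensure the CPU we hibernated on is
 * online before the image is restored.
 */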
int arch_hibernation_header_restore(void *addr)
{
	int ret;
	struct arch_hibernate_hdr_invariants invariants;
	struct arch_hibernate_hdr *hdr = addr;

	arch_hdr_invariants(&invariants);
	if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
		pr_crit("Hibernate image not generated by this kernel!\n");
		return -EINVAL;
	}

	sleep_cpu = get_logical_index(hdr->sleep_cpu_mpidr);
	pr_info("Hibernated on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);
	if (sleep_cpu < 0) {
		pr_crit("Hibernated on a CPU not known to this kernel!\n");
		sleep_cpu = -EINVAL;
		return -EINVAL;
	}
	if (!cpu_online(sleep_cpu)) {
		pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
		ret = cpu_up(sleep_cpu);
		if (ret) {
			pr_err("Failed to bring hibernate-CPU up!\n");
			sleep_cpu = -EINVAL;
			return ret;
		}
	}

	resume_hdr = *hdr;

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_restore);

/*
 * Copies length bytes, starting at src_start, into a new page, performs
 * cache maintenance, then maps the page at the specified low address as
 * executable.
 *
 * This is used by hibernate to copy the code it needs to execute when
 * overwriting the kernel text. This function generates a new set of page
 * tables, which it loads into ttbr0.
 *
 * Length is provided as we probably only want 4K of data, even on a 64K
 * page system.
 */
static int create_safe_exec_page(void *src_start, size_t length,
				 unsigned long dst_addr,
				 phys_addr_t *phys_dst_addr,
				 void *(*allocator)(gfp_t mask),
				 gfp_t mask)
{
	int rc = 0;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long dst = (unsigned long)allocator(mask);

	if (!dst) {
		rc = -ENOMEM;
		goto out;
	}

	memcpy((void *)dst, src_start, length);
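	/*
	 * The copy will be executed from this page later, so clean it to
	 * the PoU and invalidate the I-cache: stale instruction lines must
	 * not be fetched once a mapping of the page is executed from.
	 */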
	flush_icache_range(dst, dst + length);

	/* Allocate the top-level table and check it before walking. */
	pgd = allocator(mask);
	if (!pgd) {
		rc = -ENOMEM;
		goto out;
	}
	pgd = pgd_offset_raw(pgd, dst_addr);
	if (pgd_none(*pgd)) {
		pud = allocator(mask);
		if (!pud) {
			rc = -ENOMEM;
			goto out;
		}
		pgd_populate(&init_mm, pgd, pud);
	}

	pud = pud_offset(pgd, dst_addr);
	if (pud_none(*pud)) {
		pmd = allocator(mask);
		if (!pmd) {
			rc = -ENOMEM;
			goto out;
		}
		pud_populate(&init_mm, pud, pmd);
	}

	pmd = pmd_offset(pud, dst_addr);
	if (pmd_none(*pmd)) {
		pte = allocator(mask);
		if (!pte) {
			rc = -ENOMEM;
			goto out;
		}
		pmd_populate_kernel(&init_mm, pmd, pte);
	}

	pte = pte_offset_kernel(pmd, dst_addr);
	set_pte(pte, __pte(virt_to_phys((void *)dst) |
		pgprot_val(PAGE_KERNEL_EXEC)));

	/*
	 * Load our new page tables. A strict BBM approach requires that we
	 * ensure that TLBs are free of any entries that may overlap with the
	 * global mappings we are about to install.
	 *
	 * For a real hibernate/resume cycle TTBR0 currently points to a zero
	 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
	 * runtime services), while for a userspace-driven test_resume cycle it
	 * points to userspace page tables (and we must point it at a zero page
	 * ourselves). Elsewhere we only (un)install the idmap with preemption
	 * disabled, so T0SZ should be as required regardless.
	 */
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	write_sysreg(virt_to_phys(pgd), ttbr0_el1);
	isb();

	*phys_dst_addr = virt_to_phys((void *)dst);

out:
	return rc;
}

#define dcache_clean_range(start, end)	__flush_dcache_area(start, (end - start))

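/*
 * swsusp_arch_suspend() effectively returns twice, in the manner of
 * setjmp(): __cpu_suspend_enter() returns non-zero on the initial pass,
 * where we snapshot memory with swsusp_save(), and zero when the restored
 * image resumes through _cpu_resume() into the saved context, taking the
 * else branch below.
 */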
int swsusp_arch_suspend(void)
{
	int ret = 0;
	unsigned long flags;
	struct sleep_stack_data state;

	if (cpus_are_stuck_in_kernel()) {
		pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
		return -EBUSY;
	}

	local_dbg_save(flags);

	if (__cpu_suspend_enter(&state)) {
		sleep_cpu = smp_processor_id();
		ret = swsusp_save();
	} else {
		/* Clean kernel core startup/idle code to PoC */
		dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end);
		dcache_clean_range(__idmap_text_start, __idmap_text_end);

		/* Clean kvm setup code to PoC? */
		if (el2_reset_needed())
			dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);

		/*
		 * Tell the hibernation core that we've just restored
		 * the memory
		 */
		in_suspend = 0;

		sleep_cpu = -EINVAL;
		__cpu_suspend_exit();
	}

	local_dbg_restore(flags);

	return ret;
}

static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
{
	pte_t pte = *src_pte;

	if (pte_valid(pte)) {
		/*
		 * Resume will overwrite areas that may be marked
		 * read only (code, rodata). Clear the RDONLY bit from
		 * the temporary mappings we use during restore.
		 */
		set_pte(dst_pte, pte_clear_rdonly(pte));
	} else if (debug_pagealloc_enabled() && !pte_none(pte)) {
		/*
		 * debug_pagealloc will remove the PTE_VALID bit if
		 * the page isn't in use by the resume kernel. It may have
		 * been in use by the original kernel, in which case we need
		 * to put it back in our copy to do the restore.
		 *
		 * Before marking this entry valid, check that the pfn is
		 * backed by memory that may legitimately be mapped.
		 */
		BUG_ON(!pfn_valid(pte_pfn(pte)));
		set_pte(dst_pte, pte_mkpresent(pte_clear_rdonly(pte)));
	}
}

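/*
 * copy_pte(), copy_pmd(), copy_pud() and copy_page_tables() below mirror
 * the kernel's linear-map page tables into pages from get_safe_page(),
 * giving the restore code a writable view of memory that survives the
 * image overwriting swapper_pg_dir.
 */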
static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start,
		    unsigned long end)
{
	pte_t *src_pte;
	pte_t *dst_pte;
	unsigned long addr = start;

	dst_pte = (pte_t *)get_safe_page(GFP_ATOMIC);
	if (!dst_pte)
		return -ENOMEM;
	pmd_populate_kernel(&init_mm, dst_pmd, dst_pte);
	dst_pte = pte_offset_kernel(dst_pmd, start);

	src_pte = pte_offset_kernel(src_pmd, start);
	do {
		_copy_pte(dst_pte, src_pte, addr);
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	return 0;
}

static int copy_pmd(pud_t *dst_pud, pud_t *src_pud, unsigned long start,
		    unsigned long end)
{
	pmd_t *src_pmd;
	pmd_t *dst_pmd;
	unsigned long next;
	unsigned long addr = start;

	if (pud_none(*dst_pud)) {
		dst_pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pmd)
			return -ENOMEM;
		pud_populate(&init_mm, dst_pud, dst_pmd);
	}
	dst_pmd = pmd_offset(dst_pud, start);

	src_pmd = pmd_offset(src_pud, start);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*src_pmd))
			continue;
		if (pmd_table(*src_pmd)) {
			if (copy_pte(dst_pmd, src_pmd, addr, next))
				return -ENOMEM;
		} else {
			set_pmd(dst_pmd,
				__pmd(pmd_val(*src_pmd) & ~PMD_SECT_RDONLY));
		}
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);

	return 0;
}

static int copy_pud(pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long start,
		    unsigned long end)
{
	pud_t *dst_pud;
	pud_t *src_pud;
	unsigned long next;
	unsigned long addr = start;

	if (pgd_none(*dst_pgd)) {
		dst_pud = (pud_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pud)
			return -ENOMEM;
		pgd_populate(&init_mm, dst_pgd, dst_pud);
	}
	dst_pud = pud_offset(dst_pgd, start);

	src_pud = pud_offset(src_pgd, start);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none(*src_pud))
			continue;
		if (pud_table(*src_pud)) {
			if (copy_pmd(dst_pud, src_pud, addr, next))
				return -ENOMEM;
		} else {
			set_pud(dst_pud,
				__pud(pud_val(*src_pud) & ~PMD_SECT_RDONLY));
		}
	} while (dst_pud++, src_pud++, addr = next, addr != end);

	return 0;
}

static int copy_page_tables(pgd_t *dst_pgd, unsigned long start,
			    unsigned long end)
{
	unsigned long next;
	unsigned long addr = start;
	pgd_t *src_pgd = pgd_offset_k(start);

	dst_pgd = pgd_offset_raw(dst_pgd, start);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none(*src_pgd))
			continue;
		if (copy_pud(dst_pgd, src_pgd, addr, next))
			return -ENOMEM;
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);

	return 0;
}
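/*
 * Note that swsusp_arch_resume() calls copy_page_tables() with end == 0:
 * pgd_addr_end() treats this as wrap-around, so the walk covers everything
 * from PAGE_OFFSET to the top of the address space.
 */
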
/*
 * Set up, then resume from the hibernate image using
 * swsusp_arch_suspend_exit().
 *
 * Memory allocated by get_safe_page() will be dealt with by the hibernate
 * code; we don't need to free it here.
 */
int swsusp_arch_resume(void)
{
	int rc = 0;
	void *zero_page;
	size_t exit_size;
	pgd_t *tmp_pg_dir;
	void *lm_restore_pblist;
	phys_addr_t phys_hibernate_exit;
	void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
					  void *, phys_addr_t, phys_addr_t);

	/*
	 * Restoring the memory image will overwrite the ttbr1 page tables.
	 * Create a second copy of just the linear map, and use this when
	 * restoring.
	 */
	tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!tmp_pg_dir) {
		pr_err("Failed to allocate memory for temporary page tables.\n");
		rc = -ENOMEM;
		goto out;
	}
	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
	if (rc)
		goto out;

	/*
	 * Since we only copied the linear map, we need to find restore_pblist's
	 * linear map address.
	 */
	lm_restore_pblist = LMADDR(restore_pblist);

	/*
	 * We need a zero page that is zero before & after resume in order
	 * to break-before-make on the ttbr1 page tables.
	 */
	zero_page = (void *)get_safe_page(GFP_ATOMIC);
	if (!zero_page) {
		pr_err("Failed to allocate zero page.\n");
		rc = -ENOMEM;
		goto out;
	}

	/*
	 * Locate the exit code in the bottom-but-one page, so that *NULL
	 * still has disastrous effects.
	 */
	hibernate_exit = (void *)PAGE_SIZE;
	exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
	/*
	 * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
	 * a new set of ttbr0 page tables and load them.
	 */
	rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
				   (unsigned long)hibernate_exit,
				   &phys_hibernate_exit,
				   (void *)get_safe_page, GFP_ATOMIC);
	if (rc) {
		pr_err("Failed to create safe executable page for hibernate_exit code.\n");
		goto out;
	}

	/*
	 * The hibernate exit text contains a set of el2 vectors that will
	 * be executed at el2 with the mmu off in order to reload the
	 * hyp-stub.
	 */
	__flush_dcache_area(hibernate_exit, exit_size);

	/*
	 * KASLR will cause the el2 vectors to be in a different location in
	 * the resumed kernel. Load hibernate's temporary copy into el2.
	 *
	 * We can skip this step if we booted at EL1, or are running with VHE.
	 */
	if (el2_reset_needed()) {
		phys_addr_t el2_vectors = phys_hibernate_exit;	/* base */
		el2_vectors += hibernate_el2_vectors -
			       __hibernate_exit_text_start;	/* offset */

		__hyp_set_vectors(el2_vectors);
	}

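	/*
	 * hibernate_exit() runs from the safe page: it copies each page on
	 * the restore_pblist back into place, installs the saved ttbr1_el1,
	 * then re-enters the suspended kernel via reenter_kernel
	 * (_cpu_resume). It does not return here.
	 */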
	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
		       resume_hdr.reenter_kernel, lm_restore_pblist,
		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));

out:
	return rc;
}

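/*
 * Overrides the weak hibernate_resume_nonboot_cpu_disable() in the
 * hibernate core: rather than always keeping the boot CPU, freeze every
 * CPU except the one we hibernated on, so the image is resumed on the
 * CPU whose context was saved.
 */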
int hibernate_resume_nonboot_cpu_disable(void)
{
	if (sleep_cpu < 0) {
		pr_err("Failing to resume from hibernate on an unknown CPU.\n");
		return -ENODEV;
	}

	return freeze_secondary_cpus(sleep_cpu);
}