hibernate_64.c

/*
 * Hibernation support for x86-64
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>

#include <crypto/hash.h>

#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

/* Defined in hibernate_asm_64.S */
extern asmlinkage __visible int restore_image(void);

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address __visible;
unsigned long jump_address_phys;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3 __visible;
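
/*
 * Physical address of the temporary page tables built by
 * set_up_temporary_mappings(); consumed by the assembly restore code.
 */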
unsigned long temp_level4_pgt __visible;
unsigned long relocated_restore_code __visible;
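
/*
 * Map the page containing the image kernel's entry point at the virtual
 * address (restore_jump_address) where the image kernel expects to find it,
 * so the final jump into the image kernel's text lands correctly.
 */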
static int set_up_temporary_text_mapping(pgd_t *pgd)
{
	pmd_t *pmd;
	pud_t *pud;
	p4d_t *p4d = NULL;
	pgprot_t pgtable_prot = __pgprot(_KERNPG_TABLE);
	pgprot_t pmd_text_prot = __pgprot(__PAGE_KERNEL_LARGE_EXEC);

	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(pmd_text_prot) &= __default_kernel_pte_mask;
	pgprot_val(pgtable_prot) &= __default_kernel_pte_mask;

	/*
	 * The new mapping only has to cover the page containing the image
	 * kernel's entry point (jump_address_phys), because the switch over to
	 * it is carried out by relocated code running from a page allocated
	 * specifically for this purpose and covered by the identity mapping, so
	 * the temporary kernel text mapping is only needed for the final jump.
	 * Moreover, in that mapping the virtual address of the image kernel's
	 * entry point must be the same as its virtual address in the image
	 * kernel (restore_jump_address), so the image kernel's
	 * restore_registers() code doesn't find itself in a different area of
	 * the virtual address space after switching over to the original page
	 * tables used by the image kernel.
	 */

	if (pgtable_l5_enabled) {
		p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
		if (!p4d)
			return -ENOMEM;
	}

	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
	if (!pud)
		return -ENOMEM;

	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd)
		return -ENOMEM;

	set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | pgprot_val(pmd_text_prot)));
	set_pud(pud + pud_index(restore_jump_address),
		__pud(__pa(pmd) | pgprot_val(pgtable_prot)));
	if (p4d) {
		p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot));
		pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));

		set_p4d(p4d + p4d_index(restore_jump_address), new_p4d);
		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	} else {
		/* No p4d for 4-level paging: point the pgd to the pud page table */
		pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));

		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	}

	return 0;
}
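
/*
 * Page table allocation callback for kernel_ident_mapping_init(): hand out
 * image-safe pages so the temporary page tables cannot be overwritten by
 * image data while the image is being restored.
 */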
static void *alloc_pgt_page(void *context)
{
	return (void *)get_safe_page(GFP_ATOMIC);
}
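
/*
 * Build the temporary page tables used during resume: the kernel text
 * mapping for the final jump (see above) plus an identity mapping of all
 * pfn_mapped ranges, which the relocated restore code runs under.
 */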
static int set_up_temporary_mappings(void)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.offset		= __PAGE_OFFSET,
	};
	unsigned long mstart, mend;
	pgd_t *pgd;
	int result;
	int i;

	pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!pgd)
		return -ENOMEM;

	/* Prepare a temporary mapping for the kernel text */
	result = set_up_temporary_text_mapping(pgd);
	if (result)
		return result;

	/* Set up the direct mapping from scratch */
	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend = pfn_mapped[i].end << PAGE_SHIFT;
		result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
		if (result)
			return result;
	}

	temp_level4_pgt = __pa(pgd);
	return 0;
}
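
/*
 * Copy core_restore_code (defined in hibernate_asm_64.S) to an image-safe
 * page and clear _PAGE_NX on that page, so the copy can keep executing after
 * the original kernel text has been overwritten by image data.
 */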
static int relocate_restore_code(void)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	relocated_restore_code = get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;

	memcpy((void *)relocated_restore_code, core_restore_code, PAGE_SIZE);

	/* Make the page containing the relocated code executable */
	pgd = (pgd_t *)__va(read_cr3_pa()) +
		pgd_index(relocated_restore_code);
	p4d = p4d_offset(pgd, relocated_restore_code);
	if (p4d_large(*p4d)) {
		set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX));
		goto out;
	}
	pud = pud_offset(p4d, relocated_restore_code);
	if (pud_large(*pud)) {
		set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
		goto out;
	}
	pmd = pmd_offset(pud, relocated_restore_code);
	if (pmd_large(*pmd)) {
		set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
		goto out;
	}
	pte = pte_offset_kernel(pmd, relocated_restore_code);
	set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
out:
	__flush_tlb_all();
	return 0;
}
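
/*
 * Final phase of resume: build the temporary page tables, relocate the
 * restore code to a safe page and call restore_image(), which copies the
 * hibernation image pages into place and jumps to the image kernel.
 */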
asmlinkage int swsusp_arch_resume(void)
{
	int error;

	/* We have got enough memory and from now on we cannot recover */
	error = set_up_temporary_mappings();
	if (error)
		return error;

	error = relocate_restore_code();
	if (error)
		return error;

	restore_image();
	return 0;
}

/*
 * pfn_is_nosave - check if given pfn is in the 'nosave' section
 */
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;

	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}

#define MD5_DIGEST_SIZE 16
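
/*
 * Architecture specific part of the hibernation image header: where to jump
 * back into the image kernel, the CR3 value to restore, a magic number used
 * to validate the header format, and an MD5 digest of the firmware-provided
 * e820 memory map taken at suspend time.
 */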
struct restore_data_record {
	unsigned long jump_address;
	unsigned long jump_address_phys;
	unsigned long cr3;
	unsigned long magic;
	u8 e820_digest[MD5_DIGEST_SIZE];
};
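
/* Signature checked on resume to reject incompatible image headers. */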
#define RESTORE_MAGIC	0x23456789ABCDEF01UL

#if IS_BUILTIN(CONFIG_CRYPTO_MD5)
/**
 * get_e820_md5 - calculate the md5 digest of a given e820 table
 *
 * @table: the e820 table to be hashed
 * @buf: the buffer to store the md5 digest in
 */
static int get_e820_md5(struct e820_table *table, void *buf)
{
	struct scatterlist sg;
	struct crypto_ahash *tfm;
	int size;
	int ret = 0;

	tfm = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return -ENOMEM;

	{
		AHASH_REQUEST_ON_STACK(req, tfm);

		size = offsetof(struct e820_table, entries) +
			sizeof(struct e820_entry) * table->nr_entries;
		ahash_request_set_tfm(req, tfm);
		sg_init_one(&sg, (u8 *)table, size);
		ahash_request_set_callback(req, 0, NULL, NULL);
		ahash_request_set_crypt(req, &sg, buf, size);

		if (crypto_ahash_digest(req))
			ret = -EINVAL;

		ahash_request_zero(req);
	}

	crypto_free_ahash(tfm);

	return ret;
}
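
/* Store the MD5 digest of the firmware e820 map in the image header. */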
static void hibernation_e820_save(void *buf)
{
	get_e820_md5(e820_table_firmware, buf);
}
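
/*
 * Compare the e820 digest saved in the image header against a digest of the
 * current firmware e820 map; a mismatch means the memory layout changed
 * between suspend and resume, so resuming would be unsafe.
 */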
static bool hibernation_e820_mismatch(void *buf)
{
	int ret;
	u8 result[MD5_DIGEST_SIZE];

	memset(result, 0, MD5_DIGEST_SIZE);
	/* If there is no digest in suspend kernel, let it go. */
	if (!memcmp(result, buf, MD5_DIGEST_SIZE))
		return false;

	ret = get_e820_md5(e820_table_firmware, result);
	if (ret)
		return true;

	return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false;
}
#else
static void hibernation_e820_save(void *buf)
{
}

static bool hibernation_e820_mismatch(void *buf)
{
	/* If md5 is not builtin for restore kernel, let it go. */
	return false;
}
#endif

/**
 * arch_hibernation_header_save - populate the architecture specific part
 *	of a hibernation image header
 * @addr: address to save the data at
 * @max_size: maximum size of the header data
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct restore_data_record *rdr = addr;

	if (max_size < sizeof(struct restore_data_record))
		return -EOVERFLOW;
	rdr->jump_address = (unsigned long)restore_registers;
	rdr->jump_address_phys = __pa_symbol(restore_registers);

	/*
	 * The restore code fixes up CR3 and CR4 in the following sequence:
	 *
	 * [in hibernation asm]
	 * 1. CR3 <= temporary page tables
	 * 2. CR4 <= mmu_cr4_features (from the kernel that restores us)
	 * 3. CR3 <= rdr->cr3
	 * 4. CR4 <= mmu_cr4_features (from us, i.e. the image kernel)
	 * [in restore_processor_state()]
	 * 5. CR4 <= saved CR4
	 * 6. CR3 <= saved CR3
	 *
	 * Our mmu_cr4_features has CR4.PCIDE=0, and toggling
	 * CR4.PCIDE while CR3's PCID bits are nonzero is illegal, so
	 * rdr->cr3 needs to point to valid page tables but must not
	 * have any of the PCID bits set.
	 */
	rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK;

	rdr->magic = RESTORE_MAGIC;

	hibernation_e820_save(rdr->e820_digest);

	return 0;
}

/**
 * arch_hibernation_header_restore - read the architecture specific data
 *	from the hibernation image header
 * @addr: address to read the data from
 */
int arch_hibernation_header_restore(void *addr)
{
	struct restore_data_record *rdr = addr;

	restore_jump_address = rdr->jump_address;
	jump_address_phys = rdr->jump_address_phys;
	restore_cr3 = rdr->cr3;

	if (rdr->magic != RESTORE_MAGIC) {
		pr_crit("Unrecognized hibernate image header format!\n");
		return -EINVAL;
	}

	if (hibernation_e820_mismatch(rdr->e820_digest)) {
		pr_crit("Hibernate inconsistent memory map detected!\n");
		return -ENODEV;
	}

	return 0;
}