hibernate_64.c

/*
 * Hibernation support for x86-64
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>

#include <crypto/hash.h>

#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

static int set_up_temporary_text_mapping(pgd_t *pgd)
{
        pmd_t *pmd;
        pud_t *pud;
        p4d_t *p4d = NULL;
        pgprot_t pgtable_prot = __pgprot(_KERNPG_TABLE);
        pgprot_t pmd_text_prot = __pgprot(__PAGE_KERNEL_LARGE_EXEC);

        /* Filter out unsupported __PAGE_KERNEL* bits: */
        pgprot_val(pmd_text_prot) &= __default_kernel_pte_mask;
        pgprot_val(pgtable_prot) &= __default_kernel_pte_mask;

        /*
         * The new mapping only has to cover the page containing the image
         * kernel's entry point (jump_address_phys), because the switch over to
         * it is carried out by relocated code running from a page allocated
         * specifically for this purpose and covered by the identity mapping, so
         * the temporary kernel text mapping is only needed for the final jump.
         * Moreover, in that mapping the virtual address of the image kernel's
         * entry point must be the same as its virtual address in the image
         * kernel (restore_jump_address), so the image kernel's
         * restore_registers() code doesn't find itself in a different area of
         * the virtual address space after switching over to the original page
         * tables used by the image kernel.
         */
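        /*
         * With 5-level paging there is an extra p4d level between the pgd
         * and the pud, so one more page table page is needed.
         */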
        if (pgtable_l5_enabled()) {
                p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
                if (!p4d)
                        return -ENOMEM;
        }

        pud = (pud_t *)get_safe_page(GFP_ATOMIC);
        if (!pud)
                return -ENOMEM;

        pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
        if (!pmd)
                return -ENOMEM;
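
        /*
         * Map the 2M page containing the image kernel's entry point with an
         * executable large-page PMD entry, and hook the new PMD page table
         * into the PUD.
         */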
        set_pmd(pmd + pmd_index(restore_jump_address),
                __pmd((jump_address_phys & PMD_MASK) | pgprot_val(pmd_text_prot)));
        set_pud(pud + pud_index(restore_jump_address),
                __pud(__pa(pmd) | pgprot_val(pgtable_prot)));
        if (p4d) {
                p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot));
                pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));

                set_p4d(p4d + p4d_index(restore_jump_address), new_p4d);
                set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
        } else {
                /* No p4d for 4-level paging: point the pgd to the pud page table */
                pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));

                set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
        }

        return 0;
}
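
/*
 * Page table allocation callback for kernel_ident_mapping_init(): pages
 * returned by get_safe_page() do not collide with the image data being
 * restored, so they survive until the final switch-over.
 */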
static void *alloc_pgt_page(void *context)
{
        return (void *)get_safe_page(GFP_ATOMIC);
}

static int set_up_temporary_mappings(void)
{
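        /*
         * kernel_ident_mapping_init() maps each range at virt = phys +
         * .offset, so with .offset set to __PAGE_OFFSET this rebuilds the
         * direct mapping rather than a strict identity mapping.
         */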
        struct x86_mapping_info info = {
                .alloc_pgt_page = alloc_pgt_page,
                .page_flag      = __PAGE_KERNEL_LARGE_EXEC,
                .offset         = __PAGE_OFFSET,
        };
        unsigned long mstart, mend;
        pgd_t *pgd;
        int result;
        int i;

        pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
        if (!pgd)
                return -ENOMEM;

        /* Prepare a temporary mapping for the kernel text */
        result = set_up_temporary_text_mapping(pgd);
        if (result)
                return result;

        /* Set up the direct mapping from scratch */
        for (i = 0; i < nr_pfn_mapped; i++) {
                mstart = pfn_mapped[i].start << PAGE_SHIFT;
                mend = pfn_mapped[i].end << PAGE_SHIFT;
                result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
                if (result)
                        return result;
        }
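
        /*
         * temp_pgt is read by the assembly restore code (hibernate_asm_64.S),
         * which loads it into CR3 to switch to these temporary page tables.
         */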
        temp_pgt = __pa(pgd);
        return 0;
}

asmlinkage int swsusp_arch_resume(void)
{
        int error;

        /* We have got enough memory and from now on we cannot recover */
        error = set_up_temporary_mappings();
        if (error)
                return error;

        error = relocate_restore_code();
        if (error)
                return error;
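
        /*
         * restore_image() does not return: it jumps to the relocated code,
         * which copies the image page frames back into place and then passes
         * control to the image kernel at restore_jump_address.
         */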
        restore_image();
        return 0;
}