ident_map.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Helper routines for building identity mapping page tables. This is
 * included by both the compressed kernel and the regular kernel.
 */
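
/*
 * Fill in PMD entries covering [addr, end) with large (PMD-level) page
 * mappings that point at addr - info->offset; entries that are already
 * present are left untouched.
 */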
static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
			   unsigned long addr, unsigned long end)
{
	addr &= PMD_MASK;
	for (; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd = pmd_page + pmd_index(addr);

		if (pmd_present(*pmd))
			continue;

		set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
	}
}
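
/*
 * Fill in the PUD level for [addr, end). With info->direct_gbpages set,
 * gigabyte pages are installed directly; otherwise PMD page tables are
 * allocated via info->alloc_pgt_page() as needed and handed to
 * ident_pmd_init(). Returns -ENOMEM if an allocation fails.
 */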
static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (next > end)
			next = end;

		if (info->direct_gbpages) {
			pud_t pudval;

			if (pud_present(*pud))
				continue;

			addr &= PUD_MASK;
			pudval = __pud((addr - info->offset) | info->page_flag);
			set_pud(pud, pudval);
			continue;
		}

		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, 0);
			ident_pmd_init(info, pmd, addr, next);
			continue;
		}
		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
		if (!pmd)
			return -ENOMEM;
		ident_pmd_init(info, pmd, addr, next);
		set_pud(pud, __pud(__pa(pmd) | info->kernpg_flag));
	}

	return 0;
}
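
/*
 * Fill in the P4D level for [addr, end), allocating PUD page tables
 * where no entry exists yet and delegating each sub-range to
 * ident_pud_init().
 */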
static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;
	int result;

	for (; addr < end; addr = next) {
		p4d_t *p4d = p4d_page + p4d_index(addr);
		pud_t *pud;

		next = (addr & P4D_MASK) + P4D_SIZE;
		if (next > end)
			next = end;

		if (p4d_present(*p4d)) {
			pud = pud_offset(p4d, 0);
			result = ident_pud_init(info, pud, addr, next);
			if (result)
				return result;
			continue;
		}
		pud = (pud_t *)info->alloc_pgt_page(info->context);
		if (!pud)
			return -ENOMEM;
		result = ident_pud_init(info, pud, addr, next);
		if (result)
			return result;
		set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag));
	}

	return 0;
}
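
/*
 * Build mappings for the physical range [pstart, pend) at virtual offset
 * info->offset (a pure identity mapping when the offset is zero). New
 * page-table pages come from info->alloc_pgt_page() and entries that are
 * already present are reused, so this can be called once per region to
 * populate a single top-level page table.
 */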
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long pstart, unsigned long pend)
{
	unsigned long addr = pstart + info->offset;
	unsigned long end = pend + info->offset;
	unsigned long next;
	int result;

	/* Set the default pagetable flags if not supplied */
	if (!info->kernpg_flag)
		info->kernpg_flag = _KERNPG_TABLE;

	/* Filter out unsupported __PAGE_KERNEL_* bits: */
	info->kernpg_flag &= __default_kernel_pte_mask;

	for (; addr < end; addr = next) {
		pgd_t *pgd = pgd_page + pgd_index(addr);
		p4d_t *p4d;

		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
		if (next > end)
			next = end;

		if (pgd_present(*pgd)) {
			p4d = p4d_offset(pgd, 0);
			result = ident_p4d_init(info, p4d, addr, next);
			if (result)
				return result;
			continue;
		}

		p4d = (p4d_t *)info->alloc_pgt_page(info->context);
		if (!p4d)
			return -ENOMEM;
		result = ident_p4d_init(info, p4d, addr, next);
		if (result)
			return result;
		if (pgtable_l5_enabled()) {
			set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag));
		} else {
			/*
			 * With p4d folded, pgd is equal to p4d.
			 * The pgd entry has to point to the pud page table
			 * in this case.
			 */
			pud_t *pud = pud_offset(p4d, 0);

			set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag));
		}
	}

	return 0;
}
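
/*
 * Usage sketch (not part of this file): a minimal caller in the style of
 * the kexec and hibernation code, which are the main users of this API.
 * The alloc_identity_pgt_page() callback and ident_map_one_region()
 * wrapper below are hypothetical names for illustration only; real
 * callers supply their own allocator, context and page flags.
 */
#if 0	/* illustration only */
static void *alloc_identity_pgt_page(void *context)
{
	/* Hand out one zeroed page for each new page-table level. */
	unsigned long p = get_zeroed_page(GFP_KERNEL);

	return (void *)p;	/* NULL on allocation failure */
}

static int ident_map_one_region(pgd_t *pgd, unsigned long start,
				unsigned long end)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_identity_pgt_page,
		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.offset		= 0,	/* pure 1:1 mapping */
	};

	/* kernpg_flag is left 0, so _KERNPG_TABLE is used by default. */
	return kernel_ident_mapping_init(&info, pgd, start, end);
}
#endif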