ident_map.c

/*
 * Helper routines for building identity mapping page tables. This is
 * included by both the compressed kernel and the regular kernel.
 */
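
/*
 * The mapping to build is described by struct x86_mapping_info (declared
 * in asm/init.h): alloc_pgt_page()/context hand out page-table pages,
 * which must come back zeroed since the walkers test *_present() on fresh
 * tables; page_flag carries the flags for leaf entries; offset is what is
 * added to a physical address to get the virtual address it is mapped at
 * (0 for a pure identity mapping); and direct_gbpages requests 1 GiB leaf
 * pages at the PUD level.
 *
 * ident_pmd_init() fills one PMD page with 2 MiB leaf entries covering
 * [addr, end), leaving already-present entries untouched.
 */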
static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
			   unsigned long addr, unsigned long end)
{
	addr &= PMD_MASK;
	for (; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd = pmd_page + pmd_index(addr);

		if (pmd_present(*pmd))
			continue;

		set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
	}
}
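
/*
 * Fill one PUD page for [addr, end). With direct_gbpages set, install
 * 1 GiB leaf entries directly; otherwise allocate a PMD page for each
 * empty slot and populate it via ident_pmd_init().
 */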
static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (next > end)
			next = end;

		if (info->direct_gbpages) {
			pud_t pudval;

			if (pud_present(*pud))
				continue;

			addr &= PUD_MASK;
			pudval = __pud((addr - info->offset) | info->page_flag);
			set_pud(pud, pudval);
			continue;
		}

		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, 0);
			ident_pmd_init(info, pmd, addr, next);
			continue;
		}

		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
		if (!pmd)
			return -ENOMEM;
		ident_pmd_init(info, pmd, addr, next);
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}

	return 0;
}
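
/*
 * Fill one P4D page for [addr, end), allocating a PUD page for each empty
 * slot, descending into ident_pud_init() and propagating any -ENOMEM.
 */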
static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;
	int result;

	for (; addr < end; addr = next) {
		p4d_t *p4d = p4d_page + p4d_index(addr);
		pud_t *pud;

		next = (addr & P4D_MASK) + P4D_SIZE;
		if (next > end)
			next = end;

		if (p4d_present(*p4d)) {
			pud = pud_offset(p4d, 0);
			result = ident_pud_init(info, pud, addr, next);
			if (result)
				return result;
			continue;
		}

		pud = (pud_t *)info->alloc_pgt_page(info->context);
		if (!pud)
			return -ENOMEM;
		/* Check the result here too: ident_pud_init() can fail with -ENOMEM. */
		result = ident_pud_init(info, pud, addr, next);
		if (result)
			return result;
		set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
	}

	return 0;
}
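
/*
 * Entry point: map the physical range [pstart, pend) at the virtual range
 * [pstart + info->offset, pend + info->offset), allocating intermediate
 * page tables through info->alloc_pgt_page(). Returns 0 on success and
 * -ENOMEM if a page-table allocation fails.
 */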
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long pstart, unsigned long pend)
{
	unsigned long addr = pstart + info->offset;
	unsigned long end = pend + info->offset;
	unsigned long next;
	int result;

	for (; addr < end; addr = next) {
		pgd_t *pgd = pgd_page + pgd_index(addr);
		p4d_t *p4d;

		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
		if (next > end)
			next = end;

		if (pgd_present(*pgd)) {
			p4d = p4d_offset(pgd, 0);
			result = ident_p4d_init(info, p4d, addr, next);
			if (result)
				return result;
			continue;
		}

		p4d = (p4d_t *)info->alloc_pgt_page(info->context);
		if (!p4d)
			return -ENOMEM;
		result = ident_p4d_init(info, p4d, addr, next);
		if (result)
			return result;

		if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
			set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE));
		} else {
			/*
			 * With p4d folded, pgd is equal to p4d.
			 * The pgd entry has to point to the pud page table
			 * in this case.
			 */
			pud_t *pud = pud_offset(p4d, 0);
			set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
		}
	}

	return 0;
}
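
/*
 * Usage sketch (not part of the original file): one plausible caller,
 * modeled loosely on users such as kexec. The allocator, the page flags
 * and the range below are illustrative assumptions, not the kernel's
 * actual call sites.
 */
#if 0
static void *example_alloc_pgt_page(void *context)
{
	/* The walkers above expect each new page table to be zeroed. */
	return (void *)get_zeroed_page(GFP_KERNEL);
}

static int example_build_ident_map(pgd_t *pgd_page)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= example_alloc_pgt_page,
		.context	= NULL,
		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.offset		= 0,	/* VA == PA: a pure identity map */
	};

	/* Identity-map the first 1 GiB of physical memory. */
	return kernel_ident_mapping_init(&info, pgd_page, 0, 1UL << 30);
}
#endif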