/*
 * xtensa mmu stuff
 *
 * Extracted from init.c
 */
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/cache.h>

#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/initialize_mmu.h>
#include <asm/io.h>
  18. #if defined(CONFIG_HIGHMEM)
  19. static void * __init init_pmd(unsigned long vaddr)
  20. {
  21. pgd_t *pgd = pgd_offset_k(vaddr);
  22. pmd_t *pmd = pmd_offset(pgd, vaddr);
  23. if (pmd_none(*pmd)) {
  24. unsigned i;
  25. pte_t *pte = alloc_bootmem_low_pages(PAGE_SIZE);
  26. for (i = 0; i < 1024; i++)
  27. pte_clear(NULL, 0, pte + i);
  28. set_pmd(pmd, __pmd(((unsigned long)pte) & PAGE_MASK));
  29. BUG_ON(pte != pte_offset_kernel(pmd, 0));
  30. pr_debug("%s: vaddr: 0x%08lx, pmd: 0x%p, pte: 0x%p\n",
  31. __func__, vaddr, pmd, pte);
  32. return pte;
  33. } else {
  34. return pte_offset_kernel(pmd, 0);
  35. }
  36. }
  37. static void __init fixedrange_init(void)
  38. {
  39. BUILD_BUG_ON(FIXADDR_SIZE > PMD_SIZE);
  40. init_pmd(__fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK);
  41. }
  42. #endif
/*
 * Set up the boot-time kernel page tables: clear swapper_pg_dir, and on
 * CONFIG_HIGHMEM kernels also install pte pages for the fixmap and
 * persistent-kmap (pkmap) regions.
 */
void __init paging_init(void)
{
	/* Start from a known-empty kernel pgd. */
	memset(swapper_pg_dir, 0, PAGE_SIZE);
#ifdef CONFIG_HIGHMEM
	fixedrange_init();
	/*
	 * Keep a handle on the pkmap pte page — presumably consumed by
	 * the kmap machinery initialized just below; verify in kmap_init.
	 */
	pkmap_page_table = init_pmd(PKMAP_BASE);
	kmap_init();
#endif
}
/*
 * Flush the mmu and reset associated registers to default values.
 * Runs early, per CPU; the statement order below is deliberate
 * (TLB configuration is written before the TLBs are flushed).
 */
void init_mmu(void)
{
#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
	/*
	 * Writing zeros to the instruction and data TLBCFG special
	 * registers ensure that valid values exist in the register.
	 *
	 * For existing PGSZID<w> fields, zero selects the first element
	 * of the page-size array.  For nonexistent PGSZID<w> fields,
	 * zero is the best value to write.  Also, when changing PGSZID<w>
	 * fields, the corresponding TLB must be flushed.
	 */
	set_itlbcfg_register(0);
	set_dtlbcfg_register(0);
#endif
#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
	/*
	 * Update the IO area mapping in case xtensa_kio_paddr has changed.
	 * Both cached and bypass (uncached) windows are remapped, for the
	 * itlb and dtlb alike.
	 *
	 * NOTE(review): the "+ 6" appears to select TLB way 6 for these
	 * fixed entries — confirm against the Xtensa ISA/MMU documentation.
	 */
	write_dtlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
			XCHAL_KIO_CACHED_VADDR + 6);
	write_itlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
			XCHAL_KIO_CACHED_VADDR + 6);
	write_dtlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
			XCHAL_KIO_BYPASS_VADDR + 6);
	write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
			XCHAL_KIO_BYPASS_VADDR + 6);
#endif

	/* Drop any stale translations before (re)enabling normal use. */
	local_flush_tlb_all();

	/* Set rasid register to a known value. */
	set_rasid_register(ASID_INSERT(ASID_USER_FIRST));

	/* Set PTEVADDR special register to the start of the page
	 * table, which is in kernel mappable space (ie. not
	 * statically mapped).  This register's value is undefined on
	 * reset.
	 */
	set_ptevaddr_register(PGTABLE_START);
}