/* arch/powerpc/mm/pgtable-hash64.c */
/*
 * Copyright 2005, Paul Mackerras, IBM Corporation.
 * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation.
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
  11. #include <linux/sched.h>
  12. #include <asm/pgalloc.h>
  13. #include <asm/tlb.h>
  14. #include "mmu_decl.h"
  15. #ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * On hash-based CPUs, the vmemmap is bolted in the hash table.
 */
  20. int __meminit hash__vmemmap_create_mapping(unsigned long start,
  21. unsigned long page_size,
  22. unsigned long phys)
  23. {
  24. int rc = htab_bolt_mapping(start, start + page_size, phys,
  25. pgprot_val(PAGE_KERNEL),
  26. mmu_vmemmap_psize, mmu_kernel_ssize);
  27. if (rc < 0) {
  28. int rc2 = htab_remove_mapping(start, start + page_size,
  29. mmu_vmemmap_psize,
  30. mmu_kernel_ssize);
  31. BUG_ON(rc2 && (rc2 != -ENOENT));
  32. }
  33. return rc;
  34. }
  35. #ifdef CONFIG_MEMORY_HOTPLUG
  36. void hash__vmemmap_remove_mapping(unsigned long start,
  37. unsigned long page_size)
  38. {
  39. int rc = htab_remove_mapping(start, start + page_size,
  40. mmu_vmemmap_psize,
  41. mmu_kernel_ssize);
  42. BUG_ON((rc < 0) && (rc != -ENOENT));
  43. WARN_ON(rc == -ENOENT);
  44. }
  45. #endif
  46. #endif /* CONFIG_SPARSEMEM_VMEMMAP */
/*
 * map_kernel_page currently only called by __ioremap
 * map_kernel_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
 *
 * @ea:    effective (virtual) address to map
 * @pa:    physical address to back it with
 * @flags: raw PTE protection bits for the mapping
 *
 * Returns 0 on success, -ENOMEM if a page-table level (or the bolted
 * hash entry, before slab is up) could not be allocated.
 */
int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/* Compile-time sanity: user address space must fit in the
	 * hash page-table range. */
	BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE);
	if (slab_is_available()) {
		/* Normal case: walk/allocate the kernel page table
		 * (pgd -> pud -> pmd -> pte) and install the PTE. */
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
	} else {
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping. Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
				      mmu_io_psize, mmu_kernel_ssize)) {
			printk(KERN_ERR "Failed to do bolted mapping IO "
			       "memory at %016lx !\n", pa);
			return -ENOMEM;
		}
	}

	/* Make the mapping visible before the caller uses it —
	 * presumably ordering against the hash fault path; confirm. */
	smp_wmb();
	return 0;
}