pgalloc.h

#ifndef _ASM_IA64_PGALLOC_H
#define _ASM_IA64_PGALLOC_H

/*
 * This file contains the functions and defines necessary to allocate
 * page tables.
 *
 * This hopefully works with any (fixed) ia-64 page-size, as defined
 * in <asm/page.h> (currently 8192).
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
 */

#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/threads.h>

#include <asm/mmu_context.h>
/*
 * Very stupidly, we used to get new pgd's and pmd's, init their contents
 * to point to the NULL versions of the next level page table, later on
 * completely re-init them the same way, then free them up.  This wasted
 * a lot of work and caused unnecessary memory traffic.  How broken...
 * We fix this by caching them.
 */
#define pgd_quicklist		(local_cpu_data->pgd_quick)
#define pmd_quicklist		(local_cpu_data->pmd_quick)
#define pgtable_cache_size	(local_cpu_data->pgtable_cache_sz)
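
/*
 * Illustrative sketch (not part of the original header): the quicklists
 * are intrusive singly-linked lists.  The first word of each cached page
 * stores the address of the next free page, so no extra bookkeeping
 * memory is needed.  example_quicklist_pop() is a hypothetical helper
 * showing the pop operation in isolation; it is never compiled.
 */
#if 0	/* example only */
static inline unsigned long *
example_quicklist_pop (unsigned long **head)
{
	unsigned long *page = *head;

	if (page != NULL) {
		*head = (unsigned long *)(*page);	/* unlink from the list */
		page[0] = 0;				/* clear the link word */
	}
	return page;	/* NULL if the quicklist was empty */
}
#endif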

static inline pgd_t*
pgd_alloc_one_fast (struct mm_struct *mm)
{
	unsigned long *ret = NULL;

	preempt_disable();

	/* pop the head of the per-CPU pgd quicklist, if non-empty */
	ret = pgd_quicklist;
	if (likely(ret != NULL)) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;		/* clear the link word */
		--pgtable_cache_size;
	}

	preempt_enable();
	return (pgd_t *) ret;
}

static inline pgd_t*
pgd_alloc (struct mm_struct *mm)
{
	/* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
	pgd_t *pgd = pgd_alloc_one_fast(mm);

	if (unlikely(pgd == NULL))
		pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	return pgd;
}

static inline void
pgd_free (pgd_t *pgd)
{
	preempt_disable();
	/* push the page onto the quicklist; its first word links to the old head */
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	++pgtable_cache_size;
	preempt_enable();
}

static inline void
pud_populate (struct mm_struct *mm, pud_t *pud_entry, pmd_t *pmd)
{
	pud_val(*pud_entry) = __pa(pmd);
}

static inline pmd_t*
pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
{
	unsigned long *ret = NULL;

	preempt_disable();

	ret = (unsigned long *)pmd_quicklist;
	if (likely(ret != NULL)) {
		pmd_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		--pgtable_cache_size;
	}

	preempt_enable();
	return (pmd_t *)ret;
}

static inline pmd_t*
pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
{
	pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);

	return pmd;
}

static inline void
pmd_free (pmd_t *pmd)
{
	preempt_disable();
	*(unsigned long *)pmd = (unsigned long) pmd_quicklist;
	pmd_quicklist = (unsigned long *) pmd;
	++pgtable_cache_size;
	preempt_enable();
}

/* pmd pages are recycled through the quicklist, so batched TLB teardown
   simply hands them to pmd_free(). */
#define __pmd_free_tlb(tlb, pmd)	pmd_free(pmd)

static inline void
pmd_populate (struct mm_struct *mm, pmd_t *pmd_entry, struct page *pte)
{
	pmd_val(*pmd_entry) = page_to_phys(pte);
}

static inline void
pmd_populate_kernel (struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte)
{
	pmd_val(*pmd_entry) = __pa(pte);
}

static inline struct page *
pte_alloc_one (struct mm_struct *mm, unsigned long addr)
{
	struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);

	return pte;
}

static inline pte_t *
pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);

	return pte;
}

static inline void
pte_free (struct page *pte)
{
	__free_page(pte);
}

static inline void
pte_free_kernel (pte_t *pte)
{
	free_page((unsigned long) pte);
}

#define __pte_free_tlb(tlb, pte)	tlb_remove_page((tlb), (pte))
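
/*
 * Sketch (not in the original source): pairing a kernel pte allocation
 * with pmd_populate_kernel().  The pte page is zeroed by __GFP_ZERO, so
 * every entry starts out empty.  The function name
 * example_map_kernel_pte() is hypothetical and never compiled.
 */
#if 0	/* example only */
static pte_t *
example_map_kernel_pte (struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	pte_t *pte = pte_alloc_one_kernel(mm, addr);

	if (pte != NULL)
		pmd_populate_kernel(mm, pmd, pte);	/* pmd entry := __pa(pte) */
	return pte;
}
#endif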

extern void check_pgt_cache (void);

#endif /* _ASM_IA64_PGALLOC_H */