pgalloc.h

/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_PGALLOC_H
#define __ASM_CSKY_PGALLOC_H

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/sched.h>
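
/*
 * pmd_populate_kernel()/pmd_populate() wire a freshly allocated PTE
 * table into a PMD entry. On C-SKY the PMD entry holds the physical
 * address of the PTE table, hence the __pa() conversions below.
 */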
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
                                        pte_t *pte)
{
        set_pmd(pmd, __pmd(__pa(pte)));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
                                pgtable_t pte)
{
        set_pmd(pmd, __pmd(__pa(page_address(pte))));
}

#define pmd_pgtable(pmd) pmd_page(pmd)

extern void pgd_init(unsigned long *p);
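
/*
 * Note on the 0x80000000 test below: user space occupies the low 2 GB
 * on this 32-bit port, so the check distinguishes kernel from user page
 * tables. Kernel PTEs are pre-filled with 0x1 rather than zeroed, which
 * appears to match what pte_clear() writes for kernel addresses,
 * presumably keeping the hardware global bit set in not-yet-present
 * entries.
 */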
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                        unsigned long address)
{
        pte_t *pte;
        unsigned long *kaddr, i;

        pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL,
                                         PTE_ORDER);
        /* __GFP_RETRY_MAYFAIL allocations can fail; don't deref NULL */
        if (!pte)
                return NULL;

        kaddr = (unsigned long *)pte;
        if (address & 0x80000000)
                for (i = 0; i < (PAGE_SIZE/4); i++)
                        *(kaddr + i) = 0x1;
        else
                clear_page(kaddr);

        return pte;
}
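
/*
 * pte_alloc_one() is the user-page variant: the page may come from
 * highmem, so it is temporarily mapped with kmap_atomic() for the
 * initialization, and pgtable_page_ctor() registers it as a page-table
 * page.
 */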
static inline struct page *pte_alloc_one(struct mm_struct *mm,
                                        unsigned long address)
{
        struct page *pte;
        unsigned long *kaddr, i;

        pte = alloc_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, PTE_ORDER);
        if (pte) {
                kaddr = kmap_atomic(pte);
                if (address & 0x80000000) {
                        for (i = 0; i < (PAGE_SIZE/4); i++)
                                *(kaddr + i) = 0x1;
                } else
                        clear_page(kaddr);
                kunmap_atomic(kaddr);
                pgtable_page_ctor(pte);
        }

        return pte;
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
        free_pages((unsigned long)pte, PTE_ORDER);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
        pgtable_page_dtor(pte);
        __free_pages(pte, PTE_ORDER);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        free_pages((unsigned long)pgd, PGD_ORDER);
}
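
/*
 * pgd_alloc() builds a fresh page directory: pgd_init() sets up the
 * user entries and the kernel half is copied from init_mm, so every
 * process shares the same kernel mappings. On CONFIG_CPU_NEED_TLBSYNC
 * parts the table is also written back from the dcache, presumably so
 * the hardware TLB refill sees the new entries.
 */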
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *ret;
        pgd_t *init;

        ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
        if (ret) {
                init = pgd_offset(&init_mm, 0UL);
                pgd_init((unsigned long *)ret);
                memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
                        (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
                /* prevent out-of-order execution */
                smp_mb();
#ifdef CONFIG_CPU_NEED_TLBSYNC
                dcache_wb_range((unsigned int)ret,
                                (unsigned int)(ret + PTRS_PER_PGD));
#endif
        }

        return ret;
}
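
/*
 * __pte_free_tlb() is the mmu_gather tear-down path: run the page-table
 * destructor first, then let tlb_remove_page() defer the actual free
 * until after the TLB flush.
 */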
#define __pte_free_tlb(tlb, pte, address)       \
do {                                            \
        pgtable_page_dtor(pte);                 \
        tlb_remove_page(tlb, pte);              \
} while (0)

#define check_pgt_cache() do {} while (0)

extern void pagetable_init(void);
extern void pre_mmu_init(void);
extern void pre_trap_init(void);

#endif /* __ASM_CSKY_PGALLOC_H */