/*
 * S390 version
 *  Copyright IBM Corp. 1999, 2000
 *  Author(s): Hartmut Penner (hp@de.ibm.com)
 */
#ifndef _S390_PAGE_H
#define _S390_PAGE_H

#include <linux/const.h>
#include <asm/types.h>

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))
#define PAGE_DEFAULT_ACC	0
#define PAGE_DEFAULT_KEY	(PAGE_DEFAULT_ACC << 4)
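
/*
 * Worked example: with PAGE_SHIFT = 12, PAGE_SIZE is 0x1000 (4 KB) and
 * PAGE_MASK is ~0xfffUL, so (addr & PAGE_MASK) rounds addr down to its
 * page boundary and (addr & ~PAGE_MASK) is the offset within the page.
 * PAGE_DEFAULT_KEY places the default access-control bits in the ACC
 * field of the storage key byte (see _PAGE_ACC_BITS below).
 */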

#define HPAGE_SHIFT	20
#define HPAGE_SIZE	(1UL << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE		2
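
/*
 * With HPAGE_SHIFT = 20 a huge page is 1 MB; HUGETLB_PAGE_ORDER is
 * 20 - 12 = 8, i.e. one huge page spans 2^8 = 256 base pages.
 */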

#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define ARCH_HAS_HUGE_PTE_TYPE
#define ARCH_HAS_PREPARE_HUGEPAGE
#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH

#include <asm/setup.h>

#ifndef __ASSEMBLY__

void __storage_key_init_range(unsigned long start, unsigned long end);

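/*
 * With PAGE_DEFAULT_KEY == 0 (see above) the condition below is a
 * compile-time constant zero, so the call to __storage_key_init_range()
 * is optimized away; the out-of-line helper only runs when a non-zero
 * default storage key is configured.
 */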
static inline void storage_key_init_range(unsigned long start, unsigned long end)
{
	if (PAGE_DEFAULT_KEY)
		__storage_key_init_range(start, end);
}

#define clear_page(page)	memset((page), 0, PAGE_SIZE)

/*
 * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
 * bypass caches when copying a page. Especially when copying huge pages
 * this keeps L1 and L2 data caches alive.
 */
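/*
 * mvcl operand register pairs: r2/r3 hold the destination address and
 * length, r4/r5 the source address and length. The high byte of the
 * source length register is the padding byte, so 0xb0001000 selects
 * pad byte 0xb0 with a source length of 0x1000 (one page).
 */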
static inline void copy_page(void *to, void *from)
{
	register void *reg2 asm ("2") = to;
	register unsigned long reg3 asm ("3") = 0x1000;
	register void *reg4 asm ("4") = from;
	register unsigned long reg5 asm ("5") = 0xb0001000;

	asm volatile(
		" mvcl 2,4"
		: "+d" (reg2), "+d" (reg3), "+d" (reg4), "+d" (reg5)
		: : "memory", "cc");
}

#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE

/*
 * These are used to make use of C type-checking.
 */

typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgste; } pgste_t;
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long p4d; } p4d_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef pte_t *pgtable_t;

#define pgprot_val(x)	((x).pgprot)
#define pgste_val(x)	((x).pgste)
#define pte_val(x)	((x).pte)
#define pmd_val(x)	((x).pmd)
#define pud_val(x)	((x).pud)
#define p4d_val(x)	((x).p4d)
#define pgd_val(x)	((x).pgd)

#define __pgste(x)	((pgste_t) { (x) } )
#define __pte(x)	((pte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pud(x)	((pud_t) { (x) } )
#define __p4d(x)	((p4d_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )
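
/*
 * Illustration (not part of the original header) of the type checking
 * these wrappers buy: every table level is a distinct struct type, so
 * levels cannot be mixed up silently:
 *
 *	pte_t pte = __pte(0x12345000UL);	// wrap a raw entry value
 *	unsigned long v = pte_val(pte);		// unwrap explicitly
 *	pmd_t pmd = pte;			// rejected: not a pmd_t
 */

/*
 * Set the storage key of the 4K page at @addr. When the page is not
 * mapped anywhere, the .insn form below encodes sske with m3 = 8, the
 * nonquiescing variant, which avoids the system-wide quiescing that a
 * plain sske performs.
 */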
static inline void page_set_storage_key(unsigned long addr,
					unsigned char skey, int mapped)
{
	if (!mapped)
		asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
			     : : "d" (skey), "a" (addr));
	else
		asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
}
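
/*
 * Read back the storage key of the 4K page at @addr (iske, insert
 * storage key extended).
 */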
static inline unsigned char page_get_storage_key(unsigned long addr)
{
	unsigned char skey;

	asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
	return skey;
}
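
/*
 * rrbe (reset reference bit extended) clears the reference bit of the
 * page at @addr and reports its previous R/C state in the condition
 * code; ipm copies the condition code into bits 2-3 of the result
 * register and srl 28 shifts it down, so the returned value is the raw
 * cc (0-3), with bit 1 holding the old reference-bit state.
 */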
static inline int page_reset_referenced(unsigned long addr)
{
	int cc;

	asm volatile(
		" rrbe 0,%1\n"
		" ipm %0\n"
		" srl %0,28\n"
		: "=d" (cc) : "a" (addr) : "cc");
	return cc;
}

/* Bits in the storage key */
#define _PAGE_CHANGED		0x02	/* HW changed bit */
#define _PAGE_REFERENCED	0x04	/* HW referenced bit */
#define _PAGE_FP_BIT		0x08	/* HW fetch protection bit */
#define _PAGE_ACC_BITS		0xf0	/* HW access control bits */
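
/*
 * Layout of the storage key byte these bits describe (MSB first):
 *
 *	bits 0-3: ACC (0xf0)	bit 4: F (0x08)
 *	bit 5:    R   (0x04)	bit 6: C (0x02)	bit 7: unused
 */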

struct page;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);
void arch_set_page_dat(struct page *page, int order);
void arch_set_page_nodat(struct page *page, int order);
int arch_test_page_nodat(struct page *page);
void arch_set_page_states(int make_stable);
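
/*
 * No page frame is ever exposed through /dev/mem on s390: the generic
 * code treats a zero return as "access denied" for every pfn.
 */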
static inline int devmem_is_allowed(unsigned long pfn)
{
	return 0;
}

#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE

#endif /* !__ASSEMBLY__ */

#define __PAGE_OFFSET	0x0UL
#define PAGE_OFFSET	0x0UL
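
/*
 * With PAGE_OFFSET == 0 the kernel address space is identity-mapped,
 * so __pa()/__va() reduce to plain casts; e.g. for kaddr == 0x5000,
 * __pa(kaddr) is 0x5000 and virt_to_page(kaddr) is pfn_to_page(5).
 */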
#define __pa(x)			(unsigned long)(x)
#define __va(x)			(void *)(unsigned long)(x)
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)
#define page_to_virt(page)	pfn_to_virt(page_to_pfn(page))

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* _S390_PAGE_H */