/*
 * PowerPC memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _PPC64_MMU_H_
#define _PPC64_MMU_H_

#include <linux/config.h>
#include <asm/page.h>
#include <linux/stringify.h>

#ifndef __ASSEMBLY__
  18. /* Time to allow for more things here */
  19. typedef unsigned long mm_context_id_t;
  20. typedef struct {
  21. mm_context_id_t id;
  22. #ifdef CONFIG_HUGETLB_PAGE
  23. pgd_t *huge_pgdir;
  24. u16 htlb_segs; /* bitmask */
  25. #endif
  26. } mm_context_t;
  27. #define STE_ESID_V 0x80
  28. #define STE_ESID_KS 0x20
  29. #define STE_ESID_KP 0x10
  30. #define STE_ESID_N 0x08
  31. #define STE_VSID_SHIFT 12
  32. struct stab_entry {
  33. unsigned long esid_data;
  34. unsigned long vsid_data;
  35. };
  36. /* Hardware Page Table Entry */
  37. #define HPTES_PER_GROUP 8
  38. typedef struct {
  39. unsigned long avpn:57; /* vsid | api == avpn */
  40. unsigned long : 2; /* Software use */
  41. unsigned long bolted: 1; /* HPTE is "bolted" */
  42. unsigned long lock: 1; /* lock on pSeries SMP */
  43. unsigned long l: 1; /* Virtual page is large (L=1) or 4 KB (L=0) */
  44. unsigned long h: 1; /* Hash function identifier */
  45. unsigned long v: 1; /* Valid (v=1) or invalid (v=0) */
  46. } Hpte_dword0;
  47. typedef struct {
  48. unsigned long pp0: 1; /* Page protection bit 0 */
  49. unsigned long ts: 1; /* Tag set bit */
  50. unsigned long rpn: 50; /* Real page number */
  51. unsigned long : 2; /* Reserved */
  52. unsigned long ac: 1; /* Address compare */
  53. unsigned long r: 1; /* Referenced */
  54. unsigned long c: 1; /* Changed */
  55. unsigned long w: 1; /* Write-thru cache mode */
  56. unsigned long i: 1; /* Cache inhibited */
  57. unsigned long m: 1; /* Memory coherence required */
  58. unsigned long g: 1; /* Guarded */
  59. unsigned long n: 1; /* No-execute */
  60. unsigned long pp: 2; /* Page protection bits 1:2 */
  61. } Hpte_dword1;
  62. typedef struct {
  63. char padding[6]; /* padding */
  64. unsigned long : 6; /* padding */
  65. unsigned long flags: 10; /* HPTE flags */
  66. } Hpte_dword1_flags;
  67. typedef struct {
  68. union {
  69. unsigned long dword0;
  70. Hpte_dword0 dw0;
  71. } dw0;
  72. union {
  73. unsigned long dword1;
  74. Hpte_dword1 dw1;
  75. Hpte_dword1_flags flags;
  76. } dw1;
  77. } HPTE;
  78. /* Values for PP (assumes Ks=0, Kp=1) */
  79. /* pp0 will always be 0 for linux */
  80. #define PP_RWXX 0 /* Supervisor read/write, User none */
  81. #define PP_RWRX 1 /* Supervisor read/write, User read */
  82. #define PP_RWRW 2 /* Supervisor read/write, User read/write */
  83. #define PP_RXRX 3 /* Supervisor read, User read */
  84. extern HPTE * htab_address;
  85. extern unsigned long htab_hash_mask;
  86. static inline unsigned long hpt_hash(unsigned long vpn, int large)
  87. {
  88. unsigned long vsid;
  89. unsigned long page;
  90. if (large) {
  91. vsid = vpn >> 4;
  92. page = vpn & 0xf;
  93. } else {
  94. vsid = vpn >> 16;
  95. page = vpn & 0xffff;
  96. }
  97. return (vsid & 0x7fffffffffUL) ^ page;
  98. }
  99. static inline void __tlbie(unsigned long va, int large)
  100. {
  101. /* clear top 16 bits, non SLS segment */
  102. va &= ~(0xffffULL << 48);
  103. if (large) {
  104. va &= HPAGE_MASK;
  105. asm volatile("tlbie %0,1" : : "r"(va) : "memory");
  106. } else {
  107. va &= PAGE_MASK;
  108. asm volatile("tlbie %0,0" : : "r"(va) : "memory");
  109. }
  110. }
  111. static inline void tlbie(unsigned long va, int large)
  112. {
  113. asm volatile("ptesync": : :"memory");
  114. __tlbie(va, large);
  115. asm volatile("eieio; tlbsync; ptesync": : :"memory");
  116. }
  117. static inline void __tlbiel(unsigned long va)
  118. {
  119. /* clear top 16 bits, non SLS segment */
  120. va &= ~(0xffffULL << 48);
  121. va &= PAGE_MASK;
  122. /*
  123. * Thanks to Alan Modra we are now able to use machine specific
  124. * assembly instructions (like tlbiel) by using the gas -many flag.
  125. * However we have to support older toolchains so for the moment
  126. * we hardwire it.
  127. */
  128. #if 0
  129. asm volatile("tlbiel %0" : : "r"(va) : "memory");
  130. #else
  131. asm volatile(".long 0x7c000224 | (%0 << 11)" : : "r"(va) : "memory");
  132. #endif
  133. }
  134. static inline void tlbiel(unsigned long va)
  135. {
  136. asm volatile("ptesync": : :"memory");
  137. __tlbiel(va);
  138. asm volatile("ptesync": : :"memory");
  139. }
  140. /*
  141. * Handle a fault by adding an HPTE. If the address can't be determined
  142. * to be valid via Linux page tables, return 1. If handled return 0
  143. */
  144. extern int __hash_page(unsigned long ea, unsigned long access,
  145. unsigned long vsid, pte_t *ptep, unsigned long trap,
  146. int local);
  147. extern void htab_finish_init(void);
  148. #endif /* __ASSEMBLY__ */
  149. /*
  150. * Location of cpu0's segment table
  151. */
  152. #define STAB0_PAGE 0x9
  153. #define STAB0_PHYS_ADDR (STAB0_PAGE<<PAGE_SHIFT)
  154. #define STAB0_VIRT_ADDR (KERNELBASE+STAB0_PHYS_ADDR)
  155. #define SLB_NUM_BOLTED 3
  156. #define SLB_CACHE_ENTRIES 8
  157. /* Bits in the SLB ESID word */
  158. #define SLB_ESID_V 0x0000000008000000 /* entry is valid */
  159. /* Bits in the SLB VSID word */
  160. #define SLB_VSID_SHIFT 12
  161. #define SLB_VSID_KS 0x0000000000000800
  162. #define SLB_VSID_KP 0x0000000000000400
  163. #define SLB_VSID_N 0x0000000000000200 /* no-execute */
  164. #define SLB_VSID_L 0x0000000000000100 /* largepage (4M) */
  165. #define SLB_VSID_C 0x0000000000000080 /* class */
  166. #define SLB_VSID_KERNEL (SLB_VSID_KP|SLB_VSID_C)
  167. #define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS)
  168. #define VSID_MULTIPLIER ASM_CONST(200730139) /* 28-bit prime */
  169. #define VSID_BITS 36
  170. #define VSID_MODULUS ((1UL<<VSID_BITS)-1)
  171. #define CONTEXT_BITS 20
  172. #define USER_ESID_BITS 15
  173. /*
  174. * This macro generates asm code to compute the VSID scramble
  175. * function. Used in slb_allocate() and do_stab_bolted. The function
  176. * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
  177. *
  178. * rt = register continaing the proto-VSID and into which the
  179. * VSID will be stored
  180. * rx = scratch register (clobbered)
  181. *
  182. * - rt and rx must be different registers
  183. * - The answer will end up in the low 36 bits of rt. The higher
  184. * bits may contain other garbage, so you may need to mask the
  185. * result.
  186. */
  187. #define ASM_VSID_SCRAMBLE(rt, rx) \
  188. lis rx,VSID_MULTIPLIER@h; \
  189. ori rx,rx,VSID_MULTIPLIER@l; \
  190. mulld rt,rt,rx; /* rt = rt * MULTIPLIER */ \
  191. \
  192. srdi rx,rt,VSID_BITS; \
  193. clrldi rt,rt,(64-VSID_BITS); \
  194. add rt,rt,rx; /* add high and low bits */ \
  195. /* Now, r3 == VSID (mod 2^36-1), and lies between 0 and \
  196. * 2^36-1+2^28-1. That in particular means that if r3 >= \
  197. * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \
  198. * the bit clear, r3 already has the answer we want, if it \
  199. * doesn't, the answer is the low 36 bits of r3+1. So in all \
  200. * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
  201. addi rx,rt,1; \
  202. srdi rx,rx,VSID_BITS; /* extract 2^36 bit */ \
  203. add rt,rt,rx
  204. #endif /* _PPC64_MMU_H_ */