/*
 * Copyright IBM Corp. 2011
 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/facility.h>
#include <asm/pgtable.h>
#include <asm/page.h>

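/*
 * Set the storage key of the page at @addr to @skey via SSKE.
 * The M3 field (9) sets the nonquiescing and multiple-block controls,
 * so the CPU may key several 4K blocks of a 1M frame per execution
 * and returns the next address to process in @addr.
 */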
static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
{
	asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
		     : [addr] "+a" (addr) : [skey] "d" (skey));
	return addr;
}

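/*
 * Initialize the storage keys of a physical address range to
 * PAGE_DEFAULT_KEY. With EDAT1 a whole 1M frame is keyed by looping
 * sske_frame() up to the frame boundary; without it every 4K page is
 * set individually.
 */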
void __storage_key_init_range(unsigned long start, unsigned long end)
{
	unsigned long boundary, size;

	if (!PAGE_DEFAULT_KEY)
		return;
	while (start < end) {
		if (MACHINE_HAS_EDAT1) {
			/* set storage keys for a 1MB frame */
			size = 1UL << 20;
			boundary = (start + size) & ~(size - 1);
			if (boundary <= end) {
				do {
					start = sske_frame(start, PAGE_DEFAULT_KEY);
				} while (start < boundary);
				continue;
			}
		}
		page_set_storage_key(start, PAGE_DEFAULT_KEY, 0);
		start += PAGE_SIZE;
	}
}

#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

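/*
 * The counters hold the number of direct mappings of each size; the
 * shifts convert a count to kB: 4K = 2^2 kB, 1M = 2^10 kB, 2G = 2^21 kB.
 */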
void arch_report_meminfo(struct seq_file *m)
{
	seq_printf(m, "DirectMap4k:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_4K]) << 2);
	seq_printf(m, "DirectMap1M:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_1M]) << 10);
	seq_printf(m, "DirectMap2G:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_2G]) << 21);
}
#endif /* CONFIG_PROC_FS */

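/*
 * Replace a DAT table entry and flush the matching TLB entries.
 * With EDAT2, CRDTE does both atomically in one instruction. Older
 * machines fall back to compare-and-swap-and-purge: CSPG if IDTE is
 * available, else CSP, which can only exchange the lower word of the
 * entry.
 */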
static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
		    unsigned long dtt)
{
	unsigned long table, mask;

	mask = 0;
	if (MACHINE_HAS_EDAT2) {
		switch (dtt) {
		case CRDTE_DTT_REGION3:
			mask = ~(PTRS_PER_PUD * sizeof(pud_t) - 1);
			break;
		case CRDTE_DTT_SEGMENT:
			mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
			break;
		case CRDTE_DTT_PAGE:
			mask = ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
			break;
		}
		table = (unsigned long)old & mask;
		crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce);
	} else if (MACHINE_HAS_IDTE) {
		cspg(old, *old, new);
	} else {
		csp((unsigned int *)old + 1, *old, new);
	}
}

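/*
 * Apply the SET_MEMORY_* flags to all 4K ptes mapping addr..end.
 * Fails with -EINVAL if any pte in the range is unmapped.
 */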
static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
			  unsigned long flags)
{
	pte_t *ptep, new;

	ptep = pte_offset(pmdp, addr);
	do {
		new = *ptep;
		if (pte_none(new))
			return -EINVAL;
		if (flags & SET_MEMORY_RO)
			new = pte_wrprotect(new);
		else if (flags & SET_MEMORY_RW)
			new = pte_mkwrite(pte_mkdirty(new));
		if ((flags & SET_MEMORY_NX) && MACHINE_HAS_NX)
			pte_val(new) |= _PAGE_NOEXEC;
		else if (flags & SET_MEMORY_X)
			pte_val(new) &= ~_PAGE_NOEXEC;
		pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE);
		ptep++;
		addr += PAGE_SIZE;
		cond_resched();
	} while (addr < end);
	return 0;
}

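/*
 * Split a 1M segment mapping into 4K page mappings: allocate a page
 * table, copy the segment's protect and noexec bits into each new pte,
 * then atomically replace the large pmd and update the DirectMap
 * counters.
 */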
static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
{
	unsigned long pte_addr, prot;
	pte_t *pt_dir, *ptep;
	pmd_t new;
	int i, ro, nx;

	pt_dir = vmem_pte_alloc();
	if (!pt_dir)
		return -ENOMEM;
	pte_addr = pmd_pfn(*pmdp) << PAGE_SHIFT;
	ro = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT);
	nx = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_NOEXEC);
	prot = pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
	if (!nx)
		prot &= ~_PAGE_NOEXEC;
	ptep = pt_dir;
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_val(*ptep) = pte_addr | prot;
		pte_addr += PAGE_SIZE;
		ptep++;
	}
	pmd_val(new) = __pa(pt_dir) | _SEGMENT_ENTRY;
	pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
	update_page_count(PG_DIRECT_MAP_4K, PTRS_PER_PTE);
	update_page_count(PG_DIRECT_MAP_1M, -1);
	return 0;
}

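/* Apply the SET_MEMORY_* flags to a single 1M segment entry. */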
static void modify_pmd_page(pmd_t *pmdp, unsigned long addr,
			    unsigned long flags)
{
	pmd_t new = *pmdp;

	if (flags & SET_MEMORY_RO)
		new = pmd_wrprotect(new);
	else if (flags & SET_MEMORY_RW)
		new = pmd_mkwrite(pmd_mkdirty(new));
	if ((flags & SET_MEMORY_NX) && MACHINE_HAS_NX)
		pmd_val(new) |= _SEGMENT_ENTRY_NOEXEC;
	else if (flags & SET_MEMORY_X)
		pmd_val(new) &= ~_SEGMENT_ENTRY_NOEXEC;
	pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
}

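/*
 * Walk the pmds covering addr..end. A large (1M) pmd is split when
 * addr is not segment aligned or the range ends inside the segment;
 * fully covered segments are modified in place.
 */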
static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
			  unsigned long flags)
{
	unsigned long next;
	pmd_t *pmdp;
	int rc = 0;

	pmdp = pmd_offset(pudp, addr);
	do {
		if (pmd_none(*pmdp))
			return -EINVAL;
		next = pmd_addr_end(addr, end);
		if (pmd_large(*pmdp)) {
			if (addr & ~PMD_MASK || addr + PMD_SIZE > next) {
				rc = split_pmd_page(pmdp, addr);
				if (rc)
					return rc;
				continue;
			}
			modify_pmd_page(pmdp, addr, flags);
		} else {
			rc = walk_pte_level(pmdp, addr, next, flags);
			if (rc)
				return rc;
		}
		pmdp++;
		addr = next;
		cond_resched();
	} while (addr < end);
	return rc;
}

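/*
 * Split a 2G region-third mapping into 1M segment mappings; the
 * region's protect and noexec bits carry over into each new pmd.
 */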
static int split_pud_page(pud_t *pudp, unsigned long addr)
{
	unsigned long pmd_addr, prot;
	pmd_t *pm_dir, *pmdp;
	pud_t new;
	int i, ro, nx;

	pm_dir = vmem_pmd_alloc();
	if (!pm_dir)
		return -ENOMEM;
	pmd_addr = pud_pfn(*pudp) << PAGE_SHIFT;
	ro = !!(pud_val(*pudp) & _REGION_ENTRY_PROTECT);
	nx = !!(pud_val(*pudp) & _REGION_ENTRY_NOEXEC);
	prot = pgprot_val(ro ? SEGMENT_KERNEL_RO : SEGMENT_KERNEL);
	if (!nx)
		prot &= ~_SEGMENT_ENTRY_NOEXEC;
	pmdp = pm_dir;
	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd_val(*pmdp) = pmd_addr | prot;
		pmd_addr += PMD_SIZE;
		pmdp++;
	}
	pud_val(new) = __pa(pm_dir) | _REGION3_ENTRY;
	pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
	update_page_count(PG_DIRECT_MAP_1M, PTRS_PER_PMD);
	update_page_count(PG_DIRECT_MAP_2G, -1);
	return 0;
}

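/* Apply the SET_MEMORY_* flags to a single 2G region-third entry. */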
static void modify_pud_page(pud_t *pudp, unsigned long addr,
			    unsigned long flags)
{
	pud_t new = *pudp;

	if (flags & SET_MEMORY_RO)
		new = pud_wrprotect(new);
	else if (flags & SET_MEMORY_RW)
		new = pud_mkwrite(pud_mkdirty(new));
	if ((flags & SET_MEMORY_NX) && MACHINE_HAS_NX)
		pud_val(new) |= _REGION_ENTRY_NOEXEC;
	else if (flags & SET_MEMORY_X)
		pud_val(new) &= ~_REGION_ENTRY_NOEXEC;
	pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
}

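/*
 * Walk the puds covering addr..end: split partially covered 2G
 * mappings, modify fully covered ones in place, and descend to the
 * pmd level otherwise.
 */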
static int walk_pud_level(pgd_t *pgd, unsigned long addr, unsigned long end,
			  unsigned long flags)
{
	unsigned long next;
	pud_t *pudp;
	int rc = 0;

	pudp = pud_offset(pgd, addr);
	do {
		if (pud_none(*pudp))
			return -EINVAL;
		next = pud_addr_end(addr, end);
		if (pud_large(*pudp)) {
			if (addr & ~PUD_MASK || addr + PUD_SIZE > next) {
				rc = split_pud_page(pudp, addr);
				if (rc)
					break;
				continue;
			}
			modify_pud_page(pudp, addr, flags);
		} else {
			rc = walk_pmd_level(pudp, addr, next, flags);
		}
		pudp++;
		addr = next;
		cond_resched();
	} while (addr < end && !rc);
	return rc;
}

static DEFINE_MUTEX(cpa_mutex);

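/*
 * Change the attributes of the kernel mappings in addr..end. All
 * changes are serialized by cpa_mutex, so concurrent splits cannot
 * race; ranges that reach MODULES_END or beyond are rejected.
 */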
static int change_page_attr(unsigned long addr, unsigned long end,
			    unsigned long flags)
{
	unsigned long next;
	int rc = -EINVAL;
	pgd_t *pgdp;

	if (addr == end)
		return 0;
	if (end >= MODULES_END)
		return -EINVAL;
	mutex_lock(&cpa_mutex);
	pgdp = pgd_offset_k(addr);
	do {
		if (pgd_none(*pgdp))
			break;
		next = pgd_addr_end(addr, end);
		rc = walk_pud_level(pgdp, addr, next, flags);
		if (rc)
			break;
		cond_resched();
	} while (pgdp++, addr = next, addr < end && !rc);
	mutex_unlock(&cpa_mutex);
	return rc;
}

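/*
 * Common entry point for the set_memory_ro/rw/nx/x wrappers; rounds
 * addr down to a page boundary and converts numpages to an end
 * address.
 */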
int __set_memory(unsigned long addr, int numpages, unsigned long flags)
{
	addr &= PAGE_MASK;
	return change_page_attr(addr, addr + numpages * PAGE_SIZE, flags);
}

#ifdef CONFIG_DEBUG_PAGEALLOC

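/*
 * Invalidate nr ptes starting at address. With the IPTE-range
 * facility (facility bit 13) one instruction flushes the whole
 * range; otherwise every page is purged individually.
 */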
static void ipte_range(pte_t *pte, unsigned long address, int nr)
{
	int i;

	if (test_facility(13)) {
		__ptep_ipte_range(address, nr - 1, pte, IPTE_GLOBAL);
		return;
	}
	for (i = 0; i < nr; i++) {
		__ptep_ipte(address, pte, IPTE_GLOBAL);
		address += PAGE_SIZE;
		pte++;
	}
}

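/*
 * Make kernel pages valid (enable != 0) or invalid for
 * DEBUG_PAGEALLOC. The loop handles at most one page table per
 * iteration. Validating only clears _PAGE_INVALID (an invalid pte
 * cannot be in the TLB); invalidation must purge the TLB via
 * ipte_range().
 */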
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long address;
	int nr, i, j;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (i = 0; i < numpages;) {
		address = page_to_phys(page + i);
		pgd = pgd_offset_k(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		pte = pte_offset_kernel(pmd, address);
		/* number of pte slots left to the end of this page table */
		nr = (unsigned long)pte >> ilog2(sizeof(long));
		nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
		nr = min(numpages - i, nr);
		if (enable) {
			for (j = 0; j < nr; j++) {
				pte_val(*pte) &= ~_PAGE_INVALID;
				address += PAGE_SIZE;
				pte++;
			}
		} else {
			ipte_range(pte, address, nr);
		}
		i += nr;
	}
}

#ifdef CONFIG_HIBERNATION

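/*
 * Probe whether a page is mapped by letting LRA (load real address)
 * attempt the translation: condition code 0 means a valid mapping
 * exists.
 */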
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	int cc;

	addr = page_to_phys(page);
	asm volatile(
		"	lra	%1,0(%1)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (cc), "+a" (addr) : : "cc");
	return cc == 0;
}

#endif /* CONFIG_HIBERNATION */

#endif /* CONFIG_DEBUG_PAGEALLOC */