homecache.c

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * This code maintains the "home" for each page in the system.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/bootmem.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/pagevec.h>
#include <linux/ptrace.h>
#include <linux/timex.h>
#include <linux/cache.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/hugetlb.h>

#include <asm/page.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/homecache.h>

#include <arch/sim.h>

#include "migrate.h"

/*
 * The noallocl2 option suppresses all use of the L2 cache to cache
 * locally from a remote home.
 */
static int __write_once noallocl2;
static int __init set_noallocl2(char *str)
{
	noallocl2 = 1;
	return 0;
}
early_param("noallocl2", set_noallocl2);
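
/*
 * Editorial note, not part of the original file: early_param() hooks
 * this flag up to the kernel command line, so it is enabled simply by
 * appending "noallocl2" to the boot arguments, for example:
 *
 *	vmlinux ... root=/dev/sda1 noallocl2
 *
 * (the root= argument is only illustrative).
 */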

/*
 * Update the irq_stat for cpus that we are going to interrupt
 * with TLB or cache flushes.
 */
static void hv_flush_update(const struct cpumask *cache_cpumask,
			    struct cpumask *tlb_cpumask,
			    unsigned long tlb_va, unsigned long tlb_length,
			    HV_Remote_ASID *asids, int asidcount)
{
	struct cpumask mask;
	int i, cpu;

	cpumask_clear(&mask);
	if (cache_cpumask)
		cpumask_or(&mask, &mask, cache_cpumask);
	if (tlb_cpumask && tlb_length)
		cpumask_or(&mask, &mask, tlb_cpumask);
	for (i = 0; i < asidcount; ++i)
		cpumask_set_cpu(asids[i].y * smp_width + asids[i].x, &mask);

	/*
	 * Don't bother to update atomically; losing a count
	 * here is not that critical.
	 */
	for_each_cpu(cpu, &mask)
		++per_cpu(irq_stat, cpu).irq_hv_flush_count;
}

/*
 * This wrapper function around hv_flush_remote() does several things:
 *
 *  - Provides a return value error-checking panic path, since
 *    there's never any good reason for hv_flush_remote() to fail.
 *  - Accepts a 32-bit PFN rather than a 64-bit PA, which generally
 *    is the type that Linux wants to pass around anyway.
 *  - Canonicalizes the arguments so that a length of zero makes the
 *    corresponding cpumask NULL.
 *  - Tracks remote interrupts in the per-cpu irq_cpustat_t.
 *
 * Note that we have to wait until the cache flush completes before
 * updating the per-cpu last_cache_flush word, since otherwise another
 * concurrent flush can race, conclude the flush has already
 * completed, and start to use the page while it's still dirty
 * remotely (running concurrently with the actual evict, presumably).
 */
void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
		  const struct cpumask *cache_cpumask_orig,
		  HV_VirtAddr tlb_va, unsigned long tlb_length,
		  unsigned long tlb_pgsize,
		  const struct cpumask *tlb_cpumask_orig,
		  HV_Remote_ASID *asids, int asidcount)
{
	int rc;
	struct cpumask cache_cpumask_copy, tlb_cpumask_copy;
	struct cpumask *cache_cpumask, *tlb_cpumask;
	HV_PhysAddr cache_pa;
	char cache_buf[NR_CPUS*5], tlb_buf[NR_CPUS*5];

	mb();	/* provided just to simplify "magic hypervisor" mode */

	/*
	 * Canonicalize and copy the cpumasks.
	 */
	if (cache_cpumask_orig && cache_control) {
		cpumask_copy(&cache_cpumask_copy, cache_cpumask_orig);
		cache_cpumask = &cache_cpumask_copy;
	} else {
		cpumask_clear(&cache_cpumask_copy);
		cache_cpumask = NULL;
	}
	if (cache_cpumask == NULL)
		cache_control = 0;
	if (tlb_cpumask_orig && tlb_length) {
		cpumask_copy(&tlb_cpumask_copy, tlb_cpumask_orig);
		tlb_cpumask = &tlb_cpumask_copy;
	} else {
		cpumask_clear(&tlb_cpumask_copy);
		tlb_cpumask = NULL;
	}

	hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length,
			asids, asidcount);
	cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT;
	rc = hv_flush_remote(cache_pa, cache_control,
			     cpumask_bits(cache_cpumask),
			     tlb_va, tlb_length, tlb_pgsize,
			     cpumask_bits(tlb_cpumask),
			     asids, asidcount);
	if (rc == 0)
		return;

	cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
	cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy);

	pr_err("hv_flush_remote(%#llx, %#lx, %p [%s], %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
	       cache_pa, cache_control, cache_cpumask, cache_buf,
	       (unsigned long)tlb_va, tlb_length, tlb_pgsize,
	       tlb_cpumask, tlb_buf, asids, asidcount, rc);
	panic("Unsafe to continue.");
}
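
/*
 * Illustrative sketch, not part of the original file: a caller that
 * only needs to shoot down the TLB entry for a single kernel page on
 * all online cpus, without flushing any caches, could use the wrapper
 * like this.  The helper name is hypothetical.
 */
static inline void example_flush_kernel_tlb_page(unsigned long va)
{
	flush_remote(0, 0, NULL,		/* no cache flush */
		     (HV_VirtAddr)va, PAGE_SIZE, PAGE_SIZE,
		     cpu_online_mask,		/* TLB flush targets */
		     NULL, 0);
}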

/* Flush and invalidate the page mapped at "va", based on its home. */
static void homecache_finv_page_va(void *va, int home)
{
	int cpu = get_cpu();
	if (home == cpu) {
		finv_buffer_local(va, PAGE_SIZE);
	} else if (home == PAGE_HOME_HASH) {
		finv_buffer_remote(va, PAGE_SIZE, 1);
	} else {
		BUG_ON(home < 0 || home >= NR_CPUS);
		finv_buffer_remote(va, PAGE_SIZE, 0);
	}
	put_cpu();
}

void homecache_finv_map_page(struct page *page, int home)
{
	unsigned long flags;
	unsigned long va;
	pte_t *ptep;
	pte_t pte;

	if (home == PAGE_HOME_UNCACHED)
		return;
	local_irq_save(flags);
#ifdef CONFIG_HIGHMEM
	va = __fix_to_virt(FIX_KMAP_BEGIN + kmap_atomic_idx_push() +
			   (KM_TYPE_NR * smp_processor_id()));
#else
	va = __fix_to_virt(FIX_HOMECACHE_BEGIN + smp_processor_id());
#endif
	ptep = virt_to_kpte(va);
	pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
	__set_pte(ptep, pte_set_home(pte, home));
	homecache_finv_page_va((void *)va, home);
	__pte_clear(ptep);
	hv_flush_page(va, PAGE_SIZE);
#ifdef CONFIG_HIGHMEM
	kmap_atomic_idx_pop();
#endif
	local_irq_restore(flags);
}

static void homecache_finv_page_home(struct page *page, int home)
{
	if (!PageHighMem(page) && home == page_home(page))
		homecache_finv_page_va(page_address(page), home);
	else
		homecache_finv_map_page(page, home);
}

static inline bool incoherent_home(int home)
{
	return home == PAGE_HOME_IMMUTABLE || home == PAGE_HOME_INCOHERENT;
}

static void homecache_finv_page_internal(struct page *page, int force_map)
{
	int home = page_home(page);
	if (home == PAGE_HOME_UNCACHED)
		return;
	if (incoherent_home(home)) {
		int cpu;
		for_each_cpu(cpu, &cpu_cacheable_map)
			homecache_finv_map_page(page, cpu);
	} else if (force_map) {
		/* Force if, e.g., the normal mapping is migrating. */
		homecache_finv_map_page(page, home);
	} else {
		homecache_finv_page_home(page, home);
	}
	sim_validate_lines_evicted(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
}

void homecache_finv_page(struct page *page)
{
	homecache_finv_page_internal(page, 0);
}

void homecache_evict(const struct cpumask *mask)
{
	flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0);
}
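
/*
 * Illustrative sketch, not part of the original file: evicting
 * everything from the current cpu's own L2, e.g. before handing its
 * memory over to another user.  The helper name is hypothetical.
 */
static void example_evict_local_l2(void)
{
	int cpu = get_cpu();
	homecache_evict(cpumask_of(cpu));
	put_cpu();
}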

/* Report the home corresponding to a given PTE. */
static int pte_to_home(pte_t pte)
{
	if (hv_pte_get_nc(pte))
		return PAGE_HOME_IMMUTABLE;
	switch (hv_pte_get_mode(pte)) {
	case HV_PTE_MODE_CACHE_TILE_L3:
		return get_remote_cache_cpu(pte);
	case HV_PTE_MODE_CACHE_NO_L3:
		return PAGE_HOME_INCOHERENT;
	case HV_PTE_MODE_UNCACHED:
		return PAGE_HOME_UNCACHED;
	case HV_PTE_MODE_CACHE_HASH_L3:
		return PAGE_HOME_HASH;
	}
	panic("Bad PTE %#llx\n", pte.val);
}

/* Update the home of a PTE if necessary (can also be used for a pgprot_t). */
pte_t pte_set_home(pte_t pte, int home)
{
	/* Check for non-linear file mapping "PTEs" and pass them through. */
	if (pte_file(pte))
		return pte;

#if CHIP_HAS_MMIO()
	/* Check for MMIO mappings and pass them through. */
	if (hv_pte_get_mode(pte) == HV_PTE_MODE_MMIO)
		return pte;
#endif

	/*
	 * Only immutable pages get NC mappings.  If we have a
	 * non-coherent PTE, but the underlying page is not
	 * immutable, it's likely the result of a forced
	 * caching setting running up against ptrace setting
	 * the page to be writable underneath.  In this case,
	 * just keep the PTE coherent.
	 */
	if (hv_pte_get_nc(pte) && home != PAGE_HOME_IMMUTABLE) {
		pte = hv_pte_clear_nc(pte);
		pr_err("non-immutable page incoherently referenced: %#llx\n",
		       pte.val);
	}

	switch (home) {

	case PAGE_HOME_UNCACHED:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
		break;

	case PAGE_HOME_INCOHERENT:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		break;

	case PAGE_HOME_IMMUTABLE:
		/*
		 * We could home this page anywhere, since it's immutable,
		 * but by default just home it to follow "hash_default".
		 */
		BUG_ON(hv_pte_get_writable(pte));
		if (pte_get_forcecache(pte)) {
			/* Upgrade "force any cpu" to "No L3" for immutable. */
			if (hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_TILE_L3
			    && pte_get_anyhome(pte)) {
				pte = hv_pte_set_mode(pte,
						      HV_PTE_MODE_CACHE_NO_L3);
			}
		} else if (hash_default) {
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		} else {
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		}
		pte = hv_pte_set_nc(pte);
		break;

	case PAGE_HOME_HASH:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		break;

	default:
		BUG_ON(home < 0 || home >= NR_CPUS ||
		       !cpu_is_valid_lotar(home));
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
		pte = set_remote_cache_cpu(pte, home);
		break;
	}

	if (noallocl2)
		pte = hv_pte_set_no_alloc_l2(pte);

	/* Simplify "no local and no l3" to "uncached" */
	if (hv_pte_get_no_alloc_l2(pte) && hv_pte_get_no_alloc_l1(pte) &&
	    hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_NO_L3) {
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
	}

	/* Checking this case here gives a better panic than from the hv. */
	BUG_ON(hv_pte_get_mode(pte) == 0);

	return pte;
}
EXPORT_SYMBOL(pte_set_home);
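
/*
 * Illustrative sketch, not part of the original file: building a PTE
 * for an uncached mapping of a page, mirroring the pfn_pte() plus
 * pte_set_home() pattern used in homecache_finv_map_page() above.
 * The helper name is hypothetical.
 */
static inline pte_t example_uncached_pte(struct page *page)
{
	pte_t pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
	return pte_set_home(pte, PAGE_HOME_UNCACHED);
}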

/*
 * The routines in this section are the "static" versions of the normal
 * dynamic homecaching routines; they just set the home cache
 * of a kernel page once, and require a full-chip cache/TLB flush,
 * so they're not suitable for anything but infrequent use.
 */

int page_home(struct page *page)
{
	if (PageHighMem(page)) {
		return PAGE_HOME_HASH;
	} else {
		unsigned long kva = (unsigned long)page_address(page);
		return pte_to_home(*virt_to_kpte(kva));
	}
}
EXPORT_SYMBOL(page_home);

void homecache_change_page_home(struct page *page, int order, int home)
{
	int i, pages = (1 << order);
	unsigned long kva;

	BUG_ON(PageHighMem(page));
	BUG_ON(page_count(page) > 1);
	BUG_ON(page_mapcount(page) != 0);
	kva = (unsigned long) page_address(page);
	flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map,
		     kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask,
		     NULL, 0);

	for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
		pte_t *ptep = virt_to_kpte(kva);
		pte_t pteval = *ptep;
		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
		__set_pte(ptep, pte_set_home(pteval, home));
	}
}
EXPORT_SYMBOL(homecache_change_page_home);

struct page *homecache_alloc_pages(gfp_t gfp_mask,
				   unsigned int order, int home)
{
	struct page *page;
	BUG_ON(gfp_mask & __GFP_HIGHMEM);	/* must be lowmem */
	page = alloc_pages(gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}
EXPORT_SYMBOL(homecache_alloc_pages);

struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
					unsigned int order, int home)
{
	struct page *page;
	BUG_ON(gfp_mask & __GFP_HIGHMEM);	/* must be lowmem */
	page = alloc_pages_node(nid, gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}

void __homecache_free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		homecache_change_page_home(page, order, PAGE_HOME_HASH);
		if (order == 0) {
			free_hot_cold_page(page, false);
		} else {
			init_page_count(page);
			__free_pages(page, order);
		}
	}
}
EXPORT_SYMBOL(__homecache_free_pages);

void homecache_free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__homecache_free_pages(virt_to_page((void *)addr), order);
	}
}
EXPORT_SYMBOL(homecache_free_pages);
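
/*
 * Illustrative sketch, not part of the original file: allocating a
 * lowmem page homed on the calling cpu, then returning it with its
 * home reset to hash-for-home by __homecache_free_pages() above.
 * The helper name is hypothetical.
 */
static int example_use_locally_homed_page(void)
{
	int cpu = raw_smp_processor_id();	/* home on our current cpu */
	struct page *page = homecache_alloc_pages(GFP_KERNEL, 0, cpu);
	if (page == NULL)
		return -ENOMEM;
	/* ... access page_address(page); lines stay in this cpu's L2 ... */
	__homecache_free_pages(page, 0);
	return 0;
}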