/*
 * iommu.c: IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/scatterlist.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbus.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

/*
 * This can be sized dynamically, but we will do this
 * only when we have guidance about actual I/O pressures.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */

/* srmmu.c */
extern int viking_mxcc_present;
BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
extern int flush_page_for_dma_global;
static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;	/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;	/* Consistent mapping pte flags */

#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
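
/*
 * MKIOPTE() assembles an IOPTE from a page frame number and permission
 * bits.  With 4KB pages, (pfn << 8) equals the physical address >> 4,
 * which appears to be the address format this IOMMU expects throughout
 * (compare the __pa(...) >> 4 written to iommu->regs->base below); the
 * result is masked into IOPTE_PAGE and the IOPTE_WAZ (write-as-zero)
 * bits are cleared.
 */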
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)

static void __init sbus_iommu_init(struct of_device *op)
{
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;
	unsigned long tmp;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}

	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
				 "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}
	impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
	vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
	tmp = iommu->regs->control;
	tmp &= ~(IOMMU_CTRL_RNGE);
	tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	iommu->regs->control = tmp;
	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/*
	 * Allocate the IOMMU page table.  Stupid alignment constraints
	 * give me a headache: we need a 256K, 512K, 1M or 2M area
	 * aligned to its size, and the current gfp allocator
	 * fortunately gives it to us.
	 */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%08x]\n",
			    IOMMU_NPTES * sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES * sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();
	iommu->regs->base = __pa((unsigned long)iommu->page_table) >> 4;
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES >> 3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES >> 3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);

	/*
	 * To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;

	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES * sizeof(iopte_t)), (int)IOMMU_NPTES);

	op->dev.archdata.iommu = iommu;
}
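
/*
 * Probe every "iommu" node in the device tree at boot and hang the
 * resulting iommu_struct off the of_device's archdata;
 * of_propagate_archdata() then, as its name suggests, pushes that
 * archdata down to the devices behind this IOMMU.
 */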
static int __init iommu_init(void)
{
	struct device_node *dp;

	for_each_node_by_name(dp, "iommu") {
		struct of_device *op = of_find_device_by_node(dp);

		sbus_iommu_init(op);
		of_propagate_archdata(op);
	}

	return 0;
}

subsys_initcall(iommu_init);

/* This begs to be btfixup-ed by srmmu. */
/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte * sizeof(iopte_t));
	start &= PAGE_MASK;
	if (viking_mxcc_present) {
		while (start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while (start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while (start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}
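
/*
 * Map @npages consecutive struct pages into the DVMA window and return
 * the bus address of the first one.  A contiguous run of IOPTEs is
 * carved out of the usemap; the pfn doubles as the requested cache
 * color, so that DVMA and physical addresses line up on virtually
 * indexed caches (see the HyperSparc note in sbus_iommu_init()).
 */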
static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	iopte_t *iopte, *iopte0;
	unsigned int busa, busa0;
	int i;

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		page++;
	}

	iommu_flush_iotlb(iopte0, npages);

	return busa0;
}

static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
{
	unsigned long off;
	int npages;
	struct page *page;
	u32 busa;

	off = (unsigned long)vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
	busa = iommu_get_one(dev, page, npages);
	return busa + off;
}
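
/*
 * The _noflush/_gflush/_pflush variants below differ only in how they
 * push dirty CPU cache lines to memory before mapping: not at all
 * (I/O coherent chips), with a single global flush, or page by page.
 * ld_mmu_iommu() selects one set per CPU type.
 */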
static __u32 iommu_get_scsi_one_noflush(struct device *dev, char *vaddr, unsigned long len)
{
	return iommu_get_scsi_one(dev, vaddr, len);
}

static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
{
	flush_page_for_dma(0);
	return iommu_get_scsi_one(dev, vaddr, len);
}

static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len)
{
	unsigned long page = ((unsigned long)vaddr) & PAGE_MASK;

	while (page < ((unsigned long)(vaddr + len))) {
		flush_page_for_dma(page);
		page += PAGE_SIZE;
	}
	return iommu_get_scsi_one(dev, vaddr, len);
}

static void iommu_get_scsi_sgl_noflush(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	while (sz != 0) {
		--sz;
		n = (sg->length + sg->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
		sg->dvma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dvma_length = (__u32)sg->length;
		sg = sg_next(sg);
	}
}

static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	flush_page_for_dma(0);
	while (sz != 0) {
		--sz;
		n = (sg->length + sg->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
		sg->dvma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dvma_length = (__u32)sg->length;
		sg = sg_next(sg);
	}
}

static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz)
{
	unsigned long page, oldpage = 0;
	int n, i;

	while (sz != 0) {
		--sz;
		n = (sg->length + sg->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;

		/*
		 * We expect unmapped highmem pages not to be in the cache.
		 * XXX Is this a good assumption?
		 * XXX What if someone else unmaps it here and races us?
		 */
		if ((page = (unsigned long)page_address(sg_page(sg))) != 0) {
			for (i = 0; i < n; i++) {
				if (page != oldpage) {	/* Already flushed? */
					flush_page_for_dma(page);
					oldpage = page;
				}
				page += PAGE_SIZE;
			}
		}

		sg->dvma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dvma_length = (__u32)sg->length;
		sg = sg_next(sg);
	}
}
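
/*
 * Tear down a mapping made by iommu_get_one(): clear each IOPTE,
 * invalidate its IOTLB entry, and hand the range back to the usemap.
 */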
static void iommu_release_one(struct device *dev, u32 busa, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	int i;

	BUG_ON(busa < iommu->start);
	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}

static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
{
	unsigned long off;
	int npages;

	off = vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	iommu_release_one(dev, vaddr & PAGE_MASK, npages);
}

static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	while (sz != 0) {
		--sz;
		n = (sg->length + sg->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
		iommu_release_one(dev, sg->dvma_address & PAGE_MASK, n);
		sg->dvma_address = 0x21212121;	/* poison */
		sg = sg_next(sg);
	}
}

#ifdef CONFIG_SBUS
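/*
 * Back a consistent ("DVMA") allocation: the kernel pages behind
 * [va, va+len) are flushed, remapped with dvma_prot (uncacheable where
 * the CPU needs it), and pointed at by a freshly allocated run of
 * IOPTEs.  Here the physical address serves as the page color.
 */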
static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va,
			      unsigned long addr, int len)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long page, end;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
				    addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while (addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 * are handled by a single interface.  Some cpus are
	 * completely not I/O DMA coherent, and some have
	 * virtually indexed caches.  The driver DMA flushing
	 * methods handle the former case, but here, during
	 * IOMMU page table modifications and usage of non-cacheable
	 * cpu mappings of pages potentially in the cpu caches, we have
	 * to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*pba = iommu->start + (ioptex << PAGE_SHIFT);
	return 0;
}
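
/*
 * Undo iommu_map_dma_area().  Note that only the IOPTEs and the usemap
 * range are torn down here; the kernel PTEs rewritten with dvma_prot
 * above are not restored by this function.
 */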
static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	unsigned long end;
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
}
#endif

static char *iommu_lockarea(char *vaddr, unsigned long len)
{
	return vaddr;
}

static void iommu_unlockarea(char *vaddr, unsigned long len)
{
}
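
/*
 * Wire the generic mmu_* entry points to the IOMMU implementations
 * above via btfixup, picking the flush flavour that matches the CPU.
 * A driver then uses the patched entry points roughly like this
 * (a sketch, not verbatim driver code):
 *
 *	__u32 busa = mmu_get_scsi_one(dev, buf, len);
 *	... program the device to DMA to/from busa ...
 *	mmu_release_scsi_one(dev, busa, len);
 */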
void __init ld_mmu_iommu(void)
{
	viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
	BTFIXUPSET_CALL(mmu_lockarea, iommu_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iommu_unlockarea, BTFIXUPCALL_NOP);

	if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
		/* IO coherent chip */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM);
	} else if (flush_page_for_dma_global) {
		/* flush_page_for_dma flushes everything, no matter which page it is */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM);
	} else {
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM);
	}
	BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
	BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM);
#endif

	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}