ipmmu-vmsa.c

/*
 * IPMMU VMSA
 *
 * Copyright (C) 2014 Renesas Electronics Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>
#else
#define arm_iommu_create_mapping(...)	NULL
#define arm_iommu_attach_device(...)	-ENODEV
#define arm_iommu_release_mapping(...)	do {} while (0)
#define arm_iommu_detach_device(...)	do {} while (0)
#endif

#include "io-pgtable.h"

#define IPMMU_CTX_MAX 8

struct ipmmu_features {
	bool use_ns_alias_offset;
	bool has_cache_leaf_nodes;
	unsigned int number_of_contexts;
	bool setup_imbuscr;
	bool twobit_imttbcr_sl0;
};

struct ipmmu_vmsa_device {
	struct device *dev;
	void __iomem *base;
	struct iommu_device iommu;
	struct ipmmu_vmsa_device *root;
	const struct ipmmu_features *features;
	unsigned int num_utlbs;
	unsigned int num_ctx;
	spinlock_t lock;			/* Protects ctx and domains[] */
	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];

	struct iommu_group *group;
	struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;
	struct iommu_domain io_domain;

	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;

	unsigned int context_id;
	spinlock_t lock;			/* Protects mappings */
};

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}
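
/*
 * Per-device IPMMU data lives in the device's iommu_fwspec: of_xlate() stores
 * the ipmmu_vmsa_device pointer in iommu_priv (see
 * ipmmu_init_platform_device() below), so this returns NULL for any device
 * that has not been through of_xlate() yet.
 */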
static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
{
	return dev->iommu_fwspec ? dev->iommu_fwspec->iommu_priv : NULL;
}

#define TLB_LOOP_TIMEOUT		100	/* 100us */

/* -----------------------------------------------------------------------------
 * Registers Definition
 */

#define IM_NS_ALIAS_OFFSET		0x800

#define IM_CTX_SIZE			0x40

#define IMCTR				0x0000
#define IMCTR_TRE			(1 << 17)
#define IMCTR_AFE			(1 << 16)
#define IMCTR_RTSEL_MASK		(3 << 4)
#define IMCTR_RTSEL_SHIFT		4
#define IMCTR_TREN			(1 << 3)
#define IMCTR_INTEN			(1 << 2)
#define IMCTR_FLUSH			(1 << 1)
#define IMCTR_MMUEN			(1 << 0)

#define IMCAAR				0x0004

#define IMTTBCR				0x0008
#define IMTTBCR_EAE			(1 << 31)
#define IMTTBCR_PMB			(1 << 30)
#define IMTTBCR_SH1_NON_SHAREABLE	(0 << 28)
#define IMTTBCR_SH1_OUTER_SHAREABLE	(2 << 28)
#define IMTTBCR_SH1_INNER_SHAREABLE	(3 << 28)
#define IMTTBCR_SH1_MASK		(3 << 28)
#define IMTTBCR_ORGN1_NC		(0 << 26)
#define IMTTBCR_ORGN1_WB_WA		(1 << 26)
#define IMTTBCR_ORGN1_WT		(2 << 26)
#define IMTTBCR_ORGN1_WB		(3 << 26)
#define IMTTBCR_ORGN1_MASK		(3 << 26)
#define IMTTBCR_IRGN1_NC		(0 << 24)
#define IMTTBCR_IRGN1_WB_WA		(1 << 24)
#define IMTTBCR_IRGN1_WT		(2 << 24)
#define IMTTBCR_IRGN1_WB		(3 << 24)
#define IMTTBCR_IRGN1_MASK		(3 << 24)
#define IMTTBCR_TSZ1_MASK		(7 << 16)
#define IMTTBCR_TSZ1_SHIFT		16
#define IMTTBCR_SH0_NON_SHAREABLE	(0 << 12)
#define IMTTBCR_SH0_OUTER_SHAREABLE	(2 << 12)
#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)
#define IMTTBCR_SH0_MASK		(3 << 12)
#define IMTTBCR_ORGN0_NC		(0 << 10)
#define IMTTBCR_ORGN0_WB_WA		(1 << 10)
#define IMTTBCR_ORGN0_WT		(2 << 10)
#define IMTTBCR_ORGN0_WB		(3 << 10)
#define IMTTBCR_ORGN0_MASK		(3 << 10)
#define IMTTBCR_IRGN0_NC		(0 << 8)
#define IMTTBCR_IRGN0_WB_WA		(1 << 8)
#define IMTTBCR_IRGN0_WT		(2 << 8)
#define IMTTBCR_IRGN0_WB		(3 << 8)
#define IMTTBCR_IRGN0_MASK		(3 << 8)
#define IMTTBCR_SL0_LVL_2		(0 << 4)
#define IMTTBCR_SL0_LVL_1		(1 << 4)
#define IMTTBCR_TSZ0_MASK		(7 << 0)
#define IMTTBCR_TSZ0_SHIFT		0

#define IMTTBCR_SL0_TWOBIT_LVL_3	(0 << 6)
#define IMTTBCR_SL0_TWOBIT_LVL_2	(1 << 6)
#define IMTTBCR_SL0_TWOBIT_LVL_1	(2 << 6)

#define IMBUSCR				0x000c
#define IMBUSCR_DVM			(1 << 2)
#define IMBUSCR_BUSSEL_SYS		(0 << 0)
#define IMBUSCR_BUSSEL_CCI		(1 << 0)
#define IMBUSCR_BUSSEL_IMCAAR		(2 << 0)
#define IMBUSCR_BUSSEL_CCI_IMCAAR	(3 << 0)
#define IMBUSCR_BUSSEL_MASK		(3 << 0)

#define IMTTLBR0			0x0010
#define IMTTUBR0			0x0014
#define IMTTLBR1			0x0018
#define IMTTUBR1			0x001c

#define IMSTR				0x0020
#define IMSTR_ERRLVL_MASK		(3 << 12)
#define IMSTR_ERRLVL_SHIFT		12
#define IMSTR_ERRCODE_TLB_FORMAT	(1 << 8)
#define IMSTR_ERRCODE_ACCESS_PERM	(4 << 8)
#define IMSTR_ERRCODE_SECURE_ACCESS	(5 << 8)
#define IMSTR_ERRCODE_MASK		(7 << 8)
#define IMSTR_MHIT			(1 << 4)
#define IMSTR_ABORT			(1 << 2)
#define IMSTR_PF			(1 << 1)
#define IMSTR_TF			(1 << 0)

#define IMMAIR0				0x0028
#define IMMAIR1				0x002c
#define IMMAIR_ATTR_MASK		0xff
#define IMMAIR_ATTR_DEVICE		0x04
#define IMMAIR_ATTR_NC			0x44
#define IMMAIR_ATTR_WBRWA		0xff
#define IMMAIR_ATTR_SHIFT(n)		((n) << 3)
#define IMMAIR_ATTR_IDX_NC		0
#define IMMAIR_ATTR_IDX_WBRWA		1
#define IMMAIR_ATTR_IDX_DEV		2

#define IMEAR				0x0030

#define IMPCTR				0x0200
#define IMPSTR				0x0208
#define IMPEAR				0x020c
#define IMPMBA(n)			(0x0280 + ((n) * 4))
#define IMPMBD(n)			(0x02c0 + ((n) * 4))

#define IMUCTR(n)			((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
#define IMUCTR0(n)			(0x0300 + ((n) * 16))
#define IMUCTR32(n)			(0x0600 + (((n) - 32) * 16))
#define IMUCTR_FIXADDEN			(1 << 31)
#define IMUCTR_FIXADD_MASK		(0xff << 16)
#define IMUCTR_FIXADD_SHIFT		16
#define IMUCTR_TTSEL_MMU(n)		((n) << 4)
#define IMUCTR_TTSEL_PMB		(8 << 4)
#define IMUCTR_TTSEL_MASK		(15 << 4)
#define IMUCTR_FLUSH			(1 << 1)
#define IMUCTR_MMUEN			(1 << 0)

#define IMUASID(n)			((n) < 32 ? IMUASID0(n) : IMUASID32(n))
#define IMUASID0(n)			(0x0308 + ((n) * 16))
#define IMUASID32(n)			(0x0608 + (((n) - 32) * 16))
#define IMUASID_ASID8_MASK		(0xff << 8)
#define IMUASID_ASID8_SHIFT		8
#define IMUASID_ASID0_MASK		(0xff << 0)
#define IMUASID_ASID0_SHIFT		0
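
/*
 * Note on the micro-TLB register layout: micro-TLBs 0-31 are controlled
 * through the register block at 0x0300, while micro-TLBs 32 and up live in a
 * second block at 0x0600. The IMUCTR()/IMUASID() macros above select the
 * right bank from the micro-TLB number.
 */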
/* -----------------------------------------------------------------------------
 * Root device handling
 */

static struct platform_driver ipmmu_driver;

static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
{
	return mmu->root == mmu;
}

static int __ipmmu_check_device(struct device *dev, void *data)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	struct ipmmu_vmsa_device **rootp = data;

	if (ipmmu_is_root(mmu))
		*rootp = mmu;

	return 0;
}

static struct ipmmu_vmsa_device *ipmmu_find_root(void)
{
	struct ipmmu_vmsa_device *root = NULL;

	return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
				      __ipmmu_check_device) == 0 ? root : NULL;
}

/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
			u32 data)
{
	iowrite32(data, mmu->base + offset);
}

static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
			       unsigned int reg)
{
	return ipmmu_read(domain->mmu->root,
			  domain->context_id * IM_CTX_SIZE + reg);
}

static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
				 unsigned int reg, u32 data)
{
	ipmmu_write(domain->mmu->root,
		    domain->context_id * IM_CTX_SIZE + reg, data);
}
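
/*
 * Write a context register on both the (possibly distinct) leaf IPMMU
 * attached to the domain and the root IPMMU. On R-Car Gen3 the cache (leaf)
 * instances mirror a subset of the root's context registers, IMCTR in
 * particular, so writes that must reach the translating hardware have to hit
 * both instances; ipmmu_ctx_write_root() suffices for root-only registers.
 */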
static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
				unsigned int reg, u32 data)
{
	if (domain->mmu != domain->mmu->root)
		ipmmu_write(domain->mmu,
			    domain->context_id * IM_CTX_SIZE + reg, data);

	ipmmu_write(domain->mmu->root,
		    domain->context_id * IM_CTX_SIZE + reg, data);
}

/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
	unsigned int count = 0;

	while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(domain->mmu->dev,
				"TLB sync timed out -- MMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
	u32 reg;

	reg = ipmmu_ctx_read_root(domain, IMCTR);
	reg |= IMCTR_FLUSH;
	ipmmu_ctx_write_all(domain, IMCTR, reg);

	ipmmu_tlb_sync(domain);
}

/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
			      unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/*
	 * TODO: Reference-count the microTLB as several bus masters can be
	 * connected to the same microTLB.
	 */

	/* TODO: What should we set the ASID to ? */
	ipmmu_write(mmu, IMUASID(utlb), 0);
	/* TODO: Do we need to flush the microTLB ? */
	ipmmu_write(mmu, IMUCTR(utlb),
		    IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
		    IMUCTR_MMUEN);
}

/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
			       unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	ipmmu_write(mmu, IMUCTR(utlb), 0);
}

static void ipmmu_tlb_flush_all(void *cookie)
{
	struct ipmmu_vmsa_domain *domain = cookie;

	ipmmu_tlb_invalidate(domain);
}

static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	/* The hardware doesn't support selective TLB flush. */
}
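
/*
 * io-pgtable TLB callbacks. Since the hardware can only invalidate the whole
 * TLB, range flushes are a no-op and both flush_all and sync fall back to a
 * full invalidate; ipmmu_tlb_invalidate() already waits for completion.
 */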
static const struct iommu_gather_ops ipmmu_gather_ops = {
	.tlb_flush_all = ipmmu_tlb_flush_all,
	.tlb_add_flush = ipmmu_tlb_add_flush,
	.tlb_sync = ipmmu_tlb_flush_all,
};

/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
					 struct ipmmu_vmsa_domain *domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mmu->lock, flags);

	ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
	if (ret != mmu->num_ctx) {
		mmu->domains[ret] = domain;
		set_bit(ret, mmu->ctx);
	} else
		ret = -EBUSY;

	spin_unlock_irqrestore(&mmu->lock, flags);

	return ret;
}

static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
				      unsigned int context_id)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	clear_bit(context_id, mmu->ctx);
	mmu->domains[context_id] = NULL;

	spin_unlock_irqrestore(&mmu->lock, flags);
}

static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	u64 ttbr;
	u32 tmp;
	int ret;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
	 * access, Long-descriptor format" that the NStable bit being set in a
	 * table descriptor will result in the NStable and NS bits of all child
	 * entries being ignored and considered as being set. The IPMMU seems
	 * not to comply with this, as it generates a secure access page fault
	 * if any of the NStable and NS bits isn't set when running in
	 * non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
	domain->cfg.tlb = &ipmmu_gather_ops;
	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
	domain->io_domain.geometry.force_aperture = true;
	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling. For now, delegate it to the io-pgtable code.
	 */
	domain->cfg.iommu_dev = domain->mmu->root->dev;

	/*
	 * Find an unused context.
	 */
	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
	if (ret < 0)
		return ret;

	domain->context_id = ret;

	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop) {
		ipmmu_domain_free_context(domain->mmu->root,
					  domain->context_id);
		return -EINVAL;
	}

	/* TTBR0 */
	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
	ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
	ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);

	/*
	 * TTBCR
	 * We use long descriptors with inner-shareable WBWA tables and allocate
	 * the whole 32-bit VA space to TTBR0.
	 */
	if (domain->mmu->features->twobit_imttbcr_sl0)
		tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
	else
		tmp = IMTTBCR_SL0_LVL_1;

	ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE |
			     IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
			     IMTTBCR_IRGN0_WB_WA | tmp);

	/* MAIR0 */
	ipmmu_ctx_write_root(domain, IMMAIR0,
			     domain->cfg.arm_lpae_s1_cfg.mair[0]);

	/* IMBUSCR */
	if (domain->mmu->features->setup_imbuscr)
		ipmmu_ctx_write_root(domain, IMBUSCR,
				     ipmmu_ctx_read_root(domain, IMBUSCR) &
				     ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

	/*
	 * IMSTR
	 * Clear all interrupt flags.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write_all(domain, IMCTR,
			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);

	return 0;
}

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
	/*
	 * Disable the context. Flush the TLB as required when modifying the
	 * context registers.
	 *
	 * TODO: Is TLB flush really needed ?
	 */
	ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
	ipmmu_tlb_sync(domain);
	ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
	struct ipmmu_vmsa_device *mmu = domain->mmu;
	u32 status;
	u32 iova;

	status = ipmmu_ctx_read_root(domain, IMSTR);
	if (!(status & err_mask))
		return IRQ_NONE;

	iova = ipmmu_ctx_read_root(domain, IMEAR);

	/*
	 * Clear the error status flags. Unlike traditional interrupt flag
	 * registers that must be cleared by writing 1, this status register
	 * seems to require 0. The error address register must be read before,
	 * otherwise its value will be 0.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n",
				    iova);

	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev,
			    "Unhandled fault: status 0x%08x iova 0x%08x\n",
			    status, iova);

	return IRQ_HANDLED;
}
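
/*
 * The root IPMMU has a single IRQ line shared by all translation contexts,
 * so the handler polls every active context and lets ipmmu_domain_irq()
 * decide whether that context raised the fault.
 */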
static irqreturn_t ipmmu_irq(int irq, void *dev)
{
	struct ipmmu_vmsa_device *mmu = dev;
	irqreturn_t status = IRQ_NONE;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	/*
	 * Check interrupts for all active contexts.
	 */
	for (i = 0; i < mmu->num_ctx; i++) {
		if (!mmu->domains[i])
			continue;
		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
			status = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return status;
}

/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
{
	struct ipmmu_vmsa_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	spin_lock_init(&domain->lock);

	return &domain->io_domain;
}

static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
	struct iommu_domain *io_domain = NULL;

	switch (type) {
	case IOMMU_DOMAIN_UNMANAGED:
		io_domain = __ipmmu_domain_alloc(type);
		break;

	case IOMMU_DOMAIN_DMA:
		io_domain = __ipmmu_domain_alloc(type);
		if (io_domain && iommu_get_dma_cookie(io_domain)) {
			/*
			 * io_domain is embedded in (not at the start of)
			 * struct ipmmu_vmsa_domain, so free the containing
			 * structure, not the interior pointer.
			 */
			kfree(to_vmsa_domain(io_domain));
			io_domain = NULL;
		}
		break;
	}

	return io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/*
	 * Free the domain resources. We assume that all devices have already
	 * been detached.
	 */
	iommu_put_dma_cookie(io_domain);
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}
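
/*
 * Attaching is what actually binds a domain to hardware: the first attach
 * lazily allocates and programs a translation context on the root IPMMU,
 * later attaches can only reuse that context, and a domain may never span
 * two different IPMMU instances.
 */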
static int ipmmu_attach_device(struct iommu_domain *io_domain,
			       struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned long flags;
	unsigned int i;
	int ret = 0;

	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&domain->lock, flags);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
		if (ret < 0) {
			dev_err(dev, "Unable to initialize IPMMU context\n");
			domain->mmu = NULL;
		} else {
			dev_info(dev, "Using IPMMU context %u\n",
				 domain->context_id);
		}
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong, we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
			dev_name(mmu->dev), dev_name(domain->mmu->dev));
		ret = -EINVAL;
	} else
		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);

	spin_unlock_irqrestore(&domain->lock, flags);

	if (ret < 0)
		return ret;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_enable(domain, fwspec->ids[i]);

	return 0;
}

static void ipmmu_detach_device(struct iommu_domain *io_domain,
				struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_disable(domain, fwspec->ids[i]);

	/*
	 * TODO: Optimize by disabling the context when no device is attached.
	 */
}

static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (!domain)
		return -ENODEV;

	return domain->iop->map(domain->iop, iova, paddr, size, prot);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t size)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->unmap(domain->iop, iova, size);
}

static void ipmmu_iotlb_sync(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (domain->mmu)
		ipmmu_tlb_flush_all(domain);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/* TODO: Is locking needed ? */

	return domain->iop->iova_to_phys(domain->iop, iova);
}

static int ipmmu_init_platform_device(struct device *dev,
				      struct of_phandle_args *args)
{
	struct platform_device *ipmmu_pdev;

	ipmmu_pdev = of_find_device_by_node(args->np);
	if (!ipmmu_pdev)
		return -ENODEV;

	dev->iommu_fwspec->iommu_priv = platform_get_drvdata(ipmmu_pdev);

	return 0;
}

static bool ipmmu_slave_whitelist(struct device *dev)
{
	/* By default, do not allow use of IPMMU */
	return false;
}
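
/*
 * Illustrative sketch only, not part of this driver: a per-device opt-in
 * could match on the slave's DT compatible string, along the lines of
 * (the whitelist entry is a hypothetical example):
 *
 *	static const char * const whitelist[] = {
 *		"renesas,etheravb-r8a7795",
 *	};
 *	unsigned int i;
 *
 *	for (i = 0; i < ARRAY_SIZE(whitelist); i++)
 *		if (dev->of_node &&
 *		    of_device_is_compatible(dev->of_node, whitelist[i]))
 *			return true;
 *	return false;
 */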
static const struct soc_device_attribute soc_r8a7795[] = {
	{ .soc_id = "r8a7795", },
	{ /* sentinel */ }
};
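
/*
 * of_xlate() is called once per "iommus" entry on the slave node; the
 * micro-TLB number in spec->args[0] is recorded via iommu_fwspec_add_ids().
 * An illustrative consumer node, assuming the one-cell binding where the
 * cell is the micro-TLB number (label and number are examples only):
 *
 *	&avb {
 *		iommus = <&ipmmu_ds0 16>;
 *	};
 */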
static int ipmmu_of_xlate(struct device *dev,
			  struct of_phandle_args *spec)
{
	/* For R-Car Gen3 use a white list to opt-in slave devices */
	if (soc_device_match(soc_r8a7795) && !ipmmu_slave_whitelist(dev))
		return -ENODEV;

	iommu_fwspec_add_ids(dev, spec->args, 1);

	/* Initialize once - xlate() will call multiple times */
	if (to_ipmmu(dev))
		return 0;

	return ipmmu_init_platform_device(dev, spec);
}
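
/*
 * On 32-bit ARM without CONFIG_IOMMU_DMA the driver manages its own
 * dma_iommu_mapping (see the arm_iommu_* stubs at the top of the file):
 * one mapping is created per IPMMU instance and every slave device is
 * attached to it, so the ARM DMA core allocates I/O VAs from that range.
 */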
static int ipmmu_init_arm_mapping(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;
	int ret;

	/* Create a device group and add the device to it. */
	group = iommu_group_alloc();
	if (IS_ERR(group)) {
		dev_err(dev, "Failed to allocate IOMMU group\n");
		return PTR_ERR(group);
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	if (ret < 0) {
		dev_err(dev, "Failed to add device to IPMMU group\n");
		return ret;
	}

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TLB).
	 * - Make the mapping size configurable ? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	iommu_group_remove_device(dev);
	if (mmu->mapping)
		arm_iommu_release_mapping(mmu->mapping);

	return ret;
}

static int ipmmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	/*
	 * Only let through devices that have been verified in xlate()
	 */
	if (!to_ipmmu(dev))
		return -ENODEV;

	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
		return ipmmu_init_arm_mapping(dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void ipmmu_remove_device(struct device *dev)
{
	arm_iommu_detach_device(dev);
	iommu_group_remove_device(dev);
}

static struct iommu_group *ipmmu_find_group(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;

	if (mmu->group)
		return iommu_group_ref_get(mmu->group);

	group = iommu_group_alloc();
	if (!IS_ERR(group))
		mmu->group = group;

	return group;
}

static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc,
	.domain_free = ipmmu_domain_free,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
	.unmap = ipmmu_unmap,
	.flush_iotlb_all = ipmmu_iotlb_sync,
	.iotlb_sync = ipmmu_iotlb_sync,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = ipmmu_iova_to_phys,
	.add_device = ipmmu_add_device,
	.remove_device = ipmmu_remove_device,
	.device_group = ipmmu_find_group,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
	.of_xlate = ipmmu_of_xlate,
};

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < mmu->num_ctx; ++i)
		ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
}

static const struct ipmmu_features ipmmu_features_default = {
	.use_ns_alias_offset = true,
	.has_cache_leaf_nodes = false,
	.number_of_contexts = 1, /* software only tested with one context */
	.setup_imbuscr = true,
	.twobit_imttbcr_sl0 = false,
};

static const struct ipmmu_features ipmmu_features_r8a7795 = {
	.use_ns_alias_offset = false,
	.has_cache_leaf_nodes = true,
	.number_of_contexts = 8,
	.setup_imbuscr = false,
	.twobit_imttbcr_sl0 = true,
};

static const struct of_device_id ipmmu_of_ids[] = {
	{
		.compatible = "renesas,ipmmu-vmsa",
		.data = &ipmmu_features_default,
	}, {
		.compatible = "renesas,ipmmu-r8a7795",
		.data = &ipmmu_features_r8a7795,
	}, {
		/* Terminator */
	},
};

MODULE_DEVICE_TABLE(of, ipmmu_of_ids);

static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	struct resource *res;
	int irq;
	int ret;

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	mmu->num_utlbs = 48;
	spin_lock_init(&mmu->lock);
	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
	mmu->features = of_device_get_match_data(&pdev->dev);
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));

	/* Map I/O memory and request IRQ. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the registers base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	if (mmu->features->use_ns_alias_offset)
		mmu->base += IM_NS_ALIAS_OFFSET;

	mmu->num_ctx = min_t(unsigned int, IPMMU_CTX_MAX,
			     mmu->features->number_of_contexts);

	irq = platform_get_irq(pdev, 0);

	/*
	 * Determine if this IPMMU instance is a root device by checking for
	 * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
	 */
	if (!mmu->features->has_cache_leaf_nodes ||
	    !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL))
		mmu->root = mmu;
	else
		mmu->root = ipmmu_find_root();

	/*
	 * Wait until the root device has been registered for sure.
	 */
	if (!mmu->root)
		return -EPROBE_DEFER;

	/* Root devices have mandatory IRQs */
	if (ipmmu_is_root(mmu)) {
		if (irq < 0) {
			dev_err(&pdev->dev, "no IRQ found\n");
			return irq;
		}

		ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
				       dev_name(&pdev->dev), mmu);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
			return ret;
		}

		ipmmu_device_reset(mmu);
	}

	/*
	 * Register the IPMMU to the IOMMU subsystem in the following cases:
	 * - R-Car Gen2 IPMMU (all devices registered)
	 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
	 */
	if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
					     dev_name(&pdev->dev));
		if (ret)
			return ret;

		iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
		iommu_device_set_fwnode(&mmu->iommu,
					&pdev->dev.of_node->fwnode);

		ret = iommu_device_register(&mmu->iommu);
		if (ret)
			return ret;

#if defined(CONFIG_IOMMU_DMA)
		if (!iommu_present(&platform_bus_type))
			bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif
	}

	/*
	 * We can't create the ARM mapping here as it requires the bus to have
	 * an IOMMU, which only happens when bus_set_iommu() is called in
	 * ipmmu_init() after the probe function returns.
	 */
	platform_set_drvdata(pdev, mmu);

	return 0;
}

static int ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&mmu->iommu);
	iommu_device_unregister(&mmu->iommu);

	arm_iommu_release_mapping(mmu->mapping);

	ipmmu_device_reset(mmu);

	return 0;
}

static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = of_match_ptr(ipmmu_of_ids),
	},
	.probe = ipmmu_probe,
	.remove = ipmmu_remove,
};
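
/*
 * The static setup_done flag keeps registration idempotent, presumably to
 * guard against ipmmu_init() being reached more than once (for instance via
 * an early OF-declared init path in addition to the subsys initcall); this
 * is an assumption, as nothing in this file calls it twice.
 */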
static int __init ipmmu_init(void)
{
	static bool setup_done;
	int ret;

	if (setup_done)
		return 0;

	ret = platform_driver_register(&ipmmu_driver);
	if (ret < 0)
		return ret;

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif

	setup_done = true;
	return 0;
}

static void __exit ipmmu_exit(void)
{
	platform_driver_unregister(&ipmmu_driver);
}

subsys_initcall(ipmmu_init);
module_exit(ipmmu_exit);

IOMMU_OF_DECLARE(ipmmu_vmsa_iommu_of, "renesas,ipmmu-vmsa");
IOMMU_OF_DECLARE(ipmmu_r8a7795_iommu_of, "renesas,ipmmu-r8a7795");

MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");