ipmmu-vmsa.c

/*
 * IPMMU VMSA
 *
 * Copyright (C) 2014 Renesas Electronics Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>
#endif

#include "io-pgtable.h"

#define IPMMU_CTX_MAX 1

struct ipmmu_vmsa_device {
        struct device *dev;
        void __iomem *base;
        struct iommu_device iommu;
        unsigned int num_utlbs;
        spinlock_t lock;        /* Protects ctx and domains[] */
        DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
        struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];

        struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
        struct ipmmu_vmsa_device *mmu;
        struct iommu_domain io_domain;

        struct io_pgtable_cfg cfg;
        struct io_pgtable_ops *iop;

        unsigned int context_id;
        spinlock_t lock;        /* Protects mappings */
};

struct ipmmu_vmsa_iommu_priv {
        struct ipmmu_vmsa_device *mmu;
        struct device *dev;
        struct list_head list;
};

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

static struct ipmmu_vmsa_iommu_priv *to_priv(struct device *dev)
{
        return dev->iommu_fwspec ? dev->iommu_fwspec->iommu_priv : NULL;
}

#define TLB_LOOP_TIMEOUT 100    /* 100us */

/* -----------------------------------------------------------------------------
 * Registers Definition
 */

#define IM_NS_ALIAS_OFFSET 0x800

#define IM_CTX_SIZE 0x40

#define IMCTR 0x0000
#define IMCTR_TRE (1 << 17)
#define IMCTR_AFE (1 << 16)
#define IMCTR_RTSEL_MASK (3 << 4)
#define IMCTR_RTSEL_SHIFT 4
#define IMCTR_TREN (1 << 3)
#define IMCTR_INTEN (1 << 2)
#define IMCTR_FLUSH (1 << 1)
#define IMCTR_MMUEN (1 << 0)

#define IMCAAR 0x0004

#define IMTTBCR 0x0008
#define IMTTBCR_EAE (1 << 31)
#define IMTTBCR_PMB (1 << 30)
#define IMTTBCR_SH1_NON_SHAREABLE (0 << 28)
#define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28)
#define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28)
#define IMTTBCR_SH1_MASK (3 << 28)
#define IMTTBCR_ORGN1_NC (0 << 26)
#define IMTTBCR_ORGN1_WB_WA (1 << 26)
#define IMTTBCR_ORGN1_WT (2 << 26)
#define IMTTBCR_ORGN1_WB (3 << 26)
#define IMTTBCR_ORGN1_MASK (3 << 26)
#define IMTTBCR_IRGN1_NC (0 << 24)
#define IMTTBCR_IRGN1_WB_WA (1 << 24)
#define IMTTBCR_IRGN1_WT (2 << 24)
#define IMTTBCR_IRGN1_WB (3 << 24)
#define IMTTBCR_IRGN1_MASK (3 << 24)
#define IMTTBCR_TSZ1_MASK (7 << 16)
#define IMTTBCR_TSZ1_SHIFT 16
#define IMTTBCR_SH0_NON_SHAREABLE (0 << 12)
#define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12)
#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12)
#define IMTTBCR_SH0_MASK (3 << 12)
#define IMTTBCR_ORGN0_NC (0 << 10)
#define IMTTBCR_ORGN0_WB_WA (1 << 10)
#define IMTTBCR_ORGN0_WT (2 << 10)
#define IMTTBCR_ORGN0_WB (3 << 10)
#define IMTTBCR_ORGN0_MASK (3 << 10)
#define IMTTBCR_IRGN0_NC (0 << 8)
#define IMTTBCR_IRGN0_WB_WA (1 << 8)
#define IMTTBCR_IRGN0_WT (2 << 8)
#define IMTTBCR_IRGN0_WB (3 << 8)
#define IMTTBCR_IRGN0_MASK (3 << 8)
#define IMTTBCR_SL0_LVL_2 (0 << 4)
#define IMTTBCR_SL0_LVL_1 (1 << 4)
#define IMTTBCR_TSZ0_MASK (7 << 0)
#define IMTTBCR_TSZ0_SHIFT 0

#define IMBUSCR 0x000c
#define IMBUSCR_DVM (1 << 2)
#define IMBUSCR_BUSSEL_SYS (0 << 0)
#define IMBUSCR_BUSSEL_CCI (1 << 0)
#define IMBUSCR_BUSSEL_IMCAAR (2 << 0)
#define IMBUSCR_BUSSEL_CCI_IMCAAR (3 << 0)
#define IMBUSCR_BUSSEL_MASK (3 << 0)

#define IMTTLBR0 0x0010
#define IMTTUBR0 0x0014
#define IMTTLBR1 0x0018
#define IMTTUBR1 0x001c

#define IMSTR 0x0020
#define IMSTR_ERRLVL_MASK (3 << 12)
#define IMSTR_ERRLVL_SHIFT 12
#define IMSTR_ERRCODE_TLB_FORMAT (1 << 8)
#define IMSTR_ERRCODE_ACCESS_PERM (4 << 8)
#define IMSTR_ERRCODE_SECURE_ACCESS (5 << 8)
#define IMSTR_ERRCODE_MASK (7 << 8)
#define IMSTR_MHIT (1 << 4)
#define IMSTR_ABORT (1 << 2)
#define IMSTR_PF (1 << 1)
#define IMSTR_TF (1 << 0)

#define IMMAIR0 0x0028
#define IMMAIR1 0x002c
#define IMMAIR_ATTR_MASK 0xff
#define IMMAIR_ATTR_DEVICE 0x04
#define IMMAIR_ATTR_NC 0x44
#define IMMAIR_ATTR_WBRWA 0xff
#define IMMAIR_ATTR_SHIFT(n) ((n) << 3)
#define IMMAIR_ATTR_IDX_NC 0
#define IMMAIR_ATTR_IDX_WBRWA 1
#define IMMAIR_ATTR_IDX_DEV 2

#define IMEAR 0x0030

#define IMPCTR 0x0200
#define IMPSTR 0x0208
#define IMPEAR 0x020c
#define IMPMBA(n) (0x0280 + ((n) * 4))
#define IMPMBD(n) (0x02c0 + ((n) * 4))

#define IMUCTR(n) (0x0300 + ((n) * 16))
#define IMUCTR_FIXADDEN (1 << 31)
#define IMUCTR_FIXADD_MASK (0xff << 16)
#define IMUCTR_FIXADD_SHIFT 16
#define IMUCTR_TTSEL_MMU(n) ((n) << 4)
#define IMUCTR_TTSEL_PMB (8 << 4)
#define IMUCTR_TTSEL_MASK (15 << 4)
#define IMUCTR_FLUSH (1 << 1)
#define IMUCTR_MMUEN (1 << 0)

#define IMUASID(n) (0x0308 + ((n) * 16))
#define IMUASID_ASID8_MASK (0xff << 8)
#define IMUASID_ASID8_SHIFT 8
#define IMUASID_ASID0_MASK (0xff << 0)
#define IMUASID_ASID0_SHIFT 0

/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
        return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
                        u32 data)
{
        iowrite32(data, mmu->base + offset);
}

static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg)
{
        return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg);
}

static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg,
                            u32 data)
{
        ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data);
}

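/*
 * Each translation context occupies IM_CTX_SIZE (0x40) bytes of register
 * space, so the per-context accessors above simply add
 * context_id * IM_CTX_SIZE to the register offset.  As a purely illustrative
 * example, writing IMTTBCR (0x0008) for context 1 touches offset
 * 0x0008 + 1 * 0x40 = 0x0048 from the (non-secure) register base.
 */
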
/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
        unsigned int count = 0;

        while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) {
                cpu_relax();
                if (++count == TLB_LOOP_TIMEOUT) {
                        dev_err_ratelimited(domain->mmu->dev,
                                "TLB sync timed out -- MMU may be deadlocked\n");
                        return;
                }
                udelay(1);
        }
}

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
        u32 reg;

        reg = ipmmu_ctx_read(domain, IMCTR);
        reg |= IMCTR_FLUSH;
        ipmmu_ctx_write(domain, IMCTR, reg);

        ipmmu_tlb_sync(domain);
}

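/*
 * TLB maintenance is a simple handshake: software sets IMCTR_FLUSH and the
 * hardware clears the bit once the invalidation has completed.  With
 * TLB_LOOP_TIMEOUT iterations of udelay(1), the sync loop above gives up
 * after roughly 100us, matching the comment on the constant.
 */
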
/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
                              unsigned int utlb)
{
        struct ipmmu_vmsa_device *mmu = domain->mmu;

        /*
         * TODO: Reference-count the microTLB as several bus masters can be
         * connected to the same microTLB.
         */

        /* TODO: What should we set the ASID to ? */
        ipmmu_write(mmu, IMUASID(utlb), 0);
        /* TODO: Do we need to flush the microTLB ? */
        ipmmu_write(mmu, IMUCTR(utlb),
                    IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
                    IMUCTR_MMUEN);
}

/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
                               unsigned int utlb)
{
        struct ipmmu_vmsa_device *mmu = domain->mmu;

        ipmmu_write(mmu, IMUCTR(utlb), 0);
}

static void ipmmu_tlb_flush_all(void *cookie)
{
        struct ipmmu_vmsa_domain *domain = cookie;

        ipmmu_tlb_invalidate(domain);
}

static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
                                size_t granule, bool leaf, void *cookie)
{
        /* The hardware doesn't support selective TLB flush. */
}

static const struct iommu_gather_ops ipmmu_gather_ops = {
        .tlb_flush_all = ipmmu_tlb_flush_all,
        .tlb_add_flush = ipmmu_tlb_add_flush,
        .tlb_sync = ipmmu_tlb_flush_all,
};

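/*
 * These callbacks are invoked by the io-pgtable code rather than called
 * directly.  Because the IPMMU cannot invalidate individual TLB entries,
 * tlb_add_flush() is a no-op and tlb_sync is wired to a full flush, so a
 * typical unmap ends up as the following (illustrative call path, not
 * driver code):
 *
 *      domain->iop->unmap(...)
 *              -> ipmmu_tlb_add_flush()        (nothing to do)
 *              -> ipmmu_tlb_flush_all()        (full invalidate + sync)
 */
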
/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
                                         struct ipmmu_vmsa_domain *domain)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&mmu->lock, flags);

        ret = find_first_zero_bit(mmu->ctx, IPMMU_CTX_MAX);
        if (ret != IPMMU_CTX_MAX) {
                mmu->domains[ret] = domain;
                set_bit(ret, mmu->ctx);
        }

        spin_unlock_irqrestore(&mmu->lock, flags);

        return ret;
}

static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
                                      unsigned int context_id)
{
        unsigned long flags;

        spin_lock_irqsave(&mmu->lock, flags);

        clear_bit(context_id, mmu->ctx);
        mmu->domains[context_id] = NULL;

        spin_unlock_irqrestore(&mmu->lock, flags);
}

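/*
 * Context slots are handed out from a small bitmap protected by mmu->lock.
 * find_first_zero_bit() returns IPMMU_CTX_MAX when every slot is taken,
 * which the caller turns into -EBUSY.  A sketch of the expected usage
 * (this mirrors ipmmu_domain_init_context() below):
 *
 *      ret = ipmmu_domain_allocate_context(mmu, domain);
 *      if (ret == IPMMU_CTX_MAX)
 *              return -EBUSY;
 *      domain->context_id = ret;
 */
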
static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
        u64 ttbr;
        int ret;

        /*
         * Allocate the page table operations.
         *
         * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
         * access, Long-descriptor format" that the NStable bit being set in a
         * table descriptor will result in the NStable and NS bits of all child
         * entries being ignored and considered as being set. The IPMMU seems
         * not to comply with this, as it generates a secure access page fault
         * if any of the NStable and NS bits isn't set when running in
         * non-secure mode.
         */
        domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
        domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
        domain->cfg.ias = 32;
        domain->cfg.oas = 40;
        domain->cfg.tlb = &ipmmu_gather_ops;
        domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
        domain->io_domain.geometry.force_aperture = true;
        /*
         * TODO: Add support for coherent walk through CCI with DVM and remove
         * cache handling. For now, delegate it to the io-pgtable code.
         */
        domain->cfg.iommu_dev = domain->mmu->dev;

        /*
         * Find an unused context.
         */
        ret = ipmmu_domain_allocate_context(domain->mmu, domain);
        if (ret == IPMMU_CTX_MAX)
                return -EBUSY;

        domain->context_id = ret;

        domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
                                           domain);
        if (!domain->iop) {
                ipmmu_domain_free_context(domain->mmu, domain->context_id);
                return -EINVAL;
        }

        /* TTBR0 */
        ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
        ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
        ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);

        /*
         * TTBCR
         * We use long descriptors with inner-shareable WBWA tables and allocate
         * the whole 32-bit VA space to TTBR0.
         */
        ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE |
                        IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
                        IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);

        /* MAIR0 */
        ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]);

        /* IMBUSCR */
        ipmmu_ctx_write(domain, IMBUSCR,
                        ipmmu_ctx_read(domain, IMBUSCR) &
                        ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

        /*
         * IMSTR
         * Clear all interrupt flags.
         */
        ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR));

        /*
         * IMCTR
         * Enable the MMU and interrupt generation. The long-descriptor
         * translation table format doesn't use TEX remapping. Don't enable AF
         * software management as we have no use for it. Flush the TLB as
         * required when modifying the context registers.
         */
        ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);

        return 0;
}

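/*
 * To summarize the context bring-up above: pick a free context slot, allocate
 * an ARM 32-bit LPAE stage-1 page table through io-pgtable, program
 * TTBR0/TTBCR/MAIR0 from the resulting configuration, clear the DVM and
 * bus-select fields in IMBUSCR, acknowledge any stale status bits in IMSTR,
 * and finally enable translation and interrupts in IMCTR with a TLB flush.
 */
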
static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
        /*
         * Disable the context. Flush the TLB as required when modifying the
         * context registers.
         *
         * TODO: Is TLB flush really needed ?
         */
        ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH);
        ipmmu_tlb_sync(domain);
        ipmmu_domain_free_context(domain->mmu, domain->context_id);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
        const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
        struct ipmmu_vmsa_device *mmu = domain->mmu;
        u32 status;
        u32 iova;

        status = ipmmu_ctx_read(domain, IMSTR);
        if (!(status & err_mask))
                return IRQ_NONE;

        iova = ipmmu_ctx_read(domain, IMEAR);

        /*
         * Clear the error status flags. Unlike traditional interrupt flag
         * registers that must be cleared by writing 1, this status register
         * seems to require 0. The error address register must be read before,
         * otherwise its value will be 0.
         */
        ipmmu_ctx_write(domain, IMSTR, 0);

        /* Log fatal errors. */
        if (status & IMSTR_MHIT)
                dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n",
                                    iova);
        if (status & IMSTR_ABORT)
                dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n",
                                    iova);

        if (!(status & (IMSTR_PF | IMSTR_TF)))
                return IRQ_NONE;

        /*
         * Try to handle page faults and translation faults.
         *
         * TODO: We need to look up the faulty device based on the I/O VA. Use
         * the IOMMU device for now.
         */
        if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
                return IRQ_HANDLED;

        dev_err_ratelimited(mmu->dev,
                            "Unhandled fault: status 0x%08x iova 0x%08x\n",
                            status, iova);

        return IRQ_HANDLED;
}

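/*
 * report_iommu_fault() gives IOMMU API users a chance to handle the fault
 * before the driver logs it.  A consumer could, for instance, register a
 * handler on the domain (hypothetical example, my_fault_handler and token
 * are not part of this driver):
 *
 *      iommu_set_fault_handler(io_domain, my_fault_handler, token);
 *
 * If that handler returns 0 the fault is considered handled and no error is
 * printed here.
 */
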
static irqreturn_t ipmmu_irq(int irq, void *dev)
{
        struct ipmmu_vmsa_device *mmu = dev;
        irqreturn_t status = IRQ_NONE;
        unsigned int i;
        unsigned long flags;

        spin_lock_irqsave(&mmu->lock, flags);

        /*
         * Check interrupts for all active contexts.
         */
        for (i = 0; i < IPMMU_CTX_MAX; i++) {
                if (!mmu->domains[i])
                        continue;
                if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
                        status = IRQ_HANDLED;
        }

        spin_unlock_irqrestore(&mmu->lock, flags);

        return status;
}

/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
{
        struct ipmmu_vmsa_domain *domain;

        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (!domain)
                return NULL;

        spin_lock_init(&domain->lock);

        return &domain->io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

        /*
         * Free the domain resources. We assume that all devices have already
         * been detached.
         */
        ipmmu_domain_destroy_context(domain);
        free_io_pgtable_ops(domain->iop);
        kfree(domain);
}

static int ipmmu_attach_device(struct iommu_domain *io_domain,
                               struct device *dev)
{
        struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;
        struct ipmmu_vmsa_device *mmu;
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
        unsigned long flags;
        unsigned int i;
        int ret = 0;

        /* Check priv before dereferencing it, as to_priv() can return NULL. */
        if (!priv || !priv->mmu) {
                dev_err(dev, "Cannot attach to IPMMU\n");
                return -ENXIO;
        }

        mmu = priv->mmu;

        spin_lock_irqsave(&domain->lock, flags);

        if (!domain->mmu) {
                /* The domain hasn't been used yet, initialize it. */
                domain->mmu = mmu;
                ret = ipmmu_domain_init_context(domain);
        } else if (domain->mmu != mmu) {
                /*
                 * Something is wrong, we can't attach two devices using
                 * different IOMMUs to the same domain.
                 */
                dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
                        dev_name(mmu->dev), dev_name(domain->mmu->dev));
                ret = -EINVAL;
        } else
                dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);

        spin_unlock_irqrestore(&domain->lock, flags);

        if (ret < 0)
                return ret;

        for (i = 0; i < fwspec->num_ids; ++i)
                ipmmu_utlb_enable(domain, fwspec->ids[i]);

        return 0;
}

static void ipmmu_detach_device(struct iommu_domain *io_domain,
                                struct device *dev)
{
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
        unsigned int i;

        for (i = 0; i < fwspec->num_ids; ++i)
                ipmmu_utlb_disable(domain, fwspec->ids[i]);

        /*
         * TODO: Optimize by disabling the context when no device is attached.
         */
}

static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
                     phys_addr_t paddr, size_t size, int prot)
{
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

        if (!domain)
                return -ENODEV;

        return domain->iop->map(domain->iop, iova, paddr, size, prot);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
                          size_t size)
{
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

        return domain->iop->unmap(domain->iop, iova, size);
}

static void ipmmu_iotlb_sync(struct iommu_domain *io_domain)
{
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

        if (domain->mmu)
                ipmmu_tlb_flush_all(domain);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
                                      dma_addr_t iova)
{
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

        /* TODO: Is locking needed ? */

        return domain->iop->iova_to_phys(domain->iop, iova);
}

static int ipmmu_init_platform_device(struct device *dev,
                                      struct of_phandle_args *args)
{
        struct platform_device *ipmmu_pdev;
        struct ipmmu_vmsa_iommu_priv *priv;

        ipmmu_pdev = of_find_device_by_node(args->np);
        if (!ipmmu_pdev)
                return -ENODEV;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->mmu = platform_get_drvdata(ipmmu_pdev);
        priv->dev = dev;
        dev->iommu_fwspec->iommu_priv = priv;
        return 0;
}

static int ipmmu_of_xlate(struct device *dev,
                          struct of_phandle_args *spec)
{
        iommu_fwspec_add_ids(dev, spec->args, 1);

        /* Initialize once - xlate() may be called multiple times. */
        if (to_priv(dev))
                return 0;

        return ipmmu_init_platform_device(dev, spec);
}

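/*
 * of_xlate() is called once per "iommus" entry on a master device.  The
 * single specifier cell added above selects the micro-TLB the master is
 * wired to, so a device tree fragment could look like the following
 * (illustrative only; node names, addresses and the micro-TLB index are
 * made up):
 *
 *      ipmmu_mx: mmu@fe951000 {
 *              compatible = "renesas,ipmmu-vmsa";
 *              reg = <0 0xfe951000 0 0x1000>;
 *              #iommu-cells = <1>;
 *      };
 *
 *      some_master {
 *              iommus = <&ipmmu_mx 13>;
 *      };
 */
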
#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)

static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        return __ipmmu_domain_alloc(type);
}

static int ipmmu_add_device(struct device *dev)
{
        struct ipmmu_vmsa_device *mmu = NULL;
        struct iommu_group *group;
        int ret;

        /*
         * Only let through devices that have been verified in xlate()
         */
        if (!to_priv(dev))
                return -ENODEV;

        /* Create a device group and add the device to it. */
        group = iommu_group_alloc();
        if (IS_ERR(group)) {
                dev_err(dev, "Failed to allocate IOMMU group\n");
                ret = PTR_ERR(group);
                goto error;
        }

        ret = iommu_group_add_device(group, dev);
        iommu_group_put(group);

        if (ret < 0) {
                dev_err(dev, "Failed to add device to IPMMU group\n");
                group = NULL;
                goto error;
        }

        /*
         * Create the ARM mapping, used by the ARM DMA mapping core to allocate
         * VAs. This will allocate a corresponding IOMMU domain.
         *
         * TODO:
         * - Create one mapping per context (TLB).
         * - Make the mapping size configurable ? We currently use a 2GB mapping
         *   at a 1GB offset to ensure that NULL VAs will fault.
         */
        mmu = to_priv(dev)->mmu;
        if (!mmu->mapping) {
                struct dma_iommu_mapping *mapping;

                mapping = arm_iommu_create_mapping(&platform_bus_type,
                                                   SZ_1G, SZ_2G);
                if (IS_ERR(mapping)) {
                        dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
                        ret = PTR_ERR(mapping);
                        goto error;
                }

                mmu->mapping = mapping;
        }

        /* Attach the ARM VA mapping to the device. */
        ret = arm_iommu_attach_device(dev, mmu->mapping);
        if (ret < 0) {
                dev_err(dev, "Failed to attach device to VA mapping\n");
                goto error;
        }

        return 0;

error:
        if (mmu)
                arm_iommu_release_mapping(mmu->mapping);

        if (!IS_ERR_OR_NULL(group))
                iommu_group_remove_device(dev);

        return ret;
}

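/*
 * With the SZ_1G base and SZ_2G size used above, the ARM DMA mapping layer
 * hands out I/O virtual addresses in the 0x40000000-0xbfffffff range, so a
 * NULL (or low) IOVA is never returned to a driver and stray accesses
 * through address zero fault instead of hitting a valid mapping.
 */
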
static void ipmmu_remove_device(struct device *dev)
{
        arm_iommu_detach_device(dev);
        iommu_group_remove_device(dev);
}

static const struct iommu_ops ipmmu_ops = {
        .domain_alloc = ipmmu_domain_alloc,
        .domain_free = ipmmu_domain_free,
        .attach_dev = ipmmu_attach_device,
        .detach_dev = ipmmu_detach_device,
        .map = ipmmu_map,
        .unmap = ipmmu_unmap,
        .map_sg = default_iommu_map_sg,
        .iova_to_phys = ipmmu_iova_to_phys,
        .add_device = ipmmu_add_device,
        .remove_device = ipmmu_remove_device,
        .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
        .of_xlate = ipmmu_of_xlate,
};

#endif /* CONFIG_ARM && !CONFIG_IOMMU_DMA */

#ifdef CONFIG_IOMMU_DMA

static DEFINE_SPINLOCK(ipmmu_slave_devices_lock);
static LIST_HEAD(ipmmu_slave_devices);

static struct iommu_domain *ipmmu_domain_alloc_dma(unsigned type)
{
        struct iommu_domain *io_domain = NULL;

        switch (type) {
        case IOMMU_DOMAIN_UNMANAGED:
                io_domain = __ipmmu_domain_alloc(type);
                break;

        case IOMMU_DOMAIN_DMA:
                io_domain = __ipmmu_domain_alloc(type);
                if (io_domain)
                        iommu_get_dma_cookie(io_domain);
                break;
        }

        return io_domain;
}

static void ipmmu_domain_free_dma(struct iommu_domain *io_domain)
{
        switch (io_domain->type) {
        case IOMMU_DOMAIN_DMA:
                iommu_put_dma_cookie(io_domain);
                /* fall-through */
        default:
                ipmmu_domain_free(io_domain);
                break;
        }
}

static int ipmmu_add_device_dma(struct device *dev)
{
        struct iommu_group *group;

        /*
         * Only let through devices that have been verified in xlate()
         */
        if (!to_priv(dev))
                return -ENODEV;

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);

        spin_lock(&ipmmu_slave_devices_lock);
        list_add(&to_priv(dev)->list, &ipmmu_slave_devices);
        spin_unlock(&ipmmu_slave_devices_lock);
        return 0;
}

static void ipmmu_remove_device_dma(struct device *dev)
{
        struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);

        spin_lock(&ipmmu_slave_devices_lock);
        list_del(&priv->list);
        spin_unlock(&ipmmu_slave_devices_lock);

        iommu_group_remove_device(dev);
}

static struct device *ipmmu_find_sibling_device(struct device *dev)
{
        struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
        struct ipmmu_vmsa_iommu_priv *sibling_priv = NULL;
        bool found = false;

        spin_lock(&ipmmu_slave_devices_lock);

        list_for_each_entry(sibling_priv, &ipmmu_slave_devices, list) {
                if (priv == sibling_priv)
                        continue;
                if (sibling_priv->mmu == priv->mmu) {
                        found = true;
                        break;
                }
        }

        spin_unlock(&ipmmu_slave_devices_lock);

        return found ? sibling_priv->dev : NULL;
}

static struct iommu_group *ipmmu_find_group_dma(struct device *dev)
{
        struct iommu_group *group;
        struct device *sibling;

        sibling = ipmmu_find_sibling_device(dev);
        if (sibling)
                group = iommu_group_get(sibling);
        if (!sibling || IS_ERR(group))
                group = generic_device_group(dev);

        return group;
}

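/*
 * Grouping policy for the DMA case: all master devices that sit behind the
 * same IPMMU instance are placed in one iommu_group.  The slave-device list
 * above exists only to find such a sibling; if none has been registered yet
 * the device gets a group of its own via generic_device_group().
 */
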
static const struct iommu_ops ipmmu_ops = {
        .domain_alloc = ipmmu_domain_alloc_dma,
        .domain_free = ipmmu_domain_free_dma,
        .attach_dev = ipmmu_attach_device,
        .detach_dev = ipmmu_detach_device,
        .map = ipmmu_map,
        .unmap = ipmmu_unmap,
        .flush_iotlb_all = ipmmu_iotlb_sync,
        .iotlb_sync = ipmmu_iotlb_sync,
        .map_sg = default_iommu_map_sg,
        .iova_to_phys = ipmmu_iova_to_phys,
        .add_device = ipmmu_add_device_dma,
        .remove_device = ipmmu_remove_device_dma,
        .device_group = ipmmu_find_group_dma,
        .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
        .of_xlate = ipmmu_of_xlate,
};

#endif /* CONFIG_IOMMU_DMA */

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
        unsigned int i;

        /* Disable all contexts. */
        for (i = 0; i < 4; ++i)
                ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
}

static int ipmmu_probe(struct platform_device *pdev)
{
        struct ipmmu_vmsa_device *mmu;
        struct resource *res;
        int irq;
        int ret;

        mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
        if (!mmu) {
                dev_err(&pdev->dev, "cannot allocate device data\n");
                return -ENOMEM;
        }

        mmu->dev = &pdev->dev;
        mmu->num_utlbs = 32;
        spin_lock_init(&mmu->lock);
        bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);

        /* Map I/O memory and request IRQ. */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        mmu->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(mmu->base))
                return PTR_ERR(mmu->base);

        /*
         * The IPMMU has two register banks, for secure and non-secure modes.
         * The bank mapped at the beginning of the IPMMU address space
         * corresponds to the running mode of the CPU. When running in secure
         * mode the non-secure register bank is also available at an offset.
         *
         * Secure mode operation isn't clearly documented and is thus currently
         * not implemented in the driver. Furthermore, preliminary tests of
         * non-secure operation with the main register bank were not successful.
         * Offset the registers base unconditionally to point to the non-secure
         * alias space for now.
         */
        mmu->base += IM_NS_ALIAS_OFFSET;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "no IRQ found\n");
                return irq;
        }

        ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
                               dev_name(&pdev->dev), mmu);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
                return ret;
        }

        ipmmu_device_reset(mmu);

        ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
                                     dev_name(&pdev->dev));
        if (ret)
                return ret;

        iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
        iommu_device_set_fwnode(&mmu->iommu, &pdev->dev.of_node->fwnode);

        ret = iommu_device_register(&mmu->iommu);
        if (ret)
                return ret;

        /*
         * We can't create the ARM mapping here as it requires the bus to have
         * an IOMMU, which only happens when bus_set_iommu() is called in
         * ipmmu_init() after the probe function returns.
         */

        platform_set_drvdata(pdev, mmu);

        return 0;
}

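/*
 * Note that mmu->base is biased by IM_NS_ALIAS_OFFSET (0x800) in probe above,
 * so every ipmmu_read()/ipmmu_write() in this driver targets the non-secure
 * register alias; the offset is applied once here rather than in each
 * accessor.
 */
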
static int ipmmu_remove(struct platform_device *pdev)
{
        struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

        iommu_device_sysfs_remove(&mmu->iommu);
        iommu_device_unregister(&mmu->iommu);

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
        arm_iommu_release_mapping(mmu->mapping);
#endif

        ipmmu_device_reset(mmu);

        return 0;
}

static const struct of_device_id ipmmu_of_ids[] = {
        { .compatible = "renesas,ipmmu-vmsa", },
        { }
};

static struct platform_driver ipmmu_driver = {
        .driver = {
                .name = "ipmmu-vmsa",
                .of_match_table = of_match_ptr(ipmmu_of_ids),
        },
        .probe = ipmmu_probe,
        .remove = ipmmu_remove,
};

static int __init ipmmu_init(void)
{
        int ret;

        ret = platform_driver_register(&ipmmu_driver);
        if (ret < 0)
                return ret;

        if (!iommu_present(&platform_bus_type))
                bus_set_iommu(&platform_bus_type, &ipmmu_ops);

        return 0;
}

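/*
 * Registering at subsys_initcall() time and calling bus_set_iommu() right
 * after the driver is registered ensures the platform bus has an IOMMU
 * configured before client devices probe.  This is also why the ARM mapping
 * is created from add_device rather than from probe (see the comment near
 * the end of ipmmu_probe()).
 */
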
static void __exit ipmmu_exit(void)
{
        return platform_driver_unregister(&ipmmu_driver);
}

subsys_initcall(ipmmu_init);
module_exit(ipmmu_exit);

MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");