fsl_pamu_domain.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 * Author: Varun Sethi <varun.sethi@freescale.com>
 *
 */

#define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__

#include <linux/init.h>
#include <linux/iommu.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/of_platform.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <asm/pci-bridge.h>
#include <sysdev/fsl_pci.h>

#include "fsl_pamu_domain.h"

/*
 * Global spinlock that needs to be held while
 * configuring PAMU.
 */
static DEFINE_SPINLOCK(iommu_lock);

static struct kmem_cache *fsl_pamu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
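/*
 * Serializes updates to each device's archdata.iommu_domain pointer
 * during attach/detach.
 */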
static DEFINE_SPINLOCK(device_domain_lock);

static int __init iommu_init_mempool(void)
{
        fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
                                                  sizeof(struct fsl_dma_domain),
                                                  0,
                                                  SLAB_HWCACHE_ALIGN,
                                                  NULL);
        if (!fsl_pamu_domain_cache) {
                pr_debug("Couldn't create fsl iommu_domain cache\n");
                return -ENOMEM;
        }

        iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
                                                sizeof(struct device_domain_info),
                                                0,
                                                SLAB_HWCACHE_ALIGN,
                                                NULL);
        if (!iommu_devinfo_cache) {
                pr_debug("Couldn't create devinfo cache\n");
                kmem_cache_destroy(fsl_pamu_domain_cache);
                return -ENOMEM;
        }

        return 0;
}
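/*
 * Look up the physical address for an iova by locating the DMA window
 * (or subwindow) that covers it.
 *
 * Worked example (illustrative numbers): with a 1 GB geometry starting at
 * iova 0 and win_cnt = 4, each subwindow spans 256 MB (0x10000000).  An
 * iova of 0x28000000 rounds down to the subwindow base 0x20000000, which
 * selects win_arr[2]; the returned address is win_arr[2].paddr plus the
 * iova's offset within that window.
 */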
static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
{
        u32 win_cnt = dma_domain->win_cnt;
        struct dma_window *win_ptr = &dma_domain->win_arr[0];
        struct iommu_domain_geometry *geom;

        geom = &dma_domain->iommu_domain->geometry;

        if (!win_cnt || !dma_domain->geom_size) {
                pr_debug("Number of windows/geometry not configured for the domain\n");
                return 0;
        }

        if (win_cnt > 1) {
                u64 subwin_size;
                dma_addr_t subwin_iova;
                u32 wnd;

                subwin_size = dma_domain->geom_size >> ilog2(win_cnt);
                subwin_iova = iova & ~(subwin_size - 1);
                wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size);
                win_ptr = &dma_domain->win_arr[wnd];
        }

        if (win_ptr->valid)
                return win_ptr->paddr + (iova & (win_ptr->size - 1));

        return 0;
}
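/* Program a SPAACE entry for each valid subwindow of the given LIODN. */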
static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
{
        struct dma_window *sub_win_ptr = &dma_domain->win_arr[0];
        int i, ret = 0;         /* stays 0 if no subwindow is valid */
        unsigned long rpn, flags;

        for (i = 0; i < dma_domain->win_cnt; i++) {
                if (sub_win_ptr[i].valid) {
                        rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT;
                        spin_lock_irqsave(&iommu_lock, flags);
                        ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
                                                 sub_win_ptr[i].size,
                                                 ~(u32)0,
                                                 rpn,
                                                 dma_domain->snoop_id,
                                                 dma_domain->stash_id,
                                                 (i > 0) ? 1 : 0,
                                                 sub_win_ptr[i].prot);
                        spin_unlock_irqrestore(&iommu_lock, flags);
                        if (ret) {
                                pr_debug("PAMU SPAACE configuration failed for liodn %d\n",
                                         liodn);
                                return ret;
                        }
                }
        }

        return ret;
}
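/* Program the PPAACE entry for a LIODN that uses a single window (no subwindows). */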
static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
{
        int ret;
        struct dma_window *wnd = &dma_domain->win_arr[0];
        phys_addr_t wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;
        unsigned long flags;

        spin_lock_irqsave(&iommu_lock, flags);
        ret = pamu_config_ppaace(liodn, wnd_addr,
                                 wnd->size,
                                 ~(u32)0,
                                 wnd->paddr >> PAMU_PAGE_SHIFT,
                                 dma_domain->snoop_id, dma_domain->stash_id,
                                 0, wnd->prot);
        spin_unlock_irqrestore(&iommu_lock, flags);
        if (ret)
                pr_debug("PAMU PAACE configuration failed for liodn %d\n",
                         liodn);

        return ret;
}

/* Map the DMA window corresponding to the LIODN */
static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
{
        if (dma_domain->win_cnt > 1)
                return map_subwins(liodn, dma_domain);
        else
                return map_win(liodn, dma_domain);
}
/* Update window/subwindow mapping for the LIODN */
static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
        int ret;
        struct dma_window *wnd = &dma_domain->win_arr[wnd_nr];
        unsigned long flags;

        spin_lock_irqsave(&iommu_lock, flags);
        if (dma_domain->win_cnt > 1) {
                ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr,
                                         wnd->size,
                                         ~(u32)0,
                                         wnd->paddr >> PAMU_PAGE_SHIFT,
                                         dma_domain->snoop_id,
                                         dma_domain->stash_id,
                                         (wnd_nr > 0) ? 1 : 0,
                                         wnd->prot);
                if (ret)
                        pr_debug("Subwindow reconfiguration failed for liodn %d\n",
                                 liodn);
        } else {
                phys_addr_t wnd_addr;

                wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;

                ret = pamu_config_ppaace(liodn, wnd_addr,
                                         wnd->size,
                                         ~(u32)0,
                                         wnd->paddr >> PAMU_PAGE_SHIFT,
                                         dma_domain->snoop_id, dma_domain->stash_id,
                                         0, wnd->prot);
                if (ret)
                        pr_debug("Window reconfiguration failed for liodn %d\n",
                                 liodn);
        }

        spin_unlock_irqrestore(&iommu_lock, flags);

        return ret;
}

static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
                              u32 val)
{
        int ret = 0, i;
        unsigned long flags;

        spin_lock_irqsave(&iommu_lock, flags);
        if (!dma_domain->win_arr) {
                pr_debug("Windows not configured, stash destination update failed for liodn %d\n",
                         liodn);
                spin_unlock_irqrestore(&iommu_lock, flags);
                return -EINVAL;
        }

        for (i = 0; i < dma_domain->win_cnt; i++) {
                ret = pamu_update_paace_stash(liodn, i, val);
                if (ret) {
                        pr_debug("Failed to update SPAACE %d field for liodn %d\n",
                                 i, liodn);
                        spin_unlock_irqrestore(&iommu_lock, flags);
                        return ret;
                }
        }

        spin_unlock_irqrestore(&iommu_lock, flags);

        return ret;
}
/* Set the geometry parameters for a LIODN */
static int pamu_set_liodn(int liodn, struct device *dev,
                          struct fsl_dma_domain *dma_domain,
                          struct iommu_domain_geometry *geom_attr,
                          u32 win_cnt)
{
        phys_addr_t window_addr, window_size;
        phys_addr_t subwin_size;
        int ret = 0, i;
        u32 omi_index = ~(u32)0;
        unsigned long flags;

        /*
         * Configure the omi_index at the geometry setup time.
         * This is a static value which depends on the type of
         * device and would not change thereafter.
         */
        get_ome_index(&omi_index, dev);

        window_addr = geom_attr->aperture_start;
        window_size = dma_domain->geom_size;

        spin_lock_irqsave(&iommu_lock, flags);
        ret = pamu_disable_liodn(liodn);
        if (!ret)
                ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index,
                                         0, dma_domain->snoop_id,
                                         dma_domain->stash_id, win_cnt, 0);
        spin_unlock_irqrestore(&iommu_lock, flags);
        if (ret) {
                pr_debug("PAMU PAACE configuration failed for liodn %d, win_cnt = %d\n",
                         liodn, win_cnt);
                return ret;
        }

        if (win_cnt > 1) {
                subwin_size = window_size >> ilog2(win_cnt);
                for (i = 0; i < win_cnt; i++) {
                        spin_lock_irqsave(&iommu_lock, flags);
                        ret = pamu_disable_spaace(liodn, i);
                        if (!ret)
                                ret = pamu_config_spaace(liodn, win_cnt, i,
                                                         subwin_size, omi_index,
                                                         0, dma_domain->snoop_id,
                                                         dma_domain->stash_id,
                                                         0, 0);
                        spin_unlock_irqrestore(&iommu_lock, flags);
                        if (ret) {
                                pr_debug("PAMU SPAACE configuration failed for liodn %d\n",
                                         liodn);
                                return ret;
                        }
                }
        }

        return ret;
}
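/*
 * Illustrative check_size() results (example values): a 16 MB window
 * (0x1000000) at iova 0x3000000 passes, since the size is a power of two
 * and the iova is size-aligned; the same window at iova 0x2800000 fails
 * with -EINVAL, because 0x2800000 is not a multiple of 16 MB.
 */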
static int check_size(u64 size, dma_addr_t iova)
{
        /*
         * Size must be a power of two and at least be equal
         * to PAMU page size.
         */
        if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
                pr_debug("Size too small or not a power of two\n");
                return -EINVAL;
        }

        /* iova must be size aligned */
        if (iova & (size - 1)) {
                pr_debug("Address is not aligned with window size\n");
                return -EINVAL;
        }

        return 0;
}
static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
{
        struct fsl_dma_domain *domain;

        domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
        if (!domain)
                return NULL;

        domain->stash_id = ~(u32)0;
        domain->snoop_id = ~(u32)0;
        domain->win_cnt = pamu_get_max_subwin_cnt();
        domain->geom_size = 0;

        INIT_LIST_HEAD(&domain->devices);
        spin_lock_init(&domain->domain_lock);

        return domain;
}

static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
{
        unsigned long flags;

        list_del(&info->link);
        spin_lock_irqsave(&iommu_lock, flags);
        if (win_cnt > 1)
                pamu_free_subwins(info->liodn);
        pamu_disable_liodn(info->liodn);
        spin_unlock_irqrestore(&iommu_lock, flags);
        spin_lock_irqsave(&device_domain_lock, flags);
        info->dev->archdata.iommu_domain = NULL;
        kmem_cache_free(iommu_devinfo_cache, info);
        spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
{
        struct device_domain_info *info, *tmp;
        unsigned long flags;

        spin_lock_irqsave(&dma_domain->domain_lock, flags);
        /* Remove the device from the domain device list */
        list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
                if (!dev || (info->dev == dev))
                        remove_device_ref(info, dma_domain->win_cnt);
        }
        spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}
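/*
 * Link a device and one of its LIODNs to the DMA domain.  A device that
 * is already attached to a different domain is detached from it first.
 */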
static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
{
        struct device_domain_info *info, *old_domain_info;
        unsigned long flags;

        spin_lock_irqsave(&device_domain_lock, flags);
        /*
         * Check here if the device is already attached to domain or not.
         * If the device is already attached to a domain detach it.
         */
        old_domain_info = dev->archdata.iommu_domain;
        if (old_domain_info && old_domain_info->domain != dma_domain) {
                spin_unlock_irqrestore(&device_domain_lock, flags);
                detach_device(dev, old_domain_info->domain);
                spin_lock_irqsave(&device_domain_lock, flags);
        }

        info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
        if (!info) {
                /* Allocation failure: leave the device unattached */
                spin_unlock_irqrestore(&device_domain_lock, flags);
                return;
        }

        info->dev = dev;
        info->liodn = liodn;
        info->domain = dma_domain;

        list_add(&info->link, &dma_domain->devices);
        /*
         * In case of devices with multiple LIODNs just store
         * the info for the first LIODN as all
         * LIODNs share the same domain
         */
        if (!dev->archdata.iommu_domain)
                dev->archdata.iommu_domain = info;
        spin_unlock_irqrestore(&device_domain_lock, flags);
}
static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
                                         dma_addr_t iova)
{
        struct fsl_dma_domain *dma_domain = domain->priv;

        if (iova < domain->geometry.aperture_start ||
            iova > domain->geometry.aperture_end)
                return 0;

        return get_phys_addr(dma_domain, iova);
}

static int fsl_pamu_domain_has_cap(struct iommu_domain *domain,
                                   unsigned long cap)
{
        return cap == IOMMU_CAP_CACHE_COHERENCY;
}

static void fsl_pamu_domain_destroy(struct iommu_domain *domain)
{
        struct fsl_dma_domain *dma_domain = domain->priv;

        domain->priv = NULL;

        /* remove all the devices from the device list */
        detach_device(NULL, dma_domain);

        dma_domain->enabled = 0;
        dma_domain->mapped = 0;

        kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}

static int fsl_pamu_domain_init(struct iommu_domain *domain)
{
        struct fsl_dma_domain *dma_domain;

        dma_domain = iommu_alloc_dma_domain();
        if (!dma_domain) {
                pr_debug("dma_domain allocation failed\n");
                return -ENOMEM;
        }
        domain->priv = dma_domain;
        dma_domain->iommu_domain = domain;
        /* default geometry: 64 GB, i.e. the maximum 36-bit system address space */
        domain->geometry.aperture_start = 0;
        domain->geometry.aperture_end = (1ULL << 36) - 1;
        domain->geometry.force_aperture = true;

        return 0;
}
/* Configure geometry settings for all LIODNs associated with domain */
static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
                                    struct iommu_domain_geometry *geom_attr,
                                    u32 win_cnt)
{
        struct device_domain_info *info;
        int ret = 0;

        list_for_each_entry(info, &dma_domain->devices, link) {
                ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
                                     geom_attr, win_cnt);
                if (ret)
                        break;
        }

        return ret;
}

/* Update stash destination for all LIODNs associated with the domain */
static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
{
        struct device_domain_info *info;
        int ret = 0;

        list_for_each_entry(info, &dma_domain->devices, link) {
                ret = update_liodn_stash(info->liodn, dma_domain, val);
                if (ret)
                        break;
        }

        return ret;
}

/* Update domain mappings for all LIODNs associated with the domain */
static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
        struct device_domain_info *info;
        int ret = 0;

        list_for_each_entry(info, &dma_domain->devices, link) {
                ret = update_liodn(info->liodn, dma_domain, wnd_nr);
                if (ret)
                        break;
        }

        return ret;
}

static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
        struct device_domain_info *info;
        int ret = 0;

        list_for_each_entry(info, &dma_domain->devices, link) {
                if (dma_domain->win_cnt == 1 && dma_domain->enabled) {
                        ret = pamu_disable_liodn(info->liodn);
                        if (!ret)
                                dma_domain->enabled = 0;
                } else {
                        ret = pamu_disable_spaace(info->liodn, wnd_nr);
                }
        }

        return ret;
}
static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
        struct fsl_dma_domain *dma_domain = domain->priv;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dma_domain->domain_lock, flags);
        if (!dma_domain->win_arr) {
                pr_debug("Number of windows not configured\n");
                spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                return;
        }

        if (wnd_nr >= dma_domain->win_cnt) {
                pr_debug("Invalid window index\n");
                spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                return;
        }

        if (dma_domain->win_arr[wnd_nr].valid) {
                ret = disable_domain_win(dma_domain, wnd_nr);
                if (!ret) {
                        dma_domain->win_arr[wnd_nr].valid = 0;
                        dma_domain->mapped--;
                }
        }

        spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}
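/*
 * Map DMA window wnd_nr of the domain to the physical range starting at
 * paddr.  The window must currently be invalid; to change an existing
 * mapping, disable the window first.
 */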
static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
                                  phys_addr_t paddr, u64 size, int prot)
{
        struct fsl_dma_domain *dma_domain = domain->priv;
        struct dma_window *wnd;
        int pamu_prot = 0;
        int ret;
        unsigned long flags;
        u64 win_size;

        if (prot & IOMMU_READ)
                pamu_prot |= PAACE_AP_PERMS_QUERY;
        if (prot & IOMMU_WRITE)
                pamu_prot |= PAACE_AP_PERMS_UPDATE;

        spin_lock_irqsave(&dma_domain->domain_lock, flags);
        if (!dma_domain->win_arr) {
                pr_debug("Number of windows not configured\n");
                spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                return -ENODEV;
        }

        if (wnd_nr >= dma_domain->win_cnt) {
                pr_debug("Invalid window index\n");
                spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                return -EINVAL;
        }

        win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
        if (size > win_size) {
                pr_debug("Invalid window size\n");
                spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                return -EINVAL;
        }

        if (dma_domain->win_cnt == 1) {
                if (dma_domain->enabled) {
                        pr_debug("Disable the window before updating the mapping\n");
                        spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                        return -EBUSY;
                }

                ret = check_size(size, domain->geometry.aperture_start);
                if (ret) {
                        pr_debug("Aperture start not aligned to the size\n");
                        spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                        return -EINVAL;
                }
        }

        wnd = &dma_domain->win_arr[wnd_nr];
        if (!wnd->valid) {
                wnd->paddr = paddr;
                wnd->size = size;
                wnd->prot = pamu_prot;

                ret = update_domain_mapping(dma_domain, wnd_nr);
                if (!ret) {
                        wnd->valid = 1;
                        dma_domain->mapped++;
                }
        } else {
                pr_debug("Disable the window before updating the mapping\n");
                ret = -EBUSY;
        }

        spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

        return ret;
}
/*
 * Attach the LIODN to the DMA domain and configure the geometry
 * and window mappings.
 */
static int handle_attach_device(struct fsl_dma_domain *dma_domain,
                                struct device *dev, const u32 *liodn,
                                int num)
{
        unsigned long flags;
        struct iommu_domain *domain = dma_domain->iommu_domain;
        int ret = 0;
        int i;

        spin_lock_irqsave(&dma_domain->domain_lock, flags);
        for (i = 0; i < num; i++) {
                /* Ensure that LIODN value is valid */
                if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
                        pr_debug("Invalid liodn %d, attach device failed for %s\n",
                                 liodn[i], dev->of_node->full_name);
                        ret = -EINVAL;
                        break;
                }

                attach_device(dma_domain, liodn[i], dev);
                /*
                 * Check if geometry has already been configured
                 * for the domain. If yes, set the geometry for
                 * the LIODN.
                 */
                if (dma_domain->win_arr) {
                        u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;

                        ret = pamu_set_liodn(liodn[i], dev, dma_domain,
                                             &domain->geometry,
                                             win_cnt);
                        if (ret)
                                break;
                        if (dma_domain->mapped) {
                                /*
                                 * Create window/subwindow mapping for
                                 * the LIODN.
                                 */
                                ret = map_liodn(liodn[i], dma_domain);
                                if (ret)
                                        break;
                        }
                }
        }
        spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

        return ret;
}
static int fsl_pamu_attach_device(struct iommu_domain *domain,
                                  struct device *dev)
{
        struct fsl_dma_domain *dma_domain = domain->priv;
        const u32 *liodn;
        u32 liodn_cnt;
        int len, ret = 0;
        struct pci_dev *pdev = NULL;
        struct pci_controller *pci_ctl;

        /*
         * Use LIODN of the PCI controller while attaching a
         * PCI device.
         */
        if (dev_is_pci(dev)) {
                pdev = to_pci_dev(dev);
                pci_ctl = pci_bus_to_host(pdev->bus);
                /*
                 * make dev point to pci controller device
                 * so we can get the LIODN programmed by
                 * u-boot.
                 */
                dev = pci_ctl->parent;
        }

        liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
        if (liodn) {
                liodn_cnt = len / sizeof(u32);
                ret = handle_attach_device(dma_domain, dev,
                                           liodn, liodn_cnt);
        } else {
                pr_debug("missing fsl,liodn property at %s\n",
                         dev->of_node->full_name);
                ret = -EINVAL;
        }

        return ret;
}

static void fsl_pamu_detach_device(struct iommu_domain *domain,
                                   struct device *dev)
{
        struct fsl_dma_domain *dma_domain = domain->priv;
        const u32 *prop;
        int len;
        struct pci_dev *pdev = NULL;
        struct pci_controller *pci_ctl;

        /*
         * Use LIODN of the PCI controller while detaching a
         * PCI device.
         */
        if (dev_is_pci(dev)) {
                pdev = to_pci_dev(dev);
                pci_ctl = pci_bus_to_host(pdev->bus);
                /*
                 * make dev point to pci controller device
                 * so we can get the LIODN programmed by
                 * u-boot.
                 */
                dev = pci_ctl->parent;
        }

        prop = of_get_property(dev->of_node, "fsl,liodn", &len);
        if (prop)
                detach_device(dev, dma_domain);
        else
                pr_debug("missing fsl,liodn property at %s\n",
                         dev->of_node->full_name);
}
static int configure_domain_geometry(struct iommu_domain *domain, void *data)
{
        struct iommu_domain_geometry *geom_attr = data;
        struct fsl_dma_domain *dma_domain = domain->priv;
        dma_addr_t geom_size;
        unsigned long flags;

        geom_size = geom_attr->aperture_end - geom_attr->aperture_start + 1;
        /*
         * Sanity check the geometry size. Also, we do not support
         * DMA outside of the geometry.
         */
        if (check_size(geom_size, geom_attr->aperture_start) ||
            !geom_attr->force_aperture) {
                pr_debug("Invalid PAMU geometry attributes\n");
                return -EINVAL;
        }

        spin_lock_irqsave(&dma_domain->domain_lock, flags);
        if (dma_domain->enabled) {
                pr_debug("Can't set geometry attributes as domain is active\n");
                spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                return -EBUSY;
        }

        /* Copy the domain geometry information */
        memcpy(&domain->geometry, geom_attr,
               sizeof(struct iommu_domain_geometry));
        dma_domain->geom_size = geom_size;

        spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

        return 0;
}
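/*
 * Illustrative use of the stash attribute (struct and constants as
 * declared in asm/fsl_pamu_stash.h), directing DMA data into CPU 0's
 * L1 cache:
 *
 *      struct pamu_stash_attribute stash = {
 *              .cpu   = 0,
 *              .cache = PAMU_ATTR_CACHE_L1,
 *      };
 *      iommu_domain_set_attr(domain, DOMAIN_ATTR_FSL_PAMU_STASH, &stash);
 */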
/* Set the domain stash attribute */
static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
{
        struct pamu_stash_attribute *stash_attr = data;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dma_domain->domain_lock, flags);

        memcpy(&dma_domain->dma_stash, stash_attr,
               sizeof(struct pamu_stash_attribute));

        dma_domain->stash_id = get_stash_id(stash_attr->cache,
                                            stash_attr->cpu);
        if (dma_domain->stash_id == ~(u32)0) {
                pr_debug("Invalid stash attributes\n");
                spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                return -EINVAL;
        }

        ret = update_domain_stash(dma_domain, dma_domain->stash_id);

        spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

        return ret;
}

/* Configure domain dma state i.e. enable/disable DMA */
static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
{
        struct device_domain_info *info;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dma_domain->domain_lock, flags);

        if (enable && !dma_domain->mapped) {
                pr_debug("Can't enable DMA domain without valid mapping\n");
                spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                return -ENODEV;
        }

        dma_domain->enabled = enable;
        list_for_each_entry(info, &dma_domain->devices, link) {
                ret = (enable) ? pamu_enable_liodn(info->liodn) :
                        pamu_disable_liodn(info->liodn);
                if (ret)
                        pr_debug("Unable to set dma state for liodn %d\n",
                                 info->liodn);
        }
        spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

        return 0;
}
static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
                                    enum iommu_attr attr_type, void *data)
{
        struct fsl_dma_domain *dma_domain = domain->priv;
        int ret = 0;

        switch (attr_type) {
        case DOMAIN_ATTR_GEOMETRY:
                ret = configure_domain_geometry(domain, data);
                break;
        case DOMAIN_ATTR_FSL_PAMU_STASH:
                ret = configure_domain_stash(dma_domain, data);
                break;
        case DOMAIN_ATTR_FSL_PAMU_ENABLE:
                ret = configure_domain_dma_state(dma_domain, *(int *)data);
                break;
        default:
                pr_debug("Unsupported attribute type\n");
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
                                    enum iommu_attr attr_type, void *data)
{
        struct fsl_dma_domain *dma_domain = domain->priv;
        int ret = 0;

        switch (attr_type) {
        case DOMAIN_ATTR_FSL_PAMU_STASH:
                memcpy(data, &dma_domain->dma_stash,
                       sizeof(struct pamu_stash_attribute));
                break;
        case DOMAIN_ATTR_FSL_PAMU_ENABLE:
                *(int *)data = dma_domain->enabled;
                break;
        case DOMAIN_ATTR_FSL_PAMUV1:
                *(int *)data = DOMAIN_ATTR_FSL_PAMUV1;
                break;
        default:
                pr_debug("Unsupported attribute type\n");
                ret = -EINVAL;
                break;
        }

        return ret;
}
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
        struct iommu_group *group;

        group = iommu_group_get(dev);
        if (!group)
                group = iommu_group_alloc();

        return group;
}

static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
{
        u32 version;

        /* Check the PCI controller version number by reading the BRR1 register */
        version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
        version &= PCI_FSL_BRR1_VER;
        /* If the PCI controller version is >= 0x204 we can partition endpoints */
        return version >= 0x204;
}

/* Get iommu group information from peer devices or devices on the parent bus */
static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
{
        struct pci_dev *tmp;
        struct iommu_group *group;
        struct pci_bus *bus = pdev->bus;

        /*
         * Traverse the pci bus device list to get
         * the shared iommu group.
         */
        while (bus) {
                list_for_each_entry(tmp, &bus->devices, bus_list) {
                        if (tmp == pdev)
                                continue;
                        group = iommu_group_get(&tmp->dev);
                        if (group)
                                return group;
                }

                bus = bus->parent;
        }

        return NULL;
}
static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
        struct pci_controller *pci_ctl;
        bool pci_endpt_partioning;
        struct iommu_group *group = NULL;

        pci_ctl = pci_bus_to_host(pdev->bus);
        pci_endpt_partioning = check_pci_ctl_endpt_part(pci_ctl);
        /* We can partition PCIe devices so assign device group to the device */
        if (pci_endpt_partioning) {
                group = iommu_group_get_for_dev(&pdev->dev);

                /*
                 * The PCIe controller is not a partitionable entity;
                 * free the controller device iommu_group.
                 */
                if (pci_ctl->parent->iommu_group)
                        iommu_group_remove_device(pci_ctl->parent);
        } else {
                /*
                 * All devices connected to the controller will share the
                 * PCI controller's device group. If this is the first
                 * device to be probed for the pci controller, copy the
                 * device group information from the PCI controller device
                 * node and remove the PCI controller iommu group.
                 * For subsequent devices, the iommu group information can
                 * be obtained from sibling devices (i.e. from the bus_devices
                 * link list).
                 */
                if (pci_ctl->parent->iommu_group) {
                        group = get_device_iommu_group(pci_ctl->parent);
                        iommu_group_remove_device(pci_ctl->parent);
                } else {
                        group = get_shared_pci_device_group(pdev);
                }
        }

        if (!group)
                group = ERR_PTR(-ENODEV);

        return group;
}
static int fsl_pamu_add_device(struct device *dev)
{
        struct iommu_group *group = ERR_PTR(-ENODEV);
        struct pci_dev *pdev;
        const u32 *prop;
        int ret = 0, len;

        /*
         * For platform devices we allocate a separate group for
         * each of the devices.
         */
        if (dev_is_pci(dev)) {
                pdev = to_pci_dev(dev);
                /* Don't create device groups for virtual PCI bridges */
                if (pdev->subordinate)
                        return 0;

                group = get_pci_device_group(pdev);
        } else {
                prop = of_get_property(dev->of_node, "fsl,liodn", &len);
                if (prop)
                        group = get_device_iommu_group(dev);
        }

        if (IS_ERR(group))
                return PTR_ERR(group);

        /*
         * Check if device has already been added to an iommu group.
         * Group could have already been created for a PCI device in
         * the iommu_group_get_for_dev path.
         */
        if (!dev->iommu_group)
                ret = iommu_group_add_device(group, dev);

        iommu_group_put(group);

        return ret;
}

static void fsl_pamu_remove_device(struct device *dev)
{
        iommu_group_remove_device(dev);
}
static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
{
        struct fsl_dma_domain *dma_domain = domain->priv;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dma_domain->domain_lock, flags);
        /* Ensure domain is inactive i.e. DMA should be disabled for the domain */
        if (dma_domain->enabled) {
                pr_debug("Can't set geometry attributes as domain is active\n");
                spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                return -EBUSY;
        }

        /* Ensure that the geometry has been set for the domain */
        if (!dma_domain->geom_size) {
                pr_debug("Please configure geometry before setting the number of windows\n");
                spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                return -EINVAL;
        }

        /*
         * Ensure we have a valid window count i.e. it should be less than
         * the maximum permissible limit and should be a power of two.
         */
        if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
                pr_debug("Invalid window count\n");
                spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                return -EINVAL;
        }

        ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
                                       w_count > 1 ? w_count : 0);
        if (!ret) {
                kfree(dma_domain->win_arr);
                dma_domain->win_arr = kzalloc(sizeof(struct dma_window) *
                                              w_count, GFP_ATOMIC);
                if (!dma_domain->win_arr) {
                        spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                        return -ENOMEM;
                }
                dma_domain->win_cnt = w_count;
        }
        spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

        return ret;
}

static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
{
        struct fsl_dma_domain *dma_domain = domain->priv;

        return dma_domain->win_cnt;
}
static const struct iommu_ops fsl_pamu_ops = {
        .domain_init            = fsl_pamu_domain_init,
        .domain_destroy         = fsl_pamu_domain_destroy,
        .attach_dev             = fsl_pamu_attach_device,
        .detach_dev             = fsl_pamu_detach_device,
        .domain_window_enable   = fsl_pamu_window_enable,
        .domain_window_disable  = fsl_pamu_window_disable,
        .domain_get_windows     = fsl_pamu_get_windows,
        .domain_set_windows     = fsl_pamu_set_windows,
        .iova_to_phys           = fsl_pamu_iova_to_phys,
        .domain_has_cap         = fsl_pamu_domain_has_cap,
        .domain_set_attr        = fsl_pamu_set_domain_attr,
        .domain_get_attr        = fsl_pamu_get_domain_attr,
        .add_device             = fsl_pamu_add_device,
        .remove_device          = fsl_pamu_remove_device,
};
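/*
 * A minimal usage sketch (illustrative only; "dev" and "phys_base" are
 * placeholders) of the sequence a caller such as VFIO would follow with
 * this driver: configure the geometry, pick a window count, attach the
 * device, map a window, and only then enable DMA:
 *
 *      struct iommu_domain_geometry geom = {
 *              .aperture_start = 0,
 *              .aperture_end   = (1ULL << 30) - 1,     (a 1 GB aperture)
 *              .force_aperture = true,
 *      };
 *      u32 wins = 1;
 *      int enable = 1;
 *      struct iommu_domain *domain = iommu_domain_alloc(&pci_bus_type);
 *
 *      iommu_domain_set_attr(domain, DOMAIN_ATTR_GEOMETRY, &geom);
 *      iommu_domain_set_attr(domain, DOMAIN_ATTR_WINDOWS, &wins);
 *      iommu_attach_device(domain, dev);
 *      iommu_domain_window_enable(domain, 0, phys_base, 1ULL << 30,
 *                                 IOMMU_READ | IOMMU_WRITE);
 *      iommu_domain_set_attr(domain, DOMAIN_ATTR_FSL_PAMU_ENABLE, &enable);
 */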
int pamu_domain_init(void)
{
        int ret = 0;

        ret = iommu_init_mempool();
        if (ret)
                return ret;

        bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
        bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);

        return ret;
}